| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
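Each row appears to store one source file split into three contiguous text segments (prefix, middle, suffix), so concatenating the segments recovers the original file. A minimal sketch, assuming a row is exposed as a plain dict keyed by the column names above (the `reassemble` helper is hypothetical):
# Rebuild the original source text of one dataset row from its segments.
def reassemble(row):
    return row["prefix"] + row["middle"] + row["suffix"]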
| ctuning/ck-env | soft/env.msys2/customize.py | Python | bsd-3-clause | 1,741 | 0.029868 |
#
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
import os
extra_dirs=['C:\\msys64', 'C:\\tools\\msys2', 'D:\\msys64', 'D:\\tools\\msys2']
##############################################################################
# customize directories to automatically find and register software
def dirs(i):
hosd=i['host_os_dict']
phosd=hosd.get('ck_name','')
dirs=i.get('dirs', [])
if phosd=='win':
for d in extra_dirs:
if os.path.isdir(d):
dirs.append(d)
return {'return':0, 'dirs':dirs}
##############################################################################
# parse software version
def parse_version(i):
lst=i['output']
ver=''
for q in lst:
q=q.strip()
if q!='':
j=q.lower().find('version ')
if j>0:
ver=q[j+8:].strip()
j2=ver.find(' ')
if j2>0:
ver=ver[:j2]
ver=ver.strip()
break
return {'return':0, 'version':ver}
##############################################################################
# setup environment
def setup(i):
s=''
cus=i['customize']
env=i['env']
fp=cus.get('full_path','')
ep=cus['env_prefix']
if fp=='':
return {'return':1, 'error':'full path required by the soft customization script is empty'}
p1=os.path.dirname(fp)
p2=os.path.dirname(p1)
p3=os.path.dirname(p2)
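# Illustration with a hypothetical full_path such as C:\msys64\usr\bin\bash.exe:
# p1 = C:\msys64\usr\bin, p2 = C:\msys64\usr, p3 = C:\msys64 (the msys2 root)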
env[ep]=p3
env[ep+'_BIN']=p1
env[ep+'_BASH']=fp
return {'return':0, 'bat':s}
| aowen87/PhyloViewer | src/condense.py | Python | gpl-3.0 | 3,122 | 0.012812 |
#! /usr/bin/python
'''
@author: Alister Maguire
Given a counts file and a taxa file, condense
repeated genera and their counts, and output
a file that maps genus names to their counts
for each experiment.
'''
import argparse
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("counts_file")
parser.add_argument("taxa_file")
args = parser.parse_args()
taxa_f = open(args.taxa_file, "r")
counts_f = open(args.counts_file, "r")
condensed_counts = []
genus_dct = {}
genus_lst = []
count_lst = []
count_dct = {}
taxa = taxa_f.readlines()
counts = counts_f.readlines()
for c in counts:
count_lst.append(c.split())
#create a dictionary that associates
#experiment IDs with lists for counts
c_size = len(counts)
for i in range(1, c_size):
count_dct[count_lst[i][0]] = []
#retrieve the genus names and their
#associated OTU values (look for repeats)
for i in range(len(taxa)):
taxa[i] = taxa[i].split()
j = -3
genus = taxa[i][j]
j -= 1
#condense genus names that have been
#split into pieces
while not is_number(taxa[i][j]):
genus = taxa[i][j] + " " + genus
j -= 1
#if genus in exempt:
# continue
if genus not in genus_dct:
genus_dct[genus] = []
genus_dct[genus].append(taxa[i][0])
genus_lst.append(genus)
g_size = len(genus_lst)
#create a list for condensed counts
#that we can use to map genus' with their counts
for i in range(1, len(count_lst)):
condensed_counts.append([])
condensed_counts[i-1] = ([0]*(g_size+1))
for i in range(0, g_size):
for j in range(1, len(count_lst)):
total = 0
for otu in genus_dct[genus_lst[i]]:
#the otu number is an index into the counts list
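#e.g. a label of the (assumed) form "OTU12" yields idx = 12 + 1 = 13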
idx = int(otu[3:]) + 1
total += int(count_lst[j][idx])
condensed_counts[j-1][0] = count_lst[j][0]
condensed_counts[j-1][i] = total
genus_counts_f = open("condensed_counts.txt", "w+")
#Write the new file that associates genus names
#with experiment counts. The first line of the
#file contains all of the genus names, and the position
#of this name is an index into the experiment counts.
#The following lines are of the form
# Experiment_ID, count0, count1, ...., countn
#
genus_keys = ""
for genus in genus_lst:
genus_keys = genus_keys + ", " + genus
genus_keys = genus_keys[2:] + "\n"
genus_counts_f.write(genus_keys)
for row in condensed_counts:
exp_counts = ""
for col in row:
exp_counts = exp_counts + ", " + str(col)
exp_counts = exp_counts[2:] + "\n"
genus_counts_f.write(exp_counts)
genus_counts_f.close()
taxa_f.close()
counts_f.close()
| gungorbudak/sslsa | sslsa.py | Python | mit | 4,211 | 0.00095 |
#!C:\Python27
# sslsa.py
# Structural Superimposition of Local Sequence Alignment
# A program which finds out whether a local sequence
# alignment of two protein sequences also implies structural
# similarity of the aligned parts
import os
import sys
import glob
from Bio.PDB import *
from Bio import pairwise2
from Bio.SubsMat import MatrixInfo as matlist
# Obtain structures directory
str_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'structures')
# Create it if it doesn't exist
if not os.path.isdir(str_dir):
os.makedirs(str_dir)
# Get PDB IDs from the user
if len(sys.argv) > 2:
pdb_ids = [sys.argv[1], sys.argv[2]]
else:
sys.exit("Two separate valid PDB IDs must be given")
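# Hypothetical invocation (the PDB IDs and penalties below are placeholders, not from the original):
#   python sslsa.py 1HHO 2HHB -10 -5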
# Initiate PDB list object
pdb_list = PDBList(server="http://www.rcsb.org/pdb/files")
# Retrieve PDB files from the server to structures directory
pdb_list.retrieve_pdb_file(pdb_ids[0], obsolete=False, pdir=str_dir)
pdb_list.retrieve_pdb_file(pdb_ids[1], obsolete=False, pdir=str_dir)
# Generate PDB file paths
pdb_paths = [''.join(glob.glob(os.path.join(str_dir, '*' + pdb_ids[0] + '.ent'))), ''.join(glob.glob(os.path.join(str_dir, '*' + pdb_ids[1] + '.ent')))]
# Initiate PDB parser object
pdb_parser = PDBParser(QUIET=True)
# Generate PDB structures using PDB parser
pdb_strs = [pdb_parser.get_structure(pdb_ids[0], pdb_paths[0]), pdb_parser.get_structure(pdb_ids[1], pdb_paths[1])]
# Initiate an empty list for storing PDB sequences
pdb_seqs = ["", ""]
# Initiate CA polypeptide builder used to get sequences of each protein
ppb = CaPPBuilder()
for i in range(len(pdb_seqs)):
for pp in ppb.build_peptides(pdb_strs[i]):
pdb_seqs[i] += str(pp.get_sequence())
# Get BLOSUM62 matrix
matrix = matlist.blosum62
# Set gap penalties or get from the user
if len(sys.argv) >= 4:
gap_open = int(sys.argv[3])
else:
gap_open = -10
if len(sys.argv) == 5:
gap_extend = int(sys.argv[4])
else:
gap_extend = -5
# Do the pairwise alignment and get alignments
alns = pairwise2.align.localds(pdb_seqs[0], pdb_seqs[1], matrix, gap_open, gap_extend)
# Obtain the best alignment
best_aln = alns[0]
# Decompose best alignment into its components
aln_first, aln_second, score, begin, end = best_aln
# Print the alignment and alignment length
print aln_first[begin:end] + "\n" + aln_second[begin:end]
print "Alignment length: " + str(end - begin)
# Initiate an empty list to store atom objects
pdb_atms = [[], []]
for i in range(len(pdb_atms)):
# Get only the first model and use it
model = pdb_strs[i][0]
for chain in model:
for residue in chain:
# Only if the residue has CA atom
if "CA" in residue:
# Append the atom object
pdb_atms[i].append(residue["CA"])
# Initiate another empty list for mapping the atom objects
pdb_atms_mapped = [[], []]
# i is the index for the two alignments, j is for the first
# atom object list and k is for the other atom object list
i, j, k = 0, 0, 0
while i < len(aln_first[:end]):
# Check if there is no gap in either part of
# the alignment because there will be no atom
# for ones with -
if aln_first[i] != "-" and aln_second[i] != "-":
# Check if it's the beginning of the alignment
# here's where we need to start mapping
if i >= begin:
# Append the atom objects accordingly
pdb_atms_mapped[0].append(pdb_atms[0][j])
pdb_atms_mapped[1].append(pdb_atms[1][k])
# Move j to the next amino acid if it wasn't a gap
# that is we put its atom object in the previous
# step. If it's a gap, stay at the same atom object
if aln_first[i] != "-":
j += 1
# Move k to the next amino acid if it wasn't a gap
# that is we put its atom object in the previous
# step. If it's a gap, stay at the same atom object
if aln_second[i] != "-":
k += 1
# Move i to the next amino acid in the alignment
# because we process it no matter what
i += 1
# Initiate the superimposer
superimposer = Superimposer()
# Set (translate/rotate) atoms minimizing RMSD
superimposer.set_atoms(pdb_atms_mapped[0], pdb_atms_mapped[1])
# Print RMSD
print "RMSD: " + str(superimposer.rms)
| diogo149/Lasagne | lasagne/tests/layers/test_input.py | Python | mit | 1,255 | 0.001594 |
import numpy
import pytest
import theano
class TestInputLayer:
@pytest.fixture
def layer(self):
from lasagne.layers.input import InputLayer
return InputLayer((3, 2))
def test_input_var(self, layer):
assert layer.input_var.ndim == 2
def test_get_output_shape(self, layer):
assert layer.get_output_shape() == (3, 2)
def test_get_output_without_arguments(self, layer):
assert layer.get_output() is layer.input_var
def test_get_output_input_is_variable(self, layer):
variable = theano.Variable("myvariable")
assert layer.get_output(variable) is variable
def test_get_output_input_is_array(self, layer):
input = [[1,2,3]]
output = layer.get_output(input)
assert numpy.all(output.eval() == input)
def test_get_output_input_is_a_mapping(self, layer):
input = {layer: theano.tensor.matrix()}
assert layer.get_output(input) is input[layer]
def test_input_var_name(self, layer):
assert layer.input_var.name == "input"
def test_named_layer_input_var_name(self):
from lasagne.layers.input import InputLayer
layer = InputLayer((3, 2), name="foo")
assert layer.input_var.name == "foo.input"
| 4shadoww/usploit | lib/dns/rdtypes/ANY/HINFO.py | Python | mit | 2,266 | 0.000883 |
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.immutable
import dns.rdata
import dns.tokenizer
@dns.immutable.immutable
class HINFO(dns.rdata.Rdata):
"""HINFO record"""
# see: RFC 1035
__slots__ = ['cpu', 'os']
def __init__(self, rdclass, rdtype, cpu, os):
super().__init__(rdclass, rdtype)
self.cpu = self._as_bytes(cpu, True, 255)
self.os = self._as_bytes(os, True, 255)
def to_text(self, origin=None, relativize=True, **kw):
return '"{}" "{}"'.format(dns.rdata._escapify(self.cpu),
dns.rdata._escapify(self.os))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True,
relativize_to=None):
cpu = tok.get_string(max_length=255)
os = tok.get_string(max_length=255)
return cls(rdclass, rdtype, cpu, os)
def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
l = len(self.cpu)
assert l < 256
file.write(struct.pack('!B', l))
file.write(self.cpu)
l = len(self.os)
assert l < 256
file.write(struct.pack('!B', l))
file.write(self.os)
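# Wire form produced above: a one-byte length followed by the CPU string, then a
# one-byte length followed by the OS string (hence the l < 256 assertions).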
@classmethod
def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
cpu = parser.get_counted_bytes()
os = parser.get_counted_bytes()
return cls(rdclass, rdtype, cpu, os)
| jlorieau/mollib | mollib/core/topology.py | Python | gpl-3.0 | 14,201 | 0.000282 |
# Bonding Topology of heavy atoms. This is a dict of a dict of sets
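# 'C-1' and 'N+1' below appear to denote the carbonyl C of the preceding residue and the
# amide N of the following residue, i.e. the inter-residue peptide bonds.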
topology = {'PRO': {'N': {'C-1', 'CA', 'CD'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'CG', 'HB2', 'HB3'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'CD', 'HG2', 'HG3'},
'HG2': {'CG'},
'HG3': {'CG'},
'CD': {'CG', 'N', 'HD2', 'HD3'},
'HD2': {'CD'},
'HD3': {'CD'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'GLY': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'HA2', 'HA3'},
'HA2': {'CA'},
'HA3': {'CA'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'ALA': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB1', 'HB2', 'HB3'},
'HB1': {'CB'},
'HB2': {'CB'},
'HB3': {'CB'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'ARG': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'HG2', 'HG3', 'CD'},
'HG2': {'CG'},
'HG3': {'CG'},
'CD': {'CG', 'HD2', 'HD3', 'NE'},
'HD2': {'CD'},
'HD3': {'CD'},
'NE': {'CD', 'HE', 'CZ'},
'HE': {'NE'},
'CZ': {'NE', 'NH1', 'NH2',},
'NH1': {'CZ', 'HH11', 'HH12'},
'HH11': {'NH1'},
'HH12': {'NH1'},
'NH2': {'CZ', 'HH21', 'HH22'},
'HH21': {'NH2'},
'HH22': {'NH2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'ASN': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'OD1', 'ND2'},
'OD1': {'CG'},
'ND2': {'CG', 'HD21', 'HD22'},
'HD21': {'ND2'},
'HD22': {'ND2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'ASP': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'OD1', 'OD2'},
'OD1': {'CG','HD1'},
'HD1': {'OD1'},
'OD2': {'CG','HD2'},
'HD2': {'OD2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'CYS': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CB'},
'CB': {'CA', 'SG', 'HB2', 'HB3'},
'HB2': {'CB'},
'HB3': {'CB'},
'SG': {'CB', 'HG'},
'HG': {'SG'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'GLN': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'HG2', 'HG3', 'CD'},
'HG2': {'CG'},
'HG3': {'CG'},
'CD': {'CG', 'OE1', 'NE2'},
'OE1': {'CD'},
'NE2': {'CD', 'HE21', 'HE22'},
'HE21': {'NE2'},
'HE22': {'NE2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'GLU': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'HG2', 'HG3', 'CD'},
'HG2': {'CG'},
'HG3': {'CG'},
'CD': {'CG', 'OE1', 'OE2'},
'OE1': {'CD', 'HE1'},
'HE1': {'OE1'},
'OE2': {'CD', 'HE2'},
'HE2': {'OE2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'HIS': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'ND1', 'CD2'},
'ND1': {'CG', 'CE1', 'HD1'},
'HD1': {'ND1'},
'CE1': {'ND1', 'NE2', 'HE1'},
'HE1': {'CE1'},
'NE2': {'CE1', 'CD2', 'HE2'},
'HE2': {'NE2'},
'CD2': {'CG', 'NE2', 'HD2'},
'HD2': {'CD2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'ILE': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB', 'CG1', 'CG2'},
'HB': {'CB'},
'CG1': {'CB', 'HG12', 'HG13', 'CD1'},
'HG12': {'CG1'},
'HG13': {'CG1'},
'CD1': {'CG1', 'HD11', 'HD12', 'HD13'},
'HD11': {'CD1'},
'HD12': {'CD1'},
'HD13': {'CD1'},
'CG2': {'CB', 'HG21', 'HG22', 'HG23'},
'HG21': {'CG2'},
'HG22': {'CG2'},
'HG23': {'CG2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'LEU': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'CD1', 'CD2', 'HG'},
'HG': {'CG'},
'CD1': {'CG', 'HD11', 'HD12', 'HD13'},
'HD11': {'CD1'},
'HD12': {'CD1'},
'HD13': {'CD1'},
'CD2': {'CG', 'HD21', 'HD22', 'HD23'},
'HD21': {'CD2'},
'HD22': {'CD2'},
'HD23': {'CD2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'LYS': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
| vsserafim/aula_sockets_echo | client_01.py | Python | gpl-2.0 | 1,291 | 0 |
# -*- coding: utf-8 -*-
import socket
from string import strip
__author__ = 'Vinícius da Silveira Serafim <vinicius@serafim.eti.br>'
# server address and port for the connection
server_addr = ("127.0.0.1", 9000)
def main():
"""
Main function.
"""
# (1) Create the client socket
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# (2) Connect the socket to the server
client_socket.connect(server_addr)
while True:
# (3) Read a line from the keyboard
data = strip(raw_input("> "))
# if the line is empty, disconnect
if not len(data):
break
# (4) Send the line to the server
sent_bytes = client_socket.send(data)
print "[>] Enviado: '%s' (%s bytes) ao servidor." % (data, sent_bytes)
# (5) Read the server's response
resp = client_socket.recv(128)
# if the server sent nothing, the connection was closed
if not len(resp):
break
print "[<] Recebido: '%s' (%s bytes) do servidor %s:%s" %\
(resp, len(resp), server_addr[0], server_addr[1])
# (6) Disconnect
client_socket.close()
print "Conexão encerrada com o servidor %s:%s" % server_addr
if __name__ == '__main__':
main()
# eof
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/test/test_long.py | Python | mit | 8,805 | 0.003748 |
from test_support import verify, verbose, TestFailed
from string import join
from random import random, randint
# SHIFT should match the value in longintrepr.h for best testing.
SHIFT = 15
BASE = 2 ** SHIFT
MASK = BASE - 1
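# With SHIFT = 15, BASE = 32768 and MASK = 32767 (0x7fff).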
# Max number of base BASE digits to use in test cases. Doubling
# this will at least quadruple the runtime.
MAXDIGITS = 10
# build some special values
special = map(long, [0, 1, 2, BASE, BASE >> 1])
special.append(0x5555555555555555L)
special.append(0xaaaaaaaaaaaaaaaaL)
# some solid strings of one bits
p2 = 4L # 0 and 1 already added
for i in range(2*SHIFT):
special.append(p2 - 1)
p2 = p2 << 1
del p2
# add complements & negations
special = special + map(lambda x: ~x, special) + \
map(lambda x: -x, special)
# ------------------------------------------------------------ utilities
# Use check instead of assert so the test still does something
# under -O.
def check(ok, *args):
if not ok:
raise TestFailed, join(map(str, args), " ")
# Get quasi-random long consisting of ndigits digits (in base BASE).
# quasi == the most-significant digit will not be 0, and the number
# is constructed to contain long strings of 0 and 1 bits. These are
# more likely than random bits to provoke digit-boundary errors.
# The sign of the number is also random.
def getran(ndigits):
verify(ndigits > 0)
nbits_hi = ndigits * SHIFT
nbits_lo = nbits_hi - SHIFT + 1
answer = 0L
nbits = 0
r = int(random() * (SHIFT * 2)) | 1 # force 1 bits to start
while nbits < nbits_lo:
bits = (r >> 1) + 1
bits = min(bits, nbits_hi - nbits)
verify(1 <= bits <= SHIFT)
nbits = nbits + bits
answer = answer << bits
if r & 1:
answer = answer | ((1 << bits) - 1)
r = int(random() * (SHIFT * 2))
verify(nbits_lo <= nbits <= nbits_hi)
if random() < 0.5:
answer = -answer
return answer
# Get random long consisting of ndigits random digits (relative to base
# BASE). The sign bit is also random.
def getran2(ndigits):
answer = 0L
for i in range(ndigits):
answer = (answer << SHIFT) | randint(0, MASK)
if random() < 0.5:
answer = -answer
return answer
# --------------------------------------------------------------- divmod
def test_division_2(x, y):
q, r = divmod(x, y)
q2, r2 = x/y, x%y
pab, pba = x*y, y*x
check(pab == pba, "multiplication does not commute for", x, y)
check(q == q2, "divmod returns different quotient than / for", x, y)
check(r == r2, "divmod returns different mod than % for", x, y)
check(x == q*y + r, "x != q*y + r after divmod on", x, y)
if y > 0:
check(0 <= r < y, "bad mod from divmod on", x, y)
else:
check(y < r <= 0, "bad mod from divmod on", x, y)
def test_division(maxdigits=MAXDIGITS):
print "long / * % divmod"
digits = range(1, maxdigits+1)
for lenx in digits:
x = getran(lenx)
for leny in digits:
y = getran(leny) or 1L
test_division_2(x, y)
# -------------------------------------------------------------- ~ & | ^
def test_bitop_identities_1(x):
check(x & 0 == 0, "x & 0 != 0 for", x)
check(x | 0 == x, "x | 0 != x for", x)
check(x ^ 0 == x, "x ^ 0 != x for", x)
check(x & -1 == x, "x & -1 != x for", x)
check(x | -1 == -1, "x | -1 != -1 for", x)
check(x ^ -1 == ~x, "x ^ -1 != ~x for", x)
check(x == ~~x, "x != ~~x for", x)
check(x & x == x, "x & x != x for", x)
check(x | x == x, "x | x != x for", x)
check(x ^ x == 0, "x ^ x != 0 for", x)
check(x & ~x == 0, "x & ~x != 0 for", x)
check(x | ~x == -1, "x | ~x != -1 for", x)
check(x ^ ~x == -1, "x ^ ~x != -1 for", x)
check(-x == 1 + ~x == ~(x-1), "not -x == 1 + ~x == ~(x-1) for", x)
for n in range(2*SHIFT):
p2 = 2L ** n
check(x << n >> n == x, "x << n >> n != x for", x, n)
check(x / p2 == x >> n, "x / p2 != x >> n for x n p2", x, n, p2)
check(x * p2 == x << n, "x * p2 != x << n for x n p2", x, n, p2)
check(x & -p2 == x >> n << n == x & ~(p2 - 1),
"not x & -p2 == x >> n << n == x & ~(p2 - 1) for x n p2",
x, n, p2)
def test_bitop_identities_2(x, y):
check(x & y == y & x, "x & y != y & x for", x, y)
check(x | y == y | x, "x | y != y | x for", x, y)
check(x ^ y == y ^ x, "x ^ y != y ^ x for", x, y)
check(x ^ y ^ x == y, "x ^ y ^ x != y for", x, y)
check(x & y == ~(~x | ~y), "x & y != ~(~x | ~y) for", x, y)
check(x | y == ~(~x & ~y), "x | y != ~(~x & ~y) for", x, y)
check(x ^ y == (x | y) & ~(x & y),
"x ^ y != (x | y) & ~(x & y) for", x, y)
check(x ^ y == (x & ~y) | (~x & y),
"x ^ y == (x & ~y) | (~x & y) for", x, y)
check(x ^ y == (x | y) & (~x | ~y),
"x ^ y == (x | y) & (~x | ~y) for", x, y)
def test_bitop_identities_3(x, y, z):
check((x & y) & z == x & (y & z),
"(x & y) & z != x & (y & z) for", x, y, z)
check((x | y) | z == x | (y | z),
"(x | y) | z != x | (y | z) for", x, y, z)
check((x ^ y) ^ z == x ^ (y ^ z),
"(x ^ y) ^ z != x ^ (y ^ z) for", x, y, z)
check(x & (y | z) == (x & y) | (x & z),
"x & (y | z) != (x & y) | (x & z) for", x, y, z)
check(x | (y & z) == (x | y) & (x | z),
"x | (y & z) != (x | y) & (x | z) for", x, y, z)
def test_bitop_identities(maxdigits=MAXDIGITS):
print "long bit-operation identities"
for x in special:
test_bitop_identities_1(x)
digits = range(1, maxdigits+1)
for lenx in digits:
x = getran(lenx)
test_bitop_identities_1(x)
for leny in digits:
y = getran(leny)
test_bitop_identities_2(x, y)
test_bitop_identities_3(x, y, getran((lenx + leny)/2))
# ------------------------------------------------- hex oct repr str atol
def slow_format(x, base):
if (x, base) == (0, 8):
# this is an oddball!
return "0L"
digits = []
sign = 0
if x < 0:
sign, x = 1, -x
while x:
x, r = divmod(x, base)
digits.append(int(r))
digits.reverse()
digits = digits or [0]
return '-'[:sign] + \
{8: '0', 10: '', 16: '0x'}[base] + \
join(map(lambda i: "0123456789ABCDEF"[i], digits), '') + \
"L"
def test_format_1(x):
from string import atol
for base, mapper in (8, oct), (10, repr), (16, hex):
got = mapper(x)
expected = slow_format(x, base)
check(got == expected, mapper.__name__, "returned",
got, "but expected", expected, "for", x)
check(atol(got, 0) == x, 'atol("%s", 0) !=' % got, x)
# str() has to be checked a little differently since there's no
# trailing "L"
got = str(x)
expected = slow_format(x, 10)[:-1]
check(got == expected, mapper.__name__, "returned",
got, "but expected", expected, "for", x)
def test_format(maxdigits=MAXDIGITS):
print "long str/hex/oct/atol"
for x in special:
test_format_1(x)
for i in range(10):
for lenx in range(1, maxdigits+1):
x = getran(lenx)
test_format_1(x)
# ----------------------------------------------------------------- misc
def test_misc(maxdigits=MAXDIGITS):
print "long miscellaneous operations"
import sys
# check the extremes in int<->long conversion
hugepos = sys.maxint
hugeneg = -hugepos - 1
hugepos_aslong = long(hugepos)
hugeneg_aslong = long(hugeneg)
check(hugepos == hugepos_aslong, "long(sys.maxint) != sys.maxint")
check(hugeneg == hugeneg_aslong,
"long(-sys.maxint-1) != -sys.maxint-1")
# long -> int should not fail for hugepos_aslong or hugeneg_aslong
try:
check(int(hugepos_aslong) == hugepos,
"converting sys.maxint to long and back to int fails")
except OverflowError:
raise TestFailed, "int(long(sys.maxint)) overflowed!"
try:
check(int(hugeneg_aslong) == hugeneg,
"converting -sys.maxint-1 to long and back to int fails")
except OverflowError:
raise TestFailed, "int(long(-sys.maxin
| huyilin/TopicLda | src/onlineuser.py | Python | gpl-3.0 | 1,278 | 0.021127 |
import cPickle, string, numpy, getopt, sys, random, time, re, pprint
import sys
import onlineldauser
import citydoc
import os
import subprocess
import MySQLdb
def main():
db=MySQLdb.Connect(host="localhost",
user="team06",
passwd="aiM7chah,d",
db="randomtrip")
cur=db.cursor()
batchsize = 1
D = 3.3e6
K = 100 # number of topics
documentstoanalyze=1; # number of batches
user_id='2'
cur.execute("select tags from UserProfile where id=%s",user_id)
user_tags=cur.fetchone()
user_tags=[user_tags[0]]
print user_tags
vocab = file('./vocaball.txt').readlines()
W = len(vocab)
olda=onlineldauser.OnlineLDA(vocab)
(gamma, bound) = olda.update_lambda(user_tags)
(wordids, wordcts) = onlineldauser.parse_doc_list(user_tags, olda._vocab)
perwordbound = bound * len(user_tags) / (D * sum(map(sum, wordcts)))
gamma=str(gamma[0]).strip(' []').replace('\n','')
gamma=gamma.split()
gamma_db=''
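# Keep only topics whose weight exceeds 0.01, serialized as "index:value," pairs for the
# user_vector column updated below.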
for index,value in enumerate(gamma):
if float(value)>0.01:
gamma_db+=(str(index)+':'+value+',')
cur.execute("update UserProfile set user_vector=%s where id=%s",(gamma_db,user_id))
db.commit()
if __name__ == '__main__':
main()
| emCOMP/twitter-mysql | bin/simple_import.py | Python | bsd-3-clause | 6,719 | 0.01786 |
import MySQLdb
import csv
import cStringIO
import codecs
import pprint
from datetime import datetime
from decimal import *
import argparse   # used by the argument parsing below but missing from the imports shown
import getpass    # used for the password prompt below but missing from the imports shown
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
INSERT_STMT="""INSERT INTO `tweets_Oso`
(`created_at`,
`lang`,
`text`,
`uuid`,
`user_id`,
`geo_coordinates_0`,
`geo_coordinates_1`,
`user_screen_name`,
`user_description`,
`user_followers_count`,
`user_friends_count`,
`user_location`,
`entities_urls_0_expanded_url`,
`entities_urls_1_expanded_url`,
`entities_urls_2_expanded_url`,
`user_statuses_count`,
`entities_urls_0_display_url`,
`entities_urls_1_display_url`,
`entities_urls_2_display_url`,
`retweeted_status_id`,
`retweeted_status_user_screen_name`,
`retweeted_status_retweet_count`,
`retweeted_status_created_at`,
`retweeted_status_text`,
`retweeted_status_favorite_count`,
`retweeted_status_user_id`,
`retweeted_status_user_time_zone`,
`retweeted_status_user_friends_count`,
`retweeted_status_user_statuses_count`,
`retweeted_status_user_followers_count`,
`in_reply_to_screen_name`,
`in_reply_to_status_id`,
`in_reply_to_user_id`)
VALUES
(%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s)
"""
parser = argparse.ArgumentParser(description='whatevs')
parser.add_argument('host', help='host')
parser.add_argument('database', help='database name')
parser.add_argument('username', help="username")
parser.add_argument('-l', '--limit', help="limit", type=int, default=0)
#parser.add_argument('-o', '--output', help="outfile")
parser.add_argument('-f', '--filename', help="input file")
parser.add_argument('-e', '--encoding', default="utf-8", help="json file encoding (default is utf-8)")
parser.add_argument('--db_encoding', default="utf8mb4", help="database encoding")
#parser.add_argument('-b', '--batchsize', default=1000, type=int, help="batch insert size")
parser.add_argument('-c', '--check', dest="check", action="store_true", help="check if tweet exists before inserting")
parser.add_argument('-r', '--no_retweets', dest="no_retweets", action="store_true", help="do not add embedded retweets")
args = parser.parse_args()
# ask for password
password = getpass.getpass("Enter password for %s@%s (%s) : "%(args.username, args.host, args.database))
# connect to db
db=MySQLdb.connect(args.host, args.username, password, args.database, charset=args.db_encoding, use_unicode=True)
c=db.cursor()
with open(args.filename, 'r') as infile:
coder = codecs.iterencode(codecs.iterdecode(infile, "utf-8"), "utf-8")
csvreader = csv.DictReader(coder, delimiter=',', quotechar='"')
queue = []
total = 0
try:
last_item = {}
for row in csvreader:
#print "%d - %s,%s,%s,%s"%(total, row["created_ts"], row["text"], row["retweeted_status.created_ts"], row["retweeted_status.text"])
last_item = row
created_ts = datetime.strptime( row["created_ts"], "%Y-%m-%dT%H:%M:%SZ" )
retweet_created = datetime.strptime( row["retweeted_status.created_ts"], "%Y-%m-%dT%H:%M:%SZ" ) if row["retweeted_status.created_ts"] else None
geo_0 = Decimal(row["geo.coordinates.0"] if len(row["geo.coordinates.0"]) < 16 else row["geo.coordinates.0"][:16]) if row["geo.coordinates.0"] else None
geo_1 = Decimal(Decimal(row["geo.coordinates.1"] if len(row["geo.coordinates.1"]) < 16 else row["geo.coordinates.1"][:16])) if row["geo.coordinates.1"] else None
#if geo_0 is not None:
# print geo_0, geo_1
item = (
created_ts,
row["lang"],
row["text"],
int(row["id"]),
int(row["user.id"]),
geo_0,
geo_1,
row["user.screen_name"],
row["user.description"],
int(row["user.followers_count"]),
int(row["user.friends_count"]),
row["user.location"],
row["entities.urls.0.expanded_url"],
row["entities.urls.1.expanded_url"],
row["entities.urls.2.expanded_url"],
int(row["user.statuses_count"]),
row["entities.urls.0.display_url"],
row["entities.urls.1.display_url"],
row["entities.urls.2.display_url"],
int(row["retweeted_status.id"]) if row["retweeted_status.id"] else None,
row["retweeted_status.user.screen_name"] if row["retweeted_status.user.screen_name"] else None,
int(row["retweeted_status.retweet_count"]) if row["retweeted_status.retweet_count"] else None,
retweet_created,
row["retweeted_status.text"] if row["retweeted_status.text"] else None,
int(row["retweeted_status.favorite_count"]) if row["retweeted_status.favorite_count"] else None,
int(row["retweeted_status.user.id"]) if row["retweeted_status.user.id"] else None,
row["retweeted_status.user.time_zone"] if row["retweeted_status.user.time_zone"] else None,
int(row["retweeted_status.user.friends_count"]) if row["retweeted_status.user.friends_count"] else None,
int(row["retweeted_status.user.statuses_count"]) if row["retweeted_status.user.statuses_count"] else None,
int(row["retweeted_status.user.followers_count"]) if row["retweeted_status.user.followers_count"] else None,
row["in_reply_to_screen_name"] if row["in_reply_to_screen_name"] else None,
int(row["in_reply_to_status_id"]) if row["in_reply_to_status_id"] else None,
int(row["in_reply_to_user_id"]) if row["in_reply_to_user_id"] else None
)
queue.append(item)
total += 1
if len(queue) >= MAX_NUM:
#print
#print "---------------------"
#print
c.executemany(INSERT_STMT, queue)
queue = []
print total
#print "---------------------"
#print
# insert the last few
c.executemany(INSERT_STMT, queue)
print "%d total inserted"%(total)
c.close()
except Exception, e:
print "error ", e
print "last item: "
pprint.pprint(last_item)
c.close()
db.close()
raise e
finally:
c.close()
db.commit()
db.close()
| huangsam/chowist | places/migrations/0008_category.py | Python | mit | 852 | 0.001174 |
# Generated by Django 3.0.8 on 2020-07-11 03:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("places", "0007_auto_20200711_0104"),
]
operations = [
migrations.CreateModel(
name="Category",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
("places", models.ManyToManyField(to="places.Restaurant")),
],
options={"verbose_name_plural": "categories", "db_table": "category"},
),
]
| omarkhan/opencraft | instance/tests/test_openstack.py | Python | agpl-3.0 | 3,877 | 0.00129 |
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
OpenStack - Tests
"""
# Imports #####################################################################
import requests
from collections import namedtuple
from unittest.mock import Mock, call, patch
from instance import openstack
from instance.tests.base import TestCase
# Tests #######################################################################
class OpenStackTestCase(TestCase):
"""
Test cases for OpenStack helper functions
"""
def setUp(self):
super().setUp()
self.nova = Mock()
def test_create_server(self):
"""
Create a VM via nova
"""
self.nova.flavors.find.return_value = 'test-flavor'
self.nova.images.find.return_value = 'test-image'
openstack.create_server(self.nova, 'test-vm', {"ram": 4096, "disk": 40}, {"name": "Ubuntu 12.04"})
self.assertEqual(self.nova.mock_calls, [
call.flavors.find(disk=40, ram=4096),
call.images.find(name='Ubuntu 12.04'),
call.servers.create('test-vm', 'test-image', 'test-flavor', key_name=None)
])
def test_delete_servers_by_name(self):
"""
Delete all servers with a given name
"""
server_class = namedtuple('server_class', 'name pk')
self.nova.servers.list.return_value = [
server_class(name='server-a', pk=1),
server_class(name='server-a', pk=2),
server_class(name='server-b', pk=3),
]
openstack.delete_servers_by_name(self.nova, 'server-a')
self.assertEqual(self.nova.mock_calls, [
call.servers.list(),
call.servers.delete(server_class(name='server-a', pk=1)),
call.servers.delete(server_class(name='server-a', pk=2)),
])
def test_get_server_public_address_none(self):
"""
No public IP when none has been assigned yet
"""
server_class = namedtuple('Server', 'addresses')
server = server_class(addresses=[])
self.assertEqual(openstack.get_server_public_address(server), None)
@patch('requests.packages.urllib3.util.retry.Retry.sleep')
@patch('http.client.HTTPConnection.getresponse')
@patch('http.client.HTTPConnection.request')
def test_nova_client_connection_error(self, mock_request, mock_getresponse, mock_retry_sleep):
"""
Connection error during a request from the nova client
Ensure requests are retried before giving up, with a backoff sleep between attempts
"""
def getresponse_call(*args, **kwargs):
""" Invoked by the nova client when making a HTTP request (via requests/urllib3) """
raise ConnectionResetError('[Errno 104] Connection reset by peer')
mock_getresponse.side_effect = getresponse_call
nova = openstack.get_nova_client()
with self.assertRaises(requests.exceptions.ConnectionError):
nova.servers.get('test-id')
self.assertEqual(mock_getresponse.call_count, 11)
self.assertEqual(mock_retry_sleep.call_count, 10)
| mathiasertl/django-ca | ca/django_ca/tests/tests_extensions.py | Python | gpl-3.0 | 71,893 | 0.002406 |
# This file is part of django-ca (https://github.com/mathiasertl/django-ca).
#
# django-ca is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# django-ca is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with django-ca. If not,
# see <http://www.gnu.org/licenses/>.
"""Test cases for :py:mod:`django_ca.extensions`."""
import doctest
import os
import sys
import typing
from unittest import TestLoader
from unittest import TestSuite
from cryptography import x509
from cryptography.x509 import TLSFeatureType
from cryptography.x509.oid import AuthorityInformationAccessOID
from cryptography.x509.oid import ExtendedKeyUsageOID
from cryptography.x509.oid import ExtensionOID
from cryptography.x509.oid import ObjectIdentifier
from django.conf import settings
from django.test import TestCase
from django.utils.functional import cached_property
from ..extensions import KEY_TO_EXTENSION
from ..extensions import OID_TO_EXTENSION
from ..extensions import AuthorityInformationAccess
from ..extensions import AuthorityKeyIdentifier
from ..extensions import BasicConstraints
from ..extensions import CertificatePolicies
from ..extensions import CRLDistributionPoints
from ..extensions import ExtendedKeyUsage
from ..extensions import Extension
from ..extensions import FreshestCRL
from ..extensions import InhibitAnyPolicy
from ..extensions import IssuerAlternativeName
from ..extensions import KeyUsage
from ..extensions import NameConstraints
from ..extensions import OCSPNoCheck
from ..extensions import PolicyConstraints
from ..extensions import PrecertificateSignedCertificateTimestamps
from ..extensions import PrecertPoison
from ..extensions import SubjectAlternativeName
from ..extensions import SubjectKeyIdentifier
from ..extensions import TLSFeature
from ..extensions.base import UnrecognizedExtension
from ..extensions.utils import PolicyInformation
from ..models import X509CertMixin
from ..typehints import ParsablePolicyInformation
from ..utils import GeneralNameList
from .base import certs
from .base import dns
from .base import uri
from .base.extensions import CRLDistributionPointsTestCaseBase
from .base.extensions import ExtensionTestMixin
from .base.extensions import ListExtensionTestMixin
from .base.extensions import NullExtensionTestMixin
from .base.extensions import OrderedSetExtensionTestMixin
from .base.extensions import TestValues
from .base.mixins import TestCaseMixin
def load_tests( # pylint: disable=unused-argument
loader: TestLoader, tests: TestSuite, ignore: typing.Optional[str] = None
) -> TestSuite:
"""Load doctests."""
if sys.version_info >= (3, 7):
# Older python versions return a different str for classes
docs_path = os.path.join(settings.DOC_DIR, "python", "extensions.rst")
tests.addTests(
doctest.DocFileSuite(
docs_path,
module_relative=False,
globs={
"KEY_TO_EXTENSION": KEY_TO_EXTENSION,
"OID_TO_EXTENSION": OID_TO_EXTENSION,
},
)
)
tests.addTests(
doctest.DocTestSuite(
"django_ca.extensions",
extraglobs={
"ExtensionOID": ExtensionOID,
},
)
)
tests.addTests(
doctest.DocTestSuite(
"django_ca.extensions.base",
extraglobs={
"ExtendedKeyUsage": ExtendedKeyUsage,
"ExtendedKeyUsageOID": ExtendedKeyUsageOID,
"ExtensionOID": ExtensionOID,
"KeyUsage": KeyUsage,
"OCSPNoCheck": OCSPNoCheck,
"SubjectAlternativeName": SubjectAlternativeName,
"SubjectKeyIdentifier": SubjectKeyIdentifier,
},
)
)
tests.addTests(doctest.DocTestSuite("django_ca.extensions.utils"))
return tests
class AuthorityInformationAccessTestCase(ExtensionTestMixin[AuthorityInformationAccess], TestCase):
"""Test AuthorityInformationAccess extension."""
ext_class = AuthorityInformationAccess
ext_class_key = "authority_information_access"
ext_class_name = "AuthorityInformationAccess"
uri1 = "https://example1.com"
uri2 = "https://example2.net"
uri3 = "https://example3.org"
uri4 = "https://example4.at"
test_values = {
"empty": {
"values": [{}],
"expected": {"issuers": [], "ocsp": []},
"expected_bool": False,
"expected_repr": "issuers=[], ocsp=[]",
"expected_serialized": {},
"expected_text": "",
"extension_type": x509.AuthorityInformationAccess(descriptions=[]),
},
"issuer": {
"values": [
{"issuers": [uri1]},
{"issuers": [uri(uri1)]},
],
"expected": {"issuers": [uri(uri1)], "ocsp": []},
"expected_repr": f"issuers=['URI:{uri1}'], ocsp=[]",
"expected_serialized": {"issuers": [f"URI:{uri1}"]},
"expected_text": f"CA Issuers:\n * URI:{uri1}",
"extension_type": x509.AuthorityInformationAccess(
descriptions=[x509.AccessDescription(AuthorityInformationAccessOID.CA_ISSUERS, uri(uri1))]
),
},
"ocsp": {
"values": [
{"ocsp": [uri2]},
{"ocsp": [uri(uri2)]},
],
"expected": {"ocsp": [uri(uri2)], "issuers": []},
"expected_repr": f"issuers=[], ocsp=['URI:{uri2}']",
"expected_serialized": {"ocsp": [f"URI:{uri2}"]},
"expected_text": f"OCSP:\n * URI:{uri2}",
"extension_type": x509.AuthorityInformationAccess(
descriptions=[x509.AccessDescription(AuthorityInformationAccessOID.OCSP, uri(uri2))]
),
},
"both": {
"values": [
{"ocsp": [uri1], "issuers": [uri2]},
{"ocsp": [uri(uri1)], "issuers": [uri(uri2)]},
],
"expected": {"ocsp": [uri(uri1)], "issuers": [uri(uri2)]},
"expected_repr": f"issuers=['URI:{uri2}'], ocsp=['URI:{uri1}']",
"expected_serialized": {"ocsp": [f"URI:{uri1}"], "issuers": [f"URI:{uri2}"]},
"expected_text": f"CA Issuers:\n * URI:{uri2}\nOCSP:\n * URI:{uri1}",
"extension_type": x509.AuthorityInformationAccess(
descriptions=[
x509.AccessDescription(AuthorityInformationAccessOID.CA_ISSUERS, uri(uri2)),
x509.AccessDescription(AuthorityInformationAccessOID.OCSP, uri(uri1)),
]
),
},
"multiple": {
"values": [
{"ocsp": [uri1, uri2], "issuers": [uri3, uri4]},
{"ocsp": [uri1, uri(uri2)], "issuers": [uri3, uri(uri4)]},
{"ocsp": [uri(uri1), uri(uri2)], "issuers": [uri(uri3), uri(uri4)]},
],
"expected": {"ocsp": [uri(uri1), uri(uri2)], "issuers": [uri(uri3), uri(uri4)]},
"expected_repr": f"issuers=['URI:{uri3}', 'URI:{uri4}'], ocsp=['URI:{uri1}', 'URI:{uri2}']",
"expected_serialized": {
"ocsp": [f"URI:{uri1}", f"URI:{uri2}"],
"issuers": [f"URI:{uri3}", f"URI:{uri4}"],
},
"expected_text": f"CA Issuers:\n * URI:{uri3}\n * URI:{uri4}\n"
f"OCSP:\n * URI:{uri1}\n * URI:{uri2}",
"extension_type": x509.AuthorityInformationAccess(
descriptions=[
x509.AccessDescription(AuthorityInformationAccessOID.CA_ISSUERS, uri(uri3)),
x509.AccessDescription(AuthorityInformationAccessOID.CA_ISSUERS, uri(uri4)),
x509.AccessDescripti
| jbaayen/sympy | sympy/integrals/risch.py | Python | bsd-3-clause | 11,561 | 0.002681 |
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.power import Pow
from sympy.core.function import Function
from sympy.core.symbol import Symbol, Wild
from sympy.core.basic import S, C, Atom, sympify
from sympy.core.numbers import Integer, Rational
from sympy.functions import exp, sin , cos , tan , cot , asin
from sympy.functions import log, sinh, cosh, tanh, coth, asinh
from sympy.functions import sqrt, erf
from sympy.solvers import solve
from sympy.simplify import simplify, together
from sympy.polys import Poly, quo, gcd, lcm, root_factors, \
monomials, factor, PolynomialError
from sympy.utilities.iterables import make_list
def components(f, x):
"""Returns a set of all functional components of the given expression
which includes symbols, function applications and compositions and
non-integer powers. Fractional powers are collected with
minimal, positive exponents.
>>> from sympy import *
>>> x, y = symbols('xy')
>>> components(sin(x)*cos(x)**2, x)
set([x, cos(x), sin(x)])
"""
result = set()
if f.has(x):
if f.is_Symbol:
result.add(f)
elif f.is_Function or f.is_Derivative:
for g in f.args:
result |= components(g, x)
result.add(f)
elif f.is_Pow:
result |= components(f.base, x)
if not f.exp.is_Integer:
if f.exp.is_Rational:
result.add(f.base**Rational(1, f.exp.q))
else:
result |= components(f.exp, x) | set([f])
else:
for g in f.args:
result |= components(g, x)
return result
# name -> [] of symbols
_symbols_cache = {}
# NB @cacheit is not convenient here
def _symbols(name, n):
"""get vector of symbols local to this module"""
try:
lsyms = _symbols_cache[name]
except KeyError:
lsyms = []
_symbols_cache[name] = lsyms
while len(lsyms) < n:
lsyms.append( Symbol('%s%i' % (name, len(lsyms)), dummy=True) )
return lsyms[:n]
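# e.g. _symbols('x', 3) returns three cached dummy symbols [x0, x1, x2].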
def heurisch(f, x, **kwargs):
"""Compute indefinite integral using heuristic Risch algorithm.
This is a heuristic approach to indefinite integration in finite
terms using the extended heuristic (parallel) Risch algorithm, based
on Manuel Bronstein's "Poor Man's Integrator".
The algorithm supports various classes of functions including
transcendental elementary or special functions like Airy,
Bessel, Whittaker and Lambert.
Note that this algorithm is not a decision procedure. If it isn't
able to compute an antiderivative for a given function, then this is
not a proof that such a function does not exist. One should use the
recursive Risch algorithm in such a case. It's an open question if
this algorithm can be made a full decision procedure.
This is an internal integrator procedure. You should use toplevel
'integrate' function in most cases, as this procedure needs some
preprocessing steps and otherwise may fail.
Specification
============
heurisch(f, x, rewrite=False, hints=None)
where
f : expression
x : symbol
rewrite -> force rewrite 'f' in terms of 'tan' and 'tanh'
hints -> a list of functions that may appear in the antiderivative
- hints = None --> no suggestions at all
- hints = [ ] --> try to figure out
- hints = [f1, ..., fn] --> we know better
Examples
========
>>> from sympy import *
>>> x,y = symbols('xy')
>>> heurisch(y*tan(x), x)
y*log(1 + tan(x)**2)/2
See Manuel Bronstein's "Poor Man's Integrator":
[1] http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html
For more information on the implemented algorithm refer to:
[2] K. Geddes, L.Stefanus, On the Risch-Norman Integration
Method and its Implementation in Maple, Proceedings of
ISSAC'89, ACM Press, 212-217.
[3] J. H. Davenport, On the Parallel Risch Algorithm (I),
Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157.
[4] J. H. Davenport, On the Parallel Risch Algorithm (III):
Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6.
[5] J. H. Davenport, B. M. Trager, On the Parallel Risch
Algorithm (II), ACM Transactions on Mathematical
Software 11 (1985), 356-362.
"""
f = sympify(f)
if not f.is_Add:
indep, f = f.as_independent(x)
else:
indep = S.One
if not f.has(x):
return indep * f * x
rewritables = {
(sin, cos, cot) : tan,
(sinh, cosh, coth) : tanh,
}
rewrite = kwargs.pop('rewrite', False)
if rewrite:
for candidates, rule in rewritables.iteritems():
f = f.rewrite(candidates, rule)
else:
for candidates in rewritables.iterkeys():
if f.has(*candidates):
break
else:
rewrite = True
terms = components(f, x)
hints = kwargs.get('hints', None)
if hints is not None:
if not hints:
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
for g in set(terms):
if g.is_Function:
if g.func is exp:
M = g.args[0].match(a*x**2)
if M is not None:
terms.add(erf(sqrt(-M[a])*x))
elif g.is_Pow:
if g.exp.is_Rational and g.exp.q == 2:
M = g.base.match(a*x**2 + b)
if M is not None and M[b].is_positive:
if M[a].is_positive:
terms.add(asinh(sqrt(M[a]/M[b])*x))
elif M[a].is_negative:
terms.add(asin(sqrt(-M[a]/M[b])*x))
else:
terms |= set(hints)
for g in set(terms):
terms |= components(g.diff(x), x)
V = _symbols('x', len(terms))
mapping = dict(zip(terms, V))
rev_mapping = {}
for k, v in mapping.iteritems():
rev_mapping[v] = k
def substitute(expr):
return expr.subs(mapping)
diffs = [ substitute(simplify(g.diff(x))) for g in terms ]
denoms = [ g.as_numer_denom()[1] for g in diffs ]
denom = reduce(lambda p, q: lcm(p, q, V), denoms)
numers = [ Poly.cancel(denom * g, *V) for g in diffs ]
def derivation(h):
return Add(*[ d * h.diff(v) for d, v in zip(numers, V) ])
def deflation(p):
for y in V:
if not p.has_any_symbols(y):
continue
if derivation(p) is not S.Zero:
c, q = p.as_poly(y).as_primitive()
return deflation(c)*gcd(q, q.diff(y))
else:
return p
def splitter(p):
for y in V:
if not p.has_any_symbols(y):
continue
if derivation(y) is not S.Zero:
c, q = p.as_poly(y).as_primitive()
q = q.as_basic()
h = gcd(q, derivation(q), y)
s = quo(h, gcd(q, q.diff(y), y), y)
c_split = splitter(c)
if s.as_poly(y).degree == 0:
return (c_split[0], q * c_split[1])
q_split = splitter(Poly.cancel((q, s), *V))
return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1])
else:
return (S.One, p)
special = {}
for term in terms:
if term.is_Function:
if term.func is tan:
special[1 + substitute(term)**2] = False
elif term.func is tanh:
special[1 + substitute(term)] = False
special[1 - substitute(term)] = False
elif term.func is C.LambertW:
special[substitute(term)] = True
F = substitute(f)
P, Q = F.as_numer_denom()
u_split = splitter(denom)
v_s
| google/personfinder | tests/views/test_admin_global_index.py | Python | apache-2.0 | 6,931 | 0.000289 |
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the main global admin page."""
import copy
import config
import view_tests_base
class AdminGlobalIndexViewTests(view_tests_base.ViewTestsBase):
"""Tests the global admin index view."""
_PRIOR_CONFIG = {
'sms_number_to_repo': '{"+15551234567": "haiti"}',
'repo_aliases': '{"h": "haiti"}',
'brand': 'none',
'privacy_policy_url': 'www.example.com/privacy',
'tos_url': 'www.example.com/tos',
'feedback_url': 'www.example.com/feedback',
'captcha_site_key': 'captcha-key',
'captcha_secret_key': 'captcha-secret-key',
'analytics_id': 'analytics-id',
'amp_gtm_id': 'amp-gtm-id',
'maps_api_key': 'maps-api-key',
'translate_api_key': 'translate-api-key',
'notification_email': 'notifications@example.com',
'unreviewed_notes_threshold': 12,
}
_BASE_POST_PARAMS = {
'sms_number_to_repo': '{"+15551234567": "haiti"}',
'repo_aliases': '{"h": "haiti"}',
'brand': 'none',
'privacy_policy_url': 'www.example.com/privacy',
'tos_url': 'www.example.com/tos',
'feedback_url': 'www.example.com/feedback',
'captcha_site_key': 'captcha-key',
'captcha_secret_key': 'captcha-secret-key',
'analytics_id': 'analytics-id',
'amp_gtm_id': 'amp-gtm-id',
'maps_api_key': 'maps-api-key',
'translate_api_key': 'translate-api-key',
'notification_email': 'notifications@example.com',
'unreviewed_notes_threshold': '12',
}
def setUp(self):
super(AdminGlobalIndexViewTests, self).setUp()
self.data_generator.repo()
config.set_for_repo('*', **AdminGlobalIndexViewTests._PRIOR_CONFIG)
self.login_as_superadmin()
def test_get(self):
"""Tests GET requests."""
resp = self.client.get('/global/admin/', secure=True)
self.assertEqual(
resp.context.get('sms_config'), {
'sms_number_to_repo': '"{\\"+15551234567\\": \\"haiti\\"}"',
})
self.assertEqual(
resp.context.get('repo_alias_config'), {
'repo_aliases': '"{\\"h\\": \\"haiti\\"}"',
})
self.assertEqual(
resp.context.get('site_info_config'), {
'brand': 'none',
'privacy_policy_url': 'www.example.com/privacy',
'tos_url': 'www.example.com/tos',
'feedback_url': 'www.example.com/feedback',
})
self.assertEqual(
resp.context.get('recaptcha_config'), {
'captcha_site_key': 'captcha-key',
'captcha_secret_key': 'captcha-secret-key',
})
self.assertEqual(
resp.context.get('ganalytics_config'), {
'analytics_id': 'analytics-id',
'amp_gtm_id': 'amp-gtm-id',
})
self.assertEqual(
resp.context.get('gmaps_config'), {
'maps_api_key': 'maps-api-key',
})
self.assertEqual(
resp.context.get('gtranslate_config'), {
'translate_api_key': 'translate-api-key',
})
self.assertEqual(
resp.context.get('notification_config'), {
'notification_email': 'notifications@example.com',
'unreviewed_notes_threshold': '12',
})
def test_edit_sms_config(self):
self._post_with_params(sms_number_to_repo='{"+1800pfhaiti": "haiti"}')
conf = config.Configuration('*')
self.assertEqual(conf.sms_number_to_repo, {'+1800pfhaiti': 'haiti'})
def test_edit_repo_alias_config(self):
self._post_with_params(repo_aliases='{"e": "ecuador"}')
conf = config.Configuration('*')
self.assertEqual(conf.repo_aliases, {'e': 'ecuador'})
def test_edit_site_info_config(self):
self._post_with_params(
brand='google',
privacy_policy_url='othersite.org/privacy',
tos_url='othersite.org/tos',
feedback_url='othersite.org/feedback')
conf = config.Configuration('*')
self.assertEqual(conf.brand, 'google')
self.assertEqual(conf.privacy_policy_url, 'othersite.org/privacy')
self.assertEqual(conf.tos_url, 'othersite.org/tos')
self.assertEqual(conf.feedback_url, 'othersite.org/feedback')
def test_edit_recaptcha_config(self):
self._post_with_params(
captcha_site_key='NEW-captcha-key',
captcha_secret_key='NEW-captcha-secret-key')
conf = config.Configuration('*')
self.assertEqual(conf.captcha_site_key, 'NEW-captcha-key')
self.assertEqual(conf.captcha_secret_key, 'NEW-captcha-secret-key')
def test_edit_ganalytics_config(self):
self._post_with_params(
analytics_id='NEW-analytics-id',
amp_gtm_id='NEW-amp-gtm-id')
conf = config.Configuration('*')
self.assertEqual(conf.analytics_id, 'NEW-analytics-id')
self.assertEqual(conf.amp_gtm_id, 'NEW-amp-gtm-id')
def test_edit_gmaps_config(self):
self._post_with_params(maps_api_key='NEW-maps-api-key')
conf = config.Configuration('*')
self.assertEqual(conf.maps_api_key, 'NEW-maps-api-key')
def test_edit_gtranslate_config(self):
self._post_with_params(translate_api_key='NEW-translate-api-key')
conf = config.Configuration('*')
self.assertEqual(conf.translate_api_key, 'NEW-translate-api-key')
def test_edit_notification_config(self):
self._post_with_params(
notification_email='notifications@othersite.org',
unreviewed_notes_threshold='86')
conf = config.Configuration('*')
self.assertEqual(conf.notification_email, 'notifications@othersite.org')
self.assertEqual(conf.unreviewed_notes_threshold, 86)
def _post_with_params(self, **kwargs):
get_doc = self.to_doc(self.client.get('/global/admin', secure=True))
xsrf_token = get_doc.cssselect_one('input[name="xsrf_token"]').get(
'value')
post_params = copy.deepcopy(AdminGlobalIndexViewTests._BASE_POST_PARAMS)
post_params['xsrf_token'] = xsrf_token
post_params.update(kwargs)
return self.client.post('/global/admin/', post_params, secure=True)
| thisch/python-falafel | examples/project2/mypackage/modb/bar.py | Python | bsd-2-clause | 246 | 0 |
from .. import TestCase
# NOTE: this test is not run by the tester because the name of this file
# does not match the testpattern regex in TestLoader.discover
class TestBar(TestCase):
def test_print(self):
self.assertTrue(3+4 > 2)
| socialplanning/SupervisorErrorMiddleware | supervisorerrormiddleware/tests/test.py | Python | gpl-2.0 | 2,244 | 0.008021 |
from supervisorerrormiddleware import SupervisorErrorMiddleware
import os
import sys
import paste.fixture
class DummyOutput:
def __init__(self):
self._buffer = []
def write(self, data):
self._buffer.append(data)
def flush(self):
self._buffer = []
def bad_app(environ, start_response):
if environ['PATH_INFO'] != '/good':
raise Exception("Bad Kitty")
else:
start_response("200 OK", [('Content-type', 'text/html')])
return ["Good Kitty"]
def test_without_supervisor():
old_stdout = sys.stdout
try:
sys.stdout = DummyOutput()
app = bad_app
app = SupervisorErrorMiddleware(app)
app = paste.fixture.TestApp(app)
failed = False
try:
app.get("/")
except:
failed = True
assert failed
output = "".join(sys.stdout._buffer)
sys.stdout.flush()
assert not "Bad Kitty" in output
assert not "GET" in output
response = app.get("/good")
output = "".join(sys.stdout._buffer)
sys.stdout.flush()
response.mustcontain("Good Kitty")
assert not "Bad Kitty" in output
assert not "GET" in output
finally:
sys.stdout = old_stdout
def test_with_supervisor():
#Why is there output when stdout is redirected? Because
#paste.fixture.TestApp gets around the redirection.
old_stdout = sys.stdout
try:
os.environ['SUPERVISOR_ENABLED'] = "1" #fake supervisor
sys.stdout = DummyOutput()
app = bad_app
app = SupervisorErrorMiddleware(app)
app = paste.fixture.TestApp(app)
failed = False
try:
app.get("/")
except:
failed = True
assert failed
output = "".join(sys.stdout._buffer)
sys.stdout.flush()
assert "Bad Kitty" in output
assert "GET" in output
response = app.get("/good")
output = "".join(sys.stdout._buffer)
sys.stdout.flush()
response.mustcontain("Good Kitty")
assert not "Bad Kitty" in output
assert not "GET" in output
finally:
sys.stdout = old_stdout
del os.environ['SUPERVISOR_ENABLED']
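# A minimal wiring sketch (hypothetical WSGI app, not part of the original
# test module): SupervisorErrorMiddleware wraps any WSGI callable, and the
# tests above show it only echoes tracebacks to stdout when supervisor sets
# SUPERVISOR_ENABLED in the environment.
def make_wrapped_app():
    def hello_app(environ, start_response):
        start_response("200 OK", [('Content-type', 'text/plain')])
        return ["Hello"]
    return SupervisorErrorMiddleware(hello_app)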
|
|
arannasousa/pagseguro_xml
|
pagseguro_xml/tests/test_classes_consultas/__init__.py
|
Python
|
gpl-2.0
| 490
| 0.002049
|
# coding=utf-8
# ---------------------------------------------------------------
# Desenvolvedor: Arannã Sousa Santos
# Mês: 12
# Ano: 2015
# Projeto: pagseguro_xml
# e-mail: asousas@live.com
# ---------------------------------------------------------------
from .test_detalhes_v3 import ClasseTransacaoDetalhesTest
from .test_historico_v2 import ClasseTransacaoHistoricoTest
from .test_abandonadas_v2 import ClasseTransacaoAbandonadasTest
|
bkuczenski/lca-tools
|
antelope_background/background/flat_background.py
|
Python
|
gpl-2.0
| 18,042
| 0.001718
|
"""
class for storing static results of a tarjan ordering
"""
from scipy.sparse.csc import csc_matrix
from scipy.sparse.csr import csr_matrix
from scipy.sparse.linalg import inv, factorized, spsolve
from scipy.sparse import eye
from scipy.io import savemat, loadmat
import os
from collections import namedtuple
from ..engine import BackgroundEngine
from lcatools.interfaces import CONTEXT_STATUS_
from lcatools import from_json, to_json, comp_dir
SUPPORTED_FILETYPES = ('.mat', )
_FLATTEN_AF = False
class NoLciDatabase(Exception):
pass
class TermRef(object):
def __init__(self, flow_ref, direction, term_ref, scc_id=None):
"""
:param flow_ref:
:param direction: direction w.r.t. term
:param term_ref:
:param scc_id: None or 0 for singleton /emission; external_ref of a contained process for SCC
"""
self._f = flow_ref
self._d = {'Input': 0, 'Output': 1, 0: 0, 1: 1}[direction]
self._t = term_ref
self._s = 0
self.scc_id = scc_id
@property
def term_ref(self):
return self._t
@property
def flow_ref(self):
return self._f
@property
def direction(self):
return ('Input', 'Output')[self._d]
@property
def scc_id(self):
if self._s == 0:
return []
return self._s
@scc_id.setter
def scc_id(self, item):
if item is None:
self._s = 0
else:
self._s = item
def __array__(self):
return self.flow_ref, self._d, self.term_ref, self._s
def __iter__(self):
return iter(self.__array__())
ExchDef = namedtuple('ExchDef', ('process', 'flow', 'direction', 'term', 'value'))
def _iterate_a_matrix(a, y, threshold=1e-8, count=100, quiet=False, solver=None):
if solver == 'spsolve':
ima = eye(a.shape[0]) - a
x = spsolve(ima, y)
return csr_matrix(x).T
y = csr_matrix(y) # tested this with ecoinvent: convert to sparse: 280 ms; keep full: 4.5 sec
total = csr_matrix(y.shape)
if a is None:
return total
mycount = 0
sumtotal = 0.0
while mycount < count:
total += y
y = a.dot(y)
inc = sum(abs(y).data)
if inc == 0:
if not quiet:
print('exact result')
break
sumtotal += inc
if inc / sumtotal < threshold:
break
mycount += 1
if not quiet:
print('completed %d iterations' % mycount)
return total
def _unit_column_vector(dim, inx):
return csr_matrix(((1,), ((inx,), (0,))), shape=(dim, 1))
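# A minimal sketch (not part of the original module) checking that the series
# iteration above matches a direct dense solve of (I - A) x = y for a small,
# hypothetical A with spectral radius well below 1:
def _demo_iterate_a_matrix():
    import numpy as np
    a = csr_matrix(np.array([[0.0, 0.2], [0.1, 0.0]]))
    y = _unit_column_vector(2, 0)
    x_iter = _iterate_a_matrix(a, y, quiet=True)
    x_dense = np.linalg.solve(np.eye(2) - a.toarray(), y.toarray())
    assert np.allclose(x_iter.toarray(), x_dense, atol=1e-6)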
def split_af(_af, _inds):
"""
splits the input matrix into diagonal and off-diagonal portions, with the split being determined by _inds
:param _af:
:param _inds:
:return:
"""
_af = _af.tocoo()
_r = _af.row
_c = _af.col
_d = _af.data
_d_non = []
_d_scc = []
_shape = _af.shape
for i in range(len(_d)):
if _r[i] in _inds and _c[i] in _inds:
_d_non.append(0)
_d_scc.append(_d[i])
else:
_d_non.append(_d[i])
_d_scc.append(0)
_af_non = csc_matrix((_d_non, (_r, _c)), shape=_shape)
_af_scc = csc_matrix((_d_scc, (_r, _c)), shape=_shape)
assert (_af_non + _af_scc - _af).nnz == 0
return _af_non, _af_scc
def _determine_scc_inds(ts):
scc_inds = set()
for _s in ts.nontrivial_sccs():
if ts.is_background_scc(_s):
continue
for k in ts.scc(_s):
scc_inds.add(ts.fg_dict(k.index))
return scc_inds
def flatten(af, ad, bf, ts):
"""
Accepts a fully populated background engine as argument
:param af:
:param ad:
:param bf:
:param ts:
:return: af_flat, ad_flat, bf_flat
"""
scc_inds = _determine_scc_inds(ts)
non, scc = split_af(af, scc_inds)
scc_inv = inv(eye(ts.pdim).tocsc() - scc)
return non * scc_inv, ad * scc_inv, bf * scc_inv
class FlatBackground(object):
"""
Static, ordered background stored in an easily serializable way
"""
@classmethod
def from_index(cls, index, **kwargs):
"""
:param index: an index interface with operable processes() and terminate()
:param kwargs: origin, quiet
:return:
"""
be = BackgroundEngine(index)
be.add_all_ref_products()
return cls.from_background_engine(be, **kwargs)
@classmethod
def from_background_engine(cls, be, **kwargs):
af, ad, bf = be.make_foreground()
if _FLATTEN_AF:
af, ad, bf = flatten(af, ad, bf, be.tstack)
_map_nontrivial_sccs = {k: be.product_flow(k).process.external_ref for k in be.tstack.nontrivial_sccs()}
def _make_term_ref(pf):
try:
_scc_id = _map_nontrivial_sccs[be.tstack.scc_id(pf)]
except KeyError:
_scc_id = 0
return pf.flow.external_ref, pf.direction, pf.process.external_ref, _scc_id
def _make_term_ext(em):
try:
comp = em.compartment[-1]
except IndexError:
comp = None
return em.flow.external_ref, comp_dir(em.direction), comp, 0
return cls([_make_term_ref(x) for x in be.foreground_flows(outputs=False)],
[_make_term_ref(x) for x in be.background_flows()],
[_make_term_ext(x) for x in be.emissions],
af, ad, bf,
lci_db=be.lci_db,
**kwargs)
@classmethod
def from_file(cls, file, **kwargs):
ext = os.path.splitext(file)[1]
if ext == '.mat':
return cls.from_matfile(file, **kwargs)
elif ext == '.hdf':
return cls.from_hdf5(file, **kwargs)
else:
raise ValueError('Unsupported file type %s' % ext)
@classmethod
def from_hdf5(cls, fle, quiet=True):
raise NotImplementedError
@classmethod
def from_matfile(cls, file, quiet=True):
d = loadmat(file)
if 'A' in d:
lci_db = (d['A'].tocsr(), d['B'].tocsr())
else:
lci_db = None
ix = from_json(file + '.index.json.gz')
'''
def _unpack_term_ref(arr):
_xt = arr[3][0]
if len(_xt) == 1:
_xt = _xt[0]
return arr[0][0], arr[1][0][0], arr[2][0], _xt
return cls((_unpack_term_ref(f) for f in d['foreground']),
(_unpack_term_ref(f) for f in d['background']),
(_unpack_term_ref(f) for f in d['exterior']),
d['Af'].tocsr(), d['Ad'].tocsr(), d['Bf'].tocsr(),
lci_db=lci_db,
quiet=quiet)
'''
return cls(ix['foreground'], ix['background'], ix['exterior'],
d['Af'].tocsr(), d['Ad'].tocsr(), d['Bf'].tocsr(),
lci_db=lci_db,
quiet=quiet)
def __init__(self, foreground, background, exterior, af, ad, bf, lci_db=None, quiet=True):
"""
:param foreground: iterable of foreground Product Flows as TermRef params
:param background: iterable of background Product Flows as TermRef params
:param exterior: iterable of Exterior flows as TermRef params
:param af: sparse, flattened Af
:param ad: sparse, flattened Ad
:param bf: sparse, flattened Bf
:param lci_db: [None] optional (A, B) 2-tuple
:param quiet: [True] does nothing for now
"""
self._fg = tuple([TermRef(*f) for f in foreground])
self._bg = tuple([TermRef(*x) for x in background])
self._ex = tuple([TermRef(*x) for x in exterior])
self._af = af
self._ad = ad
self._bf = bf
if lci_db is None:
self._A = None
self._B = None
else:
self._A = lci_db[0].tocsr()
self._B = lci_db[1].tocsr()
self._lu = None # store LU decomposition
self._fg_index = {(k.term_ref, k.flow_ref): i for i, k in enumerate(self._fg)}
self._bg_index = {(k.term_ref, k.flow_ref): i for i, k in enumerate(self._bg)}
|
runt18/osquery
|
tools/codegen/gentable.py
|
Python
|
bsd-3-clause
| 11,592
| 0.001725
|
#!/usr/bin/env python
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import ast
import jinja2
import logging
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append(SCRIPT_DIR + "/../tests")
from utils import platform
# the log format for the logging module
LOG_FORMAT = "%(levelname)s [Line %(lineno)d]: %(message)s"
# Read all implementation templates
TEMPLATES = {}
# Temporary reserved column names
RESERVED = ["n", "index"]
# Set the platform in osquery-language
PLATFORM = platform()
# Supported SQL types for spec
class DataType(object):
def __init__(self, affinity, cpp_type="std::string"):
'''A column datatype is a pair of a SQL affinity to C++ type.'''
self.affinity = affinity
self.type = cpp_type
def __repr__(self):
return self.affinity
# Define column-type MACROs for the table specs
TEXT = DataType("TEXT")
DATE = DataType("TEXT")
DATETIME = DataType("TEXT")
INTEGER = DataType("INTEGER", "int")
BIGINT = DataType("BIGINT", "long long int")
UNSIGNED_BIGINT = DataType("UNSIGNED_BIGINT", "long long unsigned int")
DOUBLE = DataType("DOUBLE", "double")
# Define table-category MACROS from the table specs
UNKNOWN = "UNKNOWN"
UTILITY = "UTILITY"
SYSTEM = "SYSTEM"
NETWORK = "NETWORK"
EVENTS = "EVENTS"
APPLICATION = "APPLICATION"
def usage():
""" print program usage """
print(
"Usage: {0!s} <spec.table> <file.cpp> [disable_blacklist]".format(sys.argv[0]))
def to_camel_case(snake_case):
""" convert a snake_case string to camelCase """
components = snake_case.split('_')
return components[0] + "".join(x.title() for x in components[1:])
def lightred(msg):
return "\033[1;31m {0!s} \033[0m".format(str(msg))
def is_blacklisted(table_name, path=None, blacklist=None):
"""Allow blacklisting by tablename."""
if blacklist is None:
specs_path = os.path.dirname(path)
if os.path.basename(specs_path) != "specs":
specs_path = os.path.basename(specs_path)
blacklist_path = os.path.join(specs_path, "blacklist")
if not os.path.exists(blacklist_path):
return False
try:
with open(blacklist_path, "r") as fh:
blacklist = [
line.strip() for line in fh.read().split("\n")
if len(line.strip()) > 0 and line.strip()[0] != "#"
]
except:
# Blacklist is not readable.
return False
if not blacklist:
return False
# table_name based blacklisting!
for item in blacklist:
item = item.split(":")
# If this item is restricted to a platform and the platform
# and table name match
if len(item) > 1 and PLATFORM == item[0] and table_name == item[1]:
return True
elif len(item) == 1 and table_name == item[0]:
return True
return False
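# The blacklist file parsed above holds one table name per line, optionally
# prefixed with a platform; hypothetical contents (not from the osquery tree):
#
#   # lines starting with '#' are ignored
#   example_table
#   darwin:example_platform_only_table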
def setup_templates(templates_path):
if not os.path.exists(templates_path):
templates_path = os.path.join(os.path.dirname(tables_path), "templates")
if not os.path.exists(templates_path):
print ("Cannot read templates path: {0!s}".format((templates_path)))
exit(1)
for template in os.listdir(templates_path):
template_name = template.split(".", 1)[0]
with open(os.path.join(templates_path, template), "rb") as fh:
TEMPLATES[template_name] = fh.read().replace("\\\n", "")
class Singleton(object):
"""
Make sure that anything that subclasses Singleton can only be instantiated
once
"""
_instance = None
def __new__(self, *args, **kwargs):
if not self._instance:
self._instance = super(Singleton, self).__new__(
self, *args, **kwargs)
return self._instance
class TableState(Singleton):
"""
Maintain the state of the table commands during the execution of
the config file
"""
def __init__(self):
self.table_name = ""
self.schema = []
self.header = ""
self.impl = ""
self.function = ""
self.class_name = ""
self.description = ""
self.attributes = {}
self.examples = []
def columns(self):
return [i for i in self.schema if isinstance(i, Column)]
def foreign_keys(self):
return [i for i in self.schema if isinstance(i, ForeignKey)]
def generate(self, path, template="default"):
"""Generate the virtual table files"""
logging.debug("TableState.generate")
self.impl_content = jinja2.Template(TEMPLATES[template]).render(
table_name=self.table_name,
table_name_cc=to_camel_case(self.table_name),
schema=self.columns(),
header=self.header,
impl=self.impl,
function=self.function,
class_name=self.class_name,
attributes=self.attributes,
examples=self.examples,
)
if self.table_name == "" or self.function == "":
print (lightred("Invalid table spec: {0!s}".format((path)
|
)))
exit(1)
# Check for reserved column names
for column in self.columns():
if column.name in RESERVED:
print (lightred(("Cannot use column name: %s in ta
|
ble: %s "
"(the column name is reserved)" % (
column.name, self.table_name))))
exit(1)
path_bits = path.split("/")
for i in range(1, len(path_bits)):
dir_path = ""
for j in range(i):
dir_path += "{0!s}/".format(path_bits[j])
if not os.path.exists(dir_path):
try:
os.mkdir(dir_path)
except:
# May encounter a race when using a make jobserver.
pass
logging.debug("generating {0!s}".format(path))
with open(path, "w+") as file_h:
file_h.write(self.impl_content)
def blacklist(self, path):
print (lightred("Blacklisting generated {0!s}".format(path)))
logging.debug("blacklisting {0!s}".format(path))
self.generate(path, template="blacklist")
table = TableState()
class Column(object):
"""
Part of an osquery table schema.
Define a column by name and type with an optional description to assist
documentation generation and reference.
"""
def __init__(self, name, col_type, description="", **kwargs):
self.name = name
self.type = col_type
self.description = description
self.options = kwargs
class ForeignKey(object):
"""
Part of an osquery table schema.
Loosely define a column in a table spec as a Foreign key in another table.
"""
def __init__(self, **kwargs):
self.column = kwargs.get("column", "")
self.table = kwargs.get("table", "")
def table_name(name):
"""define the virtual table name"""
logging.debug("- table_name")
logging.debug(" - called with: {0!s}".format(name))
table.table_name = name
table.description = ""
table.attributes = {}
table.examples = []
def schema(schema_list):
"""
define a list of Column objects which represent the columns of your virtual
table
"""
logging.debug("- schema")
for it in schema_list:
if isinstance(it, Column):
logging.debug(" - column: {0!s} ({1!s})".format(it.name, it.type))
if isinstance(it, ForeignKey):
logging.debug(" - foreign_key: {0!s} ({1!s})".format(it.column, it.table))
table.schema = schema_list
def description(text):
table.description = text
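# A hedged sketch of a .table spec written against the macros above (the table
# and columns here are made up, not an actual osquery spec):
#
#   table_name("example_uptime")
#   description("Hypothetical table reporting host uptime.")
#   schema([
#       Column("days", INTEGER, "Days of uptime"),
#       Column("total_seconds", BIGINT, "Total uptime in seconds"),
#   ])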
def select_
|
texastribune/tt_streams
|
example/manage.py
|
Python
|
apache-2.0
| 320
| 0
|
#!/usr/bin/env python
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
GreenLunar/Bookie
|
bookie/lib/readable.py
|
Python
|
agpl-3.0
| 6,558
| 0
|
"""Handle processing and setting web content into Readability/cleaned
"""
import httplib
import logging
import lxml
import socket
import urllib2
from BaseHTTPServer import BaseHTTPRequestHandler as HTTPH
from breadability.readable import Article
from urlparse import urlparse
LOG = logging.getLogger(__name__)
class DictObj(dict):
def __getattr__(self, name):
try:
return self.__getitem__(name)
except KeyError:
return super(DictObj, self).__getattr__(name)
USER_AGENT = 'bookie / ({url})'.format(
url="https://github.com/bookieio/bookie",
)
STATUS_CODES = DictObj({
'1': 1, # used for manual parsed
'200': 200,
'404': 404,
'403': 403,
'429': 429, # wtf, 429 doesn't exist...
# errors like 9's
'900': 900, # used for unparseable
'901': 901, # url is not parseable/usable
'902': 902, # socket.error during download
'903': 903, # httplib.IncompleteRead error
'904': 904, # lxml error about document is empty
'905': 905, # httplib.BadStatusLine
})
IMAGE_TYPES = DictObj({
'png': 'image/png',
'jpeg': 'image/jpeg',
'jpg': 'image/jpg',
'gif': 'image/gif',
})
class Readable(object):
"""Understand the base concept of making readable"""
is_error = False
content = None
content_type = None
headers = None
status_message = None
status = None
url = None
def error(self, code, msg):
"""This readable request was an error, assign it so"""
self.status = code
self.status_message = str(msg)
def is_error(self):
"""Check if this is indeed an error or not"""
if self.status not in [STATUS_CODES['200'], ]:
return True
else:
return False
def is_image(self):
"""Check if the current object is an image"""
# we can only get this if we have headers
LOG.debug('content type')
LOG.debug(self.content_type)
if (self.content_type is not None and
self.content_type.lower() in IMAGE_TYPES.values()):
return True
else:
return False
def set_content(self, content, content_type=None):
"""assign the content and potentially content type header"""
self.content = content
if content_type:
self.content_type = content_type
class ReadContent(object):
"""Handle some given content and parse the readable out of it"""
@staticmethod
def parse(content, content_type=None, url=None):
"""Handle the parsing out of the html content given"""
read = Readable()
document = Article(content.read(), url=url)
if not document.readable:
read.error(STATUS_CODES['900'], "Could not parse content.")
else:
read.set_content(document.readable,
content_type=content_type)
read.status = STATUS_CODES['1']
return read
class ReadUrl(object):
"""Fetch a url and read some content out of it"""
@staticmethod
def parse(url):
"""Fetch the given url and parse out a Readable Obj for the content"""
read = Readable()
if not isinstance(url, unicode):
url = url.decode('utf-8')
# first check if we have a special url with the #! content in it
if u'#!' in url:
# rewrite it with _escaped_fragment_=xxx
# we should be doing with this some regex, but cheating for now
idx = url.index(u'#')
fragment = url[idx:]
clean_url = u"{0}?_escaped_fragment_={1}".format(url[0:idx],
fragment)
else:
# we need to clean up the url first, we can't have any anchor tag
# on the url or urllib2 gets cranky
parsed = urlparse(url)
# We cannot parse urls that aren't http, https, or ftp://
if (parsed.scheme not in (u'http', u'https', u'ftp')):
read.error(
STATUS_CODES['901'],
'Invalid url scheme for readable content')
return read
if parsed.query is not None and parsed.query != '':
query = u'?'
else:
query = u''
clean_url = u"{0}://{1}{2}{query}{3}".format(
parsed[0],
parsed[1],
parsed[2],
parsed[4],
query=query)
try:
LOG.debug('Readable Parsed: ' + clean_url)
request = urllib2.Request(clean_url.encode('utf-8'))
request.add_header('User-Agent', USER_AGENT)
opener = urllib2.build_opener()
fh = opener.open(request)
# if it works, then we default to a 200 request
# it's ok, promise :)
read.status = 200
read.headers = fh.info()
read.content_type = read.headers.gettype()
except urllib2.HTTPError, exc:
# for some reason getting a code 429 from a server
if exc.code not in [429]:
read.error(exc.code, HTTPH.responses[exc.code])
else:
read.error(exc.code, unicode(exc.code) + ': ' + clean_url)
except httplib.InvalidURL, exc:
read.error(STATUS_CODES['901'], str(exc))
except urllib2.URLError, exc:
read.error(STATUS_CODES['901'], str(exc))
except httplib.BadStatusLine, exc:
read.error(STATUS_CODES['905'], str(exc))
except socket.error, exc:
read.error(STATUS_CODES['902'], str(exc))
LOG.debug('is error?')
LOG.debug(read.status)
# let's check to make sure we should be parsing this
# for example: don't parse images
if not read.is_error() and not read.is_image():
try:
document = Article(fh.read(), url=clean_url)
if not document.readable:
read.error(STATUS_CODES['900'],
"Could not parse document.")
else:
read.set_content(document.readable)
except socket.error, exc:
read.error(STATUS_CODES['902'], str(exc))
except httplib.IncompleteRead, exc:
read.error(STATUS_CODES['903'], str(exc))
except lxml.etree.ParserError, exc:
read.error(STATUS_CODES['904'], str(exc))
return read
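# A minimal usage sketch (hypothetical URL, not part of the original module):
if __name__ == '__main__':
    result = ReadUrl.parse(u'http://example.com/')
    if result.is_error():
        print 'fetch failed:', result.status, result.status_message
    else:
        print 'content-type:', result.content_type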
|
rearmlkp/Smart_Flash
|
API/migrations/0003_auto_20170201_0842.py
|
Python
|
gpl-3.0
| 917
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-01 08:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('API', '0002_auto_20170201_0840'),
]
operations = [
migrations.AddField(
model_name='card',
name='b
|
ack',
field=models.CharField(default='', max_length=1000),
),
migrations.AddField(
model_name='card',
name='front',
field=models.CharField(default='', max_length=1000),
),
migrations.AddField(
model_name='card',
name='review_count',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='card',
name='type',
field=models.IntegerField(default=0),
),
]
|
skyoo/jumpserver
|
apps/terminal/migrations/0018_auto_20191202_1010.py
|
Python
|
gpl-2.0
| 2,294
| 0
|
# Generated by Django 2.2.7 on 2019-12-02 02:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('terminal', '0017_auto_20191125_0931'),
]
operations = [
migrations.RemoveField(
model_name='session',
name='date_last_active',
),
migrations.AlterField(
model_name='session',
name='remote_addr',
field=models.CharField(blank=True, max_length=128, null=True,
verbose_name='Remote addr'),
),
migrations.AddField(
model_name='session',
name='asset_id',
field=models.CharField(blank=True, db_index=True, default='',
max_length=36),
),
migrations.AddField(
model_name='session',
name='system_user_id',
field=models.CharField(blank=True, db_index=True, default='',
max_length=36),
),
migrations.AddField(
model_name='session',
name='user_id',
field=models.CharField(blank=True, db_index=True, default='',
max_length=36),
),
migrations.AlterField(
model_name='session',
name='asset',
field=models.CharField(db_index=True, max_length=128,
verbose_name='Asset'),
),
migrations.AlterField(
model_name='session',
name='protocol',
field=models.CharField(
choices=[('ssh', 'ssh'), ('rdp', 'rdp'), ('vnc', 'vnc'),
('telnet', 'telnet')], db_index=True, default='ssh',
max_length=8),
),
migrations.AlterField(
model_name='session',
name='system_user',
field=models.CharField(db_index=True, max_length=128,
verbose_name='System user'),
),
migrations.AlterField(
model_name='session',
name='user',
field=models.CharField(db_index=True, max_length=128,
verbose_name='User'),
),
]
|
gialloporpora/yellowpy
|
lastfm.py
|
Python
|
gpl-2.0
| 4,732
| 0.042054
|
# download images from last.fm
# PyQuery is a very powerful module for parsing HTML pages, but it is not distributed with Python by default
# if you want to install it, you first need to install the lxml module
# Some features of this script work only with pyquery, but the most important ones (downloading cover and artist images) work without installing it
try:
from pyquery import PyQuery as pq
pyquery = True
except ImportError:
pyquery = False
# Create an instance of FancyURLopener to avoid being banned from certain sites that reject non-browser user agents
from urllib import FancyURLopener, quote_plus
class MyOpener(FancyURLopener):
version = "Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.9.2.4) Gecko/20100513 Firefox/3.6.4"
import sys
def wget(url, name=""):
""" http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http-using-python """
import urllib2
if name=="":
file_name = url.split('/')[-1]
else:
file_name = name
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (file_name, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
f.close()
def openURL(url):
""" Open a URL using the Firefox user agent to avoid to be banned from getting the page content """
myopener = MyOpener()
u = myopener.open(url)
s = u.read()
u.close()
return s
def downloadURL(url, f):
myopener = MyOpener()
myopener.retrieve(url, filename = f)
def decodeArgs(s,encoding = sys.getfilesystemencoding()):
""" Encode arguments to pass as GET request to lastfm """
return quote_plus(s.decode(encoding).encode("utf-8"))
def findArtistImage_npq(s):
""" Return a dictionary of art images
This function does not use pyquery to parse HTML and it is very rough, improve it if you wish """
import re
regex = re.compile('<img[^>]*>')
images=regex.findall(s)
img=[i for i in images if i.find("catalogueImage")!=-1]
regex=re.compile('src\b*=\b*"([^"]*)"')
try:
link=regex.findall(img[0])
return link[0]
except IndexError: return None
def findAlbumImage_npq(s):
""" Returns album cover without using pyquery, code it is very rough """
import re
try:
s = s.split('<span id="albumCover" class="albumCover coverMega">')[1].split('</span>')[0]
regex=re.compile('src\b*=\b*"([^"]*)"')
img = regex.findall(s)[0]
return img
except IndexError: return None
def findArtistImage_pq(s):
d = pq(s)
img=d('.resource-images img[itemprop="image"]').eq(0)
return img.attr("src")
def findAlbumImage_pq(s):
d=pq(s)
return d('.g.album-cover-wrapper img').eq(0).attr('src')
def getImages(artist, album=None):
if album:
s= openURL(getUrl(artist, album))
name="%s - %s" %(prettyName(artist), prettyName(album))
else:
s = openURL(getUrl(artist))
name=prettyName(artist)
if pyquery:
if album:r = findAlbumImage_pq(s)
else: r = findArtistImage_pq(s)
else:
if album:r = findAlbumImage_npq(s)
else: r = findArtistImage_npq(s)
# Check for some invalid arguments
# This part of the code needs to be improved by raising exceptions to distinguish between different types of errors
if r=="http://cdn.last.fm/flatness/catalogue/noimage/2/default_album_mega.png": r ="Not found"
return {"url" : r, "name" : name}
def getUrl(artist, album = None):
url="http://www.lastfm.it/music/"
url +=decodeArgs(artist)
if (album): url +="/" + decodeArgs(album)
return url
def prettyName(s):
return " ".join(word.capitalize() for word in s.split())
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Download artist's and album's images from Last.fm.")
group = parser.add_mutually_exclusive_group()
parser.add_argument('artist',
help="Artist name")
parser.add_argument("-a","--album", dest="album", default = None,
help="Album title")
group.add_argument("-d", "--download", action="store_true",
help="Download the detected image")
group.add_argument("-f","--file",
help="Name of the downloaded file")
args = parser.parse_args()
img=getImages(args.artist, args.album)
print img["url"]
if args.download:
args.file ="%s.%s" %(img["name"], img["url"].split('.')[-1])
args.file=args.file.decode(sys.getfilesystemencoding())
if args.file:
wget(img["url"], args.file)
print "Image as been downloaded successfully as %s" %args.file
|
gofed/gofed-ng
|
testsuite/helpers/utils.py
|
Python
|
gpl-3.0
| 1,073
| 0.001864
|
#!/bin/python
# gofed-ng - Golang system
# Copyright (C) 2016 Fridolin Pokorny, fpokorny@redhat.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import sys
import os
def service_path2service_name(service_path):
basename = os.path.basename(service_path)
return basename[:-len('.py')]
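# For example (hypothetical path):
# service_path2service_name("/srv/gofed/services/GoChecker.py") returns "GoChecker".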
if __name__ == "__main__":
sys.exit(1)
|
FEniCS/dolfin
|
site-packages/dolfin_utils/meshconvert/meshconvert.py
|
Python
|
lgpl-3.0
| 50,178
| 0.004604
|
# -*- coding: utf-8 -*-
""" Module for converting various mesh formats."""
# Copyright (C) 2006 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Garth N. Wells (gmsh function)
# Modified by Alexander H. Jarosch (gmsh fix)
# Modified by Angelo Simone (Gmsh and Medit fix)
# Modified by Andy R. Terrel (gmsh fix and triangle function)
# Modified by Magnus Vikstrom (metis and scotch function)
# Modified by Bartosz Sawicki (diffpack function)
# Modified by Gideon Simpson (Exodus II function)
# Modified by Kent-Andre Mardal (Star-CD function)
# Modified by Nuno Lopes (fix for emc2 mesh format (medit version 0))
# Modified by Neilen Marais (add gmsh support for reading physical region)
# Modified by Evan Lezar (add support for reading gmsh physical regions on facets)
# Modified by Jan Blechta (add triangle support for marker on edges and attributes on triangles)
#
# Last changed: 2014-02-06
# NOTE: This module does not depend on (py)dolfin being installed.
# NOTE: If future additions need that please import dolfin in a try: except:
# NOTE: clause and tell the user to install dolfin if it is not installed.
from __future__ import print_function
import getopt
import sys
from instant import get_status_output
import re
import warnings
import os.path
import numpy
import six
from . import abaqus
from . import xml_writer
def format_from_suffix(suffix):
"Return format for given suffix"
if suffix == "xml":
return "xml"
elif suffix == "mesh":
return "mesh"
elif suffix == "gmsh":
return "gmsh"
elif suffix == "msh":
return "gmsh"
elif suffix == "gra":
return "metis"
elif suffix == "grf":
return "scotch"
elif suffix == "grid":
return "diffpack"
elif suffix == "inp":
return "abaqus"
elif suffix == "ncdf":
return "NetCDF"
elif suffix =="exo":
return "ExodusII"
elif suffix =="e":
return "ExodusII"
elif suffix == "vrt" or suffix == "cel":
return "StarCD"
elif suffix == "ele" or suffix == "node":
return "Triangle"
else:
_error("Sorry, unknown suffix %s." % suffix)
def mesh2xml(ifilename, ofilename):
"""Convert between .mesh and .xml, parser implemented as a
state machine:
0 = read 'Vertices'
1 = read number of vertices
2 = read next vertex
3 = read 'Triangles' or 'Tetrahedra'
4 = read number of cells
5 = read next cell
6 = done
"""
print("Converting from Medit format (.mesh) to DOLFIN XML format")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Scan file for cell type
cell_type = None
dim = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Remove newline
line = line.strip(" \n\r").split(" ")
# Read dimension either on same line or following line
if line[0] == "Dimension":
if (len(line) == 2):
line = line[1]
else:
line = ifile.readline()
num_dims = int(line)
if num_dims == 2:
cell_type = "triangle"
dim = 2
elif num_dims == 3:
cell_type = "tetrahedron"
dim = 3
break
# Check that we got the cell type
if cell_type == None:
_error("Unable to find cell type.")
# Step to beginning of file
ifile.seek(0)
# Write header
xml_writer.write_header_mesh(ofile, cell_type, dim)
# Current state
state = 0
# Write data
num_vertices_read = 0
num_cells_read = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Skip comments
if line[0] == '#':
continue
# Remove newline
line = line.rstrip("\n\r")
if state == 0:
if line == "Vertices" or line == " Vertices":
state += 1
elif state == 1:
num_vertices = int(line)
xml_writer.write_header_vertices(ofile, num_vertices)
state +=1
elif state == 2:
if num_dims == 2:
(x, y, tmp) = line.split()
x = float(x)
y = float(y)
z = 0.0
elif num_dims == 3:
(x, y, z, tmp) = line.split()
x = float(x)
y = float(y)
z = float(z)
xml_writer.write_vertex(ofile, num_vertices_read, x, y, z)
num_vertices_read +=1
if num_vertices == num_vertices_read:
xml_writer.write_footer_vertices(ofile)
state += 1
elif state == 3:
if (line == "Triangles" or line == " Triangles") and num_dims == 2:
state += 1
if line == "Tetrahedra" and num_dims == 3:
state += 1
elif state == 4:
num_cells = int(line)
xml_writer.write_header_cells(ofile, num_cells)
state +=1
elif state == 5:
if num_dims == 2:
(n0, n1, n2, tmp) = line.split()
n0 = int(n0) - 1
n1 = int(n1) - 1
n2 = int(n2) - 1
xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
elif num_dims == 3:
(n0, n1, n2, n3, tmp) = line.split()
n0 = int(n0) - 1
n1 = int(n1) - 1
n2 = int(n2) - 1
n3 = int(n3) - 1
xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
num_cells_read +=1
if num_cells == num_cells_read:
xml_writer.write_footer_cells(ofile)
state += 1
elif state == 6:
break
# Check that we got all data
if state == 6:
print("Conversion done")
else:
_error("Missing data, unable to convert")
# Write footer
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
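# A hypothetical Medit (.mesh) input, shown only to illustrate the states the
# parser above walks through for a single 2D triangle:
#
#   Dimension
#   2
#   Vertices
#   3
#   0.0 0.0 1
#   1.0 0.0 1
#   0.0 1.0 1
#   Triangles
#   1
#   1 2 3 1
#   End
#
# mesh2xml("unit_triangle.mesh", "unit_triangle.xml") would then write one
# triangle cell over three vertices to the DOLFIN XML file.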
def gmsh2xml(ifilename, handler):
"""Convert between .gmsh v2.0 format (http://www.geuz.org/gmsh/) and .xml,
parser implemented as a state machine:
0 = read 'MeshFormat'
1 = read mesh format data
2 = read 'EndMeshFormat'
3 = read 'Nodes'
4 = read number of vertices
5 = read vertices
6 = read 'EndNodes'
7 = read 'Elements'
8 = read number of cells
9 = read cells
10 = done
Afterwards, extract physical region numbers if they are defined in
the mesh file as a mesh function.
"""
print("Converting from Gmsh format (.msh, .gmsh) to DOLFIN XML format")
# The dimension of the gmsh element types supported here as well as the dolfin cell types for each dimension
gmsh_dim = {15: 0, 1: 1, 2: 2, 4: 3}
cell_type_for_dim = {1: "interval", 2: "triangle", 3: "tetrahedron" }
# the gmsh element types supported for conversion
supported_gmsh_element_types = [1, 2, 4, 15]
# Open files
ifile = open(ifilename, "r")
# Scan file for cell type
cell_type = None
highest_dim = 0
line = ifile.readline()
while line:
# Remove newline
line = line.rstrip("\n\r")
# Read dimension
if line.find("$Elements") == 0:
line = ifile.readline()
num_elements = int(line)
|
stevearc/dynamo3
|
tests/__init__.py
|
Python
|
mit
| 22,100
| 0.000271
|
""" Tests for Dynamo3 """
import sys
import unittest
from decimal import Decimal
from pickle import dumps, loads
from urllib.parse import urlparse
from botocore.exceptions import ClientError
from mock import ANY, MagicMock, patch
from dynamo3 import (
Binary,
Dynamizer,
DynamoDBConnection,
DynamoDBError,
DynamoKey,
GlobalIndex,
Limit,
Table,
ThroughputException,
)
from dynamo3.constants import STRING
from dynamo3.result import Capacity, ConsumedCapacity, Count, ResultSet, add_dicts
class BaseSystemTest(unittest.TestCase):
"""Base class for system tests"""
dynamo: DynamoDBConnection = None # type: ignore
def setUp(self):
super(BaseSystemTest, self).setUp()
# Clear out any pre-existing tables
for tablename in self.dynamo.list_tables():
self.dynamo.delete_table(tablename)
def tearDown(self):
super(BaseSystemTest, self).tearDown()
for tablename in self.dynamo.list_tables():
self.dynamo.delete_table(tablename)
self.dynamo.clear_hooks()
class TestMisc(BaseSystemTest):
"""Tests that don't fit anywhere else"""
def tearDown(self):
super(TestMisc, self).tearDown()
self.dynamo.default_return_capacity = False
def test_connection_host(self):
"""Connection can access host of endpoint"""
urlparse(self.dynamo.host)
def test_connection_region(self):
"""Connection can access name of connected region"""
self.assertTrue(isinstance(self.dynamo.region, str))
def test_connect_to_region(self):
"""Can connect to a dynamo region"""
conn = DynamoDBConnection.connect("us-west-1")
self.assertIsNotNone(conn.host)
def test_connect_to_region_creds(self):
"""Can connect to a dynamo region with credentials"""
conn = DynamoDBConnection.connect(
"us-west-1", access_key="abc", secret_key="12345"
)
self.assertIsNotNone(conn.host)
def test_connect_to_host_without_session(self):
"""Can connect to a dynamo host without passing in a session"""
conn = DynamoDBConnection.connect("us-west-1", host="localhost")
self.assertIsNotNone(conn.host)
@patch("dynamo3.connection.time")
def test_retry_on_throughput_error(self, time):
"""Throughput exceptions trigger a retry of the request"""
def call(*_, **__):
"""Dummy service call"""
response = {
"ResponseMetadata": {
"HTTPStatusCode": 400,
},
"Error": {
"Code": "ProvisionedThroughputExceededException",
"Message": "Does not matter",
},
}
raise ClientError(response, "list_tables")
with patch.object(self.dynamo, "client") as client:
client.list_tables.side_effect = call
with self.assertRaises(ThroughputException):
self.dynamo.call("list_tables")
self.assertEqual(len(time.sleep.mock_calls), self.dynamo.request_retries - 1)
self.assertTrue(time.sleep.called)
def test_describe_missing(self):
"""Describing a missing table returns None"""
ret = self.dynamo.describe_table("foobar")
self.assertIsNone(ret)
def test_magic_table_props(self):
"""Table can look up properties on response object"""
hash_key = DynamoKey("id")
self.dynamo.create_table("foobar", hash_key=hash_key)
ret = self.dynamo.describe_table("foobar")
assert ret is not None
self.assertEqual(ret.item_count, ret["ItemCount"])
with self.assertRaises(KeyError):
self.assertIsNotNone(ret["Missing"])
def test_magic_index_props(self):
"""Index can look up properties on response object"""
index = GlobalIndex.all("idx-name", DynamoKey("id"))
index.response = {"FooBar": 2}
self.assertEqual(index["FooBar"], 2)
with self.assertRaises(KeyError):
self.assertIsNotNone(index["Missing"])
def test_describe_during_delete(self):
"""Describing a table during a delete operation should not crash"""
response = {
"ItemCount": 0,
"ProvisionedThroughput": {
"NumberOfDecreasesToday": 0,
"ReadCapacityUnits": 5,
"WriteCapacityUnits": 5,
},
"TableName": "myTableName",
"TableSizeBytes": 0,
"TableStatus": "DELETING",
}
table = Table.from_response(response)
self.assertEqual(table.status, "DELETING")
def test_delete_missing(self):
"""Deleting a missing table returns False"""
ret = self.dynamo.delete_table("foobar")
self.assertTrue(not ret)
def test_re_raise_passthrough(self):
"""DynamoDBError can re-raise itself if missing original exception"""
err = DynamoDBError(400, Code="ErrCode", Message="Ouch", args={})
caught = False
try:
err.re_raise()
except DynamoDBError as e:
caught = True
self.assertEqual(err, e)
self.assertTrue(caught)
def test_re_raise(self):
"""DynamoDBError can re-raise itself with stacktrace of original exc"""
caught = False
try:
try:
raise Exception("Hello")
except Exception as e1:
err = DynamoDBError(
400,
Code="ErrCode",
Message="Ouch",
args={},
exc_info=sys.exc_info(),
)
err.re_raise()
except DynamoDBError as e:
caught = True
import traceback
tb = traceback.format_tb(e.__traceback__)
self.assertIn("Hello", tb[-1])
self.assertEqual(e.status_code, 400)
self.assertTrue(caught)
def test_default_return_capacity(self):
"""When default_return_capacity=True, always return capacity"""
self.dynamo.default_return_capacity = True
with patch.object(self.dynamo, "call") as call:
call().get.return_value = None
rs = self.dynamo.scan("foobar")
list(rs)
call.assert_called_with(
"scan",
TableName="foobar",
ReturnConsumedCapacity="INDEXES",
ConsistentRead=False,
|
)
def test_list_tables_page(self):
"""Call to ListTables should page results"""
hash_key = DynamoKey("id")
for i in range(120):
self.dynamo.create_table("table%d" % i, hash_key=hash_key)
tables = list(self.dynamo.list_tables(110))
self.assertEqual(len(tables), 110)
def test_limit_complete(self):
"""A limit with item_capacity = 0 is 'complete'"""
limit = Limit(item_limit=0)
self.assertTrue(limit.complete)
def test_wait_create_table(self):
"""Create table shall wait for the table to come online."""
tablename = "foobar_wait"
hash_key = DynamoKey("id")
self.dynamo.create_table(tablename, hash_key=hash_key, wait=True)
self.assertIsNotNone(self.dynamo.describe_table(tablename))
def test_wait_delete_table(self):
"""Delete table shall wait for the table to go offline."""
tablename = "foobar_wait"
hash_key = DynamoKey("id")
self.dynamo.create_table(tablename, hash_key=hash_key, wait=True)
result = self.dynamo.delete_table(tablename, wait=True)
self.assertTrue(result)
class TestDataTypes(BaseSystemTest):
"""Tests for Dynamo data types"""
def make_table(self):
"""Convenience method for making a table"""
hash_key = DynamoKey("id")
self.dynamo.create_table("foobar", hash_key=hash_key)
def test_string(self):
"""Store and retrieve a string"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "abc"})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["id"], "abc")
self.
|
bloer/bgexplorer
|
bgexplorer/modelviewer/evaldata.py
|
Python
|
bsd-2-clause
| 20,163
| 0.000446
|
from itertools import chain
import gzip
import multiprocessing
import time
import numpy as np
from enum import Enum
from uncertainties import unumpy
from io import BytesIO
from flask import abort
import pymongo
try:
import matplotlib
import matplotlib.figure
import matplotlib.pyplot
except ImportError:
matplotlib = None
from .. import utils
from bgmodelbuilder import units
from bgmodelbuilder.simulationsdb.histogram import Histogram
from bgmodelbuilder.common import try_reduce
import logging
log = logging.getLogger(__name__)
# todo: take component, spec, groupname, groupval? with class?
class ModelEvaluator(object):
""" Utiilty class to generate data tables and spectra for non-temp models """
class StatusCodes(Enum):
NoEntryInCache = "Cache query returned 0 hits"
def __init__(self, model, modeldb=None, simsdbview=None,
bypasscache=False, writecache=True, cacheimages=True):
""" Constructor
Args:
model (BgModel): model object to evaluate
modeldb (ModelDB): database with models
simsdbview (SimsDbView): defines vals and spectra
bypasscache (bool): If True, do not search for cached value
writecache (bool): If False, do not write calculation results to cache
cacheimages (bool): If False, don't cache image generation
"""
self.model = model
self.cache = None
if not modeldb:
modeldb = utils.get_modeldb()
if modeldb and not modeldb.is_model_temp(model.id):
self.cache = modeldb.getevalcache()
self.bypasscache = bypasscache
self.writecache = writecache
self.cacheimages = cacheimages
self.simsdbview = simsdbview
if simsdbview is None:
self.simsdbview = utils.get_simsdbview(model=model)
self.simsdb = self.simsdbview.simsdb
def _valtostr(self, valname, val, match):
# convert to unit if provided
unit = self.simsdbview.values_units.get(valname, None)
if unit:
try:
|
val = val.to(unit).m
except AttributeError: # not a Quantity...
pass
except units.errors.DimensionalityError as e:
if val != 0:
log.warning(e)
val = getattr(val, 'm', 0)
# convert to string
val = "{:.3g}".format(val)
if match.spec.islimit:
val = '<'+val
return val
def _applyspecunit(self, specname, spec):
unit = self.simsdbview.spectra_units.get(specname, None)
if unit is not None:
try:
spec.hist.ito(unit)
except AttributeError: # not a quantity
pass
return spec
def _evalmatch(self, match, dovals=True, dogroups=True, dospectra=False):
""" Evaluate SimDocEvals and grous for a match
Returns:
dict
"""
toeval = []
if dovals:
toeval.extend(self.simsdbview.values.values())
if dospectra:
toeval.extend(self.simsdbview.spectra.values())
result = self.simsdb.evaluate(toeval, match)
doc = dict()
if dovals:
doc['values'] = [self._valtostr(name, val, match) for name, val in
zip(self.simsdbview.values.keys(), result)]
result = result[len(self.simsdbview.values):]
if dospectra:
doc['spectra'] = [self._applyspecunit(name, spec) for name, spec in
zip(self.simsdbview.spectra.keys(), result)]
if dogroups:
doc['groups'] = self.simsdbview.evalgroups(match).values()
return doc
def datatable(self, doallcache=False):
""" Generate the datatable with line for each sim data match,
return the result as a gzip compressed blob
Args:
doallcache (bool): If True, while evaluating all values, also
generate spectra. This slows down datatable
generation, but speeds up caching overall
"""
cached = self.readfromcache("datatable")
if cached is not self.StatusCodes.NoEntryInCache:
return cached
start = time.monotonic()
log.info(f"Generating datatable for model {self.model.id}")
# define some useful helper functions
def _valhead(val):
suffix = ''
if val in self.simsdbview.values_units:
suffix = f' [{self.simsdbview.values_units[val]}]'
return f'V_{val}{suffix}'
# prepare output buffer
buf = BytesIO()
datatable = gzip.open(buf, mode='wt', newline='\n')
# write the header
header = '\t'.join(chain(['ID'],
(f'G_{g}' for g in self.simsdbview.groups),
(_valhead(v)
for v in self.simsdbview.values.keys())
))
datatable.write(header)
datatable.write('\n')
for match in self.model.simdata.values():
doc = self._evalmatch(match, dovals=True, dogroups=True,
dospectra=doallcache)
dtline = '\t'.join(chain([match.id],
[str(g) for g in doc['groups']],
doc['values']))
datatable.write(dtline)
datatable.write('\n')
if doallcache:
for name, spec in zip(self.simsdbview.spectra, doc['spectra']):
self.writetocache(name, spec, match=match, fmt='hist')
datatable.flush()
result = buf.getvalue()
self.writetocache('datatable', result)
log.info("Finished evaluation of data for model %s in %s seconds",
self.model.id, time.monotonic()-start)
return result
def spectrum(self, specname, component=None, spec=None, match=None,
matches=None):
return self._spectrum_impl(specname, component, spec, match, matches,
fmt="hist")
def spectrum_image(self, specname, component=None, spec=None, match=None,
matches=None):
return self._spectrum_impl(specname, component, spec, match, matches,
fmt="png")
def fillallcache(self, genimages=False):
""" Loop over all matches, components, and spectra in the model and
create cache entries for all spectra
Args:
genimages (bool): If True, also generate PNG images
"""
if not self.cacheimages:
genimages = False
start = time.monotonic()
log.info(f"Generating full cache for model {self.model.id}")
self.datatable(doallcache=True)
specfunc = self.spectrum_image if genimages else self.spectrum
for specname in self.simsdbview.spectra:
for match in self.model.getsimdata():
specfunc(specname, match=match)
for comp in self.model.getcomponents():
specfunc(specname, component=comp)
for spec in self.model.getspecs(rootonly=True):
specfunc(specname, spec=spec)
# also gen the top-level model hists
specfunc(specname)
log.info("Finished caching data for model %s in %s seconds",
self.model.id, time.monotonic()-start)
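# A hedged usage sketch (the model, modeldb and view objects here are
# assumptions, not defined in this module):
#
#   evaluator = ModelEvaluator(model, modeldb=modeldb, simsdbview=view)
#   blob = evaluator.datatable()            # gzip-compressed TSV bytes
#   evaluator.fillallcache(genimages=False) # pre-compute spectra for the cache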
def _spectrum_impl(self, specname, component=None, spec=None, match=None,
matches=None, fmt="hist"):
if match and matches:
raise ValueError("Only one of `match` and `matches` can be provided")
# see if `matches` is a single match
try:
if len(matches) == 1:
match = matches[0]
matches = None
except TypeError:
# matches has no len and may be a generator
pass
# if 'match' or 'matches' is defined, we ignore component and spec
if match or matches:
|
liwanggui/bssh
|
bssh/network.py
|
Python
|
lgpl-2.1
| 1,271
| 0
|
# -*- coding: utf-8 -*-
import socket
from paramiko import SSHClient, AutoAddPolicy, AuthenticationException
from bssh.utils import env
from bssh.auth import get_pkey
from bssh.logger import logger
def connect(
hostname=None,
port=22,
username=None,
password=None,
pkey=None,
pkey_pwd=None,
sock=None,
timeout=env.timeout,
**kwargs
):
"""Connect the remote ssh server"""
passauth = True if password else False
pkey = pkey if passauth else get_pkey(pkey, pkey_pwd)
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
try:
client.connect(hostname=hostname,
port=int(port),
username=username,
password=password,
pkey=pkey,
sock=sock,
|
timeout=timeout)
logger.login.debug('%s connect successfully.' % hostname)
return client
except AuthenticationException:
logger.login.error('%s Validation failed.' % hostname)
except socket.error:
logger.login.error('%s Network Error' % hostname)
except Exception as e:
logger.login.error('%s %s' % (hostname, str(e)))
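# A minimal usage sketch (hypothetical host and credentials):
#
#   client = connect(hostname='192.0.2.10', port=22, username='deploy',
#                    password='secret')
#   if client:
#       stdin, stdout, stderr = client.exec_command('uptime')
#       print(stdout.read())
#       client.close()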
|
ulif/pulp
|
server/pulp/server/managers/consumer/history.py
|
Python
|
gpl-2.0
| 7,603
| 0.002104
|
"""
Contains manager class and exceptions for operations for recording and retrieving
consumer history events.
"""
import datetime
import isodate
import pymongo
from pulp.common import dateutils
from pulp.server import config
from pulp.server.db.model.consumer import Consumer, ConsumerHistoryEvent
from pulp.server.exceptions import InvalidValue, MissingResource
from pulp.server.managers import factory as managers_factory
# Event Types
TYPE_CONSUMER_REGISTERED = 'consumer_registered'
TYPE_CONSUMER_UNREGISTERED = 'consumer_unregistered'
TYPE_REPO_BOUND = 'repo_bound'
TYPE_REPO_UNBOUND = 'repo_unbound'
TYPE_CONTENT_UNIT_INSTALLED = 'content_unit_installed'
TYPE_CONTENT_UNIT_UNINSTALLED = 'content_unit_uninstalled'
TYPE_UNIT_PROFILE_CHANGED = 'unit_profile_changed'
TYPE_ADDED_TO_GROUP = 'added_to_group'
TYPE_REMOVED_FROM_GROUP = 'removed_from_group'
TYPES = (TYPE_CONSUMER_REGISTERED, TYPE_CONSUMER_UNREGISTERED, TYPE_REPO_BOUND,
TYPE_REPO_UNBOUND, TYPE_CONTENT_UNIT_INSTALLED, TYPE_CONTENT_UNIT_UNINSTALLED,
TYPE_UNIT_PROFILE_CHANGED, TYPE_ADDED_TO_GROUP, TYPE_REMOVED_FROM_GROUP)
# Maps user entered query sort parameters to the pymongo representation
SORT_ASCENDING = 'ascending'
SORT_DESCENDING = 'descending'
SORT_DIRECTION = {
SORT_ASCENDING: pymongo.ASCENDING,
SORT_DESCENDING: pymongo.DESCENDING,
}
class ConsumerHistoryManager(object):
"""
Performs consumer related CRUD operations
"""
# -- internal ----------------------------------------
def _originator(self):
'''
Returns the value to use as the originator of the consumer event (either the
consumer itself or an admin user).
@return: login of the originator value to use in the event
@rtype: string
'''
return managers_factory.principal_manager().get_principal().login
def record_event(self, consumer_id, event_type, event_details=None):
"""
@ivar consumer_id: identifies the consumer
@type id: str
@param type: event type
@type type: str
@param details: event details
@type details: dict
@raises MissingResource: if the given consumer does not exist
@raises InvalidValue: if any of the fields is unacceptable
"""
# Check that consumer exists for all except registration event
existing_consumer = Consumer.get_collection().find_one({'id': consumer_id})
if not existing_consumer and event_type != TYPE_CONSUMER_UNREGISTERED:
raise MissingResource(consumer=consumer_id)
invalid_values = []
if event_type not in TYPES:
invalid_values.append('event_type')
if event_details is not None and not isinstance(event_details, dict):
invalid_values.append('event_details')
if invalid_values:
raise InvalidValue(invalid_values)
event = ConsumerHistoryEvent(consumer_id, self._originator(), event_type, event_details)
ConsumerHistoryEvent.get_collection().save(event)
def query(self, consumer_id=None, event_type=None, limit=None, sort='descending',
start_date=None, end_date=None):
'''
Queries the consumer history storage.
@param consumer_id: if specified, events will only be returned for the
consumer referenced
@type consumer_id: string or number
@param event_type: if specified, only events of the given type are returned
@type event_type: string (enumeration found in TYPES)
@param limit: if specified, the query will only return up to this amount of
entries; default is to not limit the entries returned
@type limit: number greater than zero
@param sort: indicates the sort direction of the results; results are sorted
by timestamp
@type sort: string; valid values are 'ascending' and 'descending'
@param start_date: if specified, no events prior to this date will be returned
@type start_date: datetime.datetime
@param end_date: if specified, no events after this date will be returned
@type end_date: datetime.datetime
@return: list of consumer history entries that match the given parameters;
empty list (not None) if no matching entries are found
@rtype: list of ConsumerHistoryEvent instances
@raises MissingResource: if the given consumer does not exist
@raises InvalidValue: if any of the fields is unacceptable
'''
invalid_values = []
if event_type is not None and event_type not in TYPES:
invalid_values.append('event_type')
# Verify the limit makes sense
if limit is not None and limit < 1:
invalid_values.append('limit')
# Verify the sort direction is valid
if sort not in SORT_DIRECTION:
invalid_values.append('sort')
# Verify that start_date and end_date is valid
if start_date is not None:
try:
dateutils.parse_iso8601_date(start_date)
except (ValueError, isodate.ISO8601Error):
invalid_values.append('start_date')
if end_date is not None:
try:
dateutils.parse_iso8601_date(end_date)
except (ValueError, isodate.ISO8601Error):
invalid_values.append('end_date')
if invalid_values:
raise InvalidValue(invalid_values)
# Assemble the mongo search parameters
search_params = {}
if consumer_id:
search_params['consumer_id'] = consumer_id
if event_type:
search_params['type'] = event_type
# Add in date range limits if specified
|
date_range = {}
if start_date:
date_range['$gte'] = start_date
if end_date:
date_range['$lte'] = end_date
if len(date_range) > 0:
search_params['timestamp'] = date_range
# Determine the correct mongo cursor to retrieve
if len(search_params) == 0:
cursor = ConsumerHistoryEvent.get_collection().find()
else:
cursor = ConsumerHistoryEvent.get_collection().find(search_params)
# Sort by most recent entry first
cursor.sort('timestamp', direction=SORT_DIRECTION[sort])
# If a limit was specified, add it to the cursor
if limit:
cursor.limit(limit)
# Finally convert to a list before returning
return list(cursor)
def event_types(self):
return TYPES
def cull_history(self, lifetime):
'''
Deletes all consumer history entries that are older than the given lifetime.
@param lifetime: length in days; history entries older than this many days old
are deleted in this call
@type lifetime: L{datetime.timedelta}
'''
now = datetime.datetime.now(dateutils.local_tz())
limit = dateutils.format_iso8601_datetime(now - lifetime)
spec = {'timestamp': {'$lt': limit}}
self.collection.remove(spec, safe=False)
def _get_lifetime(self):
'''
Returns the configured maximum lifetime for consumer history entries.
@return: time in days
@rtype: L{datetime.timedelta}
'''
days = config.config.getint('consumer_history', 'lifetime')
return datetime.timedelta(days=days)
# -- functions ----------------------------------------------------------------
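# A hedged usage sketch (the consumer id is made up and assumed to already
# exist in the consumers collection):
#
#   manager = ConsumerHistoryManager()
#   manager.record_event('consumer-1', TYPE_CONSUMER_REGISTERED)
#   events = manager.query(consumer_id='consumer-1', sort=SORT_DESCENDING, limit=10)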
|
wrigri/libcloud
|
libcloud/common/openstack_identity.py
|
Python
|
apache-2.0
| 48,080
| 0.000021
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common / shared code for handling authentication against OpenStack identity
service (Keystone).
"""
import sys
import datetime
from libcloud.utils.py3 import httplib
from libcloud.utils.iso8601 import parse_date
from libcloud.common.base import ConnectionUserAndKey, Response
from libcloud.compute.types import (LibcloudError, InvalidCredsError,
MalformedResponseError)
try:
import simplejson as json
except ImportError:
import json
AUTH_API_VERSION = '1.1'
# Auth versions which contain token expiration information.
AUTH_VERSIONS_WITH_EXPIRES = [
'1.1',
'2.0',
'2.0_apikey',
'2.0_password',
'3.0',
'3.x_password'
]
# How many seconds to subtract from the auth token expiration time before
# testing if the token is still valid.
# The time is subtracted to account for the HTTP request latency and prevent
# user from getting "InvalidCredsError" if token is about to expire.
AUTH_TOKEN_EXPIRES_GRACE_SECONDS = 5
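# Illustrative check only (the real expiration logic lives in the connection
# classes below, which are not shown in full here):
#
#   grace = datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS)
#   token_still_valid = (expiration - grace) > datetime.datetime.utcnow()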
__all__ = [
'OpenStackIdentityVersion',
'OpenStackIdentityDomain',
'OpenStackIdentityProject',
'OpenStackIdentityUser',
'OpenStackIdentityRole',
'OpenStackServiceCatalog',
'OpenStackServiceCatalogEntry',
'OpenStackServiceCatalogEntryEndpoint',
'OpenStackIdentityEndpointType',
'OpenStackIdentityConnection',
'OpenStackIdentity_1_0_Connection',
'OpenStackIdentity_1_1_Connection',
'OpenStackIdentity_2_0_Connection',
'OpenStackIdentity_3_0_Connection',
'get_class_for_auth_version'
]
class OpenStackIdentityEndpointType(object):
"""
Enum class for openstack identity endpoint type.
"""
INTERNAL = 'internal'
EXTERNAL = 'external'
ADMIN = 'admin'
class OpenStackIdentityTokenScope(object):
"""
Enum class for openstack identity token scope.
"""
PROJECT = 'project'
DOMAIN = 'domain'
UNSCOPED = 'unscoped'
class OpenStackIdentityVersion(object):
def __init__(self, version, status, updated, url):
self.version = version
self.status = status
self.updated = updated
self.url = url
def __repr__(self):
return (('<OpenStackIdentityVersion version=%s, status=%s, '
'updated=%s, url=%s>' %
(self.version, self.status, self.updated, self.url)))
class OpenStackIdentityDomain(object):
def __init__(self, id, name, enabled):
self.id = id
self.name = name
self.enabled = enabled
def __repr__(self):
return (('<OpenStackIdentityDomain id=%s, name=%s, enabled=%s>' %
(self.id, self.name, self.enabled)))
class OpenStackIdentityProject(object):
def __init__(self, id, name, description, enabled, domain_id=None):
self.id = id
self.name = name
self.description = description
self.enabled = enabled
self.domain_id = domain_id
def __repr__(self):
return (('<OpenStackIdentityProject id=%s, domain_id=%s, name=%s, '
'enabled=%s>' %
(self.id, self.domain_id, self.name, self.enabled)))
class OpenStackIdentityRole(object):
def __init__(self, id, name, description, enabled):
self.id = id
self.name = name
self.description = description
self.enabled = enabled
def __repr__(self):
return (('<OpenStackIdentityRole id=%s, name=%s, description=%s, '
'enabled=%s>' % (self.id, self.name, self.description,
self.enabled)))
class OpenStackIdentityUser(object):
def __init__(self, id, domain_id, name, email, description, enabled):
self.id = id
self.domain_id = domain_id
self.name = name
self.email = email
self.description = description
self.enabled = enabled
def __repr__(self):
return (('<OpenStackIdentityUser id=%s, domain_id=%s, name=%s, '
'email=%s, enabled=%s>' % (self.id, self.domain_id, self.name,
self.email, self.enabled)))
class OpenStackServiceCatalog(object):
"""
http://docs.openstack.org/api/openstack-identity-service/2.0/content/
This class should be instantiated with the contents of the
'serviceCatalog' in the auth response. This will do the work of figuring
out which services actually exist in the catalog as well as split them up
by type, name, and region if available
"""
_auth_version = None
_service_catalog = None
def __init__(self, service_catalog, auth_version=AUTH_API_VERSION):
self._auth_version = auth_version
# Check this way because there are a couple of different 2.0_*
# auth types.
if '3.x' in self._auth_version:
entries = self._parse_service_catalog_auth_v3(
service_catalog=service_catalog)
elif '2.0' in self._auth_version:
entries = self._parse_service_catalog_auth_v2(
service_catalog=service_catalog)
elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):
entries = self._parse_service_catalog_auth_v1(
service_catalog=service_catalog)
else:
raise LibcloudError('auth version "%s" not supported'
% (self._auth_version))
# Force consistent ordering by sorting the entries
entries = sorted(entries,
key=lambda x: x.service_type + (x.service_name or ''))
        self._entries = entries  # stores all the service catalog entries
def get_entries(self):
"""
Return all the entries for this service catalog.
:rtype: ``list`` of :class:`.OpenStackServiceCatalogEntry`
"""
return self._entries
def get_catalog(self):
"""
Deprecated in the favor of ``get_entries`` method.
"""
return self.get_entries()
def get_public_urls(self, service_type=None, name=None):
"""
Retrieve all the available public (external) URLs for the provided
service type and name.
"""
endpoints = self.get_endpoints(service_type=service_type,
name=name)
result = []
for endpoint in endpoints:
endpoint_type = endpoint.endpoint_type
if endpoint_type == OpenStackIdentityEndpointType.EXTERNAL:
result.append(endpoint.url)
return result
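    # Illustrative usage sketch (not from the original file); the auth body and
    # the service type/name values below are assumptions for the example only:
    #
    #   catalog = OpenStackServiceCatalog(auth_body['access']['serviceCatalog'],
    #                                     auth_version='2.0')
    #   urls = catalog.get_public_urls(service_type='compute', name='nova')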
def get_endpoints(self, service_type=None, name=None):
"""
Retrieve all the endpoints for the provided service type and name.
:rtype: ``list`` of :class:`.OpenStackServiceCatalogEntryEndpoint`
"""
endpoints = []
for entry in self._entries:
# Note: "if XXX and YYY != XXX" comparison is used to support
# partial lookups.
# This allows user to pass in only one argument to the method (only
# service_type or name), both of them or neither.
if service_type and entry.service_type != service_type:
continue
if name and entry.service_name != name:
continue
for endpoint in entry.endpoints:
endpoints.append(endpoint)
return endpoints
def get_endpoint(self, service_type=None, name=Non
|
leighpauls/k2cro4
|
third_party/python_26/Lib/site-packages/win32/lib/win32traceutil.py
|
Python
|
bsd-3-clause
| 1,423
| 0.021082
|
# This is a helper for the win32trace module
# If imported from a normal Python program, it sets up sys.stdout and sys.stderr
# so output goes to the collector.
# If run from the command line, it creates a collector loop.
# Eg:
# C:>start win32traceutil.py (or python.exe win32traceutil.py)
# will start a process with a (pretty much) blank screen.
#
# then, switch to a DOS prompt, and type:
# C:>python.exe
# Python 1.4 etc...
# >>> import win32traceutil
# Redirecting output to win32trace remote collector
# >>> print "Hello"
# >>>
# And the output will appear in the first collector process.
# Note - the client or the collector can be started first.
# There is a 64k buffer. If this gets full, it is reset, and new
# output appended from the start.
import win32trace
def RunAsCollector():
import sys
try:
import win32api
win32api.SetConsoleTitle("Python Trace Collector")
except:
pass # Oh well!
win32trace.InitRead()
print "Collecting Python Trace Output..."
# import win32api;win32api.DebugBreak()
while 1:
        # print win32trace.blockingread()
sys.stdout.write(win32trace.blockingread())
def SetupForPrint():
win32trace.InitWrite()
try: # Under certain servers, sys.stdout may be invalid.
print "Redirecting output to win32trace remote collector"
except:
pass
win32trace.setprint() # this works in an rexec environment.
if __name__=='__main__':
RunAsCollector()
else:
SetupForPrint()
|
RobertoPrevato/flask-three-template
|
dalmongo/__init__.py
|
Python
|
mit
| 267
| 0.003745
|
from pymongo import MongoClient
from dalmongo import configuration
# get the instance of MongoDB client
client = MongoClient(configuration.MONGODB_HOST, configuration.MONGODB_PORT)
|
# get the main application database
db = getattr(client, configuration.MONGODB_NAME)
|
MichaelCurrin/twitterverse
|
app/utils/insert/lookup_and_store_tweets.py
|
Python
|
mit
| 1,724
| 0.00174
|
#!/usr/bin/env python
"""
Lookup and Store Tweets utility.
Lookup tweets on Twitter by the GUID and then stores the profile and tweet
data in the local db.
TODO: Use the system category and campaign as set in app.conf file.
"""
import argparse
import os
import sys
# Allow imports to be done when executing this file directly.
sys.path.insert(
0,
os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
),
)
from lib import tweets
from lib.twitter_api import authentication
def main():
"""
Command-line interface for Lookup and Store Tweets utility.
"""
parser = argparse.ArgumentParser(
description="""Lookup and Store Tweets utility. Fetches a tweet from
the Twitter API given its GUID. Stores or updates the author
Profile and Tweet in the db."""
)
parser.add_argument(
"tweetGUIDs",
metavar="TWEET_GUID",
nargs="+",
help="""List of one or more Tweet GUIDs to lookup, separated by spaces.
The Tweet 'GUID' in the local db is equivalent to the Tweet 'ID'
on the Twitter API.""",
)
parser.add_argument(
"-u",
"--update-all-fields",
action="store_true",
help="""If supplied, update all fields when updating an existing
local Tweet record. Otherwise, the default behavior is to
only update the favorite and retweet counts of the record.""",
)
args = parser.parse_args()
APIConn = authentication.getAppOnlyConnection()
tweets.lookupTweetGuids(
APIConn, args.tweetGUIDs, onlyUpdateEngagements=not (args.update_all_fields)
)
if __name__ == "__main__":
main()
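# Illustrative invocation sketch (not part of the original script); the tweet
# GUID below is a made-up example value:
#
#   python lookup_and_store_tweets.py 1234567890123456789 -u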
|
shortbloke/home_assistant_config
|
custom_components/nodered/switch.py
|
Python
|
mit
| 7,962
| 0.001005
|
"""Sensor platform for nodered."""
import json
import logging
import voluptuous as vol
from homeassistant.components.websocket_api import event_message
from homeassistant.const import (
CONF_ENTITY_ID,
CONF_ICON,
CONF_ID,
CONF_STATE,
CONF_TYPE,
EVENT_STATE_CHANGED,
)
from homeassistant.core import callback
from homeassistant.helpers import entity_platform, trigger
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import ToggleEntity
from . import NodeRedEntity
from .const import (
CONF_CONFIG,
CONF_DATA,
CONF_DEVICE_TRIGGER,
CONF_OUTPUT_PATH,
CONF_PAYLOAD,
CONF_REMOVE,
CONF_SKIP_CONDITION,
CONF_SUB_TYPE,
CONF_SWITCH,
CONF_TRIGGER_ENTITY_ID,
DOMAIN,
NODERED_DISCOVERY_NEW,
SERVICE_TRIGGER,
SWITCH_ICON,
)
from .utils import NodeRedJSONEncoder
_LOGGER = logging.getLogger(__name__)
SERVICE_TRIGGER_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_TRIGGER_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_SKIP_CONDITION): cv.boolean,
vol.Optional(CONF_OUTPUT_PATH): cv.boolean,
vol.Optional(CONF_PAYLOAD): vol.Extra,
}
)
EVENT_TRIGGER_NODE = "automation_triggered"
EVENT_DEVICE_TRIGGER = "device_trigger"
TYPE_SWITCH = "switch"
TYPE_DEVICE_TRIGGER = "device_trigger"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Switch platform."""
async def async_discover(config, connection):
await _async_setup_entity(hass, config, async_add_entities, connection)
async_dispatcher_connect(
hass,
NODERED_DISCOVERY_NEW.format(CONF_SWITCH),
async_discover,
)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_TRIGGER, SERVICE_TRIGGER_SCHEMA, "async_trigger_node"
)
async def _async_setup_entity(hass, config, async_add_entities, connection):
"""Set up the Node-RED Switch."""
switch_type = config.get(CONF_SUB_TYPE, TYPE_SWITCH)
switch_class = (
NodeRedDeviceTrigger if switch_type == TYPE_DEVICE_TRIGGER else NodeRedSwitch
)
async_add_entities([switch_class(hass, config, connection)])
class NodeRedSwitch(ToggleEntity, NodeRedEntity):
"""Node-RED Switch class."""
def __init__(self, hass, config, connection):
"""Initialize the switch."""
super().__init__(hass, config)
self._message_id = config[CONF_ID]
self._connection = connection
self._state = config.get(CONF_STATE, True)
self._component = CONF_SWITCH
self._available = True
@property
def is_on(self) -> bool:
"""Return the state of the switch."""
return self._state
@property
def icon(self):
"""Return the icon of the sensor."""
return self._config.get(CONF_ICON, SWITCH_ICON)
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the switch."""
self._update_node_red(False)
async def async_turn_on(self, **kwargs) -> None:
"""Turn on the switch."""
self._update_node_red(True)
async def async_trigger_node(self, **kwargs) -> None:
"""Trigger node in Node-RED."""
data = {}
data[CONF_ENTITY_ID] = kwargs.get(CONF_TRIGGER_ENTITY_ID)
data[CONF_SKIP_CONDITION] = kwargs.get(CONF_SKIP_CONDITION, False)
data[CONF_OUTPUT_PATH] = kwargs.get(CONF_OUTPUT_PATH, True)
if kwargs.get(CONF_PAYLOAD) is not None:
data[CONF_PAYLOAD] = kwargs[CONF_PAYLOAD]
self._connection.send_message(
event_message(
self._message_id,
{CONF_TYPE: EVENT_TRIGGER_NODE, CONF_DATA: data},
)
)
def _update_node_red(self, state):
self._connection.send_message(
event_message(
self._message_id, {CONF_TYPE: EVENT_STATE_CHANGED, CONF_STATE: state}
)
)
@callback
def handle_lost_connection(self):
"""Set availability to False when disconnected."""
self._available = False
self.async_write_ha_state()
@callback
def handle_discovery_update(self, msg, connection):
"""Update entity config."""
if CONF_REMOVE in msg:
# Remove entity
self.hass.async_create_task(self.async_remove())
else:
self._available = True
self._state = msg[CONF_STATE]
self._config = msg[CONF_CONFIG]
self._message_id = msg[CONF_ID]
self._connection = connection
self._connection.subscriptions[msg[CONF_ID]] = self.handle_lost_connection
self.async_write_ha_state()
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
self._connection.subscriptions[self._message_id] = self.handle_lost_connection
class NodeRedDeviceTrigger(NodeRedSwitch):
"""Node-RED Device Trigger class."""
def __init__(self, hass, config, connection):
"""Initialize the switch."""
super().__init__(hass, config, connection)
self._trigger_config = config[CONF_DEVICE_TRIGGER]
self._unsubscribe_device_trigger = None
@callback
def handle_lost_connection(self):
"""Set remove device trigger when disconnected."""
super().handle_lost_connection()
self.remove_device_trigger()
async def add_device_trigger(self):
"""Validate device trigger."""
@callback
def forward_trigger(event, context=None):
"""Forward events to websocket."""
message = event_message(
self._message_id,
{"type": EVENT_DEVICE_TRIGGER, "data": event["trigger"]},
)
self._connection.send_message(
json.dumps(message, cls=NodeRedJSONEncoder, allow_nan=False)
)
try:
trigger_config = await trigger.async_validate_trigger_config(
self.hass, [self._trigger_config]
)
self._unsubscribe_device_trigger = await trigger.async_initialize_triggers(
self.hass,
trigger_config,
forward_trigger,
DOMAIN,
DOMAIN,
_LOGGER.log,
)
except vol.MultipleInvalid as ex:
_LOGGER.error(
f"Error initializing device trigger '{self._node_id}': {str(ex)}",
)
def remove_device_trigger(self):
"""Remove device trigger."""
self._trigger_config = None
if self._unsubscribe_device_trigger is not None:
            _LOGGER.info(f"removed device trigger - {self._server_id} {self._node_id}")
self._unsubscribe_device_trigger()
|
self._unsubscribe_device_trigger = None
@callback
async def handle_discovery_update(self, msg, connection):
"""Update entity config."""
if CONF_REMOVE not in msg and self._trigger_config != msg[CONF_DEVICE_TRIGGER]:
self.remove_device_trigger()
|
self._trigger_config = msg[CONF_DEVICE_TRIGGER]
await self.add_device_trigger()
super().handle_discovery_update(msg, connection)
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.add_device_trigger()
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass."""
self.remove_device_trigger()
await super().async_will_remove_from_hass()
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractSpearpointtranslationsHomeBlog.py
|
Python
|
bsd-3-clause
| 692
| 0.027457
|
def extractSpearpointtranslationsHomeBlog(item):
'''
Parser for 'spearpointtranslations.home.blog'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
        ('Record of the Missing Sect Master', 'Record of the Missing Sect Master', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
NodeTrie/NodeTrie_Py
|
setup.py
|
Python
|
lgpl-2.1
| 2,127
| 0.004701
|
"""Setup script for NodeTrie"""
from setuptools import setup, find_packages, Extension
import versioneer
try:
from Cython.Build import cythonize
except ImportError:
USING_CYTHON = False
else:
USING_CYTHON = True
ext = 'pyx' if USING_CYTHON else 'c'
extensions = [Extension("nodetrie.nodetrie",
["nodetrie/nodetrie.%s" % (ext,),
"nodetrie_c/src/node.c",],
depends=["nodetrie_c/src/node.h"],
include_dirs=["nodetrie_c/src"],
extra_compile_args=["-std=c99", "-O3"],
),
]
if USING_CYTHON:
extensions = cythonize(
extensions,
compiler_directives={'embedsignature': True,}
)
cmdclass = versioneer.get_cmdclass()
setup(
name='nodetrie',
version=versioneer.get_version(),
cmdclass=cmdclass,
url='https://github.com/NodeTrie/NodeTrie_Py',
license='LGPLv2',
author='Panos Kittenis',
author_email='22e889d8@opayq.com',
description=('Python bindings for NodeTrie, a trie data structure library'),
long_description=open('README.rst').read(),
packages=find_packages('.'),
zip_safe=False,
include_package_data=True,
platforms='any',
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
|
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)',
],
ext_modules=extensions,
)
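# Illustrative build sketch (not part of the original setup.py): with Cython
# installed the .pyx source is cythonized, otherwise the pre-generated C file
# is compiled directly, e.g.:
#
#   python setup.py build_ext --inplace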
|
Orav/kbengine
|
kbe/src/lib/python/Lib/test/test_importlib/abc.py
|
Python
|
lgpl-3.0
| 2,382
| 0
|
import abc
import unittest
class FinderTests(metaclass=abc.ABCMeta):
"""Basic tests for a finder to pass."""
@abc.abstractmethod
def test_module(self):
# Test importing a top-level module.
pass
@abc.abstractmethod
def test_package(self):
# Test importing a package.
pass
@abc.abstractmethod
def test_module_in_package(self):
# Test importing a module contained within a package.
# A value for 'path' should be used if for a meta_path finder.
pass
@abc.abstractmethod
def test_package_in_package(self):
# Test importing a subpackage.
# A value for 'path' should be used if for a meta_path finder.
pass
@abc.abstractmethod
def test_package_over_module(self):
        # Test that packages are chosen over modules.
pass
@abc.abstractmethod
def test_failure(self):
# Test trying to find a module that cannot be handled.
pass
class LoaderTests(metaclass=abc.ABCMeta):
@abc.abstractmethod
    def test_module(self):
        """A module should load without issue.
After the loader returns the module should be in sys.modules.
Attributes to verify:
* __file__
* __loader__
* __name__
* No __path__
"""
pass
@abc.abstractmethod
def test_package(self):
"""Loading a package should work.
After the loader returns the module should be in sys.modules.
Attributes to verify:
* __name__
* __file__
* __package__
* __path__
* __loader__
"""
pass
@abc.abstractmethod
    def test_lacking_parent(self):
        """A loader should not be dependent on its parent package being
        imported."""
pass
@abc.abstractmethod
def test_state_after_failure(self):
"""If a module is already in sys.modules and a reload fails
(e.g. a SyntaxError), the module should be in the state it was before
the reload began."""
pass
@abc.abstractmethod
def test_unloadable(self):
"""Test ImportError is raised when the loader is asked to load a module
it can't."""
pass
|
proyan/sot-torque-control
|
unitTesting/unit_test_inverse_dynamics_balance_controller.py
|
Python
|
gpl-3.0
| 6,246
| 0.018092
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 16:04:18 2017
@author: adelpret
"""
import pinocchio as se3
import numpy as np
from pinocchio import RobotWrapper
from conversion_utils import config_sot_to_urdf, joints_sot_to_urdf, velocity_sot_to_urdf
from dynamic_graph.sot.torque_control.inverse_dynamics_balance_controller import InverseDynamicsBalanceController
from dynamic_graph.sot.torque_control.create_entities_utils import create_ctrl_manager
import dynamic_graph.sot.torque_control.hrp2.balance_ctrl_sim_conf as balance_ctrl_conf
import dynamic_graph.sot.torque_control.hrp2.control_manager_sim_conf as control_manager_conf
from dynamic_graph.sot.torque_control.tests.robot_data_test import initRobotData
np.set_printoptions(precision=3, suppress=True, linewidth=100);
def create_balance_controller(dt, q, conf, robot_name='robot'):
ctrl = InverseDynamicsBalanceController("invDynBalCtrl");
ctrl.q.value = tuple(q);
ctrl.v.value = (NJ+6)*(0.0,);
ctrl.wrench_right_foot.value = 6*(0.0,);
ctrl.wrench_left_foot.value = 6*(0.0,);
ctrl.posture_ref_pos.value = tuple(q[6:]);
ctrl.posture_ref_vel.value = NJ*(0.0,);
ctrl.posture_ref_acc.value = NJ*(0.0,);
ctrl.com_ref_pos.value = (0., 0., 0.8);
ctrl.com_ref_vel.value = 3*(0.0,);
ctrl.com_ref_acc.value = 3*(0.0,);
# ctrl.rotor_inertias.value = np.array(conf.ROTOR_INERTIAS);
# ctrl.gear_ratios.value = conf.GEAR_RATIOS;
ctrl.rotor_inertias.value = tuple([g*g*r for (g,r) in zip(conf.GEAR_RATIOS, conf.ROTOR_INERTIAS)])
ctrl.gear_ratios.value = NJ*(1.0,);
ctrl.contact_normal.value = conf.FOOT_CONTACT_NORMAL;
ctrl.contact_points.value = conf.RIGHT_FOOT_CONTACT_POINTS;
ctrl.f_min.value = conf.fMin;
ctrl.f_max_right_foot.value = conf.fMax;
ctrl.f_max_left_foot.value = conf.fMax;
ctrl.mu.value = conf.mu[0];
ctrl.weight_contact_forces.value = (1e2, 1e2, 1e0, 1e3, 1e3, 1e3);
ctrl.kp_com.value = 3*(conf.kp_com,);
ctrl.kd_com.value = 3*(conf.kd_com,);
ctrl.kp_constraints.value = 6*(conf.kp_constr,);
ctrl.kd_constraints.value = 6*(conf.kd_constr,);
ctrl.kp_feet.value = 6*(conf.kp_feet,);
ctrl.kd_feet.value = 6*(conf.kd_feet,);
ctrl.kp_posture.value = conf.kp_posture;
ctrl.kd_posture.value = conf.kd_posture;
ctrl.kp_pos.value = conf.kp_pos;
ctrl.kd_pos.value = conf.kd_pos;
ctrl.w_com.value = conf.w_com;
ctrl.w_feet.value = conf.w_feet;
ctrl.w_forces.value = conf.w_forces;
ctrl.w_posture.value = conf.w_posture;
ctrl.w_base_orientation.value = conf.w_base_orientation;
ctrl.w_torques.value = conf.w_torques;
ctrl.active_joints.value = NJ*(1,);
ctrl.init(dt, robot_name);
return ctrl;
print "*** UNIT TEST FOR INVERSE-DYNAMICS-BALANCE-CONTROLLER (IDBC) ***"
print "This test computes the torques using the IDBC and compares them with"
print "the torques computed using the desired joint accelerations and contact"
print "wrenches computed by the IDBC. The two values should be identical."
print "Some small differences are expected due to the precision loss when"
print "passing the parameters from python to c++."
print "However, none of the following values should be larger than 1e-3.\n"
N_TESTS = 100
dt = 0.001;
NJ = initRobotData.nbJoints
# robot configuration
q_sot = np.array([-0.0027421149619457344, -0.0013842807952574399, 0.6421082804660067,
-0.0005693871512031474, -0.0013094048521806974, 0.0028568508070167,
               -0.0006369040657361668, 0.002710094953239396, -0.48241992906618536, 0.9224570746372157, -0.43872624301275104, -0.0021586727954009096,
-0.0023395862060549863, 0.0031045906573987617, -0.48278188636903313, 0.9218508861779927, -0.4380058166724791, -0.0025558837738616047,
-0.012985322450541008, 0.04430420221275542, 0.37027327677517635, 1.4795064165303056,
0.20855551221055582, -0.13188842278441873, 0.005487207370709895, -0.2586657542648506, 2.6374918629921953, -0.004223605878088189, 0.17118034021053144, 0.24171737354070008, 0.11594430024547904, -0.05264225067057105, -0.4691871937149223, 0.0031522040623960016, 0.011836097472447007, 0.18425595002313025]);
ctrl_manager = create_ctrl_manager(control_manager_conf, dt);
ctrl = create_balance_controller(dt, q_sot, balance_ctrl_conf);
robot = RobotWrapper(initRobotData.testRobotPath, [], se3.JointModelFreeFlyer())
index_rf = robot.index('RLEG_JOINT5');
index_lf = robot.index('LLEG_JOINT5');
Md = np.matrix(np.zeros((NJ+6,NJ+6)));
gr = joints_sot_to_urdf(balance_ctrl_conf.GEAR_RATIOS);
ri = joints_sot_to_urdf(balance_ctrl_conf.ROTOR_INERTIAS);
for i in range(NJ):
Md[6+i,6+i] = ri[i] * gr[i] * gr[i];
for i in range(N_TESTS):
q_sot += 0.001*np.random.random(NJ+6);
v_sot = np.random.random(NJ+6);
q_pin = np.matrix(config_sot_to_urdf(q_sot));
v_pin = np.matrix(velocity_sot_to_urdf(v_sot));
ctrl.q.value = tuple(q_sot);
ctrl.v.value = tuple(v_sot);
ctrl.tau_des.recompute(i);
tau_ctrl = joints_sot_to_urdf(np.array(ctrl.tau_des.value));
ctrl.dv_des.recompute(i);
dv = velocity_sot_to_urdf(np.array(ctrl.dv_des.value));
M = Md + robot.mass(q_pin);
h = robot.bias(q_pin, v_pin);
ctrl.f_des_right_foot.recompute(i);
ctrl.f_des_left_foot.recompute(i);
f_rf = np.matrix(ctrl.f_des_right_foot.value).T;
f_lf = np.matrix(ctrl.f_des_left_foot.value).T;
J_rf = robot.jacobian(q_pin, index_rf);
J_lf = robot.jacobian(q_pin, index_lf);
tau_pin = M*np.matrix(dv).T + h - J_rf.T * f_rf - J_lf.T * f_lf;
# ctrl.M.recompute(i);
# M_ctrl = np.array(ctrl.M.value);
print "norm(tau_ctrl-tau_pin) = %.4f"% np.linalg.norm(tau_ctrl - tau_pin[6:,0].T);
print "norm(tau_pin[:6]) = %.4f"% np.linalg.norm(tau_pin[:6]);
# print "q_pin:\n", q_pin;
# print "tau_pin:\n", tau_pin[6:,0].T, "\n";
# print "tau ctrl:\n", tau_ctrl.T, "\n";
# print "dv = ", np.linalg.norm(dv);
# print "f_rf:", f_rf.T, "\n";
# print "f_lf:", f_lf.T, "\n";
# print "h:", h.T, "\n";
# M_err = M-M_ctrl
# print "M-M_ctrl = ", M_err.diagonal(), "\n"
# for j in range(NJ+6):
# print M_err[j,:];
|
michaelBenin/sqlalchemy
|
test/orm/inheritance/test_relationship.py
|
Python
|
mit
| 53,676
| 0.007564
|
from sqlalchemy.orm import create_session, relationship, mapper, \
contains_eager, joinedload, subqueryload, subqueryload_all,\
Session, aliased, with_polymorphic
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.engine import default
from sqlalchemy.testing import AssertsCompiledSQL, fixtures
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.testing import assert_raises, eq_, is_
class Company(fixtures.ComparableEntity):
pass
class Person(fixtures.ComparableEntity):
pass
class Engineer(Person):
pass
class Manager(Person):
pass
class Boss(Manager):
pass
class Machine(fixtures.ComparableEntity):
pass
class Paperwork(fixtures.ComparableEntity):
pass
class SelfReferentialTestJoinedToBase(fixtures.MappedTest):
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
Table('people', metadata,
Column('person_id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
Table('engineers', metadata,
Column('person_id', Integer,
ForeignKey('people.person_id'),
primary_key=True),
Column('primary_language', String(50)),
Column('reports_to_id', Integer,
ForeignKey('people.person_id')))
@classmethod
def setup_mappers(cls):
engineers, people = cls.tables.engineers, cls.tables.people
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
mapper(Engineer, engineers,
inherits=Person,
inherit_condition=engineers.c.person_id == people.c.person_id,
polymorphic_identity='engineer',
properties={
'reports_to':relationship(
Person,
primaryjoin=
people.c.person_id == engineers.c.reports_to_id)})
def test_has(self):
p1 = Person(name='dogbert')
        e1 = Engineer(name='dilbert', primary_language='java', reports_to=p1)
sess = create_session()
sess.add(p1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to.has(Person.name == 'dogbert'))
.first(),
Engineer(name='dilbert'))
def test_oftype_aliases_in_exists(self):
e1 = Engineer(name='dilbert', primary_language='java')
        e2 = Engineer(name='wally', primary_language='c++', reports_to=e1)
sess = create_session()
sess.add_all([e1, e2])
sess.flush()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to
.of_type(Engineer)
.has(Engineer.name == 'dilbert'))
.first(),
e2)
def test_join(self):
p1 = Person(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=p1)
sess = create_session()
sess.add(p1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.join('reports_to', aliased=True)
.filter(Person.name == 'dogbert').first(),
Engineer(name='dilbert'))
class SelfReferentialJ2JTest(fixtures.MappedTest):
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
people = Table('people', metadata,
Column('person_id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
engineers = Table('engineers', metadata,
Column('person_id', Integer,
ForeignKey('people.person_id'),
primary_key=True),
Column('primary_language', String(50)),
Column('reports_to_id', Integer,
ForeignKey('managers.person_id'))
)
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
)
@classmethod
def setup_mappers(cls):
engineers = cls.tables.engineers
managers = cls.tables.managers
people = cls.tables.people
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
mapper(Manager, managers,
inherits=Person,
polymorphic_identity='manager')
mapper(Engineer, engineers,
inherits=Person,
polymorphic_identity='engineer',
properties={
'reports_to':relationship(
Manager,
primaryjoin=
managers.c.person_id == engineers.c.reports_to_id,
backref='engineers')})
def test_has(self):
m1 = Manager(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
sess = create_session()
sess.add(m1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to.has(Manager.name == 'dogbert'))
.first(),
Engineer(name='dilbert'))
def test_join(self):
m1 = Manager(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
sess = create_session()
sess.add(m1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.join('reports_to', aliased=True)
.filter(Manager.name == 'dogbert').first(),
Engineer(name='dilbert'))
def test_filter_aliasing(self):
m1 = Manager(name='dogbert')
m2 = Manager(name='foo')
e1 = Engineer(name='wally', primary_language='java', reports_to=m1)
e2 = Engineer(name='dilbert', primary_language='c++', reports_to=m2)
e3 = Engineer(name='etc', primary_language='c++')
sess = create_session()
sess.add_all([m1, m2, e1, e2, e3])
sess.flush()
sess.expunge_all()
# filter aliasing applied to Engineer doesn't whack Manager
eq_(sess.query(Manager)
.join(Manager.engineers)
.filter(Manager.name == 'dogbert').all(),
[m1])
eq_(sess.query(Manager)
.join(Manager.engineers)
.filter(Engineer.name == 'dilbert').all(),
[m2])
eq_(sess.query(Manager, Engineer)
.join(Manager.engineers)
.order_by(Manager.name.desc()).all(),
[(m2, e2), (m1, e1)])
def test_relationship_compare(self):
m1 = Manager(name='dogbert')
m2 = Manager(name='foo')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
e2 = Engineer(name='wally', primary_language='c++', reports_to=m2)
e3 = Engineer(name='etc', primary_language='c++')
sess = create_session()
sess.add(m1)
sess.add(m2)
sess.add(e1)
sess.add(e2)
sess.add(e3)
sess.flush()
sess.expunge_all()
eq_(sess.query(Manager)
.join(Manager.engineers)
.filter(Engineer.reports_to == None).all(),
[])
eq_(sess.query(Manager)
.join(Manager.engineers)
.filter(Engineer.reports_to == m1).all(),
[m1])
class SelfReferentialJ2JSelfTest(fixtures.MappedTest):
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
people = Table('people', metadata,
Column('person_id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
engineers = Table('engineers', metadata,
|
danialbehzadi/Nokia-RM-1013-2.0.0.11
|
webkit/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
|
Python
|
gpl-3.0
| 22,259
| 0.002965
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import StringIO
from webkitpy.common.checkout.scm import CheckoutNeedsUpdate
from webkitpy.common.net.bugzilla import Attachment
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.layout_package import test_results
from webkitpy.layout_tests.layout_package import test_failures
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.commandtest import CommandsTest
from webkitpy.tool.commands.queues import *
from webkitpy.tool.commands.queuestest import QueuesTest
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.tool.mocktool import MockTool, MockSCM, MockStatusServer
class TestQueue(AbstractPatchQueue):
name = "test-queue"
class TestReviewQueue(AbstractReviewQueue):
name = "test-review-queue"
class TestFeederQueue(FeederQueue):
_sleep_duration = 0
class AbstractQueueTest(CommandsTest):
def test_log_directory(self):
self.assertEquals(TestQueue()._log_directory(), os.path.join("..", "test-queue-logs"))
def _assert_run_webkit_patch(self, run_args, port=None):
queue = TestQueue()
tool = MockTool()
tool.status_server.bot_id = "gort"
tool.executive = Mock()
queue.bind_to_tool(tool)
queue._options = Mock()
queue._options.port = port
queue.run_webkit_patch(run_args)
expected_run_args = ["echo", "--status-host=example.com", "--bot-id=gort"]
if port:
expected_run_args.append("--port=%s" % port)
expected_run_args.extend(run_args)
tool.executive.run_and_throw_if_fail.assert_called_with(expected_run_args)
def test_run_webkit_patch(self):
self._assert_run_webkit_patch([1])
self._assert_run_webkit_patch(["one", 2])
self._assert_run_webkit_patch([1], port="mockport")
def test_iteration_count(self):
queue = TestQueue()
queue._options = Mock()
queue._options.iterations = 3
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
self.assertFalse(queue.should_continue_work_queue())
def test_no_iteration_count(self):
queue = TestQueue()
queue._options = Mock()
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
def _assert_log_message(self, script_error, log_message):
failure_log = AbstractQueue._log_from_script_error_for_upload(script_error, output_limit=10)
        self.assertEqual(failure_log.read(), log_message)
def test_log_from_script_error_for_upload(self):
self._assert_log_message(ScriptError("test"), "test")
# In python 2.5 unicode(Exception) is busted. See:
# http://bugs.python.org/issue2517
# With no good workaround, we just ignore these tests.
if not hasattr(Exception, "__unicode__"):
return
unicode_tor = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
utf8_tor = unicode_tor.encode("utf-8")
self._assert_log_message(ScriptError(unicode_tor), utf8_tor)
script_error = ScriptError(unicode_tor, output=unicode_tor)
expected_output = "%s\nLast %s characters of output:\n%s" % (utf8_tor, 10, utf8_tor[-10:])
self._assert_log_message(script_error, expected_output)
class FeederQueueTest(QueuesTest):
def test_feeder_queue(self):
queue = TestFeederQueue()
tool = MockTool(log_executive=True)
expected_stderr = {
"begin_work_queue": self._default_begin_work_queue_stderr("feeder-queue", MockSCM.fake_checkout_root),
"should_proceed_with_work_item": "",
"next_work_item": "",
"process_work_item": """Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)
Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)
MOCK setting flag 'commit-queue' to '-' on attachment '128' with comment 'Rejecting attachment 128 from commit-queue.' and additional comment 'non-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py.
- If you do not have committer rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.
- If you have committer rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed). The commit-queue restarts itself every 2 hours. After restart the commit-queue will correctly respect your committer rights.'
MOCK: update_work_items: commit-queue [106, 197]
Feeding commit-queue items [106, 197]
Feeding EWS (1 r? patch, 1 new)
MOCK: submit_to_ews: 103
""",
"handle_unexpected_error": "Mock error message\n",
}
self.assert_queue_outputs(queue, tool=tool, expected_stderr=expected_stderr)
class AbstractPatchQueueTest(CommandsTest):
def test_next_patch(self):
queue = AbstractPatchQueue()
tool = MockTool()
queue.bind_to_tool(tool)
queue._options = Mock()
queue._options.port = None
self.assertEquals(queue._next_patch(), None)
tool.status_server = MockStatusServer(work_items=[2, 197])
expected_stdout = "MOCK: fetch_attachment: 2 is not a known attachment id\n" # A mock-only message to prevent us from making mistakes.
expected_stderr = "MOCK: release_work_item: None 2\n"
patch_id = OutputCapture().assert_outputs(self, queue._next_patch, [], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
self.assertEquals(patch_id, None) # 2 is an invalid patch id
self.assertEquals(queue._next_patch().id(), 197)
class NeedsUpdateSequence(StepSequence):
def _run(self, tool, options, state):
raise CheckoutNeedsUpdate([], 1, "", None)
class AlwaysCommitQueueTool(object):
def __init__(self):
self.status_server = MockStatusServer()
def command_by_name(self, name):
return CommitQueue
class SecondThoughtsCommitQueue(CommitQueue):
def __init__(self):
self._reject_patch = False
CommitQueue.__init__(self)
def run_command(self, command):
# We want to reject the patch after the first validation,
# so wait to reject it
|
adrienpacifico/openfisca-core
|
openfisca_core/tests/test_cycles.py
|
Python
|
agpl-3.0
| 5,495
| 0.008735
|
# -*- coding: utf-8 -*-
from nose.tools import raises
from openfisca_core import periods
from openfisca_core.columns import IntCol
from openfisca_core.formulas import CycleError, SimpleFormulaColumn
from openfisca_core.tests import dummy_country
from openfisca_core.tests.dummy_country import Individus, reference_formula
from openfisca_core.tools import assert_near
# 1 <--> 2 with same period
@reference_formula
class variable1(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
return period, simulation.calculate('variable2', period)
@reference_formula
class variable2(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
return period, simulation.calculate('variable1', period)
# 3 <--> 4 with a period offset, but without explicit cycle allowed
@reference_formula
class variable3(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
return period, simulation.calculate('variable4', period.last_year)
@reference_formula
class variable4(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
return period, simulation.calculate('variable3', period)
# 5 -f-> 6 with a period offset, with cycle flagged but not allowed
# <---
@reference_formula
class variable5(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
variable6 = simulation.calculate('variable6', period.last_year, max_nb_cycles = 0)
return period, 5 + variable6
@reference_formula
class variable6(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
variable5 = simulation.calculate('variable5', period)
return period, 6 + variable5
# december cotisation depending on november value
@reference_formula
class cotisation(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
period = period.this_month
if period.start.month == 12:
return period, 2 * simulation.calculate('cotisation', period.last_month, max_nb_cycles = 1)
else:
return period, self.zeros() + 1
# 7 -f-> 8 with a period offset, with explicit cycle allowed (1 level)
# <---
@reference_formula
class variable7(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
variable8 = simulation.calculate('variable8', period.last_year, max_nb_cycles = 1)
return period, 7 + variable8
@reference_formula
class variable8(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
variable7 = simulation.calculate('variable7', period)
return period, 8 + variable7
# TaxBenefitSystem instance declared after formulas
tax_benefit_system = dummy_country.init_tax_benefit_system()
reference_period = periods.period(u'2013')
@raises(AssertionError)
def test_pure_cycle():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
simulation.calculate('variable1')
@raises(CycleError)
def test_cycle_time_offset():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
simulation.calculate('variable3')
def test_allowed_cycle():
"""
    Calculate variable5 then variable6, then in the other order, to verify that the first calculated variable
has no effect on the result.
"""
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
variable6 = simulation.calculate('variable6')
variable5 = simulation.calculate('variable5')
variable6_last_year = simulation.calculate('variable6', reference_period.last_year)
assert_near(variable5, [5])
assert_near(variable6, [11])
assert_near(variable6_last_year, [0])
def test_allowed_cycle_different_order():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
variable5 = simulation.calculate('variable5')
variable6 = simulation.calculate('variable6')
variable6_last_year = simulation.calculate('variable6', reference_period.last_year)
assert_near(variable5, [5])
assert_near(variable6, [11])
assert_near(variable6_last_year, [0])
def test_cotisation_1_level():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period.last_month, # December
parent1 = dict(),
).new_simulation(debug = True)
cotisation = simulation.calculate('cotisation')
assert_near(cotisation, [2])
def test_cycle_1_level():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
variable7 = simulation.calculate('variable7')
# variable8 = simulation.calculate('variable8')
assert_near(variable7, [22])
|
mozilla-services/autopush
|
autopush/tests/conftest.py
|
Python
|
mpl-2.0
| 235
| 0
|
from autopush.tests import setUp, tearDown
def pytest_configure(config):
"""Called before testing begins"""
setUp()
def pytest_unconfigure(config):
"""Called after all tests run and warnings displayed"""
tearDown()
| |
bobcyw/django
|
django/core/urlresolvers.py
|
Python
|
bsd-3-clause
| 26,463
| 0.001474
|
"""
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a ResolverMatch object which provides access to all
attributes of the resolved URL match.
"""
from __future__ import unicode_literals
import functools
import re
import warnings
from importlib import import_module
from threading import local
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.utils import lru_cache, six
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_str, force_text, iri_to_uri
from django.utils.functional import cached_property, lazy
from django.utils.http import RFC3986_SUBDELIMS, urlquote
from django.utils.module_loading import module_has_submodule
from django.utils.regex_helper import normalize
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import get_language, override
# SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for
# the current thread (which is the only one we ever access), it is assumed to
# be empty.
_prefixes = local()
# Overridden URLconfs for each thread are stored here.
_urlconfs = local()
class ResolverMatch(object):
def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None):
self.func = func
self.args = args
self.kwargs = kwargs
self.url_name = url_name
# If a URLRegexResolver doesn't have a namespace or app_name, it passes
# in an empty value.
self.app_names = [x for x in app_names if x] if app_names else []
self.app_name = ':'.join(self.app_names)
if namespaces:
self.namespaces = [x for x in namespaces if x]
else:
self.namespaces = []
self.namespace = ':'.join(self.namespaces)
if not hasattr(func, '__name__'):
# A class-based view
self._func_path = '.'.join([func.__class__.__module__, func.__class__.__name__])
else:
# A function-based view
self._func_path = '.'.join([func.__module__, func.__name__])
view_path = url_name or self._func_path
self.view_name = ':'.join(self.namespaces + [view_path])
def __getitem__(self, index):
return (self.func, self.args, self.kwargs)[index]
def __repr__(self):
return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s)" % (
self._func_path, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces)
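# Illustrative sketch (not part of the original module): how a ResolverMatch is
# typically consumed; `request` and the URL below are assumptions for the
# example only.
#
#   match = get_resolver(None).resolve('/articles/2003/')
#   view, args, kwargs = match.func, match.args, match.kwargs
#   response = view(request, *args, **kwargs)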
class Resolver404(Http404):
pass
class NoReverseMatch(Exception):
pass
@lru_cache.lru_cache(maxsize=None)
def get_callable(lookup_view, can_fail=False):
"""
Return a callable corresponding to lookup_view. This function is used
by both resolve() and reverse(), so can_fail allows the caller to choose
between returning the input as is and raising an exception when the input
string can't be interpreted as an import path.
If lookup_view is already a callable, return it.
If lookup_view is a string import path that can be resolved to a callable,
import that callable and return it.
If lookup_view is some other kind of string and can_fail is True, the string
is returned as is. If can_fail is False, an exception is raised (either
ImportError or ViewDoesNotExist).
"""
if callable(lookup_view):
return lookup_view
if not isinstance(lookup_view, six.string_types):
raise ViewDoesNotExist(
"'%s' is not a callable or a dot-notation path" % lookup_view
)
mod_name, func_name = get_mod_func(lookup_view)
if not func_name: # No '.' in lookup_view
if can_fail:
return lookup_view
else:
raise ImportError(
"Could not import '%s'. The path must be fully qualified." %
lookup_view)
try:
mod = import_module(mod_name)
except ImportError:
if can_fail:
return lookup_view
else:
parentmod, submod = get_mod_func(mod_name)
if submod and not module_has_submodule(import_module(parentmod), submod):
                raise ViewDoesNotExist(
                    "Could not import '%s'. Parent module %s does not exist." %
(lookup_view, mod_name))
else:
raise
else:
try:
view_func = getattr(mod, func_name)
except AttributeError:
if can_fail:
                return lookup_view
else:
raise ViewDoesNotExist(
"Could not import '%s'. View does not exist in module %s." %
(lookup_view, mod_name))
else:
if not callable(view_func):
# For backwards compatibility this is raised regardless of can_fail
raise ViewDoesNotExist(
"Could not import '%s.%s'. View is not callable." %
(mod_name, func_name))
return view_func
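# Illustrative sketch (not part of the original module); the dotted paths below
# are assumptions for the example only.
#
#   view = get_callable('myapp.views.home')               # imported and returned
#   same = get_callable('no.such.module', can_fail=True)  # returned as-is on ImportError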
@lru_cache.lru_cache(maxsize=None)
def get_resolver(urlconf=None):
if urlconf is None:
from django.conf import settings
urlconf = settings.ROOT_URLCONF
return RegexURLResolver(r'^/', urlconf)
@lru_cache.lru_cache(maxsize=None)
def get_ns_resolver(ns_pattern, resolver):
# Build a namespaced resolver for the given parent urlconf pattern.
# This makes it possible to have captured parameters in the parent
# urlconf pattern.
ns_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns)
return RegexURLResolver(r'^/', [ns_resolver])
def get_mod_func(callback):
# Converts 'django.views.news.stories.story_detail' to
# ['django.views.news.stories', 'story_detail']
try:
dot = callback.rindex('.')
except ValueError:
return callback, ''
return callback[:dot], callback[dot + 1:]
class LocaleRegexProvider(object):
"""
A mixin to provide a default regex property which can vary by active
language.
"""
def __init__(self, regex):
# regex is either a string representing a regular expression, or a
# translatable string (using ugettext_lazy) representing a regular
# expression.
self._regex = regex
self._regex_dict = {}
@property
def regex(self):
"""
Returns a compiled regular expression, depending upon the activated
language-code.
"""
language_code = get_language()
if language_code not in self._regex_dict:
if isinstance(self._regex, six.string_types):
regex = self._regex
else:
regex = force_text(self._regex)
try:
compiled_regex = re.compile(regex, re.UNICODE)
except re.error as e:
raise ImproperlyConfigured(
'"%s" is not a valid regular expression: %s' %
(regex, six.text_type(e)))
self._regex_dict[language_code] = compiled_regex
return self._regex_dict[language_code]
class RegexURLPattern(LocaleRegexProvider):
def __init__(self, regex, callback, default_args=None, name=None):
LocaleRegexProvider.__init__(self, regex)
# callback is either a string like 'foo.views.news.stories.story_detail'
# which represents the path to a module and a view function name, or a
# callable object (view).
if callable(callback):
self._callback = callback
else:
self._callback = None
self._callback_str = callback
self.default_args = default_args or {}
self.name = name
def __repr__(self):
return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern))
def add_prefix(self, prefix):
"""
Adds the prefix string to a string-based callback.
"""
if not prefix or
|
zynga/jasy
|
jasy/js/tokenize/Lang.py
|
Python
|
mit
| 458
| 0
|
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
#
"""JavaScript 1.7 keywords"""
keywords = set([
    "break",
    "case", "catch", "const", "continue",
    "debugger", "default", "delete", "do",
    "else",
    "false", "finally", "for", "function",
"if", "in", "instanceof",
"let",
"new", "null",
"return",
"switch",
"this", "throw", "true", "try", "typeof",
"var", "void",
"yield",
"while", "with"
])
|
Gateswong/GatesMusicPet
|
music_pet/utils.py
|
Python
|
mit
| 4,849
| 0.000825
|
# -*- coding: utf-8 -*-
import os
from codecs import encode, decode
import re
LINUX_ROOT = u"/"
def trim_quote(text):
if len(text) > 2 and text[0] == '"' and text[-1] == '"':
text = text[1:-1]
return text
def to_unicode(text, encoding="utf8"):
if type(text) == unicode:
return text
elif type(text) == str:
return decode(text, encoding)
else:
return unicode(text)
def remove_bom(input_filename, output_filename):
fp = open(input_filename, "rb")
bom = fp.read(3)
if bom != b'\xef\xbb\xbf':
raise ValueError("File doesn't have UTF-8 BOM")
fo = open(output_filename, "wb")
fo.write(fp.read())
fo.close()
fp.close()
def iconv_file(input_filename, output_filename, encoding, overwrite=False):
fp = open(input_filename, "rb")
ansi_content = fp.read()
fp.close()
if not overwrite:
if os.path.exists(output_filename):
return
with open(output_filename, "w") as fp:
if encoding.lower() in ["utf8", "utf-8", "u8", "utf", "utf_8"]:
fp.write(ansi_content)
else:
fp.write(encode(
decode(ansi_content, encoding),
"utf8"))
def read_file(filename, encoding="utf_8"):
'''
Load the content of a CUE file.
'''
file_content_string = open(filename, "rb").read()
if file_content_string[:3] == b"\xef\xbb\xbf":
file_content_string = file_content_string[3:]
return decode(file_content_string, encoding)
def filename_safe(filename):
parts = filename.split(u"/")
for i in xrange(len(parts)):
for ch in u'''<>'"?*\\/:''':
parts[i] = parts[i].replace(ch, u"_").strip()
return u"/".join(parts).strip()
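# Illustrative sketch (not from the original file); the input string is an
# assumption for the example only. Characters unsafe in file names are replaced
# with underscores, one path segment at a time:
#
#   filename_safe(u"AC/DC: Back?")   # -> u"AC/DC_ Back_"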
def path_from_pattern(pattern, d):
all_keys = {}
group_stack = []
buffer = ""
# parse all the keys in the pattern string
iter_formats = re.finditer(u'''%\(([^)]+)\)s''', pattern)
for f in iter_formats:
all_keys[f.span()[0]] = (
f.groups()[0],
f.span()[1] - f.span()[0]
)
# parse the pattern
i = 0 # current position in pattern string
while i < len(pattern):
# Case 1: we meet %(xxx)s
if i in all_keys: # if we find a key %(xxx)s
if not group_stack: # not in option fields:
if all_keys[i][0] in d: # the key exists in `d`
buffer += d[all_keys[i][0]]
i += all_keys[i][1]
else:
if all_keys[i][0] in d:
group_stack[-1] += d[all_keys[i][0]]
i += all_keys[i][1]
else: # doesn't exists, skip all for this optional field
while i < len(pattern):
if pattern[i] == u">": break
i += 1
i += 1
group_stack.pop()
continue
# Case 2: we meet a `<`
if pattern[i] == u"<":
group_stack.append("")
i += 1
continue
# Case 3: we meet a `>`
if pattern[i] == u">":
if not group_stack:
raise ValueError("Invalid pattern! (unmatched `>`)")
opt_str = group_stack.pop()
if not group_stack:
buffer += opt_str
else:
group_stack[-1] += opt_str
i += 1
continue
# Otherwise
if not group_stack:
buffer += pattern[i]
else:
group_stack[-1] += pattern[i]
i += 1
if len(group_stack):
        raise ValueError("Invalid pattern! (lack of `>`)")
return buffer
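# Illustrative sketch (not from the original file); the tag names below are
# assumptions for the example only. A <...> group is emitted only when every
# key it references exists in `d`, otherwise the whole group is dropped:
#
#   path_from_pattern(u"%(artist)s/<%(album)s/>%(title)s",
#                     {u"artist": u"A", u"title": u"T"})                   # -> u"A/T"
#   path_from_pattern(u"%(artist)s/<%(album)s/>%(title)s",
#                     {u"artist": u"A", u"album": u"B", u"title": u"T"})   # -> u"A/B/T"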
def cli_escape(text):
for ch in u'''`''':
text = text.replace(ch, u'''\\%s''' % ch)
return text
def parent_folder(path):
parts = path.split(u"/")
if path == LINUX_ROOT:
        raise ValueError(u"Can't get parent folder from linux dir /")
if parts[-1] == u"":
del parts[-1]
if parts[-1] == u".":
parts[-1] = u".."
elif parts[-1] == u"..":
parts.append(u"..")
elif len(parts) == 1:
return u"./"
else:
del parts[-1]
parts.append(u"")
return u"/".join(parts)
def ensure_parent_folder(path):
try:
os.makedirs(parent_folder(path), mode=0755)
except OSError, e:
if e.errno != os.errno.EEXIST:
raise
def command_copy_to(files, folder, base_command=u"cp"):
if len(files) == 0:
return u"echo"
if not folder:
folder = u"."
arguments = [base_command]
arguments.append(u"-n")
for f in files:
arguments.append(u'''"%s"''' % cli_escape(f))
if not folder.endswith(u"/"):
folder += u"/"
arguments.append(u'''"%s"''' % folder)
return u" ".join(arguments)
|
inkenbrandt/Earth_Tides
|
Micrograv/util/tilt_sens.py
|
Python
|
gpl-2.0
| 593
| 0.006745
|
#!/usr/bin/python
# compute new sensitivity from formulae in manual
import math
print "Tilt Sensitivity Calculator"
print "X1 refers to the tilt measurement, in arc sec"
print "R0/R1 refer to the gravimeter readings, in mGal"
print "Get the current tilt sensitivity from data files or the Setup menu"
oldSens = float(raw_input("Current tilt sensitivity: "))
r0 = float(raw_input("R0 [mGal] : "))
r1 = float(raw_input("R1 [mGal] : "))
x1 = float(raw_input("X1 [arc sec]: "))
K = math.sqrt( 1 + (87000 * (r0-r1)/(x1*x1)) )
newSens = K * oldSens
print "New tilt Sensitivity: %f"%newSens
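# Illustrative worked example (not from the original script; all numbers are
# made up): with R0 - R1 = 0.05 mGal and X1 = 200 arc sec,
#   K = sqrt(1 + 87000 * 0.05 / 200**2) = sqrt(1.10875) ~= 1.053
# so an old sensitivity of 10.0 would give newSens ~= 10.53.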
|
jacobajit/ion
|
intranet/apps/eighth/views/admin/blocks.py
|
Python
|
gpl-2.0
| 5,960
| 0.002349
|
# -*- coding: utf-8 -*-
import logging
import pickle
import re
from cacheops import invalidate_model
from django import http
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import redirect, render
from ..attendance import generate_roster_pdf
from ...forms.admin.blocks import BlockForm, QuickBlockForm
from ...models import EighthBlock, EighthScheduledActivity
from ....auth.decorators import eighth_admin_required
logger = logging.getLogger(__name__)
@eighth_admin_required
def add_block_view(request):
if request.method == "POST" and "custom_block" in request.POST:
form = QuickBlockForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, "Successfully added block.")
return redirect("eighth_admin_dashboard")
else:
messages.error(request, "Error adding block.")
request.session["add_block_form"] = pickle.dumps(form)
date = None
show_letters = None
if "date" in request.GET:
date = request.GET.get("date")
if "date" in request.POST:
date = request.POST.get("date")
title_suffix = ""
if date:
date_format = re.compile(r'([0-9]{2})\/([0-9]{2})\/([0-9]{4})')
fmtdate = date_format.sub(r'\3-\1-\2', date)
logger.debug(fmtdate)
title_suffix = " - {}".format(fmtdate)
show_letters = True
if "modify_blocks" in request.POST:
letters = request.POST.getlist("blocks")
current_letters = []
blocks_day = EighthBlock.objects.filter(date=fmtdate)
for day in blocks_day:
current_letters.append(day.block_letter)
logger.debug(letters)
logger.debug(current_letters)
for l in letters:
if len(l) == 0:
continue
if l not in current_letters:
EighthBlock.objects.create(date=fmtdate, block_letter=l)
messages.success(request, "Successfully added {} Block on {}".format(l, fmtdate))
for l in current_letters:
if len(l) == 0:
continue
if l not in letters:
EighthBlock.objects.get(date=fmtdate, block_letter=l).delete()
messages.success(request, "Successfully removed {} Block on {}".format(l, fmtdate))
invalidate_model(EighthBlock)
letters = []
visible_blocks = ["A", "B", "C", "D", "E", "F", "G", "H"]
if show_letters:
onday = EighthBlock.objects.filter(date=fmtdate)
for l in visible_blocks:
exists = onday.filter(block_letter=l)
letters.append({"name": l, "exists": exists})
for blk in onday:
if blk.block_letter not in visible_blocks:
visible_blocks.append(blk.block_letter)
let
|
ters.append({"name": blk.block_letter, "
|
exists": True})
context = {"admin_page_title": "Add or Remove Blocks{}".format(title_suffix),
"date": date,
"letters": letters,
"show_letters": show_letters,
"add_block_form": QuickBlockForm}
return render(request, "eighth/admin/add_block.html", context)
@eighth_admin_required
def edit_block_view(request, block_id):
try:
block = EighthBlock.objects.get(id=block_id)
except EighthBlock.DoesNotExist:
raise http.Http404
if request.method == "POST":
form = BlockForm(request.POST, instance=block)
if form.is_valid():
form.save()
invalidate_model(EighthBlock)
messages.success(request, "Successfully edited block.")
return redirect("eighth_admin_dashboard")
else:
messages.error(request, "Error adding block.")
else:
form = BlockForm(instance=block)
context = {"form": form, "delete_url": reverse("eighth_admin_delete_block", args=[block_id]), "admin_page_title": "Edit Block"}
return render(request, "eighth/admin/edit_form.html", context)
@eighth_admin_required
def delete_block_view(request, block_id):
try:
block = EighthBlock.objects.get(id=block_id)
except EighthBlock.DoesNotExist:
raise http.Http404
if request.method == "POST":
block.delete()
invalidate_model(EighthBlock)
messages.success(request, "Successfully deleted block.")
return redirect("eighth_admin_dashboard")
else:
context = {"admin_page_title": "Delete Block",
"item_name": str(block),
"help_text": "Deleting this block will remove all records "
"of it related to eighth period."}
return render(request, "eighth/admin/delete_form.html", context)
@eighth_admin_required
def print_block_rosters_view(request, block_id):
if "schact_id" in request.POST:
response = HttpResponse(content_type="application/pdf")
response["Content-Disposition"] = "inline; filename=\"block_{}_rosters.pdf\"".format(block_id)
sched_act_ids = request.POST.getlist("schact_id")
pdf_buffer = generate_roster_pdf(sched_act_ids, True)
response.write(pdf_buffer.getvalue())
pdf_buffer.close()
return response
else:
try:
block = EighthBlock.objects.get(id=block_id)
schacts = EighthScheduledActivity.objects.filter(block=block).order_by("sponsors")
schacts = sorted(schacts, key=lambda x: "{}".format(x.get_true_sponsors()))
except (EighthBlock.DoesNotExist, EighthScheduledActivity.DoesNotExist):
raise http.Http404
context = {"eighthblock": block, "admin_page_title": "Choose activities to print", "schacts": schacts}
return render(request, "eighth/admin/choose_roster_activities.html", context)
|
luizcieslak/AlGDock
|
AlGDock/IO.py
|
Python
|
mit
| 14,740
| 0.021574
|
import os
import numpy as np
import MMTK
class Grid:
"""
Class to read and write alchemical grids.
Data is a dictionary with
spacing - the grid spacing, in Angstroms.
counts - the number of points in each dimension.
vals - the values.
All are numpy arrays.
"""
def __init__(self):
pass
def read(self, FN, multiplier=None):
"""
Reads a grid in dx or netcdf format
The multiplier affects the origin and spacing.
"""
if FN is None:
raise Exception('File is not defined')
elif FN.endswith('.dx') or FN.endswith('.dx.gz'):
data = self._read_dx(FN)
elif FN.endswith('.nc'):
data = self._read_nc(FN)
else:
raise Exception('File type not supported')
if multiplier is not None:
data['origin'] = multiplier*data['origin']
data['spacing'] = multiplier*data['spacing']
return data
def _read_dx(self, FN):
"""
Reads a grid in dx format
"""
if FN.endswith('.dx'):
F = open(FN,'r')
else:
import gzip
F = gzip.open(FN,'r')
# Read the header
line = F.readline()
while line.find('object')==-1:
line = F.readline()
header = {}
header['counts'] = [int(x) for x in line.split(' ')[-3:]]
for name in ['origin','d0','d1','d2']:
header[name] = [float(x) for x in F.readline().split(' ')[-3:]]
F.readline()
header['npts'] = int(F.readline().split(' ')[-3])
# Test to make sure the grid type is okay.
    # These conditions are not absolutely essential,
# but they reduce the number of subtraction operations.
if not (header['d0'][1]==0 and header['d0'][2]==0 and
header['d1'][0]==0 and header['d1'][2]==0 and
header['d2'][0]==0 and header['d2'][1]==0):
raise Exception('Trilinear grid must be in original basis')
if not (header['d0'][0]>0 and header['d1'][1]>0 and header['d2'][2]>0):
raise Exception('Trilinear grid must have positive coordinates')
# Read the data
vals = np.ndarray(shape=header['npts'], dtype=float)
index = 0
while index<header['npts']:
line = F.readline()[:-1]
items = [float(item) for item in line.split()]
vals[index:index+len(items)] = items
index = index + len(items)
F.close()
data = {
'origin':np.array(header['origin']), \
'spacing':np.array([header['d0'][0],header['d1'][1],header['d2'][2]]), \
'counts':np.array(header['counts']), \
'vals':vals}
return data
def _read_nc(self, FN):
"""
Reads a grid in netcdf format
"""
from netCDF4 import Dataset
grid_nc = Dataset(FN,'r')
data = {}
for key in list(grid_nc.variables):
data[key] = np.array(grid_nc.variables[key][:][0][:])
grid_nc.close()
return data
def write(self, FN, data, multiplier=None):
"""
Writes a grid in dx or netcdf format.
The m
|
ultiplier affects the origin and spacing.
"""
if mul
|
tiplier is not None:
data_n = {'origin':multiplier*data['origin'],
'counts':data['counts'],
'spacing':multiplier*data['spacing'],
'vals':data['vals']}
else:
data_n = data
if FN.endswith('.nc'):
self._write_nc(FN, data_n)
elif FN.endswith('.dx') or FN.endswith('.dx.gz'):
self._write_dx(FN, data_n)
else:
raise Exception('File type not supported')
def _write_dx(self, FN, data):
"""
Writes a grid in dx format
"""
n_points = data['counts'][0]*data['counts'][1]*data['counts'][2]
if FN.endswith('.dx'):
F = open(FN,'w')
else:
import gzip
F = gzip.open(FN,'w')
F.write("""object 1 class gridpositions counts {0[0]} {0[1]} {0[2]}
origin {1[0]} {1[1]} {1[2]}
delta {2[0]} 0.0 0.0
delta 0.0 {2[1]} 0.0
delta 0.0 0.0 {2[2]}
object 2 class gridconnections counts {0[0]} {0[1]} {0[2]}
object 3 class array type double rank 0 items {3} data follows
""".format(data['counts'],data['origin'],data['spacing'],n_points))
for start_n in range(0,len(data['vals']),3):
F.write(' '.join(['%6e'%c for c in data['vals'][start_n:start_n+3]]) + '\n')
F.write('object 4 class field\n')
F.write('component "positions" value 1\n')
F.write('component "connections" value 2\n')
F.write('component "data" value 3\n')
F.close()
def _write_nc(self, FN, data):
"""
Writes a grid in netcdf format
"""
n_points = data['counts'][0]*data['counts'][1]*data['counts'][2]
from netCDF4 import Dataset
grid_nc = Dataset(FN,'w',format='NETCDF4')
grid_nc.createDimension('one', 1)
grid_nc.createDimension('n_cartesian', 3)
grid_nc.createDimension('n_points', n_points)
grid_nc.createVariable('origin','f8',('one','n_cartesian'))
grid_nc.createVariable('counts','i8',('one','n_cartesian'))
grid_nc.createVariable('spacing','f8',('one','n_cartesian'))
grid_nc.createVariable('vals','f8',('one','n_points'), zlib=True)
for key in data.keys():
grid_nc.variables[key][:] = data[key]
grid_nc.close()
def truncate(self, in_FN, out_FN, counts, multiplier=None):
"""
    Truncates the grid at the origin and
    with a limited number of counts per dimension.
    The multiplier is applied to the values, not to the grid scaling.
"""
data_o = self.read(in_FN)
nyz_o = data_o['counts'][1]*data_o['counts'][2]
nz_o = data_o['counts'][2]
min_i = int(-data_o['origin'][0]/data_o['spacing'][0])
min_j = int(-data_o['origin'][1]/data_o['spacing'][1])
min_k = int(-data_o['origin'][2]/data_o['spacing'][2])
# vals = np.ndarray(shape=tuple(counts), dtype=float)
# for i in range(counts[0]):
# for j in range(counts[1]):
# for k in range(counts[2]):
# vals[i,j,k] = data_o['vals'][(i+min_i)*nyz_o + (j+min_j)*nz_o + (k+min_k)]
vals = np.array(
[[[data_o['vals'][(i+min_i)*nyz_o + (j+min_j)*nz_o + (k+min_k)]
for k in range(counts[2])]
for j in range(counts[1])]
for i in range(counts[0])])
if multiplier is not None:
vals = vals*multiplier
data_n = {'origin':np.array([0., 0., 0.]), \
'counts':counts, 'spacing':data_o['spacing'], 'vals':vals.flatten()}
self.write(out_FN,data_n)
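def _example_grid_roundtrip(in_FN='grid.dx', out_FN='grid.nc'):
  """Hedged usage sketch for Grid (the file names are placeholders, not part
  of AlGDock): read a dx grid, convert Angstroms to nanometers, and rewrite
  it as netcdf."""
  g = Grid()
  data = g.read(in_FN, multiplier=0.1)  # origin and spacing now in nm
  g.write(out_FN, data)                 # same values in a netcdf container
  return data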
class crd:
"""
Class to read and write AMBER coordinate/restart and trajectory files.
"""
def __init__(self):
pass
def read(self, FN, natoms=None, return_title=False, \
multiplier=None, trajectory=False):
"""
Reads an AMBER coordinate/restart or trajectory file.
If natoms is not none, then the coordinates will be split
into a list of natoms X 3 arrays.
    If multiplier is not None, the coordinates are multiplied by it;
    a value of 0.1 converts Angstroms into nanometers.
"""
if not os.path.isfile(FN):
raise Exception('Coordinate file %s does not exist!'%FN)
if FN.endswith('.gz'):
import gzip
F = gzip.open(FN, 'r')
else:
F = open(FN,'r')
dat = F.read().strip().split('\n')
F.close()
title = dat.pop(0) # Title
if len(dat[0].split())>1:
# VMD format (does not specify number of atoms)
crd = []
for line in dat:
crd = crd + [float(x) for x in line.split()]
crd = np.resize(crd,(len(crd)/3,3))
else:
# AMBER format
file_natoms = int(dat.pop(0)) # Number of atoms
if (natoms is not None) and (file_natoms!=natoms):
print "Incorrect number of atoms in crd file"
return np.array([])
if trajectory:
w = 8 # For mdcrd
else:
w = 12 # For inpcrd
crd = []
for line in dat:
crd = crd + [float(line[x:x+w]) for x in range(0,len(line),w)]
crd = np.resize(crd,(len(crd)/3,3))
if multiplier is not None:
crd = multiplier*crd
if (natoms is not None):
crd = np.vsplit(crd,crd.shape[0]/natoms)
print " read %d configurations from %s"%(len(crd), FN)
if return_title:
return (crd, title)
else:
return crd
def write(self, FN, crd, title='', append=False, \
multiplier=None, trajectory=False):
"""
Writes an AMBER coordinate/restart or tr
|
LuisSuall/Sleight_of_hand
|
Finger_run/utils/gesture.py
|
Python
|
gpl-2.0
| 2,182
| 0.024748
|
import extendedHand
from extendedHand import *
import math
from math import *
'''
Function that calculates the @percent % of @whole
'''
def percentage(whole, percent):
return (whole * percent) / 100.0
'''
Function that detects the run gesture.
@hand: the hand that we're analysing.
@tolerance: the percentage of tolerance in the measurements.
'''
def detectRunGesture(hand, tolerance):
    #We use the index and the middle finger like two legs and we're going to simulate the run action.
#Then we need the position information about these fingers.
index = getFinger(hand, 'index')
middle = getFinger(hand, 'middle')
#We get the tips position of the two fingers.
index_tip_pos = index.bone(3).next_joint
middle_tip_pos = middle.bone(3).next_joint
    #We calculate the signed difference between the Y coordinates.
#We use the sign to check that the fingers have been moved.
diffBtwTipsY = index_tip_pos[1] - middle_tip_pos[1]
#We check the palm orientation and we want a minimum distance between the two fingers.
if detectRunGesture.sign*diffBtwTipsY <= (-30 + percentage(30, tolerance)) and palmOrientation(hand) == 'down':
detectRunGesture.sign = copysign(1, diffBtwTipsY)
return True
else:
return False
detectRunGesture.sign = -1
'''
Function that detects the OK gesture.
@hand: the hand that we're analysing.
@tolerance: the percentage of tolerance in the measurements.
'''
def detectOKGesture(hand, tolerance):
#We use the index finger and the thumb so we need the position information about these fingers.
thumb = getFinger(hand, 'thumb')
index = getFinger(hand, 'index')
#We get the ti
|
ps position of the two fingers.
thumb_tip_pos = thumb.bone(3).next_joint
index_tip_pos = index.bone(3).next_joint
#We calculate the distance between the tips.
distanceBtwTips = sqrt(pow(thumb_tip_pos[0]-index_tip_pos[0],2) + pow(thumb_tip_pos[1]-index_tip_pos[1],2) + pow(thumb_tip_pos[2]-index_tip_pos[2],2))
#We check the palm orientation and the distance between tips.
if di
|
stanceBtwTips < (30 + percentage(30, tolerance)) and palmOrientation(hand) == 'down':
return True
else:
return False
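'''
Hedged usage sketch (not part of the original module): both detectors expect a
Leap Motion hand object plus a tolerance percentage, so a caller that already
holds a hand can simply dispatch on the first gesture that fires.
'''
def processGestures(hand, tolerance=10):
    if detectOKGesture(hand, tolerance):
        return 'ok'
    elif detectRunGesture(hand, tolerance):
        return 'run'
    return None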
|
minesh1291/Learning-Python
|
solutions/parse-xml.py
|
Python
|
apache-2.0
| 508
| 0.033465
|
# Import Functions
i
|
mport urllib2
import xml.etree.ElementTree as ET
# Get online XML file
url="https://cghub.ucsc.edu/cghub/metadata/analysisDetail/a8f16339-4802-440c-81b6-d7a6635e604b"
request=urllib2.Request(url, headers={"Accept" : "application/xml"})
u=urllib2.urlopen(request)
tree=ET.parse(u)
root=tree.getroot()
dict={}
for i in root.iter():
if i.text!=None:
dict[i.tag]=i.text.strip()
else:
dict[i.tag]=""
for key in sorted(dict.keys(), key=lambda v
|
: v.upper()):
print key+":"+dict[key]
|
tkelman/utf8rewind
|
tools/gyp/pylib/gyp/mac_tool.py
|
Python
|
mit
| 26,881
| 0.007366
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import struct
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest)
else:
if os.path.exists(dest):
os.unlink(dest)
shutil.copy(source, dest)
if extension in ('.plist', '.strings') and convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices']
if os.environ['XCODE_VERSION_ACTUAL'] > '0700':
args.extend(['--auto-activate-custom-fonts'])
if 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ:
args.extend([
'--target-device', 'iphone', '--target-device', 'ipad',
'--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'],
])
else:
args.extend([
'--target-device', 'mac',
'--minimum-deployment-target',
os.environ['MACOSX_DEPLOYMENT_TARGET'],
])
args.extend(['--output-format', 'human-readable-text', '--compile', dest,
source])
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _ConvertToBinary(self, dest):
subprocess.check_call([
'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
def _CopyStringsFile(self, source, dest):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCre
|
ateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
|
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
    except Exception:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile(r'[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffices on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature
|
WmHHooper/aima-python
|
submissions/Thompson/myBayes.py
|
Python
|
mit
| 907
| 0.003308
|
## Burglary example [Figure 14.2]
from probability import BayesNet
T, F = True, False
gym = BayesNet([
#WorkOuts
('LegDay', '', .33),
('ArmsDay', '', 0.33),
('Cardio', '', 0.33),
    ('Tired', 'LegDay ArmsDay Cardio',
{(T, T, T): 0.1,
(T, T, F): 0
|
.1,
(T, F, T): 0.7,
(T, F, F): 0.8,
(F, T, T): 0.7,
(F, T, F): 0.9,
(F, F, T): 0.9,
(F, F, F): 0.5}),
('Quit', 'Tired', {T: 0.70, F: 0.01}),
('Push', 'Tired', {T: 0.90, F: 0.10})
])
gym.label = 'Gym Day'
examples = {
gym: [
        {'variable': 'LegDay',
'evidence': {'Quit': T, 'Push': F}
},
        {'variable': 'LegDay',
'evidence': {'Quit': F, 'Push': F}
},
|
    {'variable': 'ArmsDay',
'evidence': {'Quit': T, 'Push': T}
},
{'variable': 'Cardio',
'evidence': {'Quit': F, 'Push': T}
}
]
}
#
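# Hedged sketch (not part of the submission format): each example above can be
# evaluated with exact inference from aima-python's probability module.
def _query_gym(variable, evidence):
    from probability import enumeration_ask
    return enumeration_ask(variable, evidence, gym)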
|
LordAmit/epub-highlighter
|
epub_highlighter.py
|
Python
|
mit
| 7,285
| 0.000686
|
'''
Highlights a list of words (optionally with their meanings) inside the XHTML
content files of an EPUB and writes the result to a new *_highlighted.epub.
'''
import os
import shutil
import zipfile
from gi.repository import Gtk
from xml.dom import minidom
from xml.etree import ElementTree as ET
import re
import distutils.archive_util
EPUB_PATH = "/home/amit/git/epub-highlighter/epub/test.epub"
# print(os.path.spli(EPUB_PATH)[0] + "tmp")
# os.mkdir(os.path.spli(EPUB_PATH)[0]+"tmp")
EXTRACT_ROOT = "/home/amit/git/epub-highlighter/epub/tmp/"
MIMETYPE_OPF = 'application/oebps-package+xml'
MEDIA_TYPE = 'application/xhtml+xml'
# XML_PATH = '/home/amit/git/epub-highlighter/epub/tmp/test.epub/index_split_000.xhtml'
LIST_PATH = "/home/amit/git/epub-highlighter/list"
current_progress_in_percent = 0
counter = 0
DELIMITER = ',-,'
def get_content_files(opf_path: str):
opf_xml = minidom.parse(opf_path).documentElement
xhtmls = []
for element in opf_xml.getElementsByTagName('item'):
# print(element.getAttribute("href"))
if element.getAttribute("media-type") == MEDIA_TYPE:
xhtmls.append(element.getAttribute("href"))
return xhtmls
# if element.getAttribute("media-type") is (MEDIA_TYPE):
# print(element.getAttribute("href"))
def read_container(extract_path: str)->str:
container_xml = extract_path + "META-INF/container.xml"
minidom_xml = minidom.parse(container_xml).documentElement
opf_path = None
for element in minidom_xml.getElementsByTagName('rootfile'):
if element.getAttribute('media-type') == MIMETYPE_OPF:
# Only take the first full-path available
opf_path = element.getAttribute('full-path')
break
opf_path = extract_path + opf_path
return opf_path
# i = root.findall('./rootfile')
# print(i[0].tag)
def highlight_content(content, word, meaning=None):
global counter
# insensitive_hippo = re.compile(re.escape('hippo'), re.IGNORECASE)
# insensitive_hippo.sub('giraffe', 'I want a hIPpo for my birthday')
word = str(word).strip()
word = ' ' + word + ' '
if not meaning:
highlighted_word = " <b><i>" + word.upper() + "</i></b> "
else:
highlighted_word = " <b><i>" + \
word.upper() + "</i></b> [" + meaning.strip() + "] "
# print(word, highlighted_word)
# exit()
insensitive_pattern = re.compile(re.escape(word), re.IGNORECASE)
changed_content = insensitive_pattern.sub(highlighted_word, content)
if content != changed_content:
counter = counter + 1
# print(content, changed_content)
# exit()
return changed_content
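# Worked example (the sentence is made up): with a meaning supplied,
#   highlight_content("We met a stoic man.", "stoic", "calm under pressure")
# returns
#   "We met a <b><i> STOIC </i></b> [calm under pressure] man."
# and bumps the module-level counter because the content changed.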
def read_contents(xml_path) -> str:
return str(open(xml_path, "r").read())
def read_list_of_words(list_path):
return open(list_path).readlines()
def read_list_of_words_with_meanings(list_path):
contents = open(list_path).readlines()
words = []
meanings = []
for content in contents:
# print(content)
split_content = str(content).split(DELIMITER)
words.append(split_content[0])
meanings.append(split_content[1])
return words, meanings
def write_content(xml_path, content):
open(xml_path, mode='w').write(content)
def do_something_with_progress(progress_in_hundred: int):
print("Current Progress: " + str(progress_in_hundred))
def replace_xml_files(xmls_with_path, words, progress_bar=None, status_bar=None, mea
|
nings=None):
global current_progress_in_percent
xml_file_count = len(xmls_with_path)
files_processed = 0
for xml in xmls_with_path:
# content = open(xml).read()
# p
|
rint("Processing: " + xml)
xml_file_contents = read_contents(xml)
# print(xml_file_contents)
for i in range(0, len(words)):
word = words[i]
# print(word)
if meanings:
meaning = meanings[i]
xml_file_contents = highlight_content(
xml_file_contents, word, meaning)
else:
xml_file_contents = highlight_content(
xml_file_contents, word)
# print(xml_file_contents)
write_content(xml, xml_file_contents)
files_processed = files_processed + 1
current_progress_in_percent = (files_processed / xml_file_count)
msg = "processing " + os.path.basename(xml)
if status_bar and progress_bar:
status_bar.push(1, msg)
progress_bar.set_fraction(current_progress_in_percent)
while Gtk.events_pending():
Gtk.main_iteration()
# do_something_with_progress(current_progress_in_percent)
def create_epub(extracted_epub_path, original_epub_path):
original_epub_basename = os.path.split(original_epub_path)[1]
original_epub_dir = os.path.split(original_epub_path)[0]
# print(original_epub_dir)
# print(original_epub_basename)
new_epub_name = os.path.splitext(original_epub_basename)[
0] + "_highlighted.epub"
# print(new_epub_name)
# print(extracted_epub_path)
new_epub_path = original_epub_dir + "/" + new_epub_name
# print(new_epub_path)
zip_path = distutils.archive_util.make_archive(
new_epub_name, format='zip', root_dir=extracted_epub_path)
shutil.move(zip_path, new_epub_path + '.zip')
os.rename(new_epub_path + '.zip', new_epub_path.replace('zip', ''))
def remove_extracted_directory(extract_root):
import shutil
shutil.rmtree(extract_root)
def extract_epub_to_tmp_directory(
epub_path) ->str:
    epub_basename = os.path.basename(epub_path)
    temp_dir = os.path.split(epub_path)[
        0] + "/tmp-" + os.path.splitext(epub_basename)[0]
# os.mkdir(temp_dir)
# words = ["Test"]
epub_file = zipfile.ZipFile(epub_path, mode='r')
# print(epub_basename)
# extract_path: str = EXTRACT_ROOT + epub_basename + "/"
extract_path = temp_dir + "/"
# print(extract_path)
epub_file.extractall(path=extract_path)
return extract_path
def get_full_content_xmls_filepaths(extract_path):
opf_path = read_container(extract_path)
opf_path_base = os.path.split(opf_path)[0]
xmls = get_content_files(opf_path)
xmls_with_path = []
for xml in xmls:
xml_with_path = opf_path_base + '/' + xml
xmls_with_path.append(xml_with_path)
return xmls_with_path
def main(epub_path, list_path, progress_bar=None, status_bar=None, with_meaning: bool = None):
extract_path = extract_epub_to_tmp_directory(epub_path)
xmls_with_path = get_full_content_xmls_filepaths(extract_path)
if not with_meaning:
texts = read_list_of_words(list_path)
replace_xml_files(xmls_with_path, texts, progress_bar, status_bar)
else:
words, meanings = read_list_of_words_with_meanings(list_path)
# print(words, meanings)
replace_xml_files(xmls_with_path, words,
progress_bar, status_bar, meanings)
create_epub(extract_path, epub_path)
remove_extracted_directory(extract_path)
global counter
success_msg = "Complete! Highlighted " + \
str(counter) + " Words in " + str(len(xmls_with_path)) + " files"
if status_bar:
status_bar.push(1, success_msg)
else:
print(success_msg)
if __name__ == '__main__':
main(EPUB_PATH, LIST_PATH, None, None, False)
|
outworldrunner/nightbay
|
movies/views.py
|
Python
|
gpl-3.0
| 1,200
| 0.004167
|
from django.shortcuts import render, get_object_or_404, redirect
from .models import Movie, Trailer
from favorites.models import Favourite
def recent_movies(request):
movie_list = Movie.objects.all().order_by('-id')
# favs = Favourite.objects.all()
# for fav in favs:
# if fav.user == request.user.username:
# print(fav.favourites.all())
context = {
"movies": movie_list,
}
return render(request, "activity.html", context)
def movie_detail(request, id=None):
movie = get_object_or_404(Movie, id=id)
favs = Favourite.objects.filter(user=request.user)
is_favourited = False
for fav in favs:
if fav.favourites.all()[0] == movie:
is_favourited = True
context = {
"movie": movie,
"is_favourited": is_favourited,
}
return render(request, "movie_detail.html", context)
def mark_as_favourite(request, id=None):
if not request.user.is_authenticated():
return redirect("login")
movie = get_object_or_404(Movie, id=id)
fav = Favourite(user=request.
|
user.username)
fav.save
|
()
fav.favourites.add(movie)
return redirect("activity:detail", id=id)
|
igorcoding/asynctnt-queue
|
tests/test_queue.py
|
Python
|
apache-2.0
| 925
| 0
|
from asynctnt_queue import Queue, Tube
from tests import BaseTarantoolTestCase
class QueueTestCase(BaseTarantoolTestCase):
async def test__queue_create(self):
q = Queue(self.conn)
self.assertEqual(q.conn, self.conn, 'conn valid')
def test__queue_get_tube(self):
q = Queue(self.conn)
|
tube = q.tube('test_tube')
self.assertEqual(tube.name, 'test_tube', 'name valid')
self.assertIsInstance(tube, Tube, 'tube valid type')
self.assertEqual(tube.conn, self.conn, 'conn valid')
def
|
test__queue_get_tube_multiple(self):
q = Queue(self.conn)
tube1 = q.tube('test_tube')
tube2 = q.tube('test_tube')
self.assertIs(tube1, tube2, 'the same object')
async def test__queue_statistics(self):
q = Queue(self.conn)
res = await q.statistics()
self.assertIsNotNone(res)
self.assertIn('test_tube', res)
|
JulienMcJay/eclock
|
windows/Python27/Lib/site-packages/docutils/parsers/rst/languages/zh_tw.py
|
Python
|
gpl-2.0
| 5,172
| 0.001354
|
# -*- coding: utf-8 -*-
# $Id: zh_tw.py 7119 2011-09-02 13:00:23Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Traditional Chinese language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'attention (translation required)': 'attention',
'caution (translation required)': 'caution',
'code (translation required)': 'code',
'danger (translation required)': 'danger',
'error (translation required)': 'error',
'hint (translation required)': 'hint',
'important (translation required)': 'important',
'note (translation required)': 'note',
'tip (translation required)': 'tip',
'warning (translation required)': 'warning',
'admonition (translation required)': 'admonition',
'sidebar (translation required)': 'sidebar',
'topic (translation required)': 'topic',
'line-block (translation required)': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'rubric (translation required)': 'rubric',
'epigraph (translation required)': 'epigraph',
'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote',
'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#'questions (translation required)': 'questions',
'table (translation required)': 'table',
'csv-table (translation required)': 'csv-table',
'list-table (translation required)': 'list-table',
#'qa (translation required)': 'questions',
#'faq (translation required)': 'questions',
'meta (translation required)': 'meta',
'math (translation required)': 'math',
#'imagemap (translation required)': 'imagemap',
'image (translation required)': 'image',
'figure (translation required)': 'figure',
'include (translation required)': 'include',
'raw (translation required)': 'raw',
'replace (translation required)': 'replace',
'unicode (translation required)': 'unicode',
u'日期': 'date',
'class (translation required)': 'class',
'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
'contents (translation required)': 'contents',
'sectnum (translation required)': 'sectnum',
'section-numbering (translation required)': 'sectnum',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#'footnotes (translation required)': 'footnotes',
#'citations (translation required)': 'citations',
'target-notes (translation required)': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Traditional Chinese name to registered (in directives/__init__.py)
directive name mapping."""
roles = {
# language-dependent: fixed
'abbreviation (translation required)': 'abbreviation',
'ab (translation required)': 'abbreviation',
'acronym (translation required)': 'acronym',
'ac (translati
|
on required)': 'acronym',
u'code (translation required)': 'code',
'index (translation required)': 'index',
'i (translation required)': 'index',
'subscript (translation required)': 'subscript',
'sub (translation required)': 'subscript',
'superscript (translation required)': 'superscript',
'sup (translatio
|
n required)': 'superscript',
'title-reference (translation required)': 'title-reference',
'title (translation required)': 'title-reference',
't (translation required)': 'title-reference',
'pep-reference (translation required)': 'pep-reference',
'pep (translation required)': 'pep-reference',
'rfc-reference (translation required)': 'rfc-reference',
'rfc (translation required)': 'rfc-reference',
'emphasis (translation required)': 'emphasis',
'strong (translation required)': 'strong',
'literal (translation required)': 'literal',
'math (translation required)': 'math',
'named-reference (translation required)': 'named-reference',
'anonymous-reference (translation required)': 'anonymous-reference',
'footnote-reference (translation required)': 'footnote-reference',
'citation-reference (translation required)': 'citation-reference',
'substitution-reference (translation required)': 'substitution-reference',
'target (translation required)': 'target',
'uri-reference (translation required)': 'uri-reference',
'uri (translation required)': 'uri-reference',
'url (translation required)': 'uri-reference',
'raw (translation required)': 'raw',}
"""Mapping of Traditional Chinese role names to canonical role names for
interpreted text."""
|
hryamzik/ansible
|
lib/ansible/modules/network/aci/aci_tenant_span_dst_group.py
|
Python
|
gpl-3.0
| 6,414
| 0.001559
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_tenant_span_dst_group
short_description: Manage SPAN destination groups (span:DestGrp)
description:
- Manage SPAN destination groups on Cisco ACI fabrics.
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
- More information about the internal APIC class B(span:DestGrp) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
options:
dst_group:
description:
- The name of the SPAN destination group.
required: yes
aliases: [ name ]
description:
description:
- The description of the SPAN destination group.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_tenant_span_dst_group:
host: apic
username: admin
password: SomeSecretPassword
dst_group: '{{ dst_group }}'
description: '{{ descr }}'
tenant: '{{ tenant }}'
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_s
|
pec()
argument_spec.update(
dst_group=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects
description=dict(type='str'
|
, aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['dst_group', 'tenant']],
['state', 'present', ['dst_group', 'tenant']],
],
)
dst_group = module.params['dst_group']
description = module.params['description']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='spanDestGrp',
aci_rn='destgrp-{0}'.format(dst_group),
filter_target='eq(spanDestGrp.name, "{0}")'.format(dst_group),
module_object=dst_group,
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='spanDestGrp',
class_config=dict(
name=dst_group,
descr=description,
),
)
aci.get_diff(aci_class='spanDestGrp')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
demonchild2112/travis-test
|
grr/server/grr_response_server/server_stubs.py
|
Python
|
apache-2.0
| 12,170
| 0.012572
|
#!/usr/bin/env python
"""Stubs of client actions.
Client actions shouldn't be used on the server; stubs should be used instead.
This way we prevent effectively loading the whole client code into our
server parts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future.utils import with_metaclass
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_core.lib.rdfvalues import chipsec_types as rdf_chipsec_types
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import cloud as rdf_cloud
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import memory as rdf_memory
from grr_response_core.lib.rdfvalues import osquery as rdf_osquery
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import plist as rdf_plist
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
class ClientActionStub(with_metaclass(registry.MetaclassRegistry, object)):
"""Stub for a client action. To be used in server code."""
in_rdfvalue = None
out_rdfvalues = [None]
# from artifacts.py
class ArtifactCollector(ClientActionStub):
"""The client side artifact collector implementation."""
in_rdfvalue = rdf_artifacts.ClientArtifactCollectorArgs
out_rdfvalues = [rdf_artifacts.ClientArtifactCollectorResult]
# from windows/windows.py, osx/osx.py and linux/linux.py
class GetInstallDate(ClientActionStub):
"""Estimate the install date of this system."""
# DataBlob is deprecated but might still be sent by old clients.
out_rdfvalues = [rdf_protodict.DataBlob, rdfvalue.RDFDatetime]
class EnumerateInterfaces(ClientActionStub):
"""Enumerate all MAC addresses of all NICs."""
out_rdfvalues = [rdf_client_network.Interface]
class EnumerateFilesystems(ClientActionStub):
"""Enumerate all unique filesystems local to the system."""
out_rdfvalues = [rdf_client_fs.Filesystem]
class Uninstall(ClientActionStub):
"""Remove the service that starts us at startup."""
out_rdfvalues = [rdf_protodict.DataBlob]
class UpdateAgent(ClientActionStub):
"""Updates the GRR agent to a new version."""
in_rdfvalue = rdf_client_action.ExecuteBinaryRequest
out_rdfvalues = [rdf_client_action.ExecuteBinaryResponse]
# Windows-specific
class WmiQuery(ClientActionStub):
"""Runs a WMI query and returns the results to a server callback."""
in_rdfvalue = rdf_client_action.WMIRequest
out_rdfvalues = [rdf_protodict.Dict]
# OS X-specific
class OSXEnumerateRunningServices(ClientActionStub):
"""Enumerate all running launchd jobs."""
in_rdfvalue = None
out_rdfvalues = [rdf_client.OSXServiceInformation]
# Linux-specific
class EnumerateRunningServices(ClientActionStub):
"""List running daemons."""
in_rdfvalue = None
out_rdfvalues = [None]
class EnumerateUsers(ClientActionStub):
"""Enumerates all the users on this system."""
# Client versions 3.0.7.1 and older used to return KnowledgeBaseUser.
# KnowledgeBaseUser was renamed to User.
out_rdfvalues = [rdf_client.User, rdf_client.KnowledgeBaseUser]
# from admin.py
class Echo(ClientActionStub):
"""Returns a message to the server."""
in_rdfvalue = rdf_client_action.EchoRequest
out_rdfvalues = [rdf_client_action.EchoRequest]
class GetHostname(ClientActionStub):
"""Retrieves the host
|
name of the client."""
out_rdfvalues = [rdf_protod
|
ict.DataBlob]
class GetPlatformInfo(ClientActionStub):
"""Retrieves platform information."""
out_rdfvalues = [rdf_client.Uname]
class Kill(ClientActionStub):
"""A client action for terminating (ClientActionStub) the client."""
out_rdfvalues = [rdf_flows.GrrMessage]
class Hang(ClientActionStub):
"""A client action for simulating the client becoming unresponsive."""
in_rdfvalue = rdf_protodict.DataBlob
class BusyHang(ClientActionStub):
"""A client action that burns cpu cycles. Used for testing cpu limits."""
in_rdfvalue = rdf_protodict.DataBlob
class Bloat(ClientActionStub):
"""A client action that uses lots of memory for testing."""
in_rdfvalue = rdf_protodict.DataBlob
class GetConfiguration(ClientActionStub):
"""Retrieves the running configuration parameters."""
in_rdfvalue = None
out_rdfvalues = [rdf_protodict.Dict]
class GetLibraryVersions(ClientActionStub):
"""Retrieves version information for installed libraries."""
in_rdfvalue = None
out_rdfvalues = [rdf_protodict.Dict]
class UpdateConfiguration(ClientActionStub):
"""Updates configuration parameters on the client."""
in_rdfvalue = rdf_protodict.Dict
class GetClientInfo(ClientActionStub):
"""Obtains information about the GRR client installed."""
out_rdfvalues = [rdf_client.ClientInformation]
class GetClientStats(ClientActionStub):
"""This retrieves some stats about the GRR process."""
in_rdfvalue = rdf_client_action.GetClientStatsRequest
out_rdfvalues = [rdf_client_stats.ClientStats]
class GetClientStatsAuto(GetClientStats):
"""Action used to send the reply to a well known flow on the server."""
class SendStartupInfo(ClientActionStub):
out_rdfvalues = [rdf_client.StartupInfo]
# from plist.py
class PlistQuery(ClientActionStub):
"""Parses the plist request specified and returns the results."""
in_rdfvalue = rdf_plist.PlistRequest
out_rdfvalues = [rdf_protodict.RDFValueArray]
# from standard.py
class ReadBuffer(ClientActionStub):
"""Reads a buffer from a file and returns it to a server callback."""
in_rdfvalue = rdf_client.BufferReference
out_rdfvalues = [rdf_client.BufferReference]
class TransferBuffer(ClientActionStub):
"""Reads a buffer from a file and returns it to the server efficiently."""
in_rdfvalue = rdf_client.BufferReference
out_rdfvalues = [rdf_client.BufferReference]
class HashBuffer(ClientActionStub):
"""Hash a buffer from a file and returns it to the server efficiently."""
in_rdfvalue = rdf_client.BufferReference
out_rdfvalues = [rdf_client.BufferReference]
class HashFile(ClientActionStub):
"""Hash an entire file using multiple algorithms."""
in_rdfvalue = rdf_client_action.FingerprintRequest
out_rdfvalues = [rdf_client_action.FingerprintResponse]
class ListDirectory(ClientActionStub):
"""Lists all the files in a directory."""
in_rdfvalue = rdf_client_action.ListDirRequest
out_rdfvalues = [rdf_client_fs.StatEntry]
# DEPRECATED.
#
# This action was replaced by newer `GetFileStat` action. This stub is left for
# compatibility with old clients. After the transition period all clients should
# support new action and this class should be removed.
#
# TODO(hanuszczak): Remove this class after 2021-01-01.
class StatFile(ClientActionStub):
"""Sends a StatEntry for a single file."""
in_rdfvalue = rdf_client_action.ListDirRequest
out_rdfvalues = [rdf_client_fs.StatEntry]
class GetFileStat(ClientActionStub):
"""A client action that yields stat of a given file."""
in_rdfvalue = rdf_client_action.GetFileStatRequest
out_rdfvalues = [rdf_client_fs.StatEntry]
class ExecuteCommand(ClientActionStub):
"""Executes one of the predefined commands."""
in_rdfvalue = rdf_client_action.ExecuteRequest
out_rdfvalues = [rdf_client_action.ExecuteResponse]
class ExecuteBinaryCommand(ClientActionStub):
"""Executes a command from a passed in binary."""
in_rdfvalue = rdf_client_action.ExecuteBinaryRequest
out_rdfvalues = [rdf_client_action.ExecuteBinaryResponse]
class ExecutePython(ClientActionStub):
"""Executes python code with exec."""
in_rdfvalue = rdf_client_action.ExecutePythonRequest
out_rdfvalues
|
akretion/laposte_api
|
laposte_api/data/colissimo_9V_nhas22.py
|
Python
|
agpl-3.0
| 2,753
| 0.009081
|
# -*- coding: utf-8 -*-
delivery={'weight': '10.0', 'pec_bar': u'9V169001>59647441000000023', 'suivi_bar': u'9V0>50000000024', 'cab_prise_en_charge': u'9V1 69001 964744 1000 000023', 'date': '12/05/2014', 'cab_suivi': u'9V 00000 00002 4', 'ref_client': u'OUT/00007', 'Instructions': ''}
sender={'city': u'city', 'account': u'964744', 'name': u'Your Company', 'zip': u'zip', 'phone': u'599', 'country': u'France', 'support_city': u'Gennevilliers PFC', 'street': u'rue', 'password': u'123456'}
address={'city': u'Lyon', 'name': u'Jim NHASTIC', 'zip': u'69001', 'mobile': '', 'street2': '', 'street3': '', 'countryCode': u'FR', 'phone': '', 'street': u'150 rue Vauban', 'email': ''}
option={'ar': False, 'nm': False, 'ftd': False}
kwargs={'logo': 'EXPERT_F', '_product_code': u'9V'}
content="""/* Utf8 file encoded converted in CP1252 by python */
^XA
^LH30,30 /* initial position*/
^CI27 /* windows CP1252 decoding */
^CF0,22 /*CF:default font|font_type,size*/
/*Fonts : P,Q,R,S,T fonts are the same with Zebra GX420t, only size change font '0' seems to be functionnal for general purpose */
^FWN /*FW:Default orientation*/
^BY3 /*BY:Bar Code Field Default*/
^FO80,01^XGE:EXPERT_F,1,1^FS
^FO0,100^GB770,1,4^FS
^FO10,130^A0,30^FDEXPEDITEUR
^FS
^FO450,130^FDRef Client: OUT/00007^FS
^FO0,160^GB360,160,4^FS /*GB:graphic box|width,height,thickness*/
/*graphic diagonal line:width,height,border_thickness,,orientation(R=right diagonal)*/
^FO0,160^GD350,160,10,,R^FS
^FO0,160^GD350,160,10,,L^FS
^FO410,160^GB360,160,4^FS
/*^A0 /*A:font|font_type,orientation,size*/*/
^FO25,175^A0,30,30^FDYour Company^FS
^FO25,205 /*FO:field origin|x,y*/
^FB400,5,3, /*FB:field block|width text,line number,space beetween lines*/
/* COLISS RULE Teleph expediteur si OM ou I */
/* COLISS RULE Pays expediteur si OM ou I */
^A0,24^FDrue
\&
\&zip city
^FS
^FO420,170 /*FO:field origin|x,y*/
^FB400,6,3,
^FDCOMPTE CLIENT: 964744
\&SITE DE PRISE EN CHARGE:
\&Gennevilliers PFC
\&N° Colis : 9V 00000 00002 4
\&Poids : 10.0 Kg
\&Edité le : 12/05/2014
^FS
/* ||| || |||| */
/* >5 => is subset C invocation code ; >6 => is subset B invocation code */
^FO40,345^PR2,2^BCN,230,Y,N,N^FD9V0>50000000024^FS
^FO40,575^GB402,3,4^FS
^FO0,585^FDN° de colis :^FS
/* /!\ /_\ /!\ /_\ /!\ */
^FO30,630^A0,30^FDDESTINATAIRE^FS
^FO5,660^GB450,200,4^FS
^FO30,675^A0,24,28^FDJim NHASTIC^FS
^FO30,705^FB400,6,2,
^FD150 rue Vauban
\&
\&^FS
^FO30,755
^A0,40
^FD69001 Lyon^FS
/* COLISS RULE Phone+country expediteur si Internationale */
^FO30,800^FDTEL: ^FS
^FO0,950^A0B^FDSPECIFIQUE^FS
/* ||| || |||| */
^FO70,880^BCN,230,Y,N,N^FD9V169001>59647441000000023^FS
^FO100,1120^FDN°
|
PCH:^FS
^
|
FO0,1136^XGE:POSTE,1,1^FS
^FO720,1130^XGE:CAMERA,1,1^FS
^XZ
"""
|
SonienTaegi/CELLAR
|
CELLAR/settings.py
|
Python
|
gpl-2.0
| 2,762
| 0.000724
|
"""
Django settings for CELLAR project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'PLEASE FILL YOUR OWN SECRET KEY'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["localhost", "127.0.0.1", ]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Browser',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'CELLAR.urls
|
'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
|
},
},
]
WSGI_APPLICATION = 'CELLAR.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'CELLAR.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
|
mvo5/python-apt
|
tests/test_hashes.py
|
Python
|
gpl-2.0
| 9,619
| 0
|
#!/usr/bin/python
#
# Copyright (C) 2009 Julian Andres Klode <jak@debian.org>
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
"""Unit tests for verifying the correctness of hashsums in apt_pkg.
Unit tests to verify the correctness of Hashes, HashString and the various
functions like md5sum."""
import unittest
import hashlib
import sys
import warnings
import apt_pkg
import testcommon
class TestHashes(testcommon.TestCase):
"""Test apt_pkg.Hashes() and the various apt_pkg.*sum() functions."""
def setUp(self):
"""Prepare the tests, create reference values..."""
testcommon.TestCase.setUp(self)
self.file = open(apt_pkg.__file__, "rb")
self.value = self.file.read()
self.hashes = apt_pkg.Hashes(self.value)
self.file.seek(0)
self.fhashes = apt_pkg.Hashes(self.file)
# Reference values.
self.md5 = hashlib.md5(self.value).hexdigest()
self.sha1 = hashlib.sha1(self.value).hexdigest()
self.sha256 = hashlib.sha256(self.value).hexdigest()
self.file.seek(0)
def tearDown(self):
"""Cleanup, Close the file object used for the tests."""
self.file.close()
def test_md5sum(self):
"""hashes: Test apt_pkg.md5sum()"""
self.assertEqual(apt_pkg.md5sum(self.value), self.md5)
self.assertEqual(apt_pkg.md5sum(self.file), self.md5)
def test_sha1sum(self):
"""hashes: Test apt_pkg.sha1sum()"""
self.assertEqual(apt_pkg.sha1sum(self.value), self.sha1)
self.assertEqual(apt_pkg.sha1sum(self.file), self.sha1)
def test_sha256sum(self):
"""hashes: Test apt_pkg.sha256sum()"""
self.assertEqual(apt_pkg.sha256sum(self.value), self.sha256)
self.assertEqual(apt_pkg.sha256sum(self.file), self.sha256)
def test_bytes(self):
"""hashes: Test apt_pkg.Hashes(bytes)"""
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
self.assertEqual(self.hashes.md5, self.md5)
self.assertEqual(self.hashes.sha1, self.sha1)
self.assertEqual(self.hashes.sha256, self.sha256)
self.assertEqual(len(caught_warnings), 3)
self.assertTrue(issubclass(caught_warnings[0].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[1].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[2].category,
DeprecationWarning))
def test_file(self):
"""hashes: Test apt_pkg.Hashes(file)."""
with warnings.catch_warnings(record=True) as caught_warnings:
warnings
|
.simplefilter("always")
self.assertEqu
|
al(self.hashes.md5, self.fhashes.md5)
self.assertEqual(self.hashes.sha1, self.fhashes.sha1)
self.assertEqual(self.hashes.sha256, self.fhashes.sha256)
self.assertEqual(len(caught_warnings), 6)
self.assertTrue(issubclass(caught_warnings[0].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[1].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[2].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[3].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[4].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[5].category,
DeprecationWarning))
def test_unicode(self):
"""hashes: Test apt_pkg.Hashes(unicode)."""
if sys.version_info[0] == 3:
self.assertRaises(TypeError, apt_pkg.Hashes, "D")
self.assertRaises(TypeError, apt_pkg.md5sum, "D")
self.assertRaises(TypeError, apt_pkg.sha1sum, "D")
self.assertRaises(TypeError, apt_pkg.sha256sum, "D")
else:
self.assertRaises(TypeError, apt_pkg.Hashes, unicode())
self.assertRaises(TypeError, apt_pkg.md5sum, unicode())
self.assertRaises(TypeError, apt_pkg.sha1sum, unicode())
self.assertRaises(TypeError, apt_pkg.sha256sum, unicode())
class TestHashString(testcommon.TestCase):
"""Test apt_pkg.HashString()."""
def setUp(self):
"""Prepare the test by reading the file."""
testcommon.TestCase.setUp(self)
self.file = open(apt_pkg.__file__)
self.hashes = apt_pkg.Hashes(self.file)
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
self.md5 = apt_pkg.HashString("MD5Sum", self.hashes.md5)
self.sha1 = apt_pkg.HashString("SHA1", self.hashes.sha1)
self.sha256 = apt_pkg.HashString("SHA256", self.hashes.sha256)
self.assertEqual(len(caught_warnings), 3)
self.assertTrue(issubclass(caught_warnings[0].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[1].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[2].category,
DeprecationWarning))
def tearDown(self):
"""Cleanup, Close the file object used for the tests."""
self.file.close()
def test_md5(self):
"""hashes: Test apt_pkg.HashString().md5"""
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertEqual("MD5Sum:%s" % self.hashes.md5, str(self.md5))
self.assertTrue(self.md5.verify_file(apt_pkg.__file__))
def test_sha1(self):
"""hashes: Test apt_pkg.HashString().sha1"""
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertEqual("SHA1:%s" % self.hashes.sha1, str(self.sha1))
self.assertTrue(self.sha1.verify_file(apt_pkg.__file__))
def test_sha256(self):
"""hashes: Test apt_pkg.HashString().sha256"""
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertEqual("SHA256:%s" % self.hashes.sha256,
str(self.sha256))
self.assertTrue(self.sha256.verify_file(apt_pkg.__file__))
def test_wrong(self):
"""hashes: Test apt_pkg.HashString(wrong_type)."""
self.assertRaises(TypeError, apt_pkg.HashString, 0)
if sys.version_info[0] == 3:
self.assertRaises(TypeError, apt_pkg.HashString, bytes())
class TestHashStringList(testcommon.TestCase):
"""Test apt_pkg.HashStringList()"""
def test_file_size(self):
hsl = apt_pkg.HashStringList()
self.assertEqual(hsl.file_size, 0)
hsl.file_size = 42
self.assertEqual(hsl.file_size, 42)
self.assertEqual(len(hsl), 1)
# Verify that I can re-assign value (this handles the long case on
# Python 2).
hsl.file_size = hsl.file_size
with self.assertRaises(OverflowError):
hsl.file_size = -1
hsl.file_size = 0
def test_append(self):
"""Testing whether append works correctly."""
hs1 = apt_pkg.HashString("MD5Sum",
"a60599e6200b60050d7a30721e3532ed")
hs2 = apt_pkg.HashString("SHA1",
"ef113338e654b1ada807a939ad47b3a67633391b")
hsl = apt_pkg.HashStringList()
hsl.append(hs1)
hsl.append(hs2)
self.assertEqual(len(hsl), 2)
self.assertEqual(hsl[0].hashtype, "MD5Sum")
self.assertEqual(hsl[1].hashtype, "SHA1")
self.assertEqual(str(hsl[0]), str(hs1))
self.assertEqual(str(hsl[1]), str(hs2))
def test_find(self):
"""Testing whether append works correctly."""
hs1
|
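The tests above pin apt_pkg's hashing helpers against hashlib reference digests and show that HashString can re-verify files on disk. A short usage sketch of the same calls; it assumes python-apt is installed, and /etc/hostname is only a placeholder file.

import hashlib

import apt_pkg

path = "/etc/hostname"  # placeholder file; any readable file works

with open(path, "rb") as fobj:
    data = fobj.read()

# apt_pkg.sha256sum accepts bytes (or an open file), exactly as in the tests.
assert apt_pkg.sha256sum(data) == hashlib.sha256(data).hexdigest()

# HashString pairs a hash type with a digest and can re-check a file on disk.
hs = apt_pkg.HashString("SHA256", hashlib.sha256(data).hexdigest())
print(hs.verify_file(path))  # True while the file is unchanged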
pyconjp/pyconjp-website
|
pycon/pycon_api/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 12,615
| 0.007372
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'APIAuth'
db.create_table(u'pycon_api_apiauth', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('auth_key', self.gf('django.db.models.fields.CharField')(default='3871ec85-2c87-4f82-b92e-d067843fba45', max_length=36)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal(u'pycon_api', ['APIAuth'])
# Adding model 'ProposalData'
db.create_table(u'pycon_api_proposaldata', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('proposal', self.gf('django.db.models.fields.related.OneToOneField')(related_name='data', unique=True, to=orm['proposals.ProposalBase'])),
('data', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'pycon_api', ['ProposalData'])
# Adding model 'IRCLogLine'
db.create_table(
|
u'pycon_api_irclogline', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('timestamp', self.gf('django.db.models.fields.DateTimeField')()),
('proposal', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['proposals.ProposalBase'])),
('user', self.gf('django.db.models.fields.CharField')(max_length=40)),
('line', self.gf('django.db.models.fields.TextField')(blank=True)),
))
|
db.send_create_signal(u'pycon_api', ['IRCLogLine'])
def backwards(self, orm):
# Deleting model 'APIAuth'
db.delete_table(u'pycon_api_apiauth')
# Deleting model 'ProposalData'
db.delete_table(u'pycon_api_proposaldata')
# Deleting model 'IRCLogLine'
db.delete_table(u'pycon_api_irclogline')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'conference.conference': {
'Meta': {'object_name': 'Conference'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('timezones.fields.TimeZoneField', [], {'default': "'US/Eastern'", 'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'conference.section': {
'Meta': {'object_name': 'Section'},
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['conference.Conference']"}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'proposals.additionalspeaker': {
'Meta': {'unique_together': "(('speaker', 'proposalbase'),)", 'object_name': 'AdditionalSpeaker', 'db_table': "'proposals_proposalbase_additional_speakers'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposalbase': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalBase']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['speakers.Speaker']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'proposals.proposalbase': {
'Meta': {'object_name': 'ProposalBase'},
'abstract': ('django.db.models.fields.TextField', [], {}),
'additional_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'additional_speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['speakers.Speaker']", 'symmetrical': 'False', 'through': u"orm['proposals.AdditionalSpeaker']", 'blank': 'True'}),
'cancelled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalKind']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposals'", 'to': u"orm['speakers.Speaker']"}),
'submitted': ('django.db.mo
|
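The migration above follows South's standard shape: forwards() creates tables with db.create_table and signals model creation, while backwards() drops them again. A minimal sketch of the same pattern for a hypothetical app; every table, app and column name below is invented for illustration.

from south.db import db
from south.v2 import SchemaMigration


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Create a hypothetical 'demo_note' table with an auto primary key
        # and one text column, mirroring the create_table calls above.
        db.create_table(u'demo_note', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('body', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal(u'demo', ['Note'])

    def backwards(self, orm):
        # Drop the table again so the migration stays reversible.
        db.delete_table(u'demo_note')

    models = {}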
rtancman/filmes
|
movies/core/migrations/0003_auto_20150914_2157.py
|
Python
|
mit
| 408
| 0
|
# -*- coding: utf-8 -*-
from __future__ import
|
unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20150914_2147'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='slug',
field=models.SlugField(null=True, editable=F
|
alse),
),
]
|
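The AlterField above implies the Movie model now declares slug as a nullable, non-editable SlugField. A sketch of the matching model declaration; any other Movie fields are assumptions, since only slug appears in the migration.

from django.db import models


class Movie(models.Model):
    # Matches the AlterField in migration 0003: nullable and excluded
    # from model forms and the admin.
    slug = models.SlugField(null=True, editable=False)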
mjamesruggiero/tripp
|
tests/test_logistic_regression.py
|
Python
|
bsd-3-clause
| 6,511
| 0.000154
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from .context import tripp
from tripp import logistic_regression
from tripp import munge
from tripp import gradient
from tripp import ml
from tripp import algebra
import random
import logging
logging.basicConfig(level=logging.ERROR, format="%(lineno)d\t%(message)s")
class TestLogisticRegression(unittest.TestCase):
def setUp(self):
tuples = [
(0.7, 48000, 1), (1.9, 48000, 0), (2.5, 60000, 1),
(4.2, 63000, 0), (6, 76000, 0), (6.5, 69000, 0),
(7.5, 76000, 0), (8.1, 88000, 0), (8.7, 83000, 1),
(10, 83000, 1), (0.8, 43000, 0), (1.8, 60000, 0),
(10, 79000, 1), (6.1, 76000, 0), (1.4, 50000, 0),
(9.1, 92000, 0), (5.8, 75000, 0), (5.2, 69000, 0),
(1, 56000, 0), (6, 67000, 0), (4.9, 74000, 0),
(6.4, 63000, 1), (6.2, 82000, 0), (3.3, 58000, 0),
(9.3, 90000, 1), (5.5, 57000, 1), (9.1, 102000, 0),
(2.4, 54000, 0), (8.2, 65000, 1), (5.3, 82000, 0),
(9.8, 107000, 0), (1.8, 64000, 0), (0.6, 46000, 1),
(0.8, 48000, 0), (8.6, 84000, 1), (0.6, 45000, 0),
(0.5, 30000, 1), (7.3, 89000, 0), (2.5, 48000, 1),
(5.6, 76000, 0), (7.4, 77000, 0), (2.7, 56000, 0),
(0.7, 48000, 0), (1.2, 42000, 0), (0.2, 32000, 1),
(4.7, 56000, 1), (2.8, 44000, 1), (7.6, 78000, 0),
(1.1, 63000, 0), (8, 79000, 1), (2.7, 56000, 0),
(6, 52000, 1), (4.6, 56000, 0), (2.5, 51000, 0),
(5.7, 71000, 0), (2.9, 65000, 0), (1.1, 33000, 1),
(3, 62000, 0), (4, 71000, 0), (2.4, 61000, 0),
(7.5, 75000, 0), (9.7, 81000, 1), (3.2, 62000, 0),
(7.9, 88000, 0), (4.7, 44000, 1), (2.5, 55000, 0),
(1.6, 41000, 0), (6.7, 64000, 1), (6.9, 66000, 1),
(7.9, 78000, 1), (8.1, 102000, 0), (5.3, 48000, 1),
(8.5, 66000, 1), (0.2, 56000, 0), (6, 69000, 0),
(7.5, 77000, 0), (8, 86000, 0), (4.4, 68000, 0),
(4.9, 75000, 0), (1.5, 60000, 0), (2.2, 50000, 0),
(3.4, 49000, 1), (4.2, 70000, 0), (7.7, 98000, 0),
(8.2, 85000, 0), (5.4, 88000, 0), (0.1, 46000, 0),
(1.5, 37000, 0), (6.3, 86000, 0), (3.7, 57000, 0),
(8.4, 85000, 0), (2, 42000, 0), (5.8, 69000, 1),
(2.7, 64000, 0), (3.1, 63000, 0), (1.9, 48000, 0),
(10, 72000, 1), (0.2, 45000, 0), (8.6, 95000, 0),
(1.5, 64000, 0), (9.8, 95000, 0), (5.3, 65000, 0),
(7.5, 80000, 0), (9.9, 91000, 0), (9.7, 50000, 1),
(2.8, 68000, 0), (3.6, 58000, 0), (3.9, 74000, 0),
(4.4, 76000, 0), (2.5, 49000, 0), (7.2, 81000, 0),
(5.2, 60000, 1), (2.4, 62000, 0), (8.9, 94000, 0),
(2.4, 63000, 0), (6.8, 69000, 1), (6.5, 77000, 0),
(7, 86000, 0), (9.4, 94000, 0), (7.8, 72000, 1),
(0.2, 53000, 0), (10, 97000, 0), (5.5, 65000, 0),
(7.7, 71000, 1), (8.1, 66000, 1), (9.8, 91000, 0),
(8, 84000, 0), (2.7, 55000, 0), (2.8, 62000, 0),
(9.4, 79000, 0), (2.5, 57000, 0), (7.4, 70000, 1),
(2.1, 47000, 0), (5.3, 62000, 1), (6.3, 79000, 0),
(6.8, 58000, 1), (5.7, 80000, 0), (2.2, 61000, 0),
(4.8, 62000, 0), (3.7, 64000, 0), (4.1, 85000, 0),
(2.3, 51000, 0), (3.5, 58000, 0), (0.9, 43000, 0),
(0.9, 54000, 0), (4.5, 74000, 0), (6.5, 55000, 1),
(4.1, 41000, 1), (7.1, 73000, 0), (1.1, 66000, 0),
(9.1, 81000, 1), (8, 69000, 1), (7.3, 72000, 1),
(3.3, 50000, 0), (3.9, 58000, 0), (2.6, 49000, 0),
(1.6, 78000, 0), (0.7, 56000, 0), (2.1, 36000, 1),
(7.5, 90000, 0), (4.8, 59000, 1), (8.9, 95000, 0),
(6.2, 72000, 0), (6.3, 63000, 0), (9.1, 100000, 0),
(7.3, 61000, 1), (5.6, 74000, 0), (0.5, 66000, 0),
(1.1, 59000, 0), (5.1, 61000, 0), (6.2, 70000, 0),
(6.6, 56000, 1), (6.3, 76000, 0), (6.5, 78000, 0),
(5.1, 59000, 0), (9.5, 74000, 1), (4.5, 64000, 0),
(2, 54000, 0), (1, 52000, 0), (4, 69000, 0),
(6.5, 76000, 0), (3, 60000, 0), (4.5, 63000, 0),
(7.8, 70000, 0), (3.9, 60000, 1), (0.8, 51000, 0),
(4.2, 78000, 0), (1.1, 54000, 0), (6.2, 60000, 0),
(2.9, 59000, 0), (2.1, 52000, 0), (8.2, 87000, 0),
(4.8, 73000, 0), (2.2, 42000, 1), (9.1, 98000, 0),
(6.5, 84000, 0), (6.9, 73000, 0), (5.1, 72000, 0),
(9.1, 69000, 1), (9.8, 79000, 1)
]
self.data = map(list, tuples)
def test_gradient(self):
"""logistic_regression -- gradient"""
# x is [1, experience, salary]
x = [[1] + row[:2] for row in self.data]
# y is "paid account"
y = [row[2] for row in self.data]
rescaled = munge.rescale(x)
random.seed(0)
x_train, x_test, y_train, y_test = ml.train_test_split(rescaled,
y,
0.33)
beta_0 = [1, 1, 1]
beta_hat = gradient.\
maximize_stochastic(logistic_regression.logistic_log_likelihood_i,
logistic_regression.logistic_log_gradient_i,
x_train,
y_train,
beta_0)
true_positives = false_positives = true_negatives = false_negatives = 0
for x_i, y_i in zip(x_test, y_test):
predict = logistic_regression.logistic(algebra.dot(beta_hat, x_i))
if y_i == 1 and predict >= 0.5:
true_positives += 1
elif y_i == 1:
false_negatives += 1
elif predict >= 0.5:
false_positives += 1
else:
|
true_negatives += 1
message = "true_pos={0}; false_neg={1}, false_pos={2}; true_neg={3}"
logging.debug(message.format(true_positives,
false_negatives,
false_positives,
true_negatives))
precision = true_positives / float((true_positives + false_positives))
recall = true_positives / float((true_positives + false_negatives))
|
self.assertEqual(0.93, round(precision, 2))
self.assertEqual(0.82, round(recall, 2))
|
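The test above tallies a confusion matrix while scoring the held-out set, then derives precision and recall from the counts. A tiny self-contained sketch of that arithmetic, independent of the tripp package; the counts are illustrative, chosen so the rounded results match the assertions above.

def precision_recall(tp, fp, fn):
    """Return (precision, recall) from confusion-matrix counts."""
    precision = tp / float(tp + fp)  # of everything predicted positive, how much was right
    recall = tp / float(tp + fn)     # of everything actually positive, how much was found
    return precision, recall


# Example: 14 true positives, 1 false positive, 3 false negatives.
p, r = precision_recall(14, 1, 3)
print(round(p, 2), round(r, 2))  # 0.93 0.82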
LeoTestard/qt-ubuntu-components
|
tests/autopilot/tavastia/tests/textfield/test_textfield.py
|
Python
|
lgpl-3.0
| 923
| 0.005417
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright 2012 Canonical
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
"""Tests for the TextInput Qml component."""
from
|
autopilot.matchers import Eventually
from textwrap import dedent
from testtools.matchers import Is, Not, Equals
from testtools import skip
import os
fro
|
m tavastia.tests import TavastiaTestCase
class TextFieldTests(TavastiaTestCase):
"""Tests for TextField component."""
test_qml_file = "%s/%s.qml" % (os.path.dirname(os.path.realpath(__file__)),"TextFieldTests")
def test_can_select_textfield(self):
"""Must be able to select the Qml TextField component."""
obj = self.app.select_single('TextField')
self.assertThat(obj, Not(Is(None)))
|
aequitas/home-assistant
|
homeassistant/components/deconz/__init__.py
|
Python
|
apache-2.0
| 6,477
| 0
|
"""Support for deCONZ devices."""
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_API_KEY, CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import config_validation as cv
# Loading the config flow file will register the flow
from .config_flow import get_master_gateway
from .const import (
CONF_ALLOW_CLIP_SENSOR, CONF_ALLOW_DECONZ_GROUPS, CONF_BRIDGEID,
CONF_MASTER_GATEWAY, DEFAULT_PORT, DOMAIN, _LOGGER)
from .gateway import DeconzGateway
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_API_KEY): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
}, extra=vol.ALLOW_EXTRA)
SERVICE_DECONZ = 'configure'
SERVICE_FIELD = 'field'
SERVICE_ENTITY = 'entity'
SERVICE_DATA = 'data'
SERVICE_SCHEMA = vol.All(vol.Schema({
vol.Optional(SERVICE_ENTITY): cv.entity_id,
vol.Optional(SERVICE_FIELD): cv.matches_regex('/.*'),
vol.Required(SERVICE_DATA): dict,
vol.Optional(CONF_BRIDGEID): str
}), cv.has_at_least_one_key(SERVICE_ENTITY, SERVICE_FIELD))
SERVICE_DEVICE_REFRESH = 'device_refresh'
SERVICE_DEVICE_REFRESH_SCHEMA = vol.All(vol.Schema({
vol.Optional(CONF_BRIDGEID): str
}))
async def async_setup(hass, config):
"""Load configuration for deCONZ component.
Discovery has loaded the component if DOMAIN is not present in config.
"""
if not hass.config_entries.async_entries(DOMAIN) and DOMAIN in config:
deconz_config = config[DOMAIN]
hass.async_create_task(hass.config_entries.flow.async_init(
DOMAIN, context={'source': config_entries.SOURCE_IMPORT},
data=deconz_config
))
return True
async def async_setup_entry(hass, config_entry):
"""Set up a deCONZ bridge for a config entry.
Load config, group, light and sensor data for server information.
Start websocket for push notification of state changes from deCONZ.
"""
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
if not config_entry.options:
await async_populate_options(hass, config_entry)
gateway = DeconzGateway(hass, config_entry)
if not await gateway.async_setup():
return False
hass.data[DOMAIN][gateway.bridgeid] = gateway
await gateway.async_update_device_registry()
async def async_configure(call):
"""Set attribute of device in deCONZ.
Entity is used to resolve to a device path (e.g. '/lights/1').
Field is a string representing either a full path
(e.g. '/lights/1/state') when entity is not specified, or a
subpath (e.g. '/state') when used together with entity.
Data is a json object with what data you want to alter
e.g. data={'on': true}.
{
"field": "/lights/1/state",
"data": {"on": true}
}
See Dresden Elektroniks REST API documentation for details:
http://dresden-elektronik.github.io/deconz-rest-doc/rest/
"""
field = call.data.get(SERVICE_FIELD, '')
entity_id = call.data.get(SERVICE_ENTITY)
data = call.
|
data[SERVICE_DATA]
gateway = get_ma
|
ster_gateway(hass)
if CONF_BRIDGEID in call.data:
gateway = hass.data[DOMAIN][call.data[CONF_BRIDGEID]]
if entity_id:
try:
field = gateway.deconz_ids[entity_id] + field
except KeyError:
_LOGGER.error('Could not find the entity %s', entity_id)
return
await gateway.api.async_put_state(field, data)
hass.services.async_register(
DOMAIN, SERVICE_DECONZ, async_configure, schema=SERVICE_SCHEMA)
async def async_refresh_devices(call):
"""Refresh available devices from deCONZ."""
gateway = get_master_gateway(hass)
if CONF_BRIDGEID in call.data:
gateway = hass.data[DOMAIN][call.data[CONF_BRIDGEID]]
groups = set(gateway.api.groups.keys())
lights = set(gateway.api.lights.keys())
scenes = set(gateway.api.scenes.keys())
sensors = set(gateway.api.sensors.keys())
await gateway.api.async_load_parameters()
gateway.async_add_device_callback(
'group', [group
for group_id, group in gateway.api.groups.items()
if group_id not in groups]
)
gateway.async_add_device_callback(
'light', [light
for light_id, light in gateway.api.lights.items()
if light_id not in lights]
)
gateway.async_add_device_callback(
'scene', [scene
for scene_id, scene in gateway.api.scenes.items()
if scene_id not in scenes]
)
gateway.async_add_device_callback(
'sensor', [sensor
for sensor_id, sensor in gateway.api.sensors.items()
if sensor_id not in sensors]
)
hass.services.async_register(
DOMAIN, SERVICE_DEVICE_REFRESH, async_refresh_devices,
        schema=SERVICE_DEVICE_REFRESH_SCHEMA)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, gateway.shutdown)
return True
async def async_unload_entry(hass, config_entry):
"""Unload deCONZ config entry."""
gateway = hass.data[DOMAIN].pop(config_entry.data[CONF_BRIDGEID])
if not hass.data[DOMAIN]:
hass.services.async_remove(DOMAIN, SERVICE_DECONZ)
hass.services.async_remove(DOMAIN, SERVICE_DEVICE_REFRESH)
elif gateway.master:
await async_populate_options(hass, config_entry)
new_master_gateway = next(iter(hass.data[DOMAIN].values()))
await async_populate_options(hass, new_master_gateway.config_entry)
return await gateway.async_reset()
async def async_populate_options(hass, config_entry):
"""Populate default options for gateway.
Called by setup_entry and unload_entry.
Makes sure there is always one master available.
"""
master = not get_master_gateway(hass)
options = {
CONF_MASTER_GATEWAY: master,
CONF_ALLOW_CLIP_SENSOR: config_entry.data.get(
CONF_ALLOW_CLIP_SENSOR, False),
CONF_ALLOW_DECONZ_GROUPS: config_entry.data.get(
CONF_ALLOW_DECONZ_GROUPS, True)
}
hass.config_entries.async_update_entry(config_entry, options=options)
|
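async_configure above backs a deconz.configure service whose payload combines an optional entity, a field path and a data dict, as its docstring spells out. A hedged sketch of invoking that service from inside Home Assistant; the entity id is a placeholder and the surrounding component context is assumed.

async def turn_on_example_light(hass):
    # Calls the 'deconz.configure' service registered in async_setup_entry above.
    # 'field' is appended to the device path the handler resolves from 'entity'.
    await hass.services.async_call(
        "deconz",
        "configure",
        {
            "entity": "light.example_light",  # placeholder entity id
            "field": "/state",
            "data": {"on": True},
        },
        blocking=True,
    )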
SasView/sasmodels
|
sasmodels/core.py
|
Python
|
bsd-3-clause
| 21,256
| 0.001553
|
"""
Core model handling routines.
"""
from __future__ import print_function
__all__ = [
"list_models", "load_model", "load_model_info",
"build_model", "precompile_dlls", "reparameterize",
]
import os
from os.path import basename, join as joinpath
from glob import glob
import re
import copy
import numpy as np # type: ignore
# NOTE: delay loading of kernelcl, kernelcuda, kerneldll and kernelpy
# cl and cuda in particular take awhile since they try to establish a
# connection with the card to verify that the environment works.
from . import generate
from . import modelinfo
from . import product
from . import mixture
from . import custom
# pylint: disable=unused-import
try:
from typing import List, Union, Optional, Any, Tuple
from .kernel import KernelModel
from .modelinfo import ModelInfo
except ImportError:
pass
# pylint: enable=unused-import
CUSTOM_MODEL_PATH = os.environ.get('SAS_MODELPATH', "")
if CUSTOM_MODEL_PATH == "":
CUSTOM_MODEL_PATH = joinpath(os.path.expanduser("~"), ".sasmodels", "custom_models")
#if not os.path.isdir(CUSTOM_MODEL_PATH):
# os.makedirs(CUSTOM_MODEL_PATH)
# TODO: refactor composite model support
# The current load_model_info/build_model does not reuse existing model
# definitions when loading a composite model, instead reloading and
# rebuilding the kernel for each component model in the expression. This
# is fine in a scripting environment where the model is built when the script
# starts and is thrown away when the script ends, but may not be the best
# solution in a long-lived application. This affects the following functions:
#
# load_model
# load_model_info
# build_model
KINDS = ("all", "py", "c", "double", "single", "opencl", "1d", "2d",
"nonmagnetic", "magnetic")
def list_models(kind=None):
# type: (str) -> List[str]
"""
Return the list of available models on the model path.
*kind* can be one of the following:
* all: all models
* py: python models only
* c: c models only
* single: c models which support single precision
* double: c models which require double precision
* opencl: c models which run in opencl
* dll: c models which do not run in opencl
* 1d: models without orientation
* 2d: models with orientation
* magnetic: models supporting magnetic sld
    * nonmagnetic: models without magnetic parameters
For multiple conditions, combine with plus. For example, *c+single+2d*
would return all oriented models implemented in C which can be computed
accurately with single precision arithmetic.
"""
if kind and any(k not in KINDS for k in kind.split('+')):
raise ValueError("kind not in " + ", ".join(KINDS))
files = sorted(glob(joinpath(generate.MODEL_PATH, "[a-zA-Z]*.py")))
available_models = [basename(f)[:-3] for f in files]
if kind and '+' in kind:
all_kinds = kind.split('+')
condition = lambda name: all(_matches(name, k) for k in all_kinds)
else:
condition = lambda name: _matches(name, kind)
selected = [name for name in available_models if condition(name)]
return selected
def _matches(name, kind):
if kind is None or kind == "all":
return True
info = load_model_info(name)
pars = info.parameters.kernel_parameters
# TODO: may be adding Fq to the list at some point
is_pure_py = callable(info.Iq)
if kind == "py":
return is_pure_py
elif kind == "c":
return not is_pure_py
elif kind == "double":
return not info.single and not is_pure_py
elif kind == "single":
return info.single and not is_pure_py
elif kind == "opencl":
return info.opencl
elif kind == "dll":
return not info.opencl and not is_pure_py
elif kind == "2d":
return any(p.type == 'orientation' for p in pars)
elif kind == "1d":
return all(p.type != 'orientation' for p in pars)
elif kind == "magnetic":
return any(p.type == 'sld' for p in pars)
elif kind == "nonmagnetic":
return not any(p.type == 'sld' for p in pars)
return False
def load_model(model_name, dtype=None, platform='ocl'):
# type: (str, str, str) -> KernelModel
"""
Load model info and build model.
*model_name* is the name of the model, or perhaps a model expression
such as sphere*hardsphere or sphere+cylinder.
*dtype* and *platform* are given by :func:`build_model`.
"""
return build_model(load_model_info(model_name),
dtype=dtype, platform=platform)
def load_model_info(model_string):
# type: (str) -> modelinfo.ModelInfo
"""
Load a model definition given the model name.
*model_string* is the name of the model, or perhaps a model expression
such as sphere*cylinder or sphere+cylinder. Use '@' for a structure
factor product, e.g. sphere@hardsphere. Custom models can be specified by
prefixing the model name with 'custom.', e.g. 'custom.MyModel+sphere'.
This returns a handle to the module defining the model. This can be
used with functions in generate to build the docs or extract model info.
"""
if "+" in model_string:
parts = [load_model_info(part)
for part in model_string.split("+")]
return mixture.make_mixture_info(parts, operation='+')
elif "*" in model_string:
parts = [load_model_info(part)
for part in model_string.split("*")]
return mixture.make_mixture_info(parts, operation='*')
elif "@" in model_string:
p_info, q_info = [load_model_info(part)
for part in model_string.split("@")]
return product.make_product_info(p_info, q_info)
# We are now dealing with a pure model
elif "custom." in model_string:
pattern = "custom.([A-Za-z0-9_-]+)"
result = re.match(pattern, model_string)
if result is None:
raise ValueError("Model name in invalid format: " + model_string)
model_name = result.group(1)
# Use ModelName to find the path to the custom model file
model_path = joinpath(CUSTOM_MODEL_PATH, model_name + ".py")
if not os.path.isfile(model_path):
raise ValueError("The model file {} doesn't exist".format(model_path))
kernel_module = custom.load_custom_kernel_module(model_path)
return modelinfo.make_model_info(kernel_module)
kernel_module = generate.load_kernel_module(model_string)
return modelinfo.make_model_info(kernel_module)
_REPARAMETERIZE_DOCS = """\
Definition
----------
Constrain :ref:`%(base)s` according to the following::
%(translation)s
"""
_LHS_RE = re.compile(r"^ *(?<![.0-9])([A-Za-z_][A-Za-z0-9_]+) *=",
flags=re.MULTILINE)
def reparameterize(
base, parameters, translation, filename=None,
title=None, insert_after=None, docs=None, name=None,
source=None,
):
"""
Reparameterize an existing model.
*base* is the original modelinfo. This cannot be a reparameterized model;
only one level of reparameterization is supported.
*parameters* are the new parameter definitions that will be
included in the model info.
*translation* is a string each line containing *var = expr*. The variable
*var* can be a new intermedi
|
ate value, or it can be a parameter from
    the base model that will be replaced by the expression. The expression
*expr* can be any C99 expression, including C-style if-expressions
*condition ? value1 : value2*. Expressions can use any new or existing
parameter that is not being replaced including intermediate values that
are previously defined. Parameters can only be assigned once, never
updated. C99 math functions are available, as well as any functions
defined in the base mo
|
del or included in *source* (see below).
*filename* is the filename for the replacement model. This is usually
*__file__*, giving the path to the model file, but it could also be a
nominal filename for translations defined on-the-fly.
*title* is the mode
|
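The docstrings above describe both the "kind" grammar of list_models and the expression syntax accepted by load_model_info ('+' for mixtures, '*' for products, '@' for structure-factor products, a 'custom.' prefix for user models). A short usage sketch; it assumes sasmodels is installed with its standard model library, and the model names come from the docstring examples.

from sasmodels.core import list_models, load_model_info

# C models with orientation parameters that run in single precision,
# using the kind grammar documented in list_models().
print(list_models("c+single+2d")[:5])

# Compose a structure-factor product, as in the load_model_info docstring.
info = load_model_info("sphere@hardsphere")

# The returned ModelInfo is what build_model() consumes; its kernel
# parameters carry the 'type' field that _matches() inspects above.
print([p.type for p in info.parameters.kernel_parameters])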
sigmunau/nav
|
python/nav/auditlog/utils.py
|
Python
|
gpl-2.0
| 794
| 0.001259
|
from __future__ import unicode_literals
from django.utils.encoding import force_text
from django.db.models import Q
from . import find_modelname
from .models import LogEntry
LATEST_N_AUDITLOG_ENTRIES = 15
def get_auditlog_entries(iterable, limit=LATEST_N_AUDITLOG_ENTRIES):
modelname = find_modelname(list(iterable)[0])
pks = [force_text(i.pk
|
) for i in iterable]
object_query = Q(object_pk__in=pks, object_model=modelname)
target_query = Q(target_pk__in=pks, object_model=modelname)
actor_query = Q(actor_pk__in=pks, object_model=modelname)
filter_query = object_query | target_query | actor_query
entries = (LogEntry.objects
.filter(filter_query)
|
.distinct()
.order_by('-timestamp')[:limit]
)
return entries
|
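get_auditlog_entries builds a single OR of object, target and actor lookups so one queryset returns every LogEntry that touches the given objects, newest first. A hedged usage sketch; the argument is a stand-in for any non-empty iterable of NAV model instances.

from nav.auditlog.utils import get_auditlog_entries


def latest_audit_rows(instances, how_many=5):
    # 'instances' is any non-empty iterable of model instances; the helper
    # matches them as object, target or actor and returns the newest rows.
    return get_auditlog_entries(instances, limit=how_many)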
pbanaszkiewicz/amy
|
amy/autoemails/templatetags/type_extras.py
|
Python
|
mit
| 555
| 0
|
from collections.abc import Iterable
from django import template
from django.db.models import Model
register = template.Library()
@register.filter
def get_type(value):
# inspired by: https://stackoverflow.com/a/12028864
return type(value)
@register.filter
def is_model(
|
value):
return isinstance(value, Model)
@register.filter
def is_iterable(value):
return isinstance(value, Iterable)
@register.filter
def is_str(value):
return isinstance(value, str)
@register.filter
def is_bool(value):
return isinstance(value, bo
|
ol)
|
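The filters above are thin wrappers around type()/isinstance() so templates can branch on the kind of value they received (after {% load type_extras %}, e.g. {% if value|is_model %}). Because they are plain functions they can also be exercised directly; the import path below is inferred from the file location and a configured Django environment is assumed.

from autoemails.templatetags.type_extras import get_type, is_bool, is_iterable, is_str

print(get_type(42))            # <class 'int'>
print(is_iterable([1, 2, 3]))  # True
print(is_str(u"hello"))        # True
print(is_bool(False))          # True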
leahrnh/ticktock_text_api
|
Backend.py
|
Python
|
gpl-2.0
| 4,516
| 0.010407
|
#!/usr/bin/env python
###########################################################################
## ##
## Language Technologies Institute ##
## Carnegie Mellon University ##
## Copyright (c) 2012 ##
## All Rights Reserved. ##
## ##
## Permission is hereby granted, free of charge, to use and distribute ##
## this software and its documentation without restriction, including ##
## without limitation the rights to use, copy, modify, merge, publish, ##
## distribute, sublicense, and/or sell copies of this work, and to ##
## permit persons to whom this work is furnished to do so, subject to ##
## the following conditions: ##
## 1. The code must retain the above copyright notice, this list of ##
## conditions and the following disclaimer. ##
## 2. Any modifications must be clearly marked as such. ##
## 3. Original authors' names are not deleted. ##
## 4. The authors' names are not used to endorse or promote products ##
## derived from this software without specific prior written ##
## permission. ##
## ##
## CARNEGIE MELLON UNIVERSITY AND THE CONTRIBUTORS TO THIS WORK ##
## DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ##
## ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT ##
## SHALL CARNEGIE MELLON UNIVERSITY NOR THE CONTRIBUTORS BE LIABLE ##
## FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ##
## WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN ##
## AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ##
## ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF ##
## THIS SOFTWARE. ##
## ##
###########################################################################
## Author: Aasish Pappu (aasish@cs.cmu.edu) ##
## Date : November 2012 ##
###########################################################################
## Description: Example python backend module for olympus applications ##
###########################################################################
###########################################################################
## RedditResponder Modifications ##
## Author: Leah Nicolich-Henkin (leah.nh@cs.cmu.edu) ##
## Date : January 2016 ##
## ##
## Working off TickTock 'galbackend' version from January 2015 ##
## with notable additions by @yipeiw ##
## Deleted
|
nearly the entirety of the code, ret
|
aining structure of ##
## methods previously used for debugging ##
## and resource structure/initialization ##
## ##
###########################################################################
# LNH: uses the Loader to create idf_dict, which is used for comparing candidates
import Loader
import RedditQuery
# @yipeiw
resource = {}
# listfile = 'reddit_corpus.list' # file listing all corpus files to be used as a database
idf_file = 'idf_dict.csv' # file listing words and idf values
def init_resource():
global resource
resource = Loader.load_language_resource(idf_file)
# @yipeiw
# LNH: instead of using Control/Understand/Retrieval to find a response from the database,
# call RedditQuery, which queries Reddit directly
def get_response(user_input):
global database, resource
relevance, answer = RedditQuery.find_candidate(user_input, resource)
# print("answer is: " + str(answer))
output = " ".join(answer)
return output
|
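The backend keeps a module-level resource dict: init_resource() loads the idf dictionary once, and get_response() asks RedditQuery for a candidate reply and joins it into a string. A hedged driving sketch; it assumes this repository (Backend, Loader, RedditQuery and idf_dict.csv) is importable from the working directory.

import Backend

# Load the idf dictionary once at startup, then answer a few user turns.
Backend.init_resource()

for turn in ["hello there", "seen any good movies lately?"]:
    print(turn + " -> " + Backend.get_response(turn))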
aperigault/ansible
|
lib/ansible/modules/cloud/hcloud/hcloud_server.py
|
Python
|
gpl-3.0
| 14,437
| 0.001939
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: hcloud_server
short_description: Create and manage cloud servers on the Hetzner Cloud.
version_added: "2.8"
description:
- Create, update and manage cloud servers on the Hetzner Cloud.
author:
- Lukas Kaemmerling (@LKaemmerling)
options:
id:
description:
- The ID of the Hetzner Cloud server to manage.
- Only required if no server I(name) is given
type: int
name:
description:
- The Name of the Hetzner Cloud server to manage.
            - Only required if no server I(id) is given or the server does not exist.
type: str
server_type:
description:
- The Server Type of the Hetzner Cloud server to manage.
            - Required if the server does not exist.
type: str
ssh_keys:
description:
- List of SSH key names
- The key names correspond to the SSH keys configured for your
Hetzner Cloud account access.
type: list
volumes:
description:
- List of Volumes IDs that should be attached to the server on server creation.
type: list
image:
description:
- Image the server should be created from.
            - Required if the server does not exist.
type: str
location:
description:
- Location of Server.
            - Required if no I(datacenter) is given and the server does not exist.
type: str
datacenter:
description:
- Datacenter of Server.
            - Required if no I(location) is given and the server does not exist.
type: str
backups:
description:
- Enable or disable Backups for the given Server.
type: bool
default: no
upgrade_disk:
description:
- Resize the disk size, when resizing a server.
- If you want to downgrade the server later, this value should be False.
type: bool
default: no
force_upgrade:
description:
- Force the upgrade of the server.
- Power off the server if it is running on upgrade.
type: bool
default: no
user_data:
description:
- User Data to be passed to the server on creation.
- Only used if server does not exists.
type: str
labels:
description:
- User-defined labels (key-value pairs).
type: dict
state:
description:
- State of the server.
default: present
choices: [ absent, present, restarted, started, stopped, rebuild ]
type: str
extends_documentation_fragment: hcloud
"""
EXAMPLES = """
- name: Create a basic server
hcloud_server:
name: my-server
server_type: cx11
image: ubuntu-18.04
state: present
- name: Create a basic server with ssh key
hcloud_server:
name: my-server
server_type: cx11
image: ubuntu-18.04
location: fsn1
ssh_keys:
- me@myorganisation
state: present
- name: Resize an existing server
hcloud_server:
name: my-server
server_type: cx21
upgrade_disk: yes
state: present
- name: Ensure the server is absent (remove if needed)
hcloud_server:
name: my-server
state: absent
- name: Ensure the server is started
hcloud_server:
name: my-server
state: started
- name: Ensure the server is stopped
hcloud_server:
name: my-server
state: stopped
- name: Ensure the server is restarted
hcloud_server:
name: my-server
state: restarted
- name: Ensure the server is rebuild
hcloud_server:
name: my-server
image: ubuntu-18.04
state: rebuild
"""
RETURN = """
hcloud_server:
description: The server instance
returned: Always
type: dict
sample: {
"backup_window": null,
"datacenter": "nbg1-dc3",
"id": 1937415,
"image": "ubuntu-18.04",
"ipv4_address": "116.203.104.109",
"ipv6": "2a01:4f8:1c1c:c140::/64",
"labels": {},
"location": "nbg1",
"name": "mein-server-2",
"rescue_enabled": false,
"server_type": "cx11",
"status": "running"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.hcloud import Hcloud
try:
from hcloud.volumes.domain import Volume
from hcloud.ssh_keys.domain import SSHKey
from hcloud.servers.domain import Server
from hcloud import APIException
except ImportError:
pass
class AnsibleHcloudServer(Hcloud):
def __init__(self, module):
Hcloud.__init__(self, module, "hcloud_server")
self.hcloud_server = None
def _prepare_result(self):
return {
"id": to_native(self.hcloud_server.id),
"name": to_native(self.hcloud_server.name),
"ipv4_address": to_native(self.hcloud_server.public_net.ipv4.ip),
"ipv6": to_native(self.hcloud_server.public_net.ipv6.ip),
"image": to_native(self.hcloud_server.image.name),
"server_type": to_native(self.hcloud_server.server_type.name),
"datacenter": to_native(self.hcloud_server.datacenter.name),
"location": to_native(self.hcloud_server.datacenter.location.name),
"rescue_enabled": self.hcloud_server.rescue_enabled,
"backup_window": to_native(self.hcloud_server.backup_window),
"labels": self.hcloud_server.labels,
"status": to_native(self.hcloud_server.status),
|
}
def _get_server(self):
try:
if self.module.params.get("id") is not None:
self.hcloud_server = self.client.servers.get_by_id(
self.module.params.get("id")
|
)
else:
self.hcloud_server = self.client.servers.get_by_name(
self.module.params.get("name")
)
except APIException as e:
self.module.fail_json(msg=e.message)
def _create_server(self):
self.module.fail_on_missing_params(
required_params=["name", "server_type", "image"]
)
params = {
"name": self.module.params.get("name"),
"server_type": self.client.server_types.get_by_name(
self.module.params.get("server_type")
),
"user_data": self.module.params.get("user_data"),
"labels": self.module.params.get("labels"),
}
if self.client.images.get_by_name(self.module.params.get("image")) is not None:
# When image name is not available look for id instead
params["image"] = self.client.images.get_by_name(self.module.params.get("image"))
else:
params["image"] = self.client.images.get_by_id(self.module.params.get("image"))
if self.module.params.get("ssh_keys") is not None:
params["ssh_keys"] = [
SSHKey(name=ssh_key_name)
for ssh_key_name in self.module.params.get("ssh_keys")
]
if self.module.params.get("volumes") is not None:
params["volumes"] = [
Volume(id=volume_id) for volume_id in self.module.params.get("volumes")
]
if self.module.params.get("location") is None and self.module.params.get("datacenter") is None:
# When not given, the API will choose the location.
params["location"] = None
params["datacenter"] = None
elif self.module.params.get("location") is not None and self.module.params.get("datacenter") is None:
params["location"] = self.client.locations.get_by_name(
self.modul
|
RasaHQ/rasa_nlu
|
tests/engine/training/test_hooks.py
|
Python
|
apache-2.0
| 3,903
| 0.001025
|
from rasa.engine.caching import TrainingCache
from rasa.engine.graph import ExecutionContext, GraphNode, GraphSchema, SchemaNode
from rasa.engine.storage.storage import ModelStorage
from rasa.engine.training import fingerprinting
from rasa.engine.training.components import PrecomputedValueProvider
from rasa.engine.training.hooks import TrainingHook
from tests.engine.graph_components_test_classes import (
CacheableComponent,
CacheableText,
)
def test_training_hook_saves_to_cache(
default_model_storage: ModelStorage, temp_cache: TrainingCache,
):
# We need an execution context so the hook can determine the class of the graph
# component
execution_context = ExecutionContext(
GraphSchema(
{
"hello": SchemaNode(
needs={},
constructor_name="create",
fn="run",
config={},
uses=CacheableComponent,
)
}
),
"1",
)
node = GraphNode(
node_name="hello",
component_class=CacheableComponent,
constructor_name="create",
component_config={},
fn_name="run",
inputs={"suffix": "input_node"},
eager=False
|
,
model_storage=default_model_storage,
resource=None,
execution_context=execution_context,
hooks=[
TrainingHook(
cache=temp_cache,
model_storage=default_model_storage,
|
pruned_schema=execution_context.graph_schema,
)
],
)
node(("input_node", "Joe"))
# This is the same key that the hook will generate
fingerprint_key = fingerprinting.calculate_fingerprint_key(
graph_component_class=CacheableComponent,
config={"prefix": "Hello "},
inputs={"suffix": "Joe"},
)
output_fingerprint_key = temp_cache.get_cached_output_fingerprint(fingerprint_key)
assert output_fingerprint_key
cached_result = temp_cache.get_cached_result(
output_fingerprint_key=output_fingerprint_key,
model_storage=default_model_storage,
node_name="hello",
)
assert isinstance(cached_result, CacheableText)
assert cached_result.text == "Hello Joe"
def test_training_hook_does_not_cache_cached_component(
default_model_storage: ModelStorage, temp_cache: TrainingCache,
):
# We need an execution context so the hook can determine the class of the graph
# component
execution_context = ExecutionContext(
GraphSchema(
{
"hello": SchemaNode(
needs={},
constructor_name="create",
fn="run",
config={},
uses=PrecomputedValueProvider,
)
}
),
"1",
)
node = GraphNode(
node_name="hello",
component_class=PrecomputedValueProvider,
constructor_name="create",
component_config={"output": CacheableText("hi")},
fn_name="get_value",
inputs={},
eager=False,
model_storage=default_model_storage,
resource=None,
execution_context=execution_context,
hooks=[
TrainingHook(
cache=temp_cache,
model_storage=default_model_storage,
pruned_schema=execution_context.graph_schema,
)
],
)
node(("input_node", "Joe"))
# This is the same key that the hook will generate
fingerprint_key = fingerprinting.calculate_fingerprint_key(
graph_component_class=PrecomputedValueProvider,
config={"output": CacheableText("hi")},
inputs={},
)
# The hook should not cache the output of a PrecomputedValueProvider
assert not temp_cache.get_cached_output_fingerprint(fingerprint_key)
|
listyque/TACTIC-Handler
|
thlib/ui_classes/ui_maya_dock.py
|
Python
|
epl-1.0
| 6,812
| 0.001468
|
# module General Ui
# file ui_maya_dock.py
# Main Dock Window interface
from thlib.side.Qt import QtWidgets as QtGui
#from thlib.side.Qt import QtCore
from thlib.environment import env_inst, env_mode, env_read_config, env_write_config
import thlib.maya_functions as mf
import thlib.tactic_classes as tc
import thlib.global_functions as gf
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
import maya.cmds as cmds
import ui_main_classes
# reload(ui_main_classes)
class Ui_DockMain(MayaQWidgetDockableMixin, QtGui.QMainWindow):
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent=parent)
env_inst.ui_maya_dock = self
self.setObjectName('TacticHandlerDock')
self.docked = None
self.dock_pos = None
self.dock_area = None
self.dock_size = None
self.dock_is_floating = None
self.readSettings()
self.toggle_dock = None
self.maya_dock = None
self.status_bar = None
self.create_ui_main()
self.create_ui()
self.catch_maya_closing()
def create_ui(self):
if self.docked:
self.set_docked()
else:
self.set_undocked()
def toggle_docking(self):
if self.toggle_dock:
self.set_undocked()
else:
self.set_docked()
def create_ui_main(self):
env_inst.ui_main = ui_main_classes.Ui_Main()
self.setCentralWidget(env_inst.ui_main)
self.setWindowTitle(env_inst.ui_main.windowTitle())
self.move(self.dock_pos)
def set_docked(self):
# status_bar = env_inst.ui_main.statusBar()
# if status_bar:
# status_bar.show()
self.toggle_dock = True
self.setDockableParameters(
dockable=True,
floating=self.dock_is_floating,
area=self.dock_area,
width=self.dock_size.width(),
height=self.dock_size.height()
)
self.show()
self.raise_()
self.docked = True
def set_undocked(self):
self.toggle_dock = False
self.setDockableParameters(
dockable=False,
floating=self.dock_is_floating,
area=self.dock_area,
width=self.dock_size.width(),
height=self.dock_size.height()
)
if self.maya_dock:
print self.maya_dock
self.removeDockWidget(self.maya_dock)
self.maya_dock.close()
self.maya_dock.deleteLater()
self.docked = False
# status_bar = env_inst.ui_main.statusBar()
# status_bar.show()
def set_settings_from_dict(self, settings_dict=None):
ref_settings_dict = {
'docked': 0,
'dock_pos': (200, 200),
'dock_size': (427, 690),
'dock_isFloating': 0,
'dock_tabArea': 1,
}
settings = gf.check_config(ref_settings_dict, settings_dict)
self.docked = bool(int(settings['docked']))
self.dock_pos = gf.tuple_to_qsize(settings['dock_pos'], 'pos')
self.dock_size = gf.tuple_to_qsize(settings['dock_size'], 'size')
self.dock_is_floating = bool(int(settings['dock_isFloating']))
if int(settings['dock_tabArea']) == 2:
self.dock_area = 'right'
else:
self.dock_area = 'left'
def get_settings_dict(self):
settings_dict = {
'docked': int(self.docked),
}
if self.docked:
maya_dock = self.parent()
settings_dict['dock_pos'] = gf.qsize_to_tuple(maya_dock.pos())
settings_dict['dock_size'] = gf.qsize_to_tuple(maya_dock.size())
settings_dict['dock_isFloating'] = int(bool(self.isFloating()))
settings_dict['dock_tabArea'] = int(env_inst.ui_super.dockWidgetArea(self.maya_dock))
else:
settings_dict['dock_pos'] = gf.qsize_to_tuple(self.pos())
settings_dict['dock_size'] = gf.qsize_to_tuple(self.size())
settings_dict['dock_isFloating'] = 0
settings_dict['dock_tabArea'] = 1
return settings_dict
def readSettings(self):
self.set_settings_from_dict(
env_read_config(filename='ui_maya_settings', unique_id='ui_main', long_abs_path=True)
)
def writeSettings(self):
env_write_config(self.get_settings_dict(), filename='ui_maya_settings', unique_id='ui_main', long_abs_path=True)
def raise_window(self):
if self.isMaximized():
self.showMaximized()
else:
self.showNormal()
QtGui.QDialog.activateWindow(self)
def catch_maya_closing(self):
QtGui.QApplication.instance().aboutToQuit.connect(env_inst.ui_main.close)
QtGui.QApplication.instance().aboutToQuit.connect(self.close)
def closeEvent(self, event):
if self.docked:
self.removeDockWidget(self.maya_dock)
self.maya_dock.close()
self.maya_dock.deleteLater()
self.writeSettings()
event.accept()
def init_env(current_path):
env_mode.set_current_path(current_path)
env_mode.set_mode('maya')
def close_all_instances():
try:
main_docks = mf.get_maya_dock_window()
for dock in main_docks:
dock.writeSettings()
dock.close()
dock.deleteLater()
if env_inst.ui_main:
env_inst.ui_main.close()
if cmds.workspaceControl('TacticHandlerDockWorkspaceControl', e=True, exists=True):
cmds.deleteUI('TacticHandlerDockWorkspaceControl', control=True)
except:
raise
@gf.catch_error
def create_ui(error_tuple=None):
if error_tuple:
env_mode.set_offline()
main_tab = Ui_DockMain()
gf.error_handle(error_tuple)
else:
env_mode.set_online()
main_tab = Ui_DockMain()
main_tab.show()
main_tab.raise_()
@gf.catch_error
def startup(restart=False, *args, **kwargs):
if restart:
close_all_instances()
env_inst.ui_super = mf.get_maya_window()
try:
main_tab = mf.get_maya_dock_window()[0]
main_tab.show()
m
|
ain_tab.raise_()
main_tab.raise_window()
except:
# def server_ping_agent():
# return tc.server_ping()
#
# ping_worker, thr
|
ead_pool = gf.get_thread_worker(
# server_ping_agent,
# finished_func=lambda: create_ui(None),
# error_func=create_ui
# )
#
# thread_pool.start(ping_worker)
env_inst.start_pools()
worker = env_inst.server_pool.add_task(tc.server_ping)
worker.finished.connect(create_ui)
worker.error.connect(create_ui)
worker.start()
|
Ibuprofen/gizehmoviepy
|
node_positions_image.py
|
Python
|
mit
| 1,603
| 0.008734
|
import json
from PIL import Image, ImageFont, ImageDraw
with open('./config/nodes.json') as data_file:
data = json.load(data_file)
gray = (200, 200, 200)
black = (0,0,0)
lightblue = (225, 255, 255)
darkblue = (185, 255, 255)
img = Image.new("RGBA", (400,400), 128)
usr_font = ImageFont.truetype("./fonts/Arial.ttf", 10)
draw = ImageDraw.Draw(img)
# grid
for i in range(10, 400, 10):
if (i / 50.0) % 1 == 0:
fillcolor = darkblue
else:
fillcolor = lightblue
draw.line([(i, 1), (i, 399)], fill=fillcolor, width=1)
draw.line([(1, i), (399, i)], fill=fillcolor, width=1)
# square border
draw.line([
(1, 1), (1, img.size[1]-1),
(1, img.size[1]-1), (img.size[0]-1, img.size[1]-1),
(img.size[0]-1, img.size[1]-1), (img.size[0]-1, 1),
(img.size[0]-1, 1), (1, 1)
], fill=gray, width=1)
# crosshair
draw.line([(img.size[0]/2, 1), (img.size[0]/2, img.size[1]-1)], fill=gr
|
ay, width
|
=1)
draw.line([(1, img.size[1]/2), (img.size[0]-1, img.size[1]/2)], fill=gray, width=1)
# top tier circle approximation
draw.ellipse([
(130, 130),
(270, 270)
], fill=None, outline=black)
# roof tier approximation
draw.ellipse([
(60, 60),
(340, 340)
], fill=None, outline=black)
# bottom tier (walls) approximation
draw.ellipse([
(25, 25),
(375, 375)
], fill=None, outline=black)
# label the global position numbers
for i, pos in data.items():
    # PIL's y axis starts at the top of the image, so flip the stored y value
draw.point((pos['x'], -pos['y'] + 400), (0, 0, 0))
draw.text((pos['x'], -pos['y'] + 400), i, (0,0,0), font=usr_font)
img.save("./web/positions.png", "PNG")
|
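The labelling loop flips each node's y coordinate because PIL puts the origin in the top-left corner, while the stored node positions assume y grows upward. A tiny standalone sketch of that conversion; the 400-pixel height matches the image created above.

def to_image_coords(x, y, height=400):
    """Convert 'y grows upward' node coordinates to PIL's top-left origin."""
    return x, height - y


# A node stored at (120, 30) ends up near the bottom of the 400x400 image.
print(to_image_coords(120, 30))  # (120, 370)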
gstiebler/odemis
|
src/odemis/util/test/util_test.py
|
Python
|
gpl-2.0
| 8,569
| 0.001984
|
#-*- coding: utf-8 -*-
"""
@author: Rinze de Laat
Copyright © 2013 Rinze de Laat, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Publ
|
ic License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
from functools import partial
import gc
import logging
from odemis import util
from odemis.util import limit_invocation, TimeoutError
from odemis.util import timeout
import time
import unittest
import weakref
logging.getL
|
ogger().setLevel(logging.DEBUG)
class TestLimitInvocation(unittest.TestCase):
def test_not_too_often(self):
self.count = 0
now = time.time()
end = now + 1.1 # a bit more than 1 s
while time.time() < end:
self.count_max_1s()
time.sleep(0.01)
self.assertLessEqual(self.count, 2, "method was called more than twice in 1 second: %d" % self.count)
time.sleep(2) # wait for the last potential calls to happen
self.assertLessEqual(self.count, 3, "method was called more than three times in 2 seconds: %d" % self.count)
@limit_invocation(1)
def count_max_1s(self):
# never called more than once per second
self.count += 1
time.sleep(0.2)
def test_gc(self):
u = Useless()
wku = weakref.ref(u)
now = time.time()
end = now + 1.1 # a bit more than 1 s
while time.time() < end:
u.doit(time.time(), b=3)
time.sleep(0.01)
# Check the object u has nothing preventing it from being dereferenced
del u
time.sleep(1) # wait for the last potential calls to happen
self.assertIsNone(wku())
class Useless(object):
"""
Independent class for testing limit_invocation decorator
"""
def __del__(self):
print "Useless %r is gone" % self
@limit_invocation(0.1)
def doit(self, a, b=None):
print "doing it %s, %s" % (a, b)
class TestTimeout(unittest.TestCase):
@timeout(1.2)
def test_notimeout(self):
time.sleep(1)
def test_timeout(self):
self.assertRaises(TimeoutError, self.toolong)
@timeout(0.5)
def toolong(self):
# will always timeout
time.sleep(1)
class SortedAccordingTestCase(unittest.TestCase):
def test_simple(self):
in_exp = ((([1, 2, 3], [3, 2, 1]), [3, 2, 1]),
(([1, 2, 3], [4, 2]), [2, 1, 3]),
(([], [4, 2]), []),
((["b", "a"], []), ["b", "a"]),
)
for i, eo in in_exp:
o = util.sorted_according_to(*i)
self.assertEqual(o, eo, "Failed to get correct output for %s" % (i,))
class AlmostEqualTestCase(unittest.TestCase):
def test_simple(self):
in_exp = {(0., 0): True,
(-5, -5.): True,
(1., 1. - 1e-9): True,
(1., 1. - 1e-3): False,
(1., 1. + 1e-3): False,
(-5e-8, -5e-8 + 1e-19): True,
(5e18, 5e18 + 1): True,
}
for i, eo in in_exp.items():
o = util.almost_equal(*i)
self.assertEqual(o, eo, "Failed to get correct output for %s" % (i,))
# Bounding box clipping test data generation
def tp(trans, ps):
""" Translate points ps using trans """
r = []
i = 0
for p in ps:
r.append(p + trans[i])
i = (i + 1) % len(trans)
return tuple(r)
# First we define a bounding boxes, at different locations
bounding_boxes = [(-2, -2, 0, 0),
(-1, -1, 1, 1),
(0, 0, 2, 2),
(2, 2, 4, 4)]
# From this, we generate boxes that are situated all around these
# bounding boxes, but that do not touch or overlap them.
def relative_boxes(bb):
t_left = [(-3, i) for i in range(-3, 4)]
to_the_left = [tp(t, bb) for t in t_left]
t_top = [(i, -3) for i in range(-3, 4)]
to_the_top = [tp(t, bb) for t in t_top]
t_right = [(3, i) for i in range(-3, 4)]
to_the_right = [tp(t, bb) for t in t_right]
t_bottom = [(i, 3) for i in range(-3, 4)]
to_the_bottom = [tp(t, bb) for t in t_bottom]
outside_boxes = to_the_left + to_the_top + to_the_right + to_the_bottom
# Selection boxes that touch the outside of the bounding box
touch_left = [tp((1, 0), b) for b in to_the_left[1:-1]]
touch_top = [tp((0, 1), b) for b in to_the_top[1:-1]]
touch_right = [tp((-1, 0), b) for b in to_the_right[1:-1]]
touch_bottom = [tp((0, -1), b) for b in to_the_bottom[1:-1]]
touching_boxes = touch_left + touch_top + touch_right + touch_bottom
# Partial overlapping boxes
overlap_left = [tp((1, 0), b) for b in touch_left[1:-1]]
overlap_top = [tp((0, 1), b) for b in touch_top[1:-1]]
overlap_right = [tp((-1, 0), b) for b in touch_right[1:-1]]
overlap_bottom = [tp((0, -1), b) for b in touch_bottom[1:-1]]
overlap_boxes = overlap_left + overlap_top + overlap_right + overlap_bottom
return outside_boxes, touching_boxes, overlap_boxes
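# Quick orientation for the helper under test (values worked out by hand, assuming the
# (left, top, right, bottom) tuple convention used for the boxes above):
#   util.rect_intersect((0, 0, 2, 2), (1, 1, 3, 3)) -> (1, 1, 2, 2)
#   util.rect_intersect((0, 0, 1, 1), (2, 2, 3, 3)) -> None   (disjoint boxes)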
class CanvasTestCase(unittest.TestCase):
def test_clipping(self):
tmp = "{}: {} - {} -> {}"
for bb in bounding_boxes:
outside, touching, overlap = relative_boxes(bb)
for b in outside:
r = util.rect_intersect(b, bb)
msg = tmp.format("outside", b, bb, r)
self.assertIsNone(r, msg)
for b in touching:
r = util.rect_intersect(b, bb)
msg = tmp.format("touching", b, bb, r)
self.assertIsNone(r, msg)
for b in overlap:
r = util.rect_intersect(b, bb)
msg = tmp.format("overlap", b, bb, r)
self.assertIsNotNone(r, msg)
# 'Manual' checks
if bb == (-1, -1, 1, 1):
if b[:2] == (-2, -2):
self.assertEqual(r, (-1, -1, 0, 0), msg)
elif b[:2] == (0, -1):
self.assertEqual(r, (0, -1, 1, 1), msg)
elif b[:2] == (0, 0):
self.assertEqual(r, (0, 0, 1, 1), msg)
# full and exact overlap
b = bb
r = util.rect_intersect(b, bb)
self.assertEqual(r, bb)
# inner overlap
b = (bb[0] + 1, bb[1] + 1, bb[2], bb[3])
r = util.rect_intersect(b, bb)
self.assertEqual(r, b)
# overflowing overlap
b = (bb[0] - 1, bb[1] - 1, bb[2] + 1, bb[2] + 1)
r = util.rect_intersect(b, bb)
self.assertEqual(r, bb)
def test_line_clipping(self):
bounding_box = (0, 4, 4, 0)
clip = partial(util.clip_line, *bounding_box)
# Test lines within bounding box, i.e. no clipping should occur
internal = [
(0, 0, 0, 0),
(2, 2, 2, 2),
(0, 0, 4, 4),
(4, 4, 0, 0),
(0, 2, 2, 0),
(2, 0, 0, 2),
]
for line in internal:
self.assertEqual(line, clip(*line))
# Test clipping for lines originating in the center of the bounding box and ending outside
# of it.
inner_to_outer = [
((2, 2, 2, 6), (2, 2, 2, 4)),
((2, 2, 6, 2), (2, 2, 4, 2)),
((2, 2, 2, -2), (2, 2, 2, 0)),
((2, 2, -2, 2), (2, 2, 0, 2)),
((2, 2, 6, -2), (2, 2, 4, 0)),
((2, 2, -2, -2), (2, 2, 0, 0)),
((2, 2, -2, -2), (2, 2, 0, 0)),
]
for orig, clipped in inner_to_outer:
self.assertEqual(clipped, clip(*orig))
outer_to_inner = [
((2, 6, 2, 2), (2, 4, 2, 2)),
((6, 2, 2, 2),
|
mugurrus/superdesk-core
|
superdesk/io/feed_parsers/__init__.py
|
Python
|
agpl-3.0
| 14,119
| 0.002408
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from abc import ABCMeta, abstractmethod
from superdesk.etree import etree as sd_etree
from superdesk.errors import SkipValue
from flask import current_app as app
from superdesk.metadata.item import Priority
from collections import OrderedDict
from lxml import etree
import superdesk
import logging
logger = logging.getLogger(__name__)
class FeedParser(metaclass=ABCMeta):
"""
Base class for a Feed Parser.
A Feed Parser class must have the following attributes:
1. `NAME` - unique name under which to register the class.
"""
@abstractmethod
def can_parse(self, article):
"""Sub-classes must override this method and tell whether it can parse the given article.
:param article: article
:return: True if the feed parser can parse, False otherwise.
:rtype: bool
"""
raise NotImplementedError()
@abstractmethod
def parse(self, article, provider=None):
"""Parse the given article and extracts the relevant elements/attributes values from the given article.
:param article: XML String to parse
:type article: str
:param provider: Ingest Provider Details, defaults to None
:type provider: dict having properties defined in
:py:class: `superdesk.io.ingest_provider_model.IngestProviderResource`
:return: parsed data as dict.
:rtype: dict having properties defined in :py:mod: `superdesk.metadata.item`
"""
raise NotImplementedError()
def set_dateline(self, item, city=None, text=None):
"""Sets the 'dateline' to the article identified by item.
If city is passed then the system checks if city is available in Cities collection.
If city is not found in Cities collection then dateline's located is set with default values.
:param item: article.
:type item: dict
:param city: Name of the city, if passed the system will search in Cities collection.
:type city: str
:param text: dateline in full. For example, "STOCKHOLM, Aug 29, 2014"
:type text: str
"""
item.setdefault('dateline', {})
if city:
cities = app.locators.find_cities()
located = [c for c in cities if c['city'] == city]
item['dateline']['located'] = located[0] if len(located) > 0 else {'city_code': city, 'city': city,
'tz': 'UTC', 'dateline': 'city'}
if text:
item['dateline']['text'] = text
def map_priority(self, source_priority):
"""
Maps the source priority to superdesk priority
:param source_priority:
:type source_priority: str
:return: priority of the item
:rtype int
"""
if source_priority and source_priority.isdigit():
if int(source_priority) in Priority.values():
return int(source_priority)
return Priority.Ordinary.value
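# For example: map_priority('3') returns 3 when 3 is among Priority.values(); a non-digit
# or out-of-range value falls back to Priority.Ordinary.value (illustration of the logic above).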
class XMLFeedParser(FeedParser, metaclass=ABCMeta):
"""
Base class for Feed Parsers which can parse the XML Content.
"""
def __init__(self):
self.root = None
self.metadata_mapping = None
def _parse_mapping(self, value):
if isinstance(value, dict):
if 'default_attr' in value:
if 'default' in value:
logger.error("default and default_attr can't be used at the same time,"
"only default will be used ({})".format(self.__class__))
if 'xpath' in value:
if '/' in value['xpath']:
logger.error("default_attr can only be used for a simple child element ({})".format(self.__class__))
else:
logger.error("xpath is needed when default_attr is used ({})".format(self.__class__))
if 'callback' in value and 'list' in value:
del value['list']
logger.error("list can't be used with callback ({})".format(self.__class__))
return value
elif isinstance(value, str):
if not value:
return {}
return {'xpath': value}
elif callable(value):
return {'callback': value}
else:
logger.warn("Can't parse mapping value {}, ignoring it".format(value))
return {}
def _generate_mapping(self, setting_param_name):
"""Generate self.metadata_mapping according to available mappings.
The following mappings are used in this order (last is more important):
- self.default_mapping
- self.MAPPING, intended for subclasses
- [setting_param_name] dictionary which can be put in settings
If a value is a non-empty string, it is an xpath; @attribute can be used as the last path component.
If value is empty string/dict, the key will be ignored
If value is a callable, it will be executed with nitf Element as argument, return value will be used.
If a dictionary is used as value, following keys can be used:
xpath: path to the element
callback: callback executed with nitf Element as argument, return value will be used
default: value to use if element/attribute doesn't exists (default: doesn't set the key)
list: a bool which indicate if a list is expected
if False (default), only first value is used
filter: callable to be used with found element/value
value returned by the callable will be used
if None is returned, value will be ignored
In case of multiple values (i.e. if "list" is set), filter is called on each item
default_attr: value if element exist but attribute is missing
this works actually for all values, if it is not found parent element is checked
and default_attr is used only if parent element exists
key_hook: a callable which stores the resulting value in the item itself,
useful for specific behaviours when several values go to the same key
callable will get item and value as arguments.
update: a bool which indicate that default mapping must be updated instead of overwritten
Note the difference between using a callable directly, and "filter" in a dict:
the former gets the root element and can be skipped with SkipValue, while the
latter gets an element/value found with xpath.
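Example (a hypothetical subclass MAPPING illustrating these options; element names
and the extract_body helper are made up, not taken from any real parser):
    MAPPING = {
        'headline': 'head/title',                               # plain xpath
        'priority': {'xpath': 'head/meta/@priority',
                     'default': 5, 'filter': int},              # default + filter
        'keywords': {'xpath': 'head/key-list/keyword/@key',
                     'list': True},                             # keep all matches
        'body_html': lambda root: extract_body(root),           # callable on the root element
    }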
"""
try:
class_mapping = self.MAPPING
except AttributeError:
class_mapping = {}
if setting_param_name is not None:
settings_mapping = getattr(superdesk.config, setting_param_name)
if settings_mapping is None:
logging.info("No mapping found in settings for NITF parser, using default one")
settings_mapping = {}
else:
settings_mapping = {}
mapping = self.metadata_mapping = OrderedDict()
for source_mapping in (self.default_mapping, class_mapping, settings_mapping):
for key, value in source_mapping.items():
key_mapping = self._parse_mapping(value)
if key_mapping.get('update', False) and key in mapping:
mapping[key].update(key_mapping)
else:
mapping[key] = key_mapping
def do_mapping(self, item, item_xml, setting_param_name=None, namespaces=None):
"""Apply mapping to item's XML content to get article metadata
mapping is generated by self._generate_mapping
:param item: dictionary to fill with item metadata
:type item: dict
:param item_xml: XML element to parse
|
DjenieLabs/django-multisites-utils
|
multisitesutils/__init__.py
|
Python
|
bsd-3-clause
| 276
| 0.003623
|
__version__ = '0.2.0'
class Preferences(object):
"""
Placeholder class to which preferences properties are added
dynamically through a signal.
See behaviours.Preferences and behaviours.preferences_class_prepared
"""
pass
preferences = Preferences()
|
Captain-Coder/tribler
|
Tribler/Core/Modules/restapi/debug_endpoint.py
|
Python
|
lgpl-3.0
| 18,160
| 0.002203
|
from __future__ import absolute_import
import logging
import os
import sys
import datetime
import psutil
from six import StringIO
from twisted.web import http, resource
from Tribler.Core.Utilities.instrumentation import WatchDog
import Tribler.Core.Utilities.json_util as json
HAS_MELIAE = True
try:
from meliae import scanner
except ImportError:
HAS_MELIAE = False
class MemoryDumpBuffer(StringIO):
"""
Meliae expects its file handle to support write(), flush() and __call__().
The StringIO class does not support __call__(), therefore we provide this subclass.
"""
def __call__(self, s):
StringIO.write(self, s)
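# Sketch of the intended use (assuming meliae is available; not part of the endpoint code):
#   buf = MemoryDumpBuffer()
#   scanner.dump_all_objects(buf)   # meliae writes the dump through write()/__call__()
#   dump_text = buf.getvalue()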
class DebugEndpoint(resource.Resource):
"""
This endpoint is responsible for handing requests regarding debug information in Tribler.
"""
def __init__(self, session):
resource.Resource.__init__(self)
child_handler_dict = {"circuits": DebugCircuitsEndpoint, "open_files": DebugOpenFilesEndpoint,
"open_sockets": DebugOpenSocketsEndpoint, "threads": DebugThreadsEndpoint,
"cpu": DebugCPUEndpoint, "memory": DebugMemoryEndpoint,
"log": DebugLogEndpoint, "profiler": DebugProfilerEndpoint}
for path, child_cls in child_handler_dict.iteritems():
self.putChild(path, child_cls(session))
class DebugCircuitsEndpoint(resource.Resource):
"""
This class handles requests regarding the tunnel community debug information.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
self.putChild("slots", DebugCircuitSlotsEndpoint(session))
def render_GET(self, request):
"""
.. http:get:: /debug/circuits
A GET request to this endpoint returns information about the built circuits in the tunnel community.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/circuits
**Example response**:
.. sourcecode:: javascript
{
"circuits": [{
"id": 1234,
"state": "EXTENDING",
"goal_hops": 4,
"bytes_up": 45,
"bytes_down": 49,
"created": 1468176257,
"hops": [{
"host": "unknown"
}, {
"host": "39.95.147.20:8965"
}],
...
}, ...]
}
"""
tunnel_community = self.session.lm.tunnel_community
if not tunnel_community:
request.setResponseCode(http.NOT_FOUND)
return json.dumps({"error": "tunnel community not found"})
circuits_json = []
for circuit_id, circuit in tunnel_community.circuits.iteritems():
item = {'id': circuit_id, 'state': str(circuit.state), 'goal_hops': circuit.goal_hops,
'bytes_up': circuit.bytes_up, 'bytes_down': circuit.bytes_down, 'created': circuit.creation_time}
hops_array = []
for hop in circuit.hops:
hops_array.append({'host': 'unknown' if 'UNKNOWN HOST' in hop.host else '%s:%s' % (hop.host, hop.port)})
item['hops'] = hops_array
circuits_json.append(item)
return json.dumps({'circuits': circuits_json})
class DebugCircuitSlotsEndpoint(resource.Resource):
"""
This class handles requests for information about slots in the tunnel overlay.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/circuits/slots
A GET request to this endpoint returns information about the slots in the tunnel overlay.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/circuits/slots
**Example response**:
.. sourcecode:: javascript
{
"slots": {
"random": [...],
"competing": [...]
}
}
"""
return json.dumps({
"slots": {
"random": self.session.lm.tunnel_community.random_slots,
"competing": self.session.lm.tunnel_community.competing_slots
}
})
class DebugOpenFilesEndpoint(resource.Resource):
"""
This class handles request for information about open files.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/open_files
A GET request to this endpoint returns information about files opened by Tribler.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/open_files
**Example response**:
.. sourcecode:: javascript
{
"open_files": [{
"path": "path/to/open/file.txt",
"fd": 33,
}, ...]
}
"""
my_process = psutil.Process()
return json.dumps({
"open_files": [{"path": open_file.path, "fd": open_file.fd} for open_file in my_process.open_files()]})
class DebugOpenSocketsEndpoint(resource.Resource):
"""
This class handles request for information about open sockets.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/open_sockets
A GET request to this endpoint returns information about open sockets.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/open_sockets
**Example response**:
.. sourcecode:: javascript
{
"open_sockets": [{
"family": 2,
"status": "ESTABLISHED",
"laddr": "0.0.0.0:0",
"raddr": "0.0.0.0:0",
"type": 30
}, ...]
}
"""
my_process = psutil.Process()
sockets = []
for open_socket in my_process.connections():
sockets.append({
"family": open_socket.family,
"status": open_socket.status,
"laddr": ("%s:%d" % open_socket.laddr) if open_socket.laddr else "-",
"raddr": ("%s:%d" % open_socket.raddr) if open_socket.raddr else "-",
"type": open_socket.type
})
return json.dumps({"open_sockets": sockets})
class DebugThreadsEndpoint(resource.Resource):
"""
This class handles request for information about threads.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/threads
A GET request to this endpoint returns information about running threads.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/threads
**Example response**:
.. sourcecode:: javascript
{
"threads": [{
"thread_id": 123456,
"thread_name": "my_thread",
"frames": ["my_frame", ...]
}, ...]
}
"""
watchdog = WatchDog()
return json.dumps({"threads": watchdog.get_threads_info()})
class DebugCPUEndpoint(resource.Resource):
"""
This class handles reques
|
pbrod/scipy
|
scipy/interpolate/tests/test_polyint.py
|
Python
|
bsd-3-clause
| 24,329
| 0.005754
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
TestCase, run_module_suite, assert_allclose, assert_equal, assert_,
assert_raises)
from scipy.interpolate import (
KroghInterpolator, krogh_interpolate,
BarycentricInterpolator, barycentric_interpolate,
approximate_taylor_polynomial, pchip, PchipInterpolator,
pchip_interpolate, Akima1DInterpolator, CubicSpline, make_interp_spline)
from scipy._lib.six import xrange
def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0,
extra_args={}):
np.random.seed(1234)
x = [-1, 0, 1, 2, 3, 4]
s = list(range(1, len(y_shape)+1))
s.insert(axis % (len(y_shape)+1), 0)
y = np.random.rand(*((6,) + y_shape)).transpose(s)
# Cython code chokes on y.shape = (0, 3) etc, skip them
if y.size == 0:
return
xi = np.zeros(x_shape)
yi = interpolator_cls(x, y, axis=axis, **extra_args)(xi)
target_shape = ((deriv_shape or ()) + y.shape[:axis]
+ x_shape + y.shape[axis:][1:])
assert_equal(yi.shape, target_shape)
# check it works also with lists
if x_shape and y.size > 0:
interpolator_cls(list(x), list(y), axis=axis, **extra_args)(list(xi))
# check also values
if xi.size > 0 and deriv_shape is None:
bs_shape = y.shape[:axis] + (1,)*len(x_shape) + y.shape[axis:][1:]
yv = y[((slice(None,),)*(axis % y.ndim)) + (1,)]
yv = yv.reshape(bs_shape)
yi, y = np.broadcast_arrays(yi, yv)
assert_allclose(yi, y)
SHAPES = [(), (0,), (1,), (6, 2, 5)]
def test_shapes():
def spl_interp(x, y, axis):
return make_interp_spline(x, y, axis=axis)
for ip in [KroghInterpolator, BarycentricInterpolator, pchip,
Akima1DInterpolator, CubicSpline, spl_interp]:
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
if ip != CubicSpline:
yield check_shape, ip, s1, s2, None, axis
else:
for bc in ['natural', 'clamped']:
extra = {'bc_type': bc}
yield check_shape, ip, s1, s2, None, axis, extra
def test_derivs_shapes():
def krogh_derivs(x, y, axis=0):
return KroghInterpolator(x, y, axis).derivatives
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
yield check_shape, krogh_derivs, s1, s2, (6,), axis
def test_deriv_shapes():
def krogh_deriv(x, y, axis=0):
return KroghInterpolator(x, y, axis).derivative
def pchip_deriv(x, y, axis=0):
return pchip(x, y, axis).derivative()
def pchip_deriv2(x, y, axis=0):
return pchip(x, y, axis).derivative(2)
def pchip_antideriv(x, y, axis=0):
return pchip(x, y, axis).derivative()
def pchip_antideriv2(x, y, axis=0):
return pchip(x, y, axis).derivative(2)
def pchip_deriv_inplace(x, y, axis=0):
class P(PchipInterpolator):
def __call__(self, x):
return PchipInterpolator.__call__(self, x, 1)
pass
return P(x, y, axis)
def akima_deriv(x, y, axis=0):
return Akima1DInterpolator(x, y, axis).derivative()
def akima_antideriv(x, y, axis=0):
return Akima1DInterpolator(x, y, axis).antiderivative()
def cspline_deriv(x, y, axis=0):
return CubicSpline(x, y, axis).derivative()
def cspline_antideriv(x, y, axis=0):
return CubicSpline(x, y, axis).antiderivative()
def bspl_deriv(x, y, axis=0):
return make_interp_spline(x, y, axis=axis).derivative()
def bspl_antideriv(x, y, axis=0):
return make_interp_spline(x, y, axis=axis).antiderivative()
for ip in [krogh_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace,
pchip_antideriv, pchip_antideriv2, akima_deriv, akima_antideriv,
cspline_deriv, cspline_antideriv, bspl_deriv, bspl_antideriv]:
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
yield check_shape, ip, s1, s2, (), axis
def _check_complex(ip):
x = [1, 2, 3, 4]
y = [1, 2, 1j, 3]
p = ip(x, y)
assert_allclose(y, p(x))
def test_complex():
for ip in [KroghInterpolator, BarycentricInterpolator, pchip, CubicSpline]:
yield _check_complex, ip
class CheckKrogh(TestCase):
def setUp(self):
self.true_poly = np.poly1d([-2,3,1,5,-4])
self.test_xs = np.linspace(-1,1,100)
self.xs = np.linspace(-1,1,5)
self.ys = self.true_poly(self.xs)
def test_lagrange(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_scalar(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(7),P(7))
assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)))
def test_derivatives(self):
P = KroghInterpolator(self.xs,self.ys)
D = P.derivatives(self.test_xs)
for i in xrange(D.shape[0]):
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
D[i])
def test_low_derivatives(self):
P = KroghInterpolator(self.xs,self.ys)
D = P.derivatives(self.test_xs,len(self.xs)+2)
for i in xrange(D.shape[0]):
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
D[i])
def test_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
m = 10
r = P.derivatives(self.test_xs,m)
for i in xrange(m):
assert_almost_equal(P.derivative(self.test_xs,i),r[i])
def test_high_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
for i in xrange(len(self.xs),2*len(self.xs)):
assert_almost_equal(P.derivative(self.test_xs,i),
np.zeros(len(self.test_xs)))
def test_hermite(self):
xs = [0,0,0,1,1,1,2]
ys = [self.true_poly(0),
self.true_poly.deriv(1)(0),
self.true_poly.deriv(2)(0),
self.true_poly(1),
self.true_poly.deriv(1)(1),
self.true_poly.deriv(2)(1),
self.true_poly(2)]
P = KroghInterpolator(xs,ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_vector(self):
xs = [0, 1, 2]
ys = np.array([[0,1],[1,0],[2,1]])
P = KroghInterpolator(xs,ys)
Pi = [KroghInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]
test_xs = np.linspace(-1,3,100)
assert_almost_equal(P(test_xs),
np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
assert_almost_equal(P.derivatives(test_xs),
np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
(1,2,0)))
def test_empty(self):
P = KroghInterpolator(self.xs,self.ys)
assert_array_equal(P([]), [])
def test_shapes_scalarvalue(self):
P = KroghInterpolator(self.xs,self.ys)
assert_array_equal(np.shape(P(0)), ())
assert_array_equal(np.shape(P(np.array(0))), ())
assert_array_equal(np.shape(P([0])), (1,))
assert_array_equal(np.shape(P([0,1])), (2,))
def test_shapes_scalarvalue_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
n = P.n
assert_array_equal(np.shape(P.derivatives(0)), (n,))
assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
assert_array_equal(np.shape(P.derivatives([0])), (n,1))
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
def test_shapes_vectorvalue(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
assert_array_equal(np.shape(P(0)), (3,))
assert_array_equal(np.shape(P([0])),
|
t-stark/ec2cli
|
scripts/pretest_setup.py
|
Python
|
gpl-2.0
| 6,293
| 0.001112
|
"""
Test Setup Module -- THIS SHOULD BE REPURPOSED, TRIGGERED BY make-test.sh script
"""
import os
import sys
import time
import json
import inspect
from configparser import ConfigParser
import logging
# aws imports
import boto3
import moto
import pytest
from botocore.exceptions import ClientError, ProfileNotFound
# test imports
sys.path.insert(0, os.path.abspath('../'))
from tests import environment
from keyup.statics import PACKAGE
# global objects
config = ConfigParser()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# set global Autotag-specific vars
account_number = '123456789012'
TestUsers = ('developer1', 'developer2', 'developer3')
setup_profilename = 'default'
# test module globals
base_path = '/tmp/autotag-tests-%s' % time.time()
version = 'testing-' + base_path
test_assets = 'tests/assets'
# set region default
if os.getenv('AWS_DEFAULT_REGION') is None:
default_region = 'us-east-2'
os.environ['AWS_DEFAULT_REGION'] = default_region
else:
default_region = os.getenv('AWS_DEFAULT_REGION')
ami_id = 'ami-redhat7'
min_count = 1
max_count = 2
ec2_size = 't2.micro'
@moto.mock_ec2
def get_regions():
ec2 = boto3.client('ec2')
return [x['RegionName'] for x in ec2.describe_regions()['Regions'] if 'cn' not in x['RegionName']]
@pytest.fixture()
def regionize():
os.environ['AWS_REGION'] = default_region
yield
if default_region is not None:
os.environ['AWS_REGION'] = default_region
else:
del os.environ['AWS_REGION']
@pytest.fixture()
def sts_resource_objects(region=default_region):
session = boto3.Session(profile_name=setup_profilename)
client = session.client('sts', region_name=region)
yield client
moto.mock_sts().stop()
@pytest.fixture()
def iam_resource_objects(region=default_region):
session = boto3.Session(profile_name=setup_profilename)
client = session.client('iam')
yield client
@pytest.fixture()
def return_reference(filename):
with open(test_assets + '/' + filename, 'r') as f1:
f2 = f1.read()
content = json.loads(f2)
yield content
@pytest.fixture()
def import_file_object(filepath):
handle = open(filepath, 'r')
file_obj = handle.read()
return file_obj
def tear_down():
""" Tears down structures setup expressly for testing """
HOME = os.environ['HOME']
awscli = HOME + '/.aws/credentials'
if os.path.isfile(awscli):
config.read(awscli)
for profile in config.sections():
if 'ec2cli-dev1' in profile:
config.pop(profile)
with open(awscli, 'w') as f1:
config.write(f1)
return True
return False
class PreTestSetup():
"""
Sets up default AWS Account with all structures to run
keyup automated testing
"""
def __init__(self, user_list):
self.test_users = user_list
self.policy_arns = []
if self.setup_complete(user_list[0]) is False:
complete1 = self.create_users(user_list)
complete2 = self.create_policies(user_list)
complete3 = self.assign_policies(user_list)
complete4 = self.create_keys(user_list)
r = self.assess_setup(complete1, complete2, complete3, complete4)
return r
else:
return True
def setup_complete(self, canary):
""" Determines if setup has occurred """
iam_client = next(iam_resource_objects())
users = iam_client.list_users()
if canary in users:
logger.info('PreTest Setup already completed. Exit setup')
return True
else:
return False
def create_users(self, iam_resource_objects, iam_user, profile=setup_profilename):
"""
Setup for successive tests in this module
"""
try:
iam_client = iam_resource_objects
# create users
for user in self.test_users:
r = iam_client.create_user(Path='/', UserName=iam_user)
except ClientError as e:
logger.exception(
"%s: Error while creating test user IAM accounts (Code: %s Message: %s)" %
(inspect.stack()[0][3], e.response['Error']['Code'],
e.response['Error']['Message']))
return True
def create_policies(self, users):
""" Create IAM policies for new test users """
iam_client = next(iam_resource_objects())
policy = next(return_reference('iampolicy-AccessKeySelfService.json'))
try:
r = iam_client.create_policy(
PolicyName='iampolicy-AccessKeySelfService',
Path='/',
PolicyDocument=str(policy),
Description='self manage iam access keys'
)
self.policy_arns.append(r['Policy']['Arn'])
except Exception as e:
logger.exception('Error while creating IAM policy')
return False
return True
def assign_policies(self, users):
""" Assign IAM policies to new test users """
iam_client = next(iam_resource_objects())
try:
for user in users:
for arn in self.policy_arns:
r = iam_client.attach_user_policy(UserName=user, PolicyArn=arn)
except ClientError as e:
logger.exception('Error while attaching IAM policy')
return False
return True
def create_keys(self, users):
""" Create initial set of access keys for each test user """
iam_client = next(iam_resource_objects())
HOME = os.environ['HOME']
config.read(HOME + '/.aws/credentials')
# create keys for each user
for user in users:
keys = iam_client.create_access_key(UserName=user)
access_key = keys['AccessKey']['AccessKeyId']
secret_key = keys['AccessKey']['SecretAccessKey']
config[profile]
# write new keys
def assess_setup(self, *args):
for arg in args:
if arg is False:
return False
return True
if __name__ == '__main__':
# run setup
response = PreTestSetup(TestUsers)
logger.info('End result from PreTestSetup run: %s' % str(response))
|
huhuchen/asyncqueue
|
asyncqueue/_redis.py
|
Python
|
mit
| 720
| 0.002778
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis
import json
class Redis(object):
def __init__(self, host="localhost", port=6379):
self._host = host
self._port = port
self._redis_cursor = None
def conn(self):
if self._redis_cursor is None:
pool = redis.ConnectionPool(host=self._host, port=self._port, db=0)
self._redis_cursor = redis.Redis(connection_pool=pool)
def enqueue(self, qname, data):
self.conn()
self._redis_cursor.rpush(qname, json.dumps(data))
def dequeue(self, qname):
self.conn()
r = self._redis_cursor.blpop(qname)
return json.loads(r[1])
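# Example usage sketch (assumes a Redis server reachable on localhost:6379):
#   q = Redis()
#   q.enqueue("jobs", {"task": "resize", "id": 1})
#   job = q.dequeue("jobs")   # blocks until an item is available, then returns the dict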
if __name__ == "__main__":
pass
|
Phaiax/ArcticTypescript
|
lib/display/views/Outline.py
|
Python
|
mit
| 3,505
| 0.003994
|
# coding=utf8
import sublime
from .Base import Base
from ...utils import Debug
from ...utils.uiutils import get_prefix
class Outline(Base):
regions = {}
ts_view = None
def __init__(self, t3sviews):
super(Outline, self).__init__('Typescript : Outline View', t3sviews)
# SET TEXT
def set_text(self, edit_token, members, ts_view):
"""
This function takes the tss.js members structure instead of a string.
"""
# this will process the outline, even if the view is closed
self.ts_view = ts_view
if type(members) == list:
self._tssjs_2_outline_format(members)
elif type(members) == str:
self.text = members
super(Outline, self).set_text(edit_token, self.text)
def is_current_ts(self, ts_view):
if ts_view is None or self.ts_view is None:
return
return ts_view.id() == self.ts_view.id()
def _tssjs_2_outline_format(self, members):
text = []
line = 0
self.regions = {}
for member in members:
start_line = member['min']['line']
end_line = member['lim']['line']
left = member['min']['character']
right = member['lim']['character']
a = self.ts_view.text_point(start_line-1, left-1)
b = self.ts_view.text_point(end_line-1, right-1)
region = sublime.Region(a, b)
kind = get_prefix(member['loc']['kind'])
container_kind = get_prefix(member['loc']['containerKind'])
if member['loc']['kindModifiers'] != "":
member['loc']['kindModifiers'] = " " + member['loc']['kindModifiers']
if member['loc']['kind'] != 'class' and member['loc']['kind'] != 'interface':
t = "%s %s %s %s" % (kind, member['loc']['kindModifiers'], member['loc']['kind'], member['loc']['name'])
text.append('\n\t')
text.append(t.strip())
line += 1
self.regions[line] = region
else:
t = "%s %s %s %s {" % (container_kind, member['loc']['kindModifiers'], member['loc']['kind'], member['loc']['name'])
if len(text) == 0:
text.append('\n%s\n' % t.strip())
line += 2
self.regions[line - 1] = region
else:
text.append('\n\n}\n\n%s\n' % t.strip())
line += 5
self.regions[line - 1] = region
if len(members) == 0:
text.append("\n\nno members found\n")
self.text = ''.join(text)
is_focusing_ts_view = False
def on_click(self,line):
if self.is_focusing_ts_view:
Debug('focus', 'Outline.on_click: is just focusing other view > ignore')
return
if line in self.regions:
draw = sublime.DRAW_NO_FILL
self.ts_view.add_regions('typescript-definition', [self.regions[line]], 'comment', 'dot', draw)
self._focus_member_in_view(self.regions[line])
def _focus_member_in_view(self, region):
if self.ts_view.is_loading():
return
else:
Debug('focus', "_focus_member_in_view, Region @pos %i" % (region.begin()))
self.is_focusing_ts_view = True
self.ts_view.show(region)
self.ts_view.window().focus_view(self.ts_view)
self.is_focusing_ts_view = False
|
Rouslan/NTracer
|
scripts/polytope.py
|
Python
|
mit
| 24,145
| 0.014206
|
#!python
import math
import fractions
import pygame
import argparse
import os.path
import sys
import subprocess
import time
from itertools import combinations,islice
from ntracer import NTracer,Material,ImageFormat,Channel,BlockingRenderer,CUBE
from ntracer.pygame_render import PygameRenderer
ROT_SENSITIVITY = 0.005
WHEEL_INCREMENT = 8
def excepthook(type,value,traceback):
if isinstance(value,Exception):
print('error: '+str(value),file=sys.stderr)
else:
sys.__excepthook__(type,value,traceback)
sys.excepthook = excepthook
def schlafli_component(x):
x = x.partition('/')
p = int(x[0],10)
if p < 3: raise argparse.ArgumentTypeError('a component cannot be less than 3')
if not x[2]: return fractions.Fraction(p)
s = int(x[2],10)
if s < 1: raise argparse.ArgumentTypeError('for component p/q: q cannot be less than 1')
if s >= p: raise argparse.ArgumentTypeError('for component p/q: q must be less than p')
if fractions.gcd(s,p) != 1: raise argparse.ArgumentTypeError('for component p/q: p and q must be co-prime')
return fractions.Fraction(p,s)
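# For example (hand-checked against the parsing above): schlafli_component("5") returns
# Fraction(5, 1), schlafli_component("5/2") returns Fraction(5, 2), and "4/2" is rejected
# because 4 and 2 are not co-prime.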
def positive_int(x):
x = int(x,10)
if x < 1: raise argparse.ArgumentTypeError('a positive number is required')
return x
def screen_size(x):
w,_,h = x.partition('x')
w = int(w,10)
h = int(h,10)
if w < 1 or h < 1: raise argparse.ArgumentTypeError('invalid screen size')
return w,h
def fov_type(x):
x = float(x)
if x <= 0 or x >= 180: raise argparse.ArgumentTypeError('fov must be between 0 and 180 degrees')
return x/180*math.pi
parser = argparse.ArgumentParser(
description='Display a regular polytope given its Schl\u00e4fli symbol.')
parser.add_argument('schlafli',metavar='N',type=schlafli_component,nargs='+',help='the Schl\u00e4fli symbol components')
parser.add_argument('-o','--output',metavar='PATH',help='save an animation to PATH instead of displaying the polytope')
parser.add_argument('-t','--type',metavar='TYPE',default='h264',
help='Specifies output type when --output is used. If TYPE is "png", the '+
'output is a series of PNG images. For any other value, it is used '+
'as the video codec for ffmpeg.')
parser.add_argument('-f','--frames',metavar='F',type=positive_int,default=160,help='when creating an animation or benchmarking, the number of frames to render')
parser.add_argument('-s','--screen',metavar='WIDTHxHEIGHT',type=screen_size,default=(800,600),help='screen size')
parser.add_argument('-a','--fov',metavar='FOV',type=fov_type,default=0.8,help='field of vision in degrees')
parser.add_argument('-d','--cam-dist',metavar='DIST',type=float,default=4,
help='How far the view-port is from the center of the polytope. The '+
'value is a multiple of the outer radius of the polytope.')
parser.add_argument('--benchmark',action='store_true',help='measure the speed of rendering the scene')
parser.add_argument('--no-special',action='store_true',help='use the slower generic version of library even if a specialized version exists')
args = parser.parse_args()
material = Material((1,0.5,0.5))
nt = NTracer(max(len(args.schlafli)+1,3),force_generic=args.no_special)
def higher_dihedral_supplement(schlafli,ds):
a = math.pi*schlafli.denominator/schlafli.numerator
return 2*math.asin(math.sin(math.acos(1/(math.tan(ds/2)*math.tan(a))))*math.sin(a))
def almost_equal(a,b,threshold=0.001):
return (a-b).absolute() < threshold
def radial_vector(angle):
return nt.Vector.axis(0,math.sin(angle)) + nt.Vector.axis(1,math.cos(angle))
class Instance:
def __init__(self,shape,position,orientation=nt.Matrix.identity()):
self.shape = shape
self.position = position
self.orientation = orientation
self.inv_orientation = orientation.inverse()
def translated(self,position=nt.Vector(),orientation=nt.Matrix.identity()):
return (
position + (orientation * self.position),
orientation * self.orientation)
def tesselate(self,*args):
return self.shape.tesselate(*self.translated(*args))
def tesselate_inner(self,*args):
return self.shape.tesselate_inner(*self.translated(*args))
def any_point(self,*args):
return self.shape.any_point(*self.translated(*args))
def contains(self,p):
return self.shape.contains(self.inv_orientation * (p - self.position))
def star_component(x):
return (x.numerator - 1) > x.denominator > 1
class LineSegment:
star = False
def __init__(self,index,convex_ds,polygon):
self.index = index
self.p = polygon
self.position = radial_vector(index*convex_ds)
def tesselate(self,position,orientation):
return [
orientation*self.p.base_points[self.index-1]+position,
orientation*self.p.base_points[self.index]+position]
class Polygon:
apothem = 1
def __init__(self,schlafli):
self.star = star_component(schlafli)
convex_ds = 2 * math.pi / schlafli.numerator
self.dihedral_s = convex_ds * schlafli.denominator
self.parts = [LineSegment(i,convex_ds,self) for i in range(schlafli.numerator)]
self._circumradius = 1/math.cos(convex_ds/2)
self.base_points = [self._circumradius * radial_vector((i+0.5) * convex_ds) for i in range(schlafli.numerator)]
if self.star:
self._circumradius = math.tan(convex_ds)*math.tan(convex_ds/2) + 1
self.outer_points = [self._circumradius * radial_vector(i * convex_ds) for i in range(schlafli.numerator)]
def points(self,position,orientation,pset=None):
if pset is None: pset = self.base_points
return (orientation * bp + position for bp in pset)
def tesselate_inner(self,position,orientation):
points = list(self.points(position,orientation))
r = [points[0:3]]
for i in range(len(points)-3):
r.append([points[0],points[i+2],points[i+3]])
return r
def tesselate(self,position,orientation):
if not self.star:
return self.tesselate_inner(position,orientation)
points = list(self.points(position,orientation))
opoints = list(self.points(position,orientation,self.outer_points))
return [[opoints[i],points[i-1],points[i]] for i in range(len(points))]
def any_point(self,position,orientation):
return next(self.points(position,orientation))
def contains(self,p):
return any(almost_equal(p,test_p) for test_p in self.base_points)
def hull(self,position=nt.Vector(),orientation=nt.Matrix.identity()):
tris = [nt.TrianglePrototype(tri,material) for tri in self.tesselate_inner(position,orientation)]
if self.star: tris.extend(nt.TrianglePrototype(tri,material) for tri in self.tesselate(position,orientation))
return tris
def circumradius(self):
return self._circumradius
def circumradius_square(self):
return self._circumradius*self._circumradius
def line_apothem_square(self):
return 1
class Plane:
def __init__(self,nt,position):
self.normal = position.unit()
self.d = -position.absolute()
self._dot = nt.dot
def distance(self,point):
return self._dot(point,self.normal) + self.d
class Line:
def __init__(self,nt,p0,v,planes,outer=False):
self.p0 = p0
self.v = v
self.planes = set(planes)
self.outer = outer
self._dot = nt.dot
def point_at(self,t):
return self.p0 + self.v*t
def dist_square(self,point):
a = point - self.p0
b = self._dot(a,self.v)
return a.square() - b*b/self.v.square()
def __repr__(self):
return 'Line({0!r},{1!r})'.format(self.p0,self.v)
def plane_point_intersection(nt,planes):
assert nt.dimension == len(planes)
try:
return nt.Matrix(p.normal for p in planes).inverse()*nt.Vector(-p.d for p in planes)
except ValueError:
return None
def plane_line_intersection(nt,planes):
assert nt.dimension - 1 == len(planes)
v = nt.cross(p.normal for p in planes).unit()
return Line(
nt
|
xkmato/tracpro
|
tracpro/contacts/urls.py
|
Python
|
bsd-3-clause
| 137
| 0
|
from __future__ import absolute_import, unicode_literals
from .views import ContactCRUDL

urlpatterns = ContactCRUDL().as_urlpatterns()
|
reinoud/factuursturen
|
factuursturen/__init__.py
|
Python
|
bsd-2-clause
| 19,420
| 0.003862
|
#!/usr/bin/env python
"""
a class to access the REST API of the website www.factuursturen.nl
"""
import collections
import ConfigParser
from datetime import datetime, date
import re
import requests
from os.path import expanduser
import copy
import urllib
__author__ = 'Reinoud van Leeuwen'
__copyright__ = "Copyright 2013, Reinoud van Leeuwen"
__license__ = "BSD"
__maintainer__ = "Reinoud van Leeuwen"
__email__ = "reinoud.v@n.leeuwen.net"
CONVERTABLEFIELDS = {
'clients' : {'clientnr': 'int',
'showcontact': 'bool',
'tax_shifted': 'bool',
'lastinvoice': 'date',
'top': 'int',
'stddiscount': 'float',
'notes_on_invoice': 'bool',
'active': 'bool',
'default_email': 'int',
'timestamp': 'date'},
'products': {'id': 'int',
'price': 'float',
'taxes': 'int'},
'invoices': {'profile': 'int',
'discount': 'float',
'paymentperiod': 'int',
'collection': 'bool',
'tax': 'float',
'totalintax': 'float',
'sent': 'date',
'uncollectible': 'date',
'lastreminder': 'date',
'open': 'float',
'paiddate': 'date',
'duedate': 'date',
'overwrite_if_exist': 'bool',
'initialdate': 'date',
'finalsenddate': 'date'},
'invoices_payment': {'date': 'date'},
'invoices_saved': {'id': 'int',
'profile': 'int',
'discount': 'float',
'paymentperiod': 'int',
'totaldiscount': 'float',
'totalintax': 'float',
'clientnr': 'int'},
'invoices_repeated': {'id': 'int',
'profile': 'int',
'discount': 'float',
'paymentperiod': 'int',
'datesaved': 'date',
'totalintax': 'float',
'initialdate': 'date',
'nextsenddate': 'date',
'finalsenddate': 'date',
'clientnr': 'int'},
'profiles': {'id': 'int'},
'countrylist' : {'id': 'int'},
'taxes': {'percentage': 'int',
'default': 'bool'}
}
API = {'getters' : ['clients',
'products',
'invoices',
'invoices_saved',
'invoices_repeated',
'profiles',
'balance',
'countrylist',
'taxes'],
'single_getters' : ['invoices_pdf'],
'posters' : ['clients',
'products',
'invoices'],
'putters' : ['clients',
'products',
'invoices_payment'],
'deleters' : ['clients',
'products',
'invoices',
'invoices_saved',
'invoices_repeated']}
class FactuursturenError(Exception):
"""Base class for exceptions in this module."""
def __init__(self, value = ''):
self.value = value
def __str__(self):
return repr(self.value)
class FactuursturenGetError(FactuursturenError):
pass
class FactuursturenPostError(FactuursturenError):
pass
class FactuursturenWrongPostvalue(FactuursturenError):
pass
class FactuursturenWrongPutvalue(FactuursturenError):
pass
class FactuursturenEmptyResult(FactuursturenError):
pass
class FactuursturenNoAuth(FactuursturenError):
pass
class FactuursturenConversionError(FactuursturenError):
pass
class FactuursturenWrongCall(FactuursturenError):
pass
class FactuursturenNotFound(FactuursturenError):
pass
class FactuursturenNoMoreApiCalls(FactuursturenError):
pass
class Client:
"""
client class to access www.factuursturen.nl through the REST API
"""
def __init__(self,
apikey='',
username='',
configsection='default',
host='www.factuursturen.nl',
protocol='https',
apipath='/api',
version='v0'):
"""
initialize object
When apikey and username are not present, look for INI-style file .factuursturen_rc
in current directory and homedirectory to find those values there.
when only username is present, try to find apikey in configfilesection where it is defined
:param apikey: APIkey (string) as generated online on the website http://www.factuursturen.nl
:param username: accountname for the website
:param configsection: section in file ~/.factuursturen_rc where apikey and username should be present
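Example ~/.factuursturen_rc (placeholder values, INI format as read below):
    [default]
    apikey = 0123456789abcdef
    username = myaccount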
"""
self._url = protocol + '://' + host + apipath + '/' + version + '/'
# try to read auth details from file when not passed
config = ConfigParser.RawConfigParser()
config.read(['.factuursturen_rc', expanduser('~/.factuursturen_rc')])
if (not apikey) and (not username):
try:
self._apikey = config.get(configsection, 'apikey')
self._username = config.get(configsection, 'username')
except ConfigParser.NoSectionError:
raise FactuursturenNoAuth ('key and username not given, nor found in .factuursturen_rc or ~/.factuursturen_rc')
except ConfigParser.NoOptionError:
raise FactuursturenNoAuth ('no complete auth found')
elif username and (not apikey):
self._username = username
for section in config.sections():
if config.get(section, 'username') == username:
self._apikey = config.get(section, 'apikey')
if not self._apikey:
raise FactuursturenNoAuth ('no apikey found for username {}'.format(username))
else:
if not (apikey and username):
raise FactuursturenNoAuth ('no complete auth passed to factuursturen.Client')
self._apikey = apikey
self._username = username
# remaining allowed calls to API
self._remaining = None
self._lastresponse = None
self._headers = {'content-type': 'application/json',
'accept': 'application/json'}
# keep a list of which functions can be used to convert the fields
# from and to a string
self._convertfunctions = {'fromstring': {'int': self._string2int,
'bool': self._string2bool,
'float': self._string2float,
'date': self._string2date},
'tostring': {'int': self._int2string,
'bool': self._bool2string,
'float': self._float2string,
'date': self._date2string}}
# single value conversionfunctions
def _string2int(self, string):
try:
return int(string)
except ValueError:
raise FactuursturenConversionError('cannot convert {} to int'.format(string))
def _string2bool(self, string):
return string.lower() in ("yes", "true", "t", "1")
def _string2float(self, string):
try:
return float(string)
except ValueError:
raise FactuursturenConversionError('cannot convert {} to float'.format(string))
def _string2date(self, string):
if string == '':
return None
try:
return datetime.strptime(string, '%Y-%m-%d')
except ValueError:
raise FactuursturenConversionError('cannot convert {} to date'.format(string))
def _int2string(self, number):
if not isinstance(number, int):
|
TheDavidGithub/mysite
|
car/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 1,116
| 0.000896
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
|
dependencies = [
]
operations = [
migrations.CreateModel(
name='Car',
fields=[
|
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('city', models.CharField(max_length=50)),
('brand', models.CharField(max_length=50)),
('types', models.CharField(max_length=50)),
('car_time', models.CharField(max_length=30)),
('mileage', models.CharField(max_length=30)),
('car_price', models.CharField(max_length=30)),
('image_url', models.CharField(max_length=200)),
('car_url', models.CharField(max_length=200)),
('model', models.CharField(max_length=300)),
('transmission_mode', models.CharField(max_length=50)),
('have_accident', models.CharField(max_length=10)),
],
),
]
|
richo/groundstation
|
test/support/crypto_fixture.py
|
Python
|
mit
| 10,569
| 0.001608
|
#{{{ valid_key
valid_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAyGE8JpvCpp/0aLm6s0C56V5JtVr/fWra/pdFviA8YSabUlur
kcVPwoZLPbYmZtqmjfLSp5k1aCQbSqQDuB3nduFvWS6TzB8ACRDfw4KHE2D76pNE
lPbPejzIF8AyNKLrqi/Tba17cmqw1FFICg3B5ftu7mBzhJCPS/mt1i89CuoKVWRo
VB1CCKqJ0XIUO5/GC8nH2TwAzhnQpCx5B0bUJZkPxB5qbjXPpewY0IpSMNLrAGBJ
AMshLs04rTDSQpWuWHgEcN8QGOedDlYkaVIFgrLG1OZjTzr63cIqWhbRO4b/iC3u
vgca2WvkALNvhYz42Ckment1by6BlGpku/EKZQIDAQABAoIBAQCXsGQkpg04riYL
kW4wfHNAoswRqg13GAkoacO2JhkPQnaaZiL/euriS19yHA3+Fs+27XdiwKyZZZvz
aS4Xlyu95vKwBH6GK4GPW2LJUmqG1Qhfj5R/YEe4NYMT178p8ZuNfh8EDCxDdVjf
oZhzY+7A9xIHCqToWbbgwOZEGgaP1z19ByNME0sL1V1bqQRgDK0cdjschM2l27SM
n5gwv412Gj+zvyo6WrXVq1RxRW+Lz8xVFKfL3ojv/4mC9TCmXj5XCLYkvtzz5Fm3
T3/QNOBw+/Ki6bTPd7AlkmI0/hQzAWoJmaka32195o+0YfKF0EWqOrjPqmUKHg+4
NT/qAoWBAoGBAOZd6MH/iFclB3yGRDOgCjFuOQ8BKxFxu6djW+QnRlIcSlmC/PoO
dYF1Ao811+QyIGIeikpcXkKPjsT2t/FpEidzKJAdFlfB/R83gR6VY2av666Dd46+
c5oEvZwJ4F2B53dcfE4uc3eDBSIvDkDXg8GgoKUspC5oAMc0SApJtTTtAoGBAN6t
IkawG4RACnP5JJbQSzqJMmYQEDjYwlYKaBOAK8driRf5riH+1zxV2iZSbRMRi5YB
OXnMqVyeAMrCmDuWC/m13Q9+F1mwrGCC4d2PdxG3siBk76Wk9lt5i0E5u+0Fkhb8
LocL/i5CdnS7vMzjUIJv+2nynZBcI2C5O5h5R7RZAoGBAKse0wlYccSaAwejDQJ0
PV4ejE/zBDT/EO/0RIwkCH5x+WCohpaRKwQxEOekjWw7OAbqvLskBfjXfqwcwOKY
tB2N03HJyZECzq3joww+2AmtYBrQGX11R5X/M4G3bGzJVFE76DKDGs6KCsQrxCCy
dnLciLhpgsPeCcXvthu+SNa5AoGAU339VR3hI2RTxwpKwtWU73zTzdsGChqUO2ip
f25We+6BN+EhfpOsJm5WXnoXdcflW7hX0aiAdD62M1BlfioTkWy0V4PCw5lv+Lv6
bnXFgnorKuRNywEgckoPgcRhB01Iap/h3jcL9lJrUfe2oeg1nsLYNPCITcGObkzl
4JTQ4vkCgYEAmdxFKmwWLzsG0Q01YHp6W6Mc+YzIPAVYGc3+sJ7tSpk09XFXRCMw
Ypq+K8dAZ0dkyFqfPjeY/loTQUAkat/Ay6YT+Ytnflg8laZXjS8c5OZPD7dj425C
oDTkAxaQEVeJ33n37f9coMnLv7bySNZQzeD9ai5ysddFMQ6+RYEeaRE=
-----END RSA PRIVATE KEY-----"""
valid_pubkey = """ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDIYTwmm8Kmn/RoubqzQLnpXkm1Wv99atr+l0W+IDxhJptSW6uRxU/Chks9tiZm2qaN8tKnmTVoJBtKpAO4Hed24W9ZLpPMHwAJEN/DgocTYPvqk0SU9s96PMgXwDI0ouuqL9NtrXtyarDUUUgKDcHl+27uYHOEkI9L+a3WLz0K6gpVZGhUHUIIqonRchQ7n8YLycfZPADOGdCkLHkHRtQlmQ/EHmpuNc+l7BjQilIw0usAYEkAyyEuzTitMNJCla5YeARw3xAY550OViRpUgWCssbU5mNPOvrdwipaFtE7hv+ILe6+BxrZa+QAs2+FjPjYKSZ6e3VvLoGUamS78Qpl valid_key"""
#}}}
#{{{ invalid_key
bad_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEA8kRD19JjebNWHmGPnTcToYhFR2PE8XjiPJJ4qdd72AjHwGcJ
UZVGAuiFrtEX1QiG619ccUnm/wGPhTS19o4vImBLCz07soOb9qfkkl5b0tjYB9oq
oBbCs1sgPSnO1Ju05/FuVANDQH53jRpHi9t7Jta8A0fZ3t2j1nITwj/aJL4zC3lI
VIQXcR8DteKoY656eavLevKDXNueOpdJIa5kVR3cSLlJzNQGY1AJi4CXpr/+2Krh
mXr+SdHPDtgL0DpJsXVkDYkRkOxGJ49XlRq/SGH+mUhEVM6yXkU53NYsyl64z92u
6Uw0793R2wUIlBo2waeng7BqSijo6lRRoO1p7wIDAQABAoIBAFVu21nVeHznUBxA
nUt8q7CQBJZLSZ052tYvdNu4AJVLa12fODsL3/eQlevzEmtuqV2BcHEG9a3BmCIK
V4zN0TNXL7+T5WRrYumVhVZUbh8plu0V82gm/pwPK4xGHQj+q8oLarO3vtSUhIY+
2TIpwQEOQpkJknw0Pt2VtwAOUlgYBuz9joirz8qgU63lRjrt1dok/tXUulaIXwIq
u7UNR+KltpM+OG8Dkw3dRGi0vd+0pE/udN08mIdbnpF0WxoRYDax5CPKTNVZnNA4
PyjPriXLQNbguRITZhOjdNhEbpFjYnh6SDoWjXILEu5eMMqRn/AkOXgIaV5uMhjK
WBbvcykCgYEA/c1pCWtMICcWQDLLIc+gyBOOM225xTEc/Uwd/3BKY0d5WCZwME5C
KWfWP5FHIjcbkqSFjxoF8iqcjJDDK3mvdhw4FfYky0p4MBVbgcHmWlA43aMISUg6
yOkSVlgJGcBE7qjaity8zq9EplXJ0jeacnVg3PFwcyGyET8fwqRjhtsCgYEA9F1I
u4ZDn/nCESuYan72U64NGb4otmzY5bECsCVxuqvFW18DBOZT5dmhy8WvRzIjHvUk
e5BKBBHH9sqoU62L8TDhgxCkFQI5tW/6wFtxMkagIf2ZS34LjXw6DNmadFSdt895
QgTuOTfDo7CNUCmKVvWvnfYh3WflVFcdduFAA30CgYBDAjOkN/7+UjOOuZpmD+J8
UKVvsJ2mkYP84FbfNS9+rMU939ApANB5DCBF4j5sqy4NdoCIuq8tM9DnT+UnmlYY
EfVA3x7xyXSYBVuGk8IUwC9zXPL4HijfGKItx8qHUPsiI9SnawYyb5XpcKb7aJDP
fWwYiWpOC2xvH0JsamiYlwKBgFclXWbi/wLyQy90aKqyFI7LJ4jitAo6zy867UZ8
VA7Ka1umiqEYsxrpfjX5jgTJ8JR9wEEAuCSzYqi0pUsSPTIKyKHBMYL8hDAce0RW
WAwh+8GjeyPdE6mlf95NTDPoaUbACvJsZ7ioFnEt6XeJGPjiLXsb5//ZqYNmfe+D
bRzRAoGAcii2U2bkXl0v3gTsFgfFYgWVofnUnGersnPzwsL0/5l/O36McPZxdAf5
OVBHW0TIsQ22eyNcOwQiqxlWPOdimCuMi3FdVfXiYjO1FT1Fg/rPBUbfTO9JDJVJ
LE7PfYWR0rWVnASlEYFGnlTNDHRXTfrsP9A6hqllSjvCH/BRgLs=
-----END RSA PRIVATE KEY-----"""
bad_pubkey = """ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDyREPX0mN5s1YeYY+dNxOhiEVHY8TxeOI8knip13vYCMfAZwlRlUYC6IWu0RfVCIbrX1xxSeb/AY+FNLX2ji8iYEsLPTuyg5v2p+SSXlvS2NgH2iqgFsKzWyA9Kc7Um7Tn8W5UA0NAfneNGkeL23sm1rwDR9ne3aPWchPCP9okvjMLeUhUhBdxHwO14qhjrnp5q8t68oNc2546l0khrmRVHdxIuUnM1AZjUAmLgJemv/7YquGZev5J0c8O2AvQOkmxdWQNiRGQ7EYnj1eVGr9IYf6ZSERUzrJeRTnc1izKXrjP3a7pTDTv3dHbBQiUGjbBp6eDsGpKKOjqVFGg7Wnv invalid_pubkey"""
#}}}
#{{{ passphrase_key
passphrase_key = """-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,203FFB8DE1E202DD1F430A5E1B6C0210
YIncd0kBPCZCsW7W7fXBrMAD7/lsLj8k+ZuFJNZFF/fK+361Lteoy8O/wJn5ZHWm
R2r0/JLuXt6diwmpJO2xi2NpT9ktq1D9YEvKq/R1cSWdBXFHxlBf9eZi+B5+z/pF
ObzlkHbeSEKQmOTbTN+jqO3hOsf6y665geqTE4/fcH1Y/gl4y92v+yNJ6p1PVmv6
/4V/8uYVMTkt+xGEYDMDzxnGNfufkvwfB8hYlVB7eJB1IBcRlciee4h9NretEjds
UenXEYMtmRtHNEat+eTiPUJRihoedc36Za64yO/9Qk2A+bxeOe5JIFzaSHFFr6om
stsAnAqW6Vl7xx0p3FTzqhTrVh+DRB4WNYfOPLZz26906OvGfsxcp8PC+NLEXcUm
Bgti016e/UzYWlikCx8iao9LTVx0vTsJVqKX1zEJ/wBkzhkU9GSPP18grCKECjdW
gEwMO830aLLQpSHj1fzoPCabaNRDiF5qwz2ZPMpT5R5PTMKUjwqXU/4K7qzGCf77
8WZtOvthzGLpr7Sk4XC38+fhXEmHiBlnDeaESamw8Sub6cNbnanhuDFSfVl+m6+z
0suhE1wxY6IEPPkfbAhoMTtbxUl+uaxZS1cZBPseTw4Lyo7vut1UQdNvqM9pxirg
Az7Sefv9xB6OTfFKWf0bS+U6ZRipTvGUEaI2rgKUjf1IyhjVHTp4nN2H36x6XxBh
vMS9t+VcVwnpAVeG+owHEMfns0jdGhpborcM/xUdToovUk4wvxx9OjS17FNEDrJC
HvcbHfA3Gmt5QTlhPDrmy+KcUMmbBDqSuuxDezQBVHdiMTK1/3YL0cnEeyzq9/Fv
u8WVonE2oopOGzpW4zIcPnLAyz2Era8JcjW0cvsjTl8nIkxEnY6vXlz4X4L50EW3
2zUAT9iVV49qwDNWOehog7bD/+M8DL1MdMZyhKP6kT9gP6yVzk7pLTs7mDc4aDNp
ZyzDVVf6FB5vET3zIC3bkDywI1vY/5E7u6P2tV4M8SzPDjWb5wf32FlXhwHnlDBK
qDGV0aRK8mwZ74begQN3GsXW4V6iYOVo5ny2n4o5Q/mkVZ7gcvPuvyOBL5xqSNt+
Ux5iiyBk2phFRW5xEomjZkTwgzXgci+JMrwX7w3AZzR+QCQlNXdJ4JvwJ2PDLUKn
Rd3qBFh8VJHPhiIPtp309L3qqvQM9LdmoMP8HPxGVEpuy5qLNqZN10uQT3HM2D4d
RzMwhDRACO59SQN3iEcWsiXhwq3Qtd1YULEfEjhACkDKDSKPsI0SPa4/0Xp53zpw
PspdS29xzaWIcMahC0qI1k00x/HAIX78AS/c3aiujuxdb/6NifDfph5RAjeH0Qc9
EY7yVwffjFVu74a6goc8Ru8yOsdIl4Z49rFRlzsGjeWNV1qu55TZOSudPphYDZF3
m0T09+lJya8Mpd25Fnl8DVKxoop2FOQlRx9yYlMhsCyLcfdETGgr79cqUvNEBOem
wdSaedQYAMbvJdHp+3pgI/FiJa4w+nVaczgFM8rt4O1hdDmPpYXuVAWj5zSvIB4r
Z/+5HMtkbzOYzHwMHTwIvTLsn6AGtLeLLHj4fF4YxOaPZ/fzLQJys+TmLLzcJtua
-----END RSA PRIVATE KEY-----"""
passphrase_pubkey = """ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGmMmPwbfpQJp9FnbNLti8txJ04ByE+cHc68SmjGJiJJEE+f1xnMMt0EeRit1BAJn7I8YLIansmf582KaknQS909OuNfzer8W5fNAOp6T+eHyi4sQ5vtOdeRURudP0q3FjJvn8ZtHu0Fp28HXbRTbvPCOPCJK/ZGK0z3M4JgG2Ir/L0KibvcN8eDbkMXLYmqfD0t95Rm4DFGTbtkWn8DzzPZmkodLAvUfEZE5fKUp+ZR2HO1XEEjPZwdg+bYBqs7hldps2K/TlJAw6fhAzEohiogMKMueFds+ZLuJB3yO1nKgPMXOZwKrxSR+IJ1nAlwHoLoipozefffoxwqtoEzcB passphrase_key"""
#}}}
#{{{ invalid_key
invalid_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEA8kRD19JjebNWHmGPnTcToYhFR2PE8XjiPJJ4qdd72AjHwGcJ
UZVGAuiFrtEX1QiG619ccUnm/wGPhTS19o4vImBLCz07soOb9qfkkl5b0tjYB9oq
oBbCs1sgPSnO1Ju05/FuVANDQH53jRpHi9t7Jta8A0fZ3t2j1nITwj/aJL4zC3lI
VIQXcR8DteKoY656eavLevKDXNueOpdJIa5kVR3cSLlJzNQGY1AJi4CXpr/+2Krh
mXr+SdHPDtgL0DpJsXVkDYkRkOxGJ49XlRq/SGH+mUhEVM6yXkU53NYsyl64z92u
6Uw0793R2wUIlBo2waeng7BqSijo6lRRoO1p7wIDAQABAoIBAFVu21nVeHznUBxA
V4zN0TNXL7+T5WRrYumVhVZUbh8plu0V82gm/pwPK4xGHQj+q8oLarO3vtSUhIY+
2TIpwQEOQpkJknw0Pt2VtwAOUlgYBuz9joirz8qgU63lRjrt1dok/tXUulaIXwIq
u7UNR+KltpM+OG8Dkw3dRGi0vd+0pE/udN08mIdbnpF0WxoRYDax5CPKTNVZnNA4
PyjPriXLQNbguRITZhOjdNhEbpFjYnh6SDoWjXILEu5eMMqRn/AkOXgIaV5uMhjK
WBbvcykCgYEA/c1pCWtMICcWQDLLIc+gyBOOM225xTEc/Uwd/3BKY0d5WCZwME5C
KWfWP5FHIjcbkqSFjxoF8iqcjJDDK3mvdhw4FfYky0p4MBVbgcHmWlA43aMISUg6
u4ZDn/nCESuYan72U64NGb4otmzY5bECsCVxuqvFW18DBOZT5dmhy8WvRzIjHvUk
e5BKBBHH9sqoU62L8TDhgxCkFQI5tW/6wFtxMkagIf2ZS34LjXw6DNmadFSdt895
QgTuOTfDo7CNUCmKVvWvnfYh3WflVFcdduFAA30CgYBDAjOkN/7+UjOOuZpmD+J8
UKVvsJ2mkYP84FbfNS9+rMU939ApANB5DCBF4j5sqy4NdoCIuq8tM9DnT+UnmlYY
fWwYiWpOC2xvH0JsamiYlwKBgFclXWbi/wLyQy90aKqyFI7LJ4jitAo6zy867UZ8
VA7Ka1umiqEYsxrpfjX5jgTJ8JR9wEEAuCSzYqi0pUsSPTIKyKHBMYL8hDAce0RW
WAwh+8GjeyPdE6mlf95NTDPoaUbACvJsZ7ioFnEt6XeJGPjiLXsb5//ZqYNmfe+D
bRzRAoGAcii2U2bkXl0v3gTsFgfFYgWVofnUnGersnPzwsL0/5l/O36McPZxdAf5
OVBHW0TIsQ22eyNcOwQiqxlWPOdimCuMi3FdVfXiYjO1FT1Fg/rPBUbfTO9JDJVJ
LE7PfYWR0rWVnASlEYFGnlTNDHRXTfrsP9A6hqllSjvCH/BRgLs=
-----END RSA PRIVATE KEY-----"""
invalid_pubkey = """ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDyREPX0mN5s1YeYY+dNxOhiEVHY8TxeOI8knip13vYCMfAZwlRlUYC6IWu0RfVCIbrX1xxSebsLPTuyg5v2p+SSXlvS2NgH2iqgFsKzWyA9K
|
weijia/djangoautoconf
|
djangoautoconf/auto_detection/routing_auto_detection.py
|
Python
|
bsd-3-clause
| 1,242
| 0.00161
|
from importlib import import_module
from djangoautoconf.auto_conf_urls import enum_app_names
from djangoautoconf.auto_conf_utils import is_at_least_one_sub_filesystem_item_exists, get_module_path
from ufs_tools.short_decorator.ignore_exception import ignore_exc_with_result
def autodiscover():
from django.conf import settings
routing_holder = settings.CHANNEL_LAYERS["default"]["ROUTING"]
routing_module = ".".join(routing_holder.split(".")[0:-1])
root_url = import_module(routing_module)
root_default_channel_routing = root_url.default_channel_routing
for app in enum_app_names():
if app == "channels":
continue
mod = import_module(app)
if is_at_least_one_sub_filesystem_item_exists(get_module_path(mod), ["routing.py"]):
routing_module_name = "%s.routing" % app
routing_settings = get_routing_settings(routing_module_name)
root_default_channel_routing.extend(routing_settings)
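# Hypothetical <app>/routing.py that this autodiscovery would pick up (channel and
# consumer names are placeholders, not from any real app; Channels 1.x style API):
#   from channels.routing import route
#   channel_routing = [
#       route("websocket.receive", "myapp.consumers.ws_receive"),
#   ]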
@ignore_exc_with_result(exception_result=[], is_notification_needed=True)
def get_routing_settings(routing_module_name):
routing_module = import_module(routing_module_name)
routing_settings = routing_module.channel_routing
return routing_settings
|
modelblocks/modelblocks-release
|
resource-gcg/scripts/induciblediscgraph.py
|
Python
|
gpl-3.0
| 35,189
| 0.036204
|
###############################################################################
## ##
## This file is part of ModelBlocks. Copyright 2009, ModelBlocks developers. ##
## ##
## ModelBlocks is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## ModelBlocks is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with ModelBlocks. If not, see <http://www.gnu.org/licenses/>. ##
##                                                                           ##
###############################################################################
import sys, os, collections, sets
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'resource-gcg', 'scripts'))
import discgraph
VERBOSE = False
for a in sys.argv:
if a=='-d':
VERBOSE = True
################################################################################
def complain( s ):
print( '#ERROR: ' + s )
sys.stderr.write( 'ERROR: ' + s + '\n' )
# exit( 1 )
################################################################################
class InducibleDiscGraph( discgraph.DiscGraph ):
def getChainFromSup( D, xLo ):
return [ xLo ] + [ x for l,xHi in D.Inhs.get(xLo,{}).items() if l!='w' and l!='o' for x in D.getChainFromSup(xHi) ]
def getChainFromSub( D, xHi ):
return [ xHi ] + [ x for xLo in D.Subs.get(xHi,[]) for x in D.getChainFromSub(xLo) ]
def __init__( D, line ):
discgraph.DiscGraph.__init__( D, line )
## List of referents that participate in elementary predications (which does not include the eventuality / elementary predication itself)...
D.Participants = sets.Set([ x for pred in D.PredTuples for x in pred[2:] ])
## List of heirs for each inherited referent...
D.Legators = collections.defaultdict( list )
D.Heirs = collections.defaultdict( list )
for xLo in D.Referents:
D.Legators[ xLo ] = D.getLegators( xLo )
for xHi in D.Referents:
D.Heirs[ xHi ] = D.getHeirs( xHi )
if VERBOSE: print( 'Legators = ' + str(D.Legators) )
if VERBOSE: print( 'Heirs = ' + str(D.Heirs) )
def getTopUnaryLegators( xLo ):
L = [ xLeg for l,xHi in D.Inhs.get( xLo, {} ).items() if l!='w' and l!='o' and len( D.Subs.get(xHi,[]) ) < 2 for xLeg in getTopUnaryLegators(xHi) ]
return L if L != [] else [ xLo ]
# if D.Inhs.get( xLo, {} ).items() != [] else [ xLo ]
# UnaryL = [ xLeg for xLeg in D.Legators.get(xLo,[]) if all([ xLo in D.Heirs.get(xHeir,[]) for xHeir in D.Legators.get(xLo,[]) if xHeir in D.Heirs.get(xLeg,[]) ]) ]
# return [ x for x in UnaryL if not any([ x in D.Heirs.get(y,[]) for y in UnaryL if y != x ]) ]
def getTopLegators( xLo ):
L = [ xLeg for l,xHi in D.Inhs.get( xLo, {} ).items() if l!='w' and l!='o' for xLeg in getTopLegators(xHi) ]
return L if L != [] else [ xLo ]
# if D.Inhs.get( xLo, {} ).items() != [] else [ xLo ]
D.TopLegators = { xLo : sets.Set( getTopLegators(xLo) ) for xLo in D.Inhs }
if VERBOSE: print( 'TopLegators = ' + str(D.TopLegators) )
D.TopUnaryLegators = { xLo : sets.Set( getTopUnaryLegators(xLo) ) for xLo in D.Inhs }
if VERBOSE: print( 'TopUnaryLegators = ' + str(D.TopUnaryLegators) )
# D.PredRecency = { }
## List of heirs for each participant...
D.HeirsOfParticipants = [ xLo for xHi in D.Participants for xLo in D.Heirs.get(xHi,[]) ]
if VERBOSE: print( 'HeirsOfParticipants = ' + str(D.HeirsOfParticipants) )
## Obtain inheritance chain for each reft...
D.Chains = { x : sets.Set( D.getChainFromSup(x) + D.getChainFromSub(x) ) for x in D.Referents }
if VERBOSE: print( 'Chains = ' + str(D.Chains) )
# Inheritances = { x : sets.Set( getChainFromSup(x) ) for x in Referents }
## Mapping from referent to elementary predications containing it...
# D.RefToPredTuples = { xOrig : [ (ptup,xInChain) for xInChain in D.Chains[xOrig] for ptup in D.PredTuples if xInChain in ptup[2:] ] for xOrig in D.Referents }
def orderTuplesFromSups( x ):
Out = []
if x in D.Nuscos:
for src in D.Nuscos[x]:
Out += [ (ptup,src) for ptup in D.PredTuples if src in ptup[2:] ]
Out += [ (ptup,x) for ptup in D.PredTuples if x in ptup[2:] ]
for lbl,dst in D.Inhs.get(x,{}).items():
Out += orderTuplesFromSups( dst )
return Out
def orderTuplesFromSubs( x ):
Out = []
Out += [ (ptup,x) for ptup in D.PredTuples if x in ptup[2:] ]
for src in D.Subs.get(x,[]):
Out += orderTuplesFromSubs( src )
# Out += [ (ptup,src) for ptup in D.PredTuples if src in ptup[2:] ]
return Out
D.FullRefToPredTuples = { x : sets.Set( orderTuplesFromSubs(x) + orderTuplesFromSups(x) ) for x in D.Referents }
D.WeakRefToPredTuples = { x : orderTuplesFromSubs( D.Inhs.get(x,{}).get('r',x) ) for x in D.Referents }
D.BareRefToPredTuples = { x : [ (ptup,x) for ptup in D.PredTuples if x in ptup[2:] ] for x in D.Referents }
if VERBOSE: print( 'FullRefToPredTuples = ' + str(D.FullRefToPredTuples) )
if VERBOSE: print( 'WeakRefToPredTuples = ' + str(D.WeakRefToPredTuples) )
if VERBOSE: print( 'BareRefToPredTuples = ' + str(D.BareRefToPredTuples) )
def constrainingTuplesFromSups( x ):
return [ ptup for ptup in D.PredTuples if x in ptup[1:] ] + [ ptup for _,xHi in D.Inhs.get(x,{}).items() for ptup in constrainingTuplesFromSups( xHi ) ]
def constrainingTuplesFromSubs( x ):
return [ ptup for ptup in D.PredTuples if x in ptup[1:] ] + [ ptup for xLo in D.Subs.get(x,[]) for ptup in constrainingTuplesFromSubs( xLo ) ]
D.ConstrainingTuples = { x : sets.Set( constrainingTuplesFromSups(x) + constrainingTuplesFromSubs(x) ) for x in D.Referents }
## Calculate ceilings of scoped refts...
# D.AnnotatedCeilings = sets.Set([ y for y in D.Referents for x in D.Scopes.keys() if D.ceiling(x) in D.Chains[y] ]) #D.Chains[D.ceiling(x)] for x in D.Scopes.keys() ])
# if len(D.AnnotatedCeilings) == 0:
# D.AnnotatedCeilings = sets.Set( sorted([ (len(chain),chain) for x,chain in D.Chains.items() if x.startswith('000') ])[-1][1] ) # sets.Set(D.Chains['0001s'])
# print( '#NOTE: Discourse contains no scope annotations -- defining root as longest chain through first sentence: ' + str(sorted(D.AnnotatedCeilings)) )
# sys.stderr.write( 'NOTE: Discourse contains no scope annotations -- defining root as longest chain through first sentence: ' + str(sorted(D.AnnotatedCeilings)) + '\n' )
# DisjointCeilingPairs = [ (x,y) for x in D.AnnotatedCeilings for y in D.AnnotatedCeilings if x<y and not D.reachesInChain( x, y ) ]
# if len(DisjointCeilingPairs) > 0:
# print( '#WARNING: Maxima of scopal annotations are disjoint: ' + str(DisjointCeilingPairs) + ' -- disconnected annotations cannot all be assumed dominant.' )
# sys.stderr.write( 'WARNING: Maxima of scopal annotations are disjoint: ' + str(DisjointCeilingPairs) + ' -- disconnected annotations cannot all be assumed dominant.\n' )
# if VERBOSE: print( 'AnnotatedCeilings = ' + str(D.AnnotatedCeilings) )
# D.NotOutscopable = [ x for x in D.Referents if D.ceiling(x) in D.AnnotatedCeilings ]
# if VERBOSE: print( 'NotOutscopable = ' + str(D.NotOutscopable) )
D.PredToTuple = { xOrig : ptup f
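# --- Hedged illustration (not part of the original, truncated file) ---
# Minimal standalone sketch of the chain-building recursion used by
# getChainFromSup/getChainFromSub above: Inhs maps a referent to its labelled
# superordinates ('w' and 'o' links are skipped), Subs maps a referent to its
# heirs. The toy graph and referent names below are made up.
toy_Inhs = {'x1': {'r': 'x2'}, 'x2': {'e': 'x3'}}
toy_Subs = {'x3': ['x2'], 'x2': ['x1']}

def chain_from_sup(x_lo):
    # climb inheritance links upward, skipping 'w' and 'o' labels
    return [x_lo] + [x for l, x_hi in toy_Inhs.get(x_lo, {}).items()
                     if l != 'w' and l != 'o'
                     for x in chain_from_sup(x_hi)]

def chain_from_sub(x_hi):
    # walk heir links downward
    return [x_hi] + [x for x_lo in toy_Subs.get(x_hi, []) for x in chain_from_sub(x_lo)]

print(chain_from_sup('x1'))  # ['x1', 'x2', 'x3']
print(chain_from_sub('x3'))  # ['x3', 'x2', 'x1']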
|
amanzi/ats-dev
|
tools/visit_ats/visit_ats/visit_ats.py
|
Python
|
bsd-3-clause
| 21,342
| 0.005716
|
import sys, os
import visit_rcParams as vrc
import visit_colormaps as vc
import visit_time as vt
import visit as v
rcParams = dict( surface_linewidth=3,
snow_linewidth=4
)
class Operator:
def __init__(self, oname, otype, oatts):
self.oname = oname
self.otype = otype
self.oatts = oatts
class Plot:
def __init__(self, pname, ptype, patts, varname=None):
"""Plot class"""
self.pname = pname
self.varname = varname
self.ptype = ptype
self.patts = patts
self.operators = []
self.annot = v.GetAnnotationObject(pname)
self.annot.fontFamily = vrc.getDefaultFont()
self.annot.fontHeight = vrc.rcParams['legend.fontheight']
self.annot.managePosition = 0
self.annot.xScale = vrc.rcParams['legend.scale'][0]
self.annot.yScale = vrc.rcParams['legend.scale'][1]
self.annot.position = vrc.rcParams['legend.position']
self.annot.drawMinMax = vrc.rcParams['legend.minmax']
if varname is not None:
self.annot.drawTitle = 0
self.title = v.CreateAnnotationObject("Text2D")
self.title.text = varname
self.title.fontFamily = vrc.getDefaultFont()
self.title.height = vrc.rcParams['legend.title.fontheight']
self.title.position = vrc.rcParams['legend.title.position']
def getLimits(self):
assert self.ptype == 'Pseudocolor'
min = None
max = None
if self.patts.minFlag:
min = self.patts.min
if self.patts.maxFlag:
            max = self.patts.max
return min,max
class VisItWindow:
"""Class for a window"""
class Slice:
"""Helper class for slicing into 2D"""
def __init__(self, point=None, normal=None):
if point is None:
point = (0,0,0)
if normal is None:
normal = (0,-1,0)
assert type(point) is tuple
assert len(point) == 3
assert type(normal) is tuple
assert len(normal) == 3
self.point = point
self.normal = normal
def toAttributes(self):
s = v.SliceAttributes()
s.originType = s.Point
s.originPoint = self.point
s.normal = self.normal
s.axisType = s.Arbitrary
return s
def __init__(self, index):
self.i = index
self.annot = vrc.getAnnotationAttributes()
self.setDimension(3)
self.plots = []
self.nonplots = [] # other objects like meshes
self._slice = None
self.exaggeration = None
def setDimension(self, dim):
"""Sets the dimension, which is used in controlling the view"""
self.dim = dim
if dim == 2:
self.view = v.GetView2D()
elif dim == 3:
self.view = v.GetView3D()
else:
raise RuntimeError("Invalid dimension %s"%str(dim))
def slice(self, point=None, normal=None):
"""Registers a slice -- this is not immediately added"""
self._slice = self.Slice(point, normal)
self.setDimension(2)
def exaggerateVertical(self, factor):
"""Registers an exxageration -- this is not immediately added"""
self.exaggeration = factor
def _exaggerateVertical(self):
if self.dim == 3:
self.view.axis3DScaleFlag = 1
self.view.axis3DScales = (self.view.axis3DScales[0],
self.view.axis3DScales[1],
self.exaggeration)
else:
for i,plot in enumerate(self.plots):
done = False
for op in plot.operators:
if "exaggerate_vertical" == op.oname:
done = True
if not done:
print "transforming plot %d..."%i
tr = v.TransformAttributes()
tr.doScale = 1
tr.scaleY = self.exaggeration
v.SetActivePlots(i)
v.AddOperator("Transform")
v.SetOperatorOptions(tr)
plot.operators.append(Operator("exaggerate_vertical", "Transform", tr))
def createMesh(self, color='w', opacity=0.15, silo=False):
_colors = dict(w=(255,255,255,255),
k=(0,0,0,255),
gray=(175,175,175),
)
if silo:
v.AddPlot('Mesh', "mesh")
else:
v.AddPlot('Mes
|
h', "Mesh")
ma = v.MeshAttributes()
ma.legendFlag = 0
ma.meshColor = _colors[color]
ma.meshColorSource = ma.MeshCustom
if (opacity < 1.):
ma.opaqueMode = ma.On
ma.opacity = opacity
v.SetPlotOptions(ma)
pname = v.GetPlotL
|
ist().GetPlots(v.GetNumPlots()-1).plotName
if silo:
plot = Plot(pname, 'mesh', ma)
else:
plot = Plot(pname, 'Mesh', ma)
self.nonplots.append(plot)
return plot
def createPseudocolor(self, varname, display_name=None, cmap=None,
limits=None, linewidth=None, legend=True, alpha=False):
"""Generic creation of pseudocolor"""
if display_name is None:
display_name = vrc.renameScalar(varname)
if "temperature" in display_name:
display_name = display_name.replace("[K]", "[C]")
print "defining alias: %s = %s"%(display_name, varname)
v.DefineScalarExpression(display_name, "<%s> - 273.15"%varname)
elif display_name != varname:
print "defining alias: %s = %s"%(display_name, varname)
v.DefineScalarExpression(display_name, '<'+varname+'>')
v.AddPlot('Pseudocolor', display_name)
pa = v.PseudocolorAttributes()
# limits
if limits is None:
limits = vrc.getLimits(varname)
if limits is not None:
min = limits[0]
max = limits[1]
if min is not None:
pa.minFlag = 1
pa.min = min
if max is not None:
pa.maxFlag = 1
pa.max = max
# opacity
if alpha:
pa.opacity = 0
pa.opacityType = pa.ColorTable
# colormap
if cmap is not None:
reverse = cmap.endswith("_r")
if reverse:
cmap = cmap.strip("_r")
pa.invertColorTable = 1
pa.colorTableName = cmap
# linewidth for 2D
if linewidth is None:
linewidth = vrc.rcParams['pseudocolor.linewidth']
pa.lineWidth = linewidth
# turn off legend for 2D surf
if not legend:
pa.legendFlag = 0
v.SetActivePlots(len(self.plots)+1)
v.SetPlotOptions(pa)
pname = v.GetPlotList().GetPlots(v.GetNumPlots()-1).plotName
if legend:
plot = Plot(pname, 'Pseudocolor', pa, display_name)
else:
plot = Plot(pname, 'Pseudocolor', pa)
self.plots.append(plot)
return plot
def createContour(self, varname, value, color=None, linewidth=None):
"""Generic creation of a single contour without a legend"""
v.AddPlot('Contour', varname)
ca = v.ContourAttributes()
ca.contourMethod = ca.Value
ca.contourValue = (value,)
ca.colorType = ca.ColorBySingleColor
if color is None:
color = vrc.rcParams['contour.color']
if type(color) is str:
color = vc.common_colors[color]
ca.singleColor = color
if linewidth is None:
linewidth = vrc.rcParams['contour.linewidth']
ca.lineWidth = linewidth
# turn off legend for 2D surf
ca.legendFlag = 0
v.SetPlotOptions(ca)
pname = v.GetPlotList().GetPlots(v.GetNumPlots()-1).plotName
plot = Plot(pname, 'Contour', ca)
self.plots.append(plot)
return plot
def draw(self):
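# --- Hedged usage sketch (not part of the original, truncated file) ---
# Based only on the methods visible above, a VisItWindow is typically set up by
# registering a slice / vertical exaggeration and then adding plots:
#
#   win = VisItWindow(0)
#   win.slice(point=(0, 0, 0), normal=(0, -1, 0))   # cut the domain to 2D
#   win.exaggerateVertical(10)                      # stretch the vertical axis
#   win.createMesh(color='gray', opacity=0.2)
#   win.createPseudocolor("temperature.cell.0", cmap="hot_r", limits=(0.0, 20.0))
#
# The variable name, colormap and limits are hypothetical; they depend on the
# ATS output that is loaded into VisIt.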
|
hanmy75/voice-recognizer
|
src/aiy/audio.py
|
Python
|
apache-2.0
| 3,887
| 0.000772
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drivers for audio functionality provided by the VoiceHat."""
import time
import wave
import aiy._drivers._player
import aiy._drivers._recorder
import aiy._drivers._tts
import aiy.i18n  # needed by say() below for the default language code
AUDIO_SAMPLE_SIZE = 2 # bytes per sample
AUDIO_SAMPLE_RATE_HZ = 16000
# Global variables. They are lazily initialized.
_voicehat_recorder = None
_voicehat_player = None
_status_ui = None
class _WaveDump(object):
"""A processor that saves recorded audio to a wave file."""
def __init__(self, filepath, duration):
self._wave = wave.open(filepath, 'wb')
self._wave.setnchannels(1)
self._wave.setsampwidth(2)
self._wave.setframerate(16000)
self._bytes = 0
self._bytes_limit = int(duration * 16000) * 1 * 2
def add_data(self, data):
max_bytes = self._bytes_limit - self._bytes
data = data[:max_bytes]
self._bytes += len(data)
if data:
self._wave.writeframes(data)
def is_done(self):
return self._bytes >= self._bytes_limit
def __enter__(self):
return self
def __exit__(self, *args):
self._wave.close()
def get_player():
"""Returns a driver to control the VoiceHat speaker.
The aiy modules automatically use this player. So usually you do not need to
use this. Instead, use 'aiy.audio.play_wave' if you would like to play some
audio.
"""
global _voicehat_player
if _voicehat_player is None:
_voicehat_player = aiy._drivers._player.Player()
return _voicehat_player
def get_recorder():
"""Returns a driver to control the VoiceHat microphones.
The aiy modules automatically use this recorder. So usually you do not need to
use this.
"""
global _voicehat_recorder
if _voicehat_recorder is None:
_voicehat_recorder = aiy._drivers._recorder.Recorder()
return _voicehat_recorder
def record_to_wave(filepath, duration):
"""Records an audio for the given duration to a wave file."""
recorder = get_recorder()
dumper = _WaveDump(filepath, duration)
with recorder, dumper:
recorder.add_processor(dumper)
while not dumper.is_done():
time.sleep(0.1)
def play_wave(wave_file):
"""Plays the given
|
wave file.
The wave file has to be mono and small enough to be loaded in memory.
"""
player = get_player()
player.play_wav(wave_file)
def play_audio(audio_data)
|
:
"""Plays the given audio data."""
player = get_player()
player.play_bytes(audio_data, sample_width=AUDIO_SAMPLE_SIZE, sample_rate=AUDIO_SAMPLE_RATE_HZ)
def say(words, lang=None):
"""Says the given words in the given language with Google TTS engine.
If lang is specified, e.g. "en-US', it will be used to say the given words.
Otherwise, the language from aiy.i18n will be used.
"""
if not lang:
lang = aiy.i18n.get_language_code()
aiy._drivers._tts.say(aiy.audio.get_player(), words, lang=lang)
def get_status_ui():
"""Returns a driver to access the StatusUI daemon.
The StatusUI daemon controls the LEDs in the background. It supports a list
of statuses it is able to communicate with the LED on the Voicehat.
"""
global _status_ui
if _status_ui is None:
_status_ui = aiy._drivers._StatusUi()
return _status_ui
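# --- Hedged usage sketch (not part of the original file) ---
# The helpers above cover the common round trip on the VoiceHat: record a short
# clip to a wave file, play it back, and speak a confirmation. The file path,
# duration and spoken text are arbitrary examples.
if __name__ == '__main__':
    record_to_wave('/tmp/test_clip.wav', 3)      # record 3 seconds from the mics
    play_wave('/tmp/test_clip.wav')              # play it back through the speaker
    say('Recording played back', lang='en-US')   # synthesize speech via Google TTS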
|
ivanlyon/exercises
|
kattis/k_statistics.py
|
Python
|
mit
| 970
| 0.004124
|
'''
Produce minimum, maximum and the difference of number list
Status: Accepted
'''
###############################################################################
def read_line_of_integers():
"""Read one line of numbers or detect EOF"""
try:
text = input()
return [int(i) for i in text.split()][1:]
except EOFError:
pass
return None
###############################################################################
def main():
"""Read input and print output statistics about list of numbers"""
test_case = 0
while True:
numbers = read_line_of_integers()
if numbers:
test_case += 1
mini = min(numbers)
maxi = max(numbers)
print('Case {0}: {1} {2} {3}'.format(test_case, mini
|
, maxi, maxi - mini))
else:
break
###############################################################################
if __name__ == '__ma
|
in__':
main()
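# --- Hedged illustration (not part of the original file) ---
# read_line_of_integers() drops the leading count on each line, so a made-up
# input line "4 1 7 3 5" yields the numbers [1, 7, 3, 5] and main() prints:
#   Case 1: 1 7 6   (minimum 1, maximum 7, difference 6)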
|
yuzheng/python-ex
|
checkDigit.py
|
Python
|
mit
| 301
| 0.017668
|
#-*-coding:UTF-8 -*-
#
# Check whether the input is an integer (int)
input_
|
string = input('Please input n:')
#while input_string.isdigit() == False:
while not input_string.isdigit():
print("Error, %s is not digit
|
!" % input_string)
input_string = input('Please input n:')
print("%s is digit!" % input_string)
|
dalejung/trtools
|
trtools/io/tests/test_filecache.py
|
Python
|
mit
| 1,754
| 0.008552
|
from unittest import TestCase
from io import StringIO
from trtools.util.tempdir import TemporaryDirectory
import trtools.io.filecache as fc
class TestFileCache(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
class MetaKey(object):
def __init__(self, key):
self.key = key
def __hash__(self):
return hash(self.key)
def __filename__(self):
return str(self.key) + '.save'
def __repr__(self):
return repr(self.key)
class Value(object):
def __init__(self, string):
self.string = string
class TestMetaFileCache(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_meta(self):
key1 = MetaKey('dale')
key2 = MetaKey('bob')
with TemporaryDirectory() as td:
mfc = fc.MetaFileCache(td)
mfc[key1] = Value('daledata')
mfc[key2] = Value('bobdata')
mfc[key1] = Value('daledata2')
# grabbing with key s
|
till works
assert mfc[key1].string == 'daledata2'
# this one should load keys from index file
mfc2 = fc.MetaFileCache(td)
for a,b in zip( list(mfc2.keys()), list(mfc.keys())):
assert a.key == b.key
if __name__ == '__main__':
import nose
nose.runmodule(arg
|
v=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],exit=False)
|
nakul02/incubator-systemml
|
src/main/python/systemml/mlcontext.py
|
Python
|
apache-2.0
| 25,332
| 0.004934
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distribut
|
ed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software d
|
istributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Methods to create Script object
script_factory_methods = [ 'dml', 'pydml', 'dmlFromResource', 'pydmlFromResource', 'dmlFromFile', 'pydmlFromFile', 'dmlFromUrl', 'pydmlFromUrl' ]
# Utility methods
util_methods = [ '_java2py', 'getHopDAG' ]
__all__ = ['MLResults', 'MLContext', 'Script', 'Matrix' ] + script_factory_methods + util_methods
import os
import numpy as np
import pandas as pd
import threading, time
try:
import py4j.java_gateway
from py4j.java_gateway import JavaObject
from pyspark import SparkContext
from pyspark.conf import SparkConf
import pyspark.mllib.common
from pyspark.sql import SparkSession
except ImportError:
raise ImportError('Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')
from .converters import *
from .classloader import *
def getHopDAG(ml, script, lines=None, conf=None, apply_rewrites=True, with_subgraph=False):
"""
Compile a DML / PyDML script.
Parameters
----------
ml: MLContext instance
MLContext instance.
script: Script instance
Script instance defined with the appropriate input and output variables.
lines: list of integers
Optional: only display the hops that have begin and end line number equals to the given integers.
conf: SparkConf instance
Optional spark configuration
apply_rewrites: boolean
If True, perform static rewrites, perform intra-/inter-procedural analysis to propagate size information into functions and apply dynamic rewrites
with_subgraph: boolean
If False, the dot graph will be created without subgraphs for statement blocks.
Returns
-------
hopDAG: string
hop DAG in dot format
"""
if not isinstance(script, Script):
raise ValueError("Expected script to be an instance of Script")
scriptString = script.scriptString
script_java = script.script_java
lines = [ int(x) for x in lines ] if lines is not None else [int(-1)]
sc = get_spark_context()
if conf is not None:
hopDAG = sc._jvm.org.apache.sysml.api.mlcontext.MLContextUtil.getHopDAG(ml._ml, script_java, lines, conf._jconf, apply_rewrites, with_subgraph)
else:
hopDAG = sc._jvm.org.apache.sysml.api.mlcontext.MLContextUtil.getHopDAG(ml._ml, script_java, lines, apply_rewrites, with_subgraph)
return hopDAG
def dml(scriptString):
"""
Create a dml script object based on a string.
Parameters
----------
scriptString: string
Can be a path to a dml script or a dml script itself.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(scriptString, str):
raise ValueError("scriptString should be a string, got %s" % type(scriptString))
return Script(scriptString, scriptType="dml")
def dmlFromResource(resourcePath):
"""
Create a dml script object based on a resource path.
Parameters
----------
resourcePath: string
Path to a dml script on the classpath.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(resourcePath, str):
raise ValueError("resourcePath should be a string, got %s" % type(resourcePath))
return Script(resourcePath, scriptType="dml", isResource=True)
def pydml(scriptString):
"""
Create a pydml script object based on a string.
Parameters
----------
scriptString: string
Can be a path to a pydml script or a pydml script itself.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(scriptString, str):
raise ValueError("scriptString should be a string, got %s" % type(scriptString))
return Script(scriptString, scriptType="pydml")
def pydmlFromResource(resourcePath):
"""
Create a pydml script object based on a resource path.
Parameters
----------
resourcePath: string
Path to a pydml script on the classpath.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(resourcePath, str):
raise ValueError("resourcePath should be a string, got %s" % type(resourcePath))
return Script(resourcePath, scriptType="pydml", isResource=True)
def dmlFromFile(filePath):
"""
Create a dml script object based on a file path.
Parameters
----------
filePath: string
Path to a dml script.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(filePath, str):
raise ValueError("filePath should be a string, got %s" % type(filePath))
return Script(filePath, scriptType="dml", isResource=False, scriptFormat="file")
def pydmlFromFile(filePath):
"""
Create a pydml script object based on a file path.
Parameters
----------
filePath: string
Path to a pydml script.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(filePath, str):
raise ValueError("filePath should be a string, got %s" % type(filePath))
return Script(filePath, scriptType="pydml", isResource=False, scriptFormat="file")
def dmlFromUrl(url):
"""
Create a dml script object based on a url.
Parameters
----------
url: string
URL to a dml script.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(url, str):
raise ValueError("url should be a string, got %s" % type(url))
return Script(url, scriptType="dml", isResource=False, scriptFormat="url")
def pydmlFromUrl(url):
"""
Create a pydml script object based on a url.
Parameters
----------
url: string
URL to a pydml script.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(url, str):
raise ValueError("url should be a string, got %s" % type(url))
return Script(url, scriptType="pydml", isResource=False, scriptFormat="url")
def _java2py(sc, obj):
""" Convert Java object to Python. """
# TODO: Port this private PySpark function.
obj = pyspark.mllib.common._java2py(sc, obj)
if isinstance(obj, JavaObject):
class_name = obj.getClass().getSimpleName()
if class_name == 'Matrix':
obj = Matrix(obj, sc)
return obj
def _py2java(sc, obj):
""" Convert Python object to Java. """
if isinstance(obj, SUPPORTED_TYPES):
obj = convertToMatrixBlock(sc, obj)
else:
if isinstance(obj, Matrix):
obj = obj._java_matrix
# TODO: Port this private PySpark function.
obj = pyspark.mllib.common._py2java(sc, obj)
return obj
class Matrix(object):
"""
Wrapper around a Java Matrix object.
Parameters
----------
javaMatrix: JavaObject
A Java Matrix object as returned by calling `ml.execute().get()`.
sc: SparkContext
SparkContext
"""
def __init__(self, javaMatrix, sc):
self._java_matrix = javaMatrix
self._sc = sc
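# --- Hedged usage sketch (not part of the original, truncated file) ---
# The script factories above are normally combined with an MLContext (defined
# further down in this module). The sketch below follows the standard SystemML
# MLContext API; the DML snippet, variable names and DataFrame `df` are made up.
#
#   from systemml import MLContext, dml
#   ml = MLContext(sc)                                   # sc: existing SparkContext
#   script = dml("s = sum(M)").input(M=df).output("s")   # bind input, declare output
#   result = ml.execute(script)
#   print(result.get("s"))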
|
google/or-tools
|
examples/tests/issue46.py
|
Python
|
apache-2.0
| 3,057
| 0
|
#!/usr/bin/env python3
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for issue 46 in or-tools."""
from ortools.constraint_solver import pywrapcp
class AssignToStartMin(pywrapcp.PyDecisionBuilder):
def __init__(self, intervals):
pywrapcp.PyDecisionBuilder.__init__(self)
self.__intervals = interv
|
als
def Next(self, solver):
|
for interval in self.__intervals:
interval.SetStartMax(interval.StartMin())
return None
def DebugString(self):
return 'CustomDecisionBuilder'
def NoSequence():
print('NoSequence')
solver = pywrapcp.Solver('Ordo')
tasks = []
[
tasks.append(
solver.FixedDurationIntervalVar(0, 25, 5, False, 'Tasks%i' % i))
for i in range(3)
]
print(tasks)
disj = solver.DisjunctiveConstraint(tasks, 'Disjunctive')
solver.Add(disj)
collector = solver.AllSolutionCollector()
collector.Add(tasks)
intervalPhase = solver.Phase(tasks, solver.INTERVAL_DEFAULT)
solver.Solve(intervalPhase, [collector])
print(collector.SolutionCount())
for i in range(collector.SolutionCount()):
print("Solution ", i)
print(collector.ObjectiveValue(i))
print([collector.StartValue(i, tasks[j]) for j in range(3)])
print([collector.EndValue(i, tasks[j]) for j in range(3)])
def Sequence():
print('Sequence')
solver = pywrapcp.Solver('Ordo')
tasks = []
[
tasks.append(
solver.FixedDurationIntervalVar(0, 25, 5, False, 'Tasks%i' % i))
for i in range(3)
]
print(tasks)
disj = solver.DisjunctiveConstraint(tasks, 'Disjunctive')
solver.Add(disj)
sequence = []
sequence.append(disj.SequenceVar())
sequence[0].RankFirst(0)
collector = solver.AllSolutionCollector()
collector.Add(sequence)
collector.Add(tasks)
sequencePhase = solver.Phase(sequence, solver.SEQUENCE_DEFAULT)
intervalPhase = AssignToStartMin(tasks)
# intervalPhase = solver.Phase(tasks, solver.INTERVAL_DEFAULT)
mainPhase = solver.Compose([sequencePhase, intervalPhase])
solver.Solve(mainPhase, [collector])
print(collector.SolutionCount())
for i in range(collector.SolutionCount()):
print("Solution ", i)
print(collector.ObjectiveValue(i))
print([collector.StartValue(i, tasks[j]) for j in range(3)])
print([collector.EndValue(i, tasks[j]) for j in range(3)])
def main():
NoSequence()
Sequence()
if __name__ == '__main__':
main()
|
uclouvain/osis
|
attribution/tests/factories/attribution_charge_new.py
|
Python
|
agpl-3.0
| 2,021
| 0.001485
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
#
|
the Free Software Foundation, either version 3 of the License, or
# (at your option) any later vers
|
ion.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import string
import factory.fuzzy
from attribution.tests.factories.attribution_new import AttributionNewFactory
from base.tests.factories.learning_component_year import LearningComponentYearFactory
class AttributionChargeNewFactory(factory.django.DjangoModelFactory):
class Meta:
model = "attribution.AttributionChargeNew"
external_id = factory.fuzzy.FuzzyText(length=10, chars=string.digits)
attribution = factory.SubFactory(
AttributionNewFactory,
learning_container_year=factory.SelfAttribute(
'..learning_component_year.learning_unit_year.learning_container_year'
)
)
learning_component_year = factory.SubFactory(LearningComponentYearFactory)
allocation_charge = 0
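# --- Hedged usage sketch (not part of the original file) ---
# In a Django test the factory creates an AttributionChargeNew together with its
# related attribution and learning component; the SelfAttribute above keeps both
# sides on the same learning_container_year. Field values below are arbitrary.
#
#   charge = AttributionChargeNewFactory(allocation_charge=10)
#   assert (charge.attribution.learning_container_year ==
#           charge.learning_component_year.learning_unit_year.learning_container_year)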
|
martastain/nxtools
|
setup.py
|
Python
|
mit
| 989
| 0.0182
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
|
name = "nxtools",
version = "0.8.4",
author = "Martin Wacker",
author_email = "martas@imm.cz",
description = "Set of common utilities and little helpers.",
license = "MIT",
keywords = "utilities log logging ffmpeg watchfolder media mam time",
url = "https://github.com/immstudios/nxtools",
packages=['nxtools', 'tests', 'nxtools.media', 'nxtools.caspar'],
long_description=read('README.rst'),
classifiers=[
"Development Status :: 5 - Production/Stable",
|
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Topic :: Multimedia :: Video :: Conversion",
"Topic :: Utilities",
],
)
|