code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/python
import re
from sys import argv
from orig import strxor
#Load dictionary for validation
with open('cracklib-small', 'r') as fp:
    # one word per line; a set gives O(1) membership tests in isprintable()
    wordlist = set(fp.read().split('\n'))
def isprintable(inp, dictionary=None):
    """Heuristically decide whether ``inp`` looks like readable English text.

    The string is split on single spaces; it is accepted as soon as one
    stripped word of at least four characters appears in the dictionary.
    A string with no space at all is always rejected (a single dictionary
    word is too weak a signal for crib-dragging).

    :param inp: candidate plaintext fragment
    :param dictionary: optional set of known words; defaults to the
        module-level ``wordlist`` loaded from ``cracklib-small``
    :return: True if the fragment looks like English, else False
    """
    if dictionary is None:
        # fall back to the module-level dictionary for backward compatibility
        dictionary = wordlist
    words = inp.split(' ')
    if len(words) == 1:
        return False
    for word in words:
        word = word.strip()
        # short words (a, to, the...) match too easily; require length >= 4
        if len(word) >= 4 and word in dictionary:
            return True
    return False
def main():
    # NOTE(review): this function is Python 2 only -- it uses `print`
    # statements and str.decode('hex'), neither of which exists in Python 3
    # (use binascii.unhexlify / codecs there).
    # Crib-drag attack on a two-time pad: XORing two ciphertexts encrypted
    # with the same keystream yields p1 XOR p2; XORing a guessed plaintext
    # fragment at each offset reveals the other plaintext when it "looks
    # like English" (isprintable).
    #Ciphertext to be cracked
    ct = "32510ba9babebbbefd001547a810e67149caee11d945cd7fc81a05e9f85aac650e9052ba6a8cd8257bf14d13e6f0a803b54fde9e77472dbff89d71b57bddef121336cb85ccb8f3315f4b52e301d16e9f52f904".decode('hex')
    #Loading the other ciphertexts encrypted with the same key
    with open('inputs', 'r') as fp:
        cts = fp.read().split('\n')
    text = argv[1] #Guessed part of the text
    num = 0
    for case_t in cts:
        # skip blank lines from the trailing newline split
        if not case_t:
            continue
        num += 1
        print 'Case', num, ':'
        case_t = case_t.decode('hex')
        # c1 XOR c2 == p1 XOR p2 (keystream cancels out)
        c1c2 = strxor(ct, case_t)
        # slide the guessed crib across every offset
        for i in range(len(c1c2)):
            res = strxor(text, c1c2[i:i+len(text)])
            if isprintable(res):
                print i, res
        print
if __name__ == '__main__':
    main()
| rkrp/2timepad-breaker | crack.py | Python | gpl-3.0 | 1,264 |
import numpy as np
import fir_filter
import gauss_pulse
import comms_filters
class generic_modulator:

    def __init__(self, modulation_type, samples_per_symbol, pulse_factor, pulse_length, config):
        """Create the generic modulator object and store its parameters.

        Supported modulation types:
            BPSK, GMSK, QPSK, OQPSK, 8PSK, 8APSK, 16APSK,
            32APSK, 64APSK, 128APSK, 256APSK

        :param modulation_type: one of the names listed above
        :param samples_per_symbol: interpolation factor of the pulse filter
        :param pulse_factor: Gaussian BT product (GMSK) or RRC roll-off
        :param pulse_length: pulse filter span in symbols (non-GMSK)
        :param config: LDPC code-rate string selecting the APSK ring radii
        """
        # keep the configuration for use by modulate()
        self.modulation_type = modulation_type
        self.samples_per_symbol = samples_per_symbol
        self.pulse_factor = pulse_factor
        self.pulse_length = pulse_length
        self.config = config

        # (spectral density, period offset) per supported modulation type;
        # the offset marks the half-symbol-staggered schemes (GMSK, OQPSK)
        characteristics = {
            "BPSK":    (2, 0),
            "GMSK":    (2, 1),
            "QPSK":    (4, 0),
            "OQPSK":   (4, 1),
            "8PSK":    (8, 0),
            "8APSK":   (8, 0),
            "16APSK":  (16, 0),
            "32APSK":  (32, 0),
            "64APSK":  (64, 0),
            "128APSK": (128, 0),
            "256APSK": (256, 0),
        }
        try:
            self.spectral_density, self.period_offset = characteristics[modulation_type]
        except KeyError:
            assert False, "Unsupported modulation type supplied."

        # build the pulse-shaping coefficients: Gaussian pulse for GMSK,
        # root-raised-cosine for everything else
        if self.modulation_type == "GMSK":
            self.pulse_coefficients = gauss_pulse.gauss_pulse(
                sps=2 * self.samples_per_symbol,
                BT=self.pulse_factor)
        else:
            self.pulse_coefficients = comms_filters.rrcosfilter(
                N=self.pulse_length * self.samples_per_symbol,
                alpha=self.pulse_factor,
                Ts=1,
                Fs=self.samples_per_symbol)[1]

        # normalise the pulse energy
        pulse_energy = np.sum(np.square(abs(self.pulse_coefficients))) / self.samples_per_symbol
        self.pulse_coefficients = [tap / pulse_energy for tap in self.pulse_coefficients]
        # repeat the first tap at the end of the filter
        self.pulse_coefficients = np.append(self.pulse_coefficients, self.pulse_coefficients[0])
    def modulate(self, data, carrier_phase_offset):
        """ Modulate the supplied data with the previously setup modulator """
        # data: sequence of bits (0/1); carrier_phase_offset: constant phase
        # rotation in radians applied to the final waveform
        # deinterleave, convert to NRZ and interpolate
        if self.modulation_type == "BPSK":
            # determine the number of samples
            number_of_bits = len(data)
            number_of_samples = number_of_bits*self.samples_per_symbol
            # prepopulate the output vectors
            i_data = np.zeros(number_of_samples)
            q_data = np.zeros(number_of_samples)
            # loop through each sample modulate the in-phase arm
            # (one NRZ impulse per symbol; the pulse filter at the end of
            # this method performs the interpolation)
            for n in range(number_of_bits):
                i_data[n*self.samples_per_symbol] = 2*data[n]-1
            # the quadrature arm is all zeros
            # NOTE(review): q_data is already all zeros from np.zeros() above;
            # this reassignment is redundant but harmless
            q_data = np.zeros(number_of_samples)
# essentially OQPSK with half the frequency
elif self.modulation_type == "GMSK":
# determine the number of samples
number_of_bits = len(data)
number_of_samples = number_of_bits*self.samples_per_symbol
# prepopulate the output vectors
i_data = np.zeros(number_of_samples)
q_data = np.zeros(number_of_samples)
# modulate two bit period with data
for n in range(number_of_bits/2):
i_data[2*n*self.samples_per_symbol] = 2*data[2*n]-1
# module two bit periods offset by a bit period with data
for n in range(number_of_bits/2-1):
q_data[2*n*self.samples_per_symbol + self.samples_per_symbol/2] = 2*data[2*n+1]-1
# map the signal to four constellation points on the complex plane
elif self.modulation_type == "QPSK":
# determine the number of samples
number_of_bits = len(data)
number_of_samples = number_of_bits*self.samples_per_symbol/2
# prepopulate the output vectors
i_data = np.zeros(number_of_samples)
q_data = np.zeros(number_of_samples)
# map every odd bit to the in-phase arm
for n in range(number_of_bits/2):
i_data[n*self.samples_per_symbol] = 2*data[2*n]-1
# map every even bit to the quadarature arm
for n in range(number_of_bits/2):
q_data[n*self.samples_per_symbol] = 2*data[2*n+1]-1
# like QPSK with a half bit period offset on the quadarature arm
elif self.modulation_type == "OQPSK":
# determine the number of samples
number_of_bits = len(data)
number_of_samples = number_of_bits*self.samples_per_symbol/2
# prepopulate the output vectors
i_data = np.zeros(number_of_samples)
q_data = np.zeros(number_of_samples)
# map every odd bit to the in-phase arm
for n in range(number_of_bits/2):
i_data[n*self.samples_per_symbol] = 2*data[2*n]-1
# map every even bit to the quadarature arm with a half bit period offset
for n in range(number_of_bits/2-1):
q_data[n*self.samples_per_symbol + self.samples_per_symbol/2] = 2*data[2*n+1]-1
        # split three bits across a even eight point on the circle
        # according to EN 302 307-1
        elif self.modulation_type == "8PSK":
            # determine the number of samples
            bits_per_symbol = 3
            number_of_bits = len(data)
            number_of_samples = int(np.ceil(number_of_bits*self.samples_per_symbol/bits_per_symbol))
            # prepopulate the output vectors
            i_data = np.zeros(number_of_samples)
            q_data = np.zeros(number_of_samples)
            # set the bit mapping table: [radius, phase (rad)] per 3-bit value
            bit_map = [[1, np.pi/4],
                       [1, 0],
                       [1, 4*np.pi/4],
                       [1, 5*np.pi/4],
                       [1, 2*np.pi/4],
                       [1, 7*np.pi/4],
                       [1, 3*np.pi/4],
                       [1, 6*np.pi/4]]
            # loop through all data
            # NOTE(review): the ceil assumes len(data) is a multiple of 3;
            # otherwise data[bits_per_symbol*n + i] overruns -- TODO confirm
            # callers always supply whole symbols
            for n in range(int(np.ceil(number_of_bits/bits_per_symbol))):
                # combine three bits and map to a complex symbol
                # (bit i is weighted 2**i, i.e. LSB-first packing)
                symbol_int = 0
                for i in range(bits_per_symbol):
                    symbol_int += 2**i * data[bits_per_symbol*n + i]
                symbol = bit_map[symbol_int][0] * np.exp(1j*bit_map[symbol_int][1])
                # break apart the complex symbol to inphase and quadrature arms
                i_data[n*self.samples_per_symbol] = np.real(symbol)
                q_data[n*self.samples_per_symbol] = np.imag(symbol)
        # split three bits across a complex amplitude and phase mapping
        elif self.modulation_type == "8APSK":
            # determine the number of samples
            bits_per_symbol = 3
            number_of_bits = len(data)
            number_of_samples = int(np.ceil(number_of_bits*self.samples_per_symbol/bits_per_symbol))
            # prepopulate the output vector
            i_data = np.zeros(number_of_samples)
            q_data = np.zeros(number_of_samples)
            # different mapping for different LDPC codes
            # calculate the symbol radiuses (normalised to the outer ring)
            if self.config == "100/180":
                R1 = 1.0/6.8
                R2 = 5.32/6.8
            elif self.config == "104/180":
                R1 = 1.0/8.0
                R2 = 6.39/8.0
            else:
                # fall back to a sane default rather than failing outright
                print("No LDPC code specified. Using 100/180")
                R1 = 1.0/6.8
                R2 = 5.32/6.8
            # set the bit mapping table: [radius, phase (rad)] per 3-bit value
            bit_map = [[R1, 0],
                       [R2, 1.352*np.pi],
                       [R2, 0.648*np.pi],
                       [1.0, 0],
                       [R1, np.pi],
                       [R2, -0.352*np.pi],
                       [R2, 0.352*np.pi],
                       [1.0, np.pi]]
            # loop through all data
            # NOTE(review): assumes len(data) is a multiple of 3 -- see 8PSK
            for n in range(int(np.ceil(number_of_bits/bits_per_symbol))):
                # combine three bits (LSB-first) and map to a complex symbol
                symbol_int = 0
                for i in range(bits_per_symbol):
                    symbol_int += 2**i * data[bits_per_symbol*n + i]
                symbol = bit_map[symbol_int][0] * np.exp(1j*bit_map[symbol_int][1])
                # break apart the complex symbol to inphase and quadrature arms
                i_data[n*self.samples_per_symbol] = np.real(symbol)
                q_data[n*self.samples_per_symbol] = np.imag(symbol)
# split four bits across a complex amplitudde and phase mapping
elif self.modulation_type == "16APSK":
# determine the number of samples
bits_per_symbol = 4
number_of_bits = len(data)
number_of_samples = int(np.ceil(number_of_bits*self.samples_per_symbol/bits_per_symbol))
# prepopulate the output vectors
i_data = np.zeros(number_of_samples)
q_data = np.zeros(number_of_samples)
# for some codes the mapping is performed with a lookup table
if self.config in ["18/30", "20/30"]:
if self.config == "18/30":
# map values to symbols on the complex plane
bit_map = [0.4718 + 0.2606*1j,
0.2606 + 0.4718*1j,
-0.4718 + 0.2606*1j,
-0.2606 + 0.4718*1j,
0.4718 - 0.2606*1j,
0.2606 - 0.4718*1j,
-0.4718 - 0.2606*1j,
-0.2606 - 0.4718*1j,
1.2088 + 0.4984*1j,
0.4984 + 1.2088*1j,
-1.2088 + 0.4984*1j,
-0.4984 + 1.2088*1j,
1.2088 - 0.4984*1j,
0.4984 - 1.2088*1j,
-1.2088 - 0.4984*1j,
-0.4984 - 1.2088*1j]
elif self.config == "20/30":
# map values to symbols on the complex plane
bit_map = [0.5061 + 0.2474*1j,
0.2474 + 0.5061*1j,
-0.5061 + 0.2474*1j,
-0.2474 + 0.5061*1j,
0.5061 - 0.2474*1j,
0.2474 - 0.5061*1j,
-0.5061 - 0.2474*1j,
-0.2474 - 0.5061*1j,
1.2007 + 0.4909*1j,
0.4909 + 1.2007*1j,
-1.2007 + 0.4909*1j,
-0.4909 + 1.2007*1j,
1.2007 - 0.4909*1j,
0.4909 - 1.2007*1j,
-1.2007 - 0.4909*1j,
-0.4909 - 1.2007*1j]
# loop through all data
for n in range(int(np.ceil(number_of_bits/bits_per_symbol))):
# combine three bits and map to a complex symbol
symbol_int = 0
for i in range(bits_per_symbol):
symbol_int += 2**i * data[bits_per_symbol*n + i]
symbol = bit_map[symbol_int]
# break apart the complex symbol to inphase and quadrature arms
i_data[n*self.samples_per_symbol] = np.real(symbol)
q_data[n*self.samples_per_symbol] = np.imag(symbol)
else:
# 8 + 8 modulation
if self.config in ["90/180", "96/180", "100/180"]:
# all of these codes use the same R1 radius
R1 = 1.0/3.7
# set the bit mapping table
bit_map = [[R1, 1*np.pi/8],
[R1, 3*np.pi/8],
[R1, 7*np.pi/8],
[R1, 5*np.pi/8],
[R1, 15*np.pi/8],
[R1, 13*np.pi/8],
[R1, 9*np.pi/8],
[R1, 11*np.pi/8],
[1.0, 1*np.pi/8],
[1.0, 3*np.pi/8],
[1.0, 7*np.pi/8],
[1.0, 5*np.pi/8],
[1.0, 15*np.pi/8],
[1.0, 13*np.pi/8],
[1.0, 9*np.pi/8],
[1.0, 11*np.pi/8]]
# 4 + 12 modulation
else:
# different mapping for different LDPC codes
# calculate the symbol radiuses
if self.config == "26/45":
R1 = 1.0/3.7
elif self.config == "3/5":
R1 = 1.0/3.7
elif self.config == "28/45":
R1 = 1.0/3.5
elif self.config == "23/36":
R1 = 1.0/3.1
elif self.config == "25/36":
R1 = 1.0/3.1
elif self.config == "13/18":
R1 = 1.0/2.85
elif self.config == "140/180":
R1 = 1.0/3.6
elif self.config == "154/180":
R1 = 1.0/3.2
elif self.config == "7/15":
R1 = 1.0/3.32
elif self.config == "8/15":
R1 = 1.0/3.5
elif self.config == "26/45":
R1 = 1.0/3.7
elif self.config == "3/5":
R1 = 1.0/3.7
elif self.config == "32/45":
R1 = 1.0/2.85
else:
print("No LDPC code specified. Using 3/5")
R1 = 1.0/3.7
# set the bit mapping table
bit_map = [[1.0, 3*np.pi/12],
[1.0, 21*np.pi/12],
[1.0, 9*np.pi/12],
[1.0, 15*np.pi/12],
[1.0, 1*np.pi/12],
[1.0, 23*np.pi/12],
[1.0, 11*np.pi/12],
[1.0, 13*np.pi/12],
[1.0, 5*np.pi/12],
[1.0, 19*np.pi/12],
[1.0, 7*np.pi/12],
[1.0, 17*np.pi/12],
[R1, 3*np.pi/12],
[R1, 21*np.pi/12],
[R1, 9*np.pi/12],
[R1, 15*np.pi/12]]
# loop through all data
for n in range(int(np.ceil(number_of_bits/bits_per_symbol))):
# combine three bits and map to a complex symbol
symbol_int = 0
for i in range(bits_per_symbol):
symbol_int += 2**i * data[bits_per_symbol*n + i]
symbol = bit_map[symbol_int][0] * np.exp(1j*bit_map[symbol_int][1])
# break apart the complex symbol to inphase and quadrature arms
i_data[n*self.samples_per_symbol] = np.real(symbol)
q_data[n*self.samples_per_symbol] = np.imag(symbol)
        # split five bits across a complex amplitude and phase mapping
        elif self.modulation_type == "32APSK":
            # determine the number of samples
            bits_per_symbol = 5
            number_of_bits = len(data)
            number_of_samples = int(np.ceil(number_of_bits*self.samples_per_symbol/bits_per_symbol))
            # prepopulate the output vectors
            i_data = np.zeros(number_of_samples)
            q_data = np.zeros(number_of_samples)
            if self.config in ["2/3", "2/3S", "32/45S"]:
                # different mapping for different LDPC codes
                # calculate the symbol radiuses (normalised to the outer ring)
                if self.config == "2/3":
                    R1 = 1.0/5.55
                    R2 = 2.85/5.55
                elif self.config == "2/3S":
                    R1 = 1.0/5.54
                    R2 = 2.84/5.54
                elif self.config == "32/45S":
                    R1 = 1.0/5.26
                    R2 = 2.84/5.26
                # set the bit mapping table: [radius, phase (rad)];
                # outer ring angles are k*pi/16, middle ring k*pi/12
                bit_map = [[1, 11*np.pi/16],
                           [1, 9*np.pi/16],
                           [1, 5*np.pi/16],
                           [1, 7*np.pi/16],
                           [R2, 9*np.pi/12],
                           [R2, 7*np.pi/12],
                           [R2, 3*np.pi/12],
                           [R2, 5*np.pi/12],
                           [1, 13*np.pi/16],
                           [1, 15*np.pi/16],
                           [1, 3*np.pi/16],
                           [1, 1*np.pi/16],
                           [R2, 11*np.pi/12],
                           [R1, 3*np.pi/4],
                           [R2, 1*np.pi/12],
                           [R1, 1*np.pi/4],
                           [1, 21*np.pi/16],
                           [1, 23*np.pi/16],
                           [1, 27*np.pi/16],
                           [1, 25*np.pi/16],
                           [R2, 15*np.pi/12],
                           [R2, 17*np.pi/12],
                           [R2, 21*np.pi/12],
                           [R2, 19*np.pi/12],
                           [1, 19*np.pi/16],
                           [1, 17*np.pi/16],
                           [1, 29*np.pi/16],
                           [1, 31*np.pi/16],
                           [R2, 13*np.pi/12],
                           [R1, 5*np.pi/4],
                           [R2, 23*np.pi/12],
                           [R1, 7*np.pi/4]]
            else:
                # different mapping for different LDPC codes
                # calculate the symbol radiuses
                if self.config == "128/180":
                    R1 = 1.0/5.6
                    R2 = 2.6/5.6
                    R3 = 2.99/5.6
                elif self.config == "132/180":
                    R1 = 1/5.6
                    R2 = 2.6/5.6
                    R3 = 2.86/5.6
                elif self.config == "140/180":
                    R1 = 1/5.6
                    R2 = 2.8/5.6
                    R3 = 3.08/5.6
                else:
                    # fall back to a sane default rather than failing outright
                    print("No LDPC code specified. Using 128/180")
                    R1 = 1/5.6
                    R2 = 2.6/5.6
                    R3 = 2.99/5.6
                # set the bit mapping table: [radius, phase (rad)]
                bit_map = [[R1, 1*np.pi/4],
                           [1.0, 7*np.pi/16],
                           [R1, 7*np.pi/4],
                           [1.0, 25*np.pi/16],
                           [R1, 3*np.pi/4],
                           [1.0, 9*np.pi/16],
                           [R1, 5*np.pi/4],
                           [1.0, 23*np.pi/16],
                           [R2, 1*np.pi/12],
                           [1.0, 1*np.pi/16],
                           [R2, 23*np.pi/12],
                           [1.0, 31*np.pi/16],
                           [R2, 11*np.pi/12],
                           [1.0, 15*np.pi/16],
                           [R2, 13*np.pi/12],
                           [1.0, 17*np.pi/16],
                           [R2, 5*np.pi/12],
                           [1.0, 5*np.pi/16],
                           [R2, 19*np.pi/12],
                           [1.0, 27*np.pi/16],
                           [R2, 7*np.pi/12],
                           [1.0, 11*np.pi/16],
                           [R2, 17*np.pi/12],
                           [1.0, 21*np.pi/16],
                           [R3, 1*np.pi/4],
                           [1.0, 3*np.pi/16],
                           [R3, 7*np.pi/4],
                           [1.0, 29*np.pi/16],
                           [R3, 3*np.pi/4],
                           [1.0, 13*np.pi/16],
                           [R3, 5*np.pi/4],
                           [1.0, 19*np.pi/16]]
            # loop through all data
            # NOTE(review): assumes len(data) is a multiple of 5 -- see 8PSK
            for n in range(int(np.ceil(number_of_bits/bits_per_symbol))):
                # combine five bits (LSB-first) and map to a complex symbol
                symbol_int = 0
                for i in range(bits_per_symbol):
                    symbol_int += 2**i * data[bits_per_symbol*n + i]
                symbol = bit_map[symbol_int][0] * np.exp(1j*bit_map[symbol_int][1])
                # break apart the complex symbol to inphase and quadrature arms
                i_data[n*self.samples_per_symbol] = np.real(symbol)
                q_data[n*self.samples_per_symbol] = np.imag(symbol)
# split six bits across a complex amplitudde and phase mapping
elif self.modulation_type == "64APSK":
# determine the number of samples
bits_per_symbol = 6
number_of_bits = len(data)
number_of_samples = int(np.ceil(number_of_bits*self.samples_per_symbol/bits_per_symbol))
# prepopulate the output vectors
i_data = np.zeros(number_of_samples)
q_data = np.zeros(number_of_samples)
if self.config in ["128/180"]:
# different mapping for different LDPC codes
# calculate the symbol radiuses
R1 = 1.0/3.95
R2 = 1.88/3.95
R3 = 2.72/3.95
R4 = 1.0
# set the bit mapping table
bit_map = [R1, 1*np.pi/16,
R1, 3*np.pi/16,
R1, 7*np.pi/16,
R1, 5*np.pi/16,
R1, 15*np.pi/16,
R1, 13*np.pi/16,
R1, 9*np.pi/16,
R1, 11*np.pi/16,
R1, 31*np.pi/16,
R1, 29*np.pi/16,
R1, 25*np.pi/16,
R1, 27*np.pi/16,
R1, 17*np.pi/16,
R1, 19*np.pi/16,
R1, 23*np.pi/16,
R1, 21*np.pi/16,
R2, 1*np.pi/16,
R2, 3*np.pi/16,
R2, 7*np.pi/16,
R2, 5*np.pi/16,
R2, 15*np.pi/16,
R2, 13*np.pi/16,
R2, 9*np.pi/16,
R2, 11*np.pi/16,
R2, 31*np.pi/16,
R2, 29*np.pi/16,
R2, 25*np.pi/16,
R2, 27*np.pi/16,
R2, 17*np.pi/16,
R2, 19*np.pi/16,
R2, 23*np.pi/16,
R2, 21*np.pi/16,
R4, 1*np.pi/16,
R4, 3*np.pi/16,
R4, 7*np.pi/16,
R4, 5*np.pi/16,
R4, 15*np.pi/16,
R4, 13*np.pi/16,
R4, 9*np.pi/16,
R4, 11*np.pi/16,
R4, 31*np.pi/16,
R4, 29*np.pi/16,
R4, 25*np.pi/16,
R4, 27*np.pi/16,
R4, 17*np.pi/16,
R4, 19*np.pi/16,
R4, 23*np.pi/16,
R4, 21*np.pi/16,
R3, 1*np.pi/16,
R3, 3*np.pi/16,
R3, 7*np.pi/16,
R3, 5*np.pi/16,
R3, 15*np.pi/16,
R3, 13*np.pi/16,
R3, 9*np.pi/16,
R3, 11*np.pi/16,
R3, 31*np.pi/16,
R3, 29*np.pi/16,
R3, 25*np.pi/16,
R3, 27*np.pi/16,
R3, 17*np.pi/16,
R3, 19*np.pi/16,
R3, 23*np.pi/16,
R3, 21*np.pi/16]
elif self.config in ["7/9", "4/5", "5/6"]:
# different mapping for different LDPC codes
# calculate the symbol radiuses
if self.config == "7/9":
R1 = 1.0/5.2
R2 = 2.2/5.2
R3 = 3.6/5.2
R4 = 1.0
elif self.config == "4/5":
R1 = 1.0/5.2
R2 = 2.2/5.2
R3 = 3.6/5.2
R4 = 1.0
elif self.config == "5/6":
R1 = 1.0/5.0
R2 = 2.2/5.0
R3 = 3.5/5.0
R4 = 1.0
# set the bit mapping table
bit_map = [R2, 25*np.pi/16,
R4, 7*np.pi/4,
R2, 27*np.pi/16,
R3, 7*np.pi/4,
R4, 31*np.pi/20,
R4, 33*np.pi/20,
R3, 31*np.pi/20,
R3, 33*np.pi/20,
R2, 23*np.pi/16,
R4, 5*np.pi/4,
R2, 21*np.pi/16,
R3, 5*np.pi/4,
R4, 29*np.pi/20,
R4, 27*np.pi/20,
R3, 29*np.pi/20,
R3, 27*np.pi/20,
R1, 13*np.pi/8,
R4, 37*np.pi/20,
R2, 29*np.pi/16,
R3, 37*np.pi/20,
R1, 15*np.pi/8,
R4, 39*np.pi/20,
R2, 31*np.pi/16,
R3, 39*np.pi/20,
R1, 11*np.pi/8,
R4, 23*np.pi/20,
R2, 19*np.pi/16,
R3, 23*np.pi/20,
R1, 9*np.pi/8,
R4, 21*np.pi/20,
R2, 17*np.pi/16,
R3, 21*np.pi/20,
R2, 7*np.pi/6,
R4, 1*np.pi/4,
R2, 5*np.pi/6,
R3, 1*np.pi/4,
R4, 9*np.pi/0,
R4, 7*np.pi/0,
R3, 9*np.pi/0,
R3, 7*np.pi/0,
R2, 9*np.pi/6,
R4, 3*np.pi/4,
R2, 11*np.pi/16,
R3, 3*np.pi/4,
R4, 11*np.pi/20,
R4, 13*np.pi/20,
R3, 11*np.pi/20,
R3, 13*np.pi/20,
R1, 3*np.pi/8,
R4, 3*np.pi/0,
R2, 3*np.pi/6,
R3, 3*np.pi/0,
R1, 1*np.pi/8,
R4, 1*np.pi/0,
R2, 1*np.pi/6,
R3, 1*np.pi/0,
R1, 5*np.pi/8,
R4, 17*np.pi/20,
R2, 13*np.pi/16,
R3, 17*np.pi/20,
R1, 7*np.pi/8,
R4, 19*np.pi/20,
R2, 15*np.pi/16,
R3, 19*np.pi/20]
elif self.config in ["132/180"]:
# different mapping for different LDPC codes
# calculate the symbol radiuses
R1 = 1.0/7.0
R2 = 2.4/7.0
R3 = 4.3/7.0
R4 = 1.0
# set the bit mapping table
bit_map = [R4, 1*np.pi/4,
R4, 7*np.pi/4,
R4, 3*np.pi/4,
R4, 5*np.pi/4,
R4, 13*np.pi/28,
R4, 43*np.pi/28,
R4, 15*np.pi/28,
R4, 41*np.pi/28,
R4, 1*np.pi/8,
R4, 55*np.pi/28,
R4, 27*np.pi/28,
R4, 29*np.pi/28,
R1, 1*np.pi/4,
R1, 7*np.pi/4,
R1, 3*np.pi/4,
R1, 5*np.pi/4,
R4, 9*np.pi/8,
R4, 47*np.pi/28,
R4, 19*np.pi/28,
R4, 37*np.pi/28,
R4, 11*np.pi/28,
R4, 45*np.pi/28,
R4, 17*np.pi/28,
R4, 39*np.pi/28,
R3, 1*np.pi/0,
R3, 39*np.pi/20,
R3, 19*np.pi/20,
R3, 21*np.pi/20,
R2, 1*np.pi/2,
R2, 23*np.pi/12,
R2, 11*np.pi/12,
R2, 13*np.pi/12,
R4, 5*np.pi/8,
R4, 51*np.pi/28,
R4, 23*np.pi/28,
R4, 33*np.pi/28,
R3, 9*np.pi/0,
R3, 31*np.pi/20,
R3, 11*np.pi/20,
R3, 29*np.pi/20,
R4, 3*np.pi/8,
R4, 53*np.pi/28,
R4, 25*np.pi/28,
R4, 31*np.pi/28,
R2, 9*np.pi/0,
R2, 19*np.pi/12,
R2, 7*np.pi/2,
R2, 17*np.pi/12,
R3, 1*np.pi/4,
R3, 7*np.pi/4,
R3, 3*np.pi/4,
R3, 5*np.pi/4,
R3, 7*np.pi/0,
R3, 33*np.pi/20,
R3, 13*np.pi/20,
R3, 27*np.pi/20,
R3, 3*np.pi/0,
R3, 37*np.pi/20,
R3, 17*np.pi/20,
R3, 23*np.pi/20,
R2, 1*np.pi/4,
R2, 7*np.pi/4,
R2, 3*np.pi/4,
R2, 5*np.pi/4]
# loop through all data
for n in range(int(np.ceil(number_of_bits/bits_per_symbol))):
# combine three bits and map to a complex symbol
symbol_int = 0
for i in range(bits_per_symbol):
symbol_int += 2**i * data[bits_per_symbol*n + i]
symbol = bit_map[symbol_int][0] * np.exp(1j*bit_map[symbol_int][1])
# break apart the complex symbol to inphase and quadrature arms
i_data[n*self.samples_per_symbol] = np.real(symbol)
q_data[n*self.samples_per_symbol] = np.imag(symbol)
# split seven bits across a complex amplitudde and phase mapping
elif self.modulation_type == "128APSK":
# determine the number of samples
bits_per_symbol = 7
number_of_bits = len(data)
number_of_samples = int(np.ceil(number_of_bits*self.samples_per_symbol/bits_per_symbol))
# prepopulate the output vectors
i_data = np.zeros(number_of_samples)
q_data = np.zeros(number_of_samples)
# select the LDPC codes
if self.config in ["135/180", "140/180"]:
# different mapping for different LDPC codes
# calculate the symbol radiuses
R1 = 1.0/3.819
R2 = 1.715/3.819
R3 = 2.118/3.819
R4 = 2.681/3.819
R5 = 2.75/3.819
R6 = 1.0
# set the bit mapping table
bit_map = [R1, 83*np.pi/60,
R6, 11*np.pi/05,
R6, 37*np.pi/80,
R6, 11*np.pi/68,
R2, 121*np.pi/520,
R3, 23*np.pi/80,
R5, 19*np.pi/20,
R4, 61*np.pi/20,
R1, 103*np.pi/560,
R6, 61*np.pi/20,
R6, 383*np.pi/680,
R6, 61*np.pi/20,
R2, 113*np.pi/560,
R3, 169*np.pi/008,
R5, 563*np.pi/520,
R4, 139*np.pi/840,
R1, 243*np.pi/560,
R6, 1993*np.pi/5040,
R6, 43*np.pi/90,
R6, 73*np.pi/68,
R2, 1139*np.pi/2520,
R3, 117*np.pi/280,
R5, 341*np.pi/720,
R4, 349*np.pi/840,
R1, 177*np.pi/560,
R6, 1789*np.pi/5040,
R6, 49*np.pi/80,
R6, 1789*np.pi/5040,
R2, 167*np.pi/560,
R3, 239*np.pi/720,
R5, 199*np.pi/720,
R4, 281*np.pi/840,
R1, 1177*np.pi/1260,
R6, 94*np.pi/05,
R6, 1643*np.pi/1680,
R6, 157*np.pi/168,
R2, 2399*np.pi/2520,
R3, 257*np.pi/280,
R5, 701*np.pi/720,
R4, 659*np.pi/720,
R1, 457*np.pi/560,
R6, 359*np.pi/420,
R6, 1297*np.pi/1680,
R6, 4111*np.pi/5040,
R2, 447*np.pi/560,
R3, 839*np.pi/008,
R5, 1957*np.pi/2520,
R4, 701*np.pi/840,
R1, 317*np.pi/560,
R6, 3047*np.pi/5040,
R6, 47*np.pi/90,
R6, 95*np.pi/68,
R2, 1381*np.pi/2520,
R3, 163*np.pi/280,
R5, 379*np.pi/720,
R4, 491*np.pi/840,
R1, 383*np.pi/560,
R6, 3251*np.pi/5040,
R6, 131*np.pi/180,
R6, 115*np.pi/168,
R2, 393*np.pi/560,
R3, 481*np.pi/720,
R5, 521*np.pi/720,
R4, 559*np.pi/840,
R1, 2437*np.pi/1260,
R6, 199*np.pi/105,
R6, 3323*np.pi/1680,
R6, 325*np.pi/168,
R2, 4919*np.pi/2520,
R3, 537*np.pi/280,
R5, 1421*np.pi/720,
R4, 1379*np.pi/720,
R1, 1017*np.pi/560,
R6, 779*np.pi/420,
R6, 2977*np.pi/1680,
R6, 9151*np.pi/5040,
R2, 1007*np.pi/560,
R3, 1847*np.pi/1008,
R5, 4477*np.pi/2520,
R4, 1541*np.pi/840,
R1, 877*np.pi/560,
R6, 8087*np.pi/5040,
R6, 137*np.pi/90,
R6, 263*np.pi/168,
R2, 3901*np.pi/2520,
R3, 443*np.pi/280,
R5, 1099*np.pi/720,
R4, 1331*np.pi/840,
R1, 943*np.pi/560,
R6, 8291*np.pi/5040,
R6, 311*np.pi/180,
R6, 283*np.pi/168,
R2, 953*np.pi/560,
R3, 1201*np.pi/720,
R5, 1241*np.pi/720,
R4, 1399*np.pi/840,
R1, 1343*np.pi/1260,
R6, 116*np.pi/105,
R6, 1717*np.pi/1680,
R6, 179*np.pi/168,
R2, 2641*np.pi/2520,
R3, 303*np.pi/280,
R5, 739*np.pi/720,
R4, 781*np.pi/720,
R1, 663*np.pi/560,
R6, 481*np.pi/420,
R6, 2063*np.pi/1680,
R6, 5969*np.pi/5040,
R2, 673*np.pi/560,
R3, 1177*np.pi/1008,
R5, 3083*np.pi/2520,
R4, 979*np.pi/840,
R1, 803*np.pi/560,
R6, 7033*np.pi/5040,
R6, 133*np.pi/90,
R6, 241*np.pi/168,
R2, 3659*np.pi/2520,
R3, 397*np.pi/280,
R5, 1061*np.pi/720,
R4, 1189*np.pi/840,
R1, 737*np.pi/560,
R6, 6829*np.pi/5040,
R6, 229*np.pi/180,
R6, 221*np.pi/168,
R2, 727*np.pi/560,
R3, 959*np.pi/720,
R5, 919*np.pi/720,
R4, 1121*np.pi/840]
# loop through all data
for n in range(int(np.ceil(number_of_bits/bits_per_symbol))):
# combine three bits and map to a complex symbol
symbol_int = 0
for i in range(bits_per_symbol):
symbol_int += 2**i * data[bits_per_symbol*n + i]
symbol = bit_map[symbol_int][0] * np.exp(1j*bit_map[symbol_int][1])
# break apart the complex symbol to inphase and quadrature arms
i_data[n*self.samples_per_symbol] = np.real(symbol)
q_data[n*self.samples_per_symbol] = np.imag(symbol)
# split eight bits across a complex amplitudde and phase mapping
elif self.modulation_type == "256APSK":
# determine the number of samples
bits_per_symbol = 8
number_of_bits = len(data)
number_of_samples = int(np.ceil(number_of_bits*self.samples_per_symbol/bits_per_symbol))
# prepopulate the output vectors
i_data = np.zeros(number_of_samples)
q_data = np.zeros(number_of_samples)
# select the coding based on the LDPC code
if self.config in ["116/180", "124/180", "128/180", "135/180"]:
# different mapping for different LDPC codes
# calculate the symbol radiuses
if self.config in ["116/180", "124/180"]:
R1 = 1.0/6.536
R2 = 1.791/6.536
R3 = 2.405/6.536
R4 = 2.980/6.536
R5 = 3.569/6.536
R6 = 4.235/6.536
R7 = 5.078/6.536
R8 = 1.0
elif self.config == "128/180":
R1 = 1.0/5.4
R2 = 1.794/5.4
R3 = 2.409/5.4
R4 = 2.986/5.4
R5 = 3.579/5.4
R6 = 4.045/5.4
R7 = 4.6/5.4
R8 = 1.0
else:
R1 = 1.0/5.2
R2 = 1.794/5.2
R3 = 2.409/5.2
R4 = 2.986/5.2
R5 = 3.579/5.2
R6 = 4.045/5.2
R7 = 4.5/5.2
R8 = 1.0
# set the bit mapping table
bit_map = [R1, 1*np.pi/32,
R1, 3*np.pi/32,
R1, 7*np.pi/32,
R1, 5*np.pi/32,
R1, 15*np.pi/32,
R1, 13*np.pi/32,
R1, 9*np.pi/32,
R1, 11*np.pi/32,
R1, 31*np.pi/32,
R1, 29*np.pi/32,
R1, 25*np.pi/32,
R1, 27*np.pi/32,
R1, 17*np.pi/32,
R1, 19*np.pi/32,
R1, 23*np.pi/32,
R1, 21*np.pi/32,
R1, 63*np.pi/32,
R1, 61*np.pi/32,
R1, 57*np.pi/32,
R1, 59*np.pi/32,
R1, 49*np.pi/32,
R1, 51*np.pi/32,
R1, 55*np.pi/32,
R1, 53*np.pi/32,
R1, 33*np.pi/32,
R1, 35*np.pi/32,
R1, 39*np.pi/32,
R1, 37*np.pi/32,
R1, 47*np.pi/32,
R1, 45*np.pi/32,
R1, 41*np.pi/32,
R1, 43*np.pi/32,
R2, 1*np.pi/32,
R2, 3*np.pi/32,
R2, 7*np.pi/32,
R2, 5*np.pi/32,
R2, 15*np.pi/32,
R2, 13*np.pi/32,
R2, 9*np.pi/32,
R2, 11*np.pi/32,
R2, 31*np.pi/32,
R2, 29*np.pi/32,
R2, 25*np.pi/32,
R2, 27*np.pi/32,
R2, 17*np.pi/32,
R2, 19*np.pi/32,
R2, 23*np.pi/32,
R2, 21*np.pi/32,
R2, 63*np.pi/32,
R2, 61*np.pi/32,
R2, 57*np.pi/32,
R2, 59*np.pi/32,
R2, 49*np.pi/32,
R2, 51*np.pi/32,
R2, 55*np.pi/32,
R2, 53*np.pi/32,
R2, 33*np.pi/32,
R2, 35*np.pi/32,
R2, 39*np.pi/32,
R2, 37*np.pi/32,
R2, 47*np.pi/32,
R2, 45*np.pi/32,
R2, 41*np.pi/32,
R2, 43*np.pi/32,
R4, 1*np.pi/32,
R4, 3*np.pi/32,
R4, 7*np.pi/32,
R4, 5*np.pi/32,
R4, 15*np.pi/32,
R4, 13*np.pi/32,
R4, 9*np.pi/32,
R4, 11*np.pi/32,
R4, 31*np.pi/32,
R4, 29*np.pi/32,
R4, 25*np.pi/32,
R4, 27*np.pi/32,
R4, 17*np.pi/32,
R4, 19*np.pi/32,
R4, 23*np.pi/32,
R4, 21*np.pi/32,
R4, 63*np.pi/32,
R4, 61*np.pi/32,
R4, 57*np.pi/32,
R4, 59*np.pi/32,
R4, 49*np.pi/32,
R4, 51*np.pi/32,
R4, 55*np.pi/32,
R4, 53*np.pi/32,
R4, 33*np.pi/32,
R4, 35*np.pi/32,
R4, 39*np.pi/32,
R4, 37*np.pi/32,
R4, 47*np.pi/32,
R4, 45*np.pi/32,
R4, 41*np.pi/32,
R4, 43*np.pi/32,
R3, 1*np.pi/32,
R3, 3*np.pi/32,
R3, 7*np.pi/32,
R3, 5*np.pi/32,
R3, 15*np.pi/32,
R3, 13*np.pi/32,
R3, 9*np.pi/32,
R3, 11*np.pi/32,
R3, 31*np.pi/32,
R3, 29*np.pi/32,
R3, 25*np.pi/32,
R3, 27*np.pi/32,
R3, 17*np.pi/32,
R3, 19*np.pi/32,
R3, 23*np.pi/32,
R3, 21*np.pi/32,
R3, 63*np.pi/32,
R3, 61*np.pi/32,
R3, 57*np.pi/32,
R3, 59*np.pi/32,
R3, 49*np.pi/32,
R3, 51*np.pi/32,
R3, 55*np.pi/32,
R3, 53*np.pi/32,
R3, 33*np.pi/32,
R3, 35*np.pi/32,
R3, 39*np.pi/32,
R3, 37*np.pi/32,
R3, 47*np.pi/32,
R3, 45*np.pi/32,
R3, 41*np.pi/32,
R3, 43*np.pi/32,
R8, 1*np.pi/32,
R8, 3*np.pi/32,
R8, 7*np.pi/32,
R8, 5*np.pi/32,
R8, 15*np.pi/32,
R8, 13*np.pi/32,
R8, 9*np.pi/32,
R8, 11*np.pi/32,
R8, 31*np.pi/32,
R8, 29*np.pi/32,
R8, 25*np.pi/32,
R8, 27*np.pi/32,
R8, 17*np.pi/32,
R8, 19*np.pi/32,
R8, 23*np.pi/32,
R8, 21*np.pi/32,
R8, 63*np.pi/32,
R8, 61*np.pi/32,
R8, 57*np.pi/32,
R8, 59*np.pi/32,
R8, 49*np.pi/32,
R8, 51*np.pi/32,
R8, 55*np.pi/32,
R8, 53*np.pi/32,
R8, 33*np.pi/32,
R8, 35*np.pi/32,
R8, 39*np.pi/32,
R8, 37*np.pi/32,
R8, 47*np.pi/32,
R8, 45*np.pi/32,
R8, 41*np.pi/32,
R8, 43*np.pi/32,
R7, 1*np.pi/32,
R7, 3*np.pi/32,
R7, 7*np.pi/32,
R7, 5*np.pi/32,
R7, 15*np.pi/32,
R7, 13*np.pi/32,
R7, 9*np.pi/32,
R7, 11*np.pi/32,
R7, 31*np.pi/32,
R7, 29*np.pi/32,
R7, 25*np.pi/32,
R7, 27*np.pi/32,
R7, 17*np.pi/32,
R7, 19*np.pi/32,
R7, 23*np.pi/32,
R7, 21*np.pi/32,
R7, 63*np.pi/32,
R7, 61*np.pi/32,
R7, 57*np.pi/32,
R7, 59*np.pi/32,
R7, 49*np.pi/32,
R7, 51*np.pi/32,
R7, 55*np.pi/32,
R7, 53*np.pi/32,
R7, 33*np.pi/32,
R7, 35*np.pi/32,
R7, 39*np.pi/32,
R7, 37*np.pi/32,
R7, 47*np.pi/32,
R7, 45*np.pi/32,
R7, 41*np.pi/32,
R7, 43*np.pi/32,
R5, 1*np.pi/32,
R5, 3*np.pi/32,
R5, 7*np.pi/32,
R5, 5*np.pi/32,
R5, 15*np.pi/32,
R5, 13*np.pi/32,
R5, 9*np.pi/32,
R5, 11*np.pi/32,
R5, 31*np.pi/32,
R5, 29*np.pi/32,
R5, 25*np.pi/32,
R5, 27*np.pi/32,
R5, 17*np.pi/32,
R5, 19*np.pi/32,
R5, 23*np.pi/32,
R5, 21*np.pi/32,
R5, 63*np.pi/32,
R5, 61*np.pi/32,
R5, 57*np.pi/32,
R5, 59*np.pi/32,
R5, 49*np.pi/32,
R5, 51*np.pi/32,
R5, 55*np.pi/32,
R5, 53*np.pi/32,
R5, 33*np.pi/32,
R5, 35*np.pi/32,
R5, 39*np.pi/32,
R5, 37*np.pi/32,
R5, 47*np.pi/32,
R5, 45*np.pi/32,
R5, 41*np.pi/32,
R5, 43*np.pi/32,
R6, 1*np.pi/32,
R6, 3*np.pi/32,
R6, 7*np.pi/32,
R6, 5*np.pi/32,
R6, 15*np.pi/32,
R6, 13*np.pi/32,
R6, 9*np.pi/32,
R6, 11*np.pi/32,
R6, 31*np.pi/32,
R6, 29*np.pi/32,
R6, 25*np.pi/32,
R6, 27*np.pi/32,
R6, 17*np.pi/32,
R6, 19*np.pi/32,
R6, 23*np.pi/32,
R6, 21*np.pi/32,
R6, 63*np.pi/32,
R6, 61*np.pi/32,
R6, 57*np.pi/32,
R6, 59*np.pi/32,
R6, 49*np.pi/32,
R6, 51*np.pi/32,
R6, 55*np.pi/32,
R6, 53*np.pi/32,
R6, 33*np.pi/32,
R6, 35*np.pi/32,
R6, 39*np.pi/32,
R6, 37*np.pi/32,
R6, 47*np.pi/32,
R6, 45*np.pi/32,
R6, 41*np.pi/32,
R6, 43*np.pi/32]
# loop through all data
for n in range(int(np.ceil(number_of_bits/bits_per_symbol))):
# combine three bits and map to a complex symbol
symbol_int = 0
for i in range(bits_per_symbol):
symbol_int += 2**i * data[bits_per_symbol*n + i]
symbol = bit_map[symbol_int][0] * np.exp(1j*bit_map[symbol_int][1])
# break apart the complex symbol to inphase and quadrature arms
i_data[n*self.samples_per_symbol] = np.real(symbol)
q_data[n*self.samples_per_symbol] = np.imag(symbol)
        # create the I and Q pulse filters
        i_filter = fir_filter.fir_filter(self.pulse_coefficients)
        q_filter = fir_filter.fir_filter(self.pulse_coefficients)
        # create output waveforms by pushing the impulse trains through the
        # pulse-shaping filters one sample at a time
        i_waveform = []
        q_waveform = []
        for n in range(len(i_data)):
            i_waveform.append( i_filter.update( i_data[n] ) )
            q_waveform.append( q_filter.update( q_data[n] ) )
        # create the complex signal and frequency offset
        # (a constant rotation by -carrier_phase_offset radians)
        waveform = [i_waveform[i] + 1j*q_waveform[i] for i in range(len(i_waveform))]
        waveform = [_*np.exp(-1j*carrier_phase_offset) for _ in waveform]
        # normalise the waveform to unit peak magnitude
        # NOTE(review): an all-zero waveform would divide by zero here --
        # assumed non-empty, non-trivial input
        waveform_max = max( np.abs(waveform) )
        waveform = [_/waveform_max for _ in waveform]
        return waveform
| phase4ground/DVB-receiver | modem/python/library/generic_modulator.py | Python | gpl-3.0 | 36,780 |
class Predicate(object):
    """A named predicate with a fixed argument list.

    Besides the name/arguments pair, the object carries a mapping from
    argument tuples to values and a cost string, both filled in later.
    """

    def __init__(self, name, arguments):
        self.name = name
        self.arguments = arguments
        # populated via add_value(); maps argument tuples to their values
        self.value_mapping = dict()
        self.cost_str = ""

    def __str__(self):
        rendered_args = ", ".join(str(argument) for argument in self.arguments)
        return "%s(%s)" % (self.name, rendered_args)

    def get_arity(self):
        """Return the number of arguments of this predicate."""
        return len(self.arguments)

    def get_arguments(self):
        """Return the predicate's argument list."""
        return self.arguments

    def add_value(self, args, values):
        """Record the value(s) associated with one argument tuple."""
        self.value_mapping[args] = values
| robertmattmueller/sdac-compiler | pddl/predicates.py | Python | gpl-3.0 | 503 |
#!/usr/bin/env python
import chef
import datetime
from errbot import BotPlugin, botcmd
from time import time
STALE_TIME = 60 * 30 # 30 minutes
class Chef(BotPlugin):
    """Errbot plugin exposing Chef-server queries: node search, role listing,
    stale-node detection and installed package versions."""

    def pretty_time(self, time):
        """Format a unix timestamp as 'YYYY-MM-DD HH:MM:SS' (local time).

        NOTE(review): the parameter name shadows the `time` function imported
        at module level; kept unchanged for interface compatibility.
        """
        return datetime.datetime.fromtimestamp(int(time)).strftime('%Y-%m-%d %H:%M:%S')

    def search_node(self, args):
        """Run a Chef node search for the given query string.

        :raises Exception: if no query was supplied
        """
        # autoconfigure() loads the knife/client configuration and installs
        # the default API object that chef.Search uses implicitly
        api = chef.autoconfigure()
        if not args:
            raise Exception("No Search Query")
        return chef.Search('node', args)

    @botcmd
    def search(self, mess, args):
        """ Search and return nodes """
        # renamed from `list`, which shadowed the builtin
        result = "Search results for query : %s\n" % args
        for row in self.search_node(args):
            result += "%s\n" % row.object.name
        return result

    @botcmd
    def roles(self, mess, args):
        """ Search and return roles """
        # installs the default API object used by chef.Search below
        api = chef.autoconfigure()
        roles = ''
        for row in chef.Search('role', 'name:*' + args + '*'):
            roles += "%s\n" % row.object.name
        return roles

    @botcmd
    def stale(self, mess, args):
        """ Search for stale nodes """
        # renamed from `list`, which shadowed the builtin
        result = "Stale nodes for query : %s ( stale time %s seconds )\n" % (args, STALE_TIME)
        for row in self.search_node(args):
            # ohai_time is the epoch of the node's last chef-client run
            if row.object.attributes['ohai_time']:
                ago = int(time() - row.object.attributes['ohai_time'])
                pretty_ohai_time = self.pretty_time(row.object.attributes['ohai_time'])
                if ago >= STALE_TIME:
                    result += "%s ran %s seconds ago ( %s )\n" % (row.object.name, ago, pretty_ohai_time)
        return result

    @botcmd
    def dpkg(self, mess, args):
        """ Search installed package versions via Chef API ( requires ohai-dpkg) """
        # expected form: "<search query> <package name>"
        (search, package) = args.split()
        if not package:
            raise Exception("No package")
        # renamed from the misspelled `pacakges`
        packages = ''
        for row in self.search_node(search):
            # skip nodes without ohai-dpkg data or without this package
            # NOTE(review): assumes the 'dpkg' attribute key exists on every
            # node returned -- a missing key would raise, not skip; confirm
            if not row.object.attributes['dpkg']:
                continue
            if not row.object.attributes['dpkg'][package]:
                continue
            packages += "%s\t%s\n" % ( row.object.name , row.object.attributes['dpkg'][package]['version'] )
        return packages
| jordant/err-chef | chef.py | Python | gpl-3.0 | 2,609 |
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance_clark.
This module contains unit tests for abydos.distance.Clark
"""
import unittest
from abydos.distance import Clark
class ClarkTestCases(unittest.TestCase):
    """Test Clark functions.
    abydos.distance.Clark
    """
    cmp = Clark()
    def test_clark_dist(self):
        """Test abydos.distance.Clark.dist."""
        # Degenerate and exact-match cases: the distance is exactly 0 or 1.
        for src, tar, expected in [
            ('', '', 0.0),
            ('a', '', 1.0),
            ('', 'a', 1.0),
            ('a', 'a', 0.0),
            ('abc', '', 1.0),
            ('', 'abc', 1.0),
            ('abc', 'abc', 0.0),
            ('abcd', 'efgh', 1.0),
        ]:
            self.assertEqual(self.cmp.dist(src, tar), expected)
        # Partially overlapping strings: compare against reference values.
        for src, tar, expected in [
            ('Nigel', 'Niall', 0.8164965809),
            ('Niall', 'Nigel', 0.8164965809),
            ('Colin', 'Coiln', 0.8164965809),
            ('Coiln', 'Colin', 0.8164965809),
            ('ATCAACGAGT', 'AACGATTAG', 0.707106781),
        ]:
            self.assertAlmostEqual(self.cmp.dist(src, tar), expected)
# Allow running this test module directly: python test_distance_clark.py
if __name__ == '__main__':
    unittest.main()
| chrislit/abydos | tests/distance/test_distance_clark.py | Python | gpl-3.0 | 2,001 |
#!/usr/bin/env python
"""casper.py
Utility class for getting and presenting information from casper.jxml.
The results from casper.jxml are undocumented and thus quite likely to be
removed. Do not rely on its continued existence!
Copyright (C) 2014 Shea G Craig <shea.craig@da.org>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import copy
from .contrib import requests
import urllib
from xml.etree import ElementTree
class Casper(ElementTree.Element):
    """Element tree of data pulled from the casper.jxml endpoint.

    NOTE(review): casper.jxml is undocumented and unsupported by the
    vendor (see module docstring); this class may break without warning
    if the endpoint changes or disappears.
    """
    def __init__(self, jss):
        """Initialize a Casper object.
        jss: JSS object.
        """
        self.jss = jss
        self.url = "%s%s" % (self.jss.base_url, '/casper.jxml')
        # Python-2-style urllib.urlencode; credentials are POSTed as a form.
        self.auth = urllib.urlencode({'username': self.jss.user,
                                      'password': self.jss.password})
        super(Casper, self).__init__(tag='Casper')
        # Populate this element with live data immediately on construction.
        self.update()
    def _indent(self, elem, level=0, more_sibs=False):
        """Indent an xml element object to prepare for pretty printing.
        Method is internal to discourage indenting the self._root Element,
        thus potentially corrupting it.
        """
        # i is the newline-plus-indent prefix for this nesting level.
        i = "\n"
        pad = '    '
        if level:
            i += (level - 1) * pad
        num_kids = len(elem)
        if num_kids:
            # Elements with children: indent the text before the first child.
            if not elem.text or not elem.text.strip():
                elem.text = i + pad
                if level:
                    elem.text += pad
            count = 0
            for kid in elem:
                # Tell the child whether siblings follow it, so it can pad
                # its tail to align the next sibling.
                self._indent(kid, level+1, count < num_kids - 1)
                count += 1
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
                if more_sibs:
                    elem.tail += pad
        else:
            # Leaf element: only its tail (whitespace after it) is adjusted.
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i
                if more_sibs:
                    elem.tail += pad
    def __repr__(self):
        """Make our data human readable."""
        # deepcopy so we don't mess with the valid XML.
        pretty_data = copy.deepcopy(self)
        self._indent(pretty_data)
        elementstring = ElementTree.tostring(pretty_data)
        # NOTE(review): .encode('utf-8') yields bytes under Python 3; this is
        # Python-2-era code (urllib.urlencode above) -- confirm the target
        # interpreter before changing.
        return elementstring.encode('utf-8')
    def makeelement(self, tag, attrib):
        """Return an Element."""
        # We use ElementTree.SubElement() a lot. Unfortunately, it relies on a
        # super() call to its __class__.makeelement(), which will fail due to
        # the class NOT being Element.
        # This handles that issue.
        return ElementTree.Element(tag, attrib)
    def update(self):
        """Request an updated set of data from casper.jxml."""
        response = requests.post(self.url, data=self.auth)
        response_xml = ElementTree.fromstring(response.text)
        # Remove previous data, if any, and then add in response's XML.
        self.clear()
        for child in response_xml.getchildren():
            self.append(child)
| novaksam/python-jss | jss/casper.py | Python | gpl-3.0 | 3,498 |
# Minimal smoke-test script: emit a greeting when executed.
greeting = "Hello World !!"
print(greeting)
| ronas/PythonGNF | Eduardo/L01.ExeSeq01.py | Python | gpl-3.0 | 25 |
"""
A "mirroring" ``stdout`` context manager.
While active, the context manager reverses text output to
``stdout``::
# BEGIN MIRROR_GEN_DEMO_1
>>> from mirror_gen import looking_glass
>>> with looking_glass() as what: # <1>
... print('Alice, Kitty and Snowdrop')
... print(what)
...
pordwonS dna yttiK ,ecilA
YKCOWREBBAJ
>>> what
'JABBERWOCKY'
# END MIRROR_GEN_DEMO_1
This exposes the context manager operation::
# BEGIN MIRROR_GEN_DEMO_2
>>> from mirror_gen import looking_glass
>>> manager = looking_glass() # <1>
>>> manager # doctest: +ELLIPSIS
<contextlib._GeneratorContextManager object at 0x...>
>>> monster = manager.__enter__() # <2>
>>> monster == 'JABBERWOCKY' # <3>
eurT
>>> monster
'YKCOWREBBAJ'
>>> manager # doctest: +ELLIPSIS
>...x0 ta tcejbo reganaMtxetnoCrotareneG_.biltxetnoc<
>>> manager.__exit__(None, None, None) # <4>
>>> monster
'JABBERWOCKY'
# END MIRROR_GEN_DEMO_2
"""
# BEGIN MIRROR_GEN_EX
import contextlib
@contextlib.contextmanager # <1>
def looking_glass():
import sys
original_write = sys.stdout.write # <2>
def reverse_write(text): # <3>
original_write(text[::-1])
sys.stdout.write = reverse_write # <4>
yield 'JABBERWOCKY' # <5>
sys.stdout.write = original_write # <6>
# END MIRROR_GEN_EX
| YuxuanLing/trunk | trunk/code/study/python/Fluent-Python-example-code/15-context-mngr/mirror_gen.py | Python | gpl-3.0 | 1,453 |
# -*- coding: utf-8 -*-
'''
Printmodel django module for condominium
@author: Laurent GAY
@organization: sd-libre.fr
@contact: info@sd-libre.fr
@copyright: 2016 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from diacamma.condominium.models import Owner
# Registration metadata consumed by Lucterios' print-model loader.
name = _("owner")  # translated, human-readable title of this print model
kind = 2  # print-model kind code -- presumably 2 = report/listing; TODO confirm against Lucterios docs
modelname = Owner.get_long_name()  # model this layout is attached to
value = """
<model hmargin="10.0" vmargin="10.0" page_width="210.0" page_height="297.0">
<header extent="25.0">
<text height="10.0" width="120.0" top="0.0" left="70.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="20" font_family="sans-serif" font_weight="" font_size="20">
{[b]}#OUR_DETAIL.name{[/b]}
</text>
<text height="10.0" width="120.0" top="10.0" left="70.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="8" font_family="sans-serif" font_weight="" font_size="8">
{[italic]}
#OUR_DETAIL.address - #OUR_DETAIL.postal_code #OUR_DETAIL.city - #OUR_DETAIL.tel1 #OUR_DETAIL.tel2 #OUR_DETAIL.email{[br/]}#OUR_DETAIL.identify_number
{[/italic]}
</text>
<image height="25.0" width="30.0" top="0.0" left="10.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
#OUR_DETAIL.image
</image>
</header>
<bottom extent="10.0">
</bottom>
<body>
<text height="8.0" width="190.0" top="0.0" left="0.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="15" font_family="sans-serif" font_weight="" font_size="15">
{[b]}%(title)s{[/b]}
</text>
<text height="8.0" width="190.0" top="8.0" left="0.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="13" font_family="sans-serif" font_weight="" font_size="13">
#date_begin - #date_end
</text>
<text height="20.0" width="100.0" top="25.0" left="80.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}#third.contact.str{[/b]}{[br/]}#third.contact.address{[br/]}#third.contact.postal_code #third.contact.city
</text>
<text height="20.0" width="100.0" top="25.0" left="0.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="center" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(info)s{[/b]}: #information
</text>
<text height="10.0" width="75.0" top="45.0" left="00.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(current)s{[/b]}
</text>
<table height="40.0" width="75.0" top="55.0" left="00.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
<columns width="50.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="right" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="right" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[i]}%(value)s{[/i]}
</columns>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_initial)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_initial
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_call)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_call{[br/]}
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_payoff)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_payoff
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_owner)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_owner
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_regularization)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_regularization
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_current_ventilated)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_current_ventilated
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_recoverable_load)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_recoverable_load
</cell>
</rows>
</table>
<text height="10.0" width="75.0" top="45.0" left="100.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(exceptional)s{[/b]}
</text>
<table height="40.0" width="75.0" top="55.0" left="100.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
<columns width="50.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="right" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="right" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[i]}%(value)s{[/i]}
</columns>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_exceptional_initial)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_exceptional_initial
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_exceptional_call)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_exceptional_call{[br/]}
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_exceptional_payoff)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_exceptional_payoff
</cell>
</rows>
<rows>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
{[b]}%(total_exceptional_owner)s{[/b]}
</cell>
<cell border_color="black" border_style="" border_width="0.2" text_align="right" line_height="11" font_family="sans-serif" font_weight="" font_size="10">
#total_exceptional_owner
</cell>
</rows>
</table>
<table height="40.0" width="76.0" top="100.0" left="100.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
<columns width="30.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(set)s{[/b]}
</columns>
<columns width="12.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(ratio)s{[/b]}
</columns>
<columns width="17.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(total_callfunds)s{[/b]}
</columns>
<columns width="17.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(ventilated)s{[/b]}
</columns>
<rows data="exceptionnal_set">
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#set
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#ratio
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#total_callfunds
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#ventilated_txt
</cell>
</rows>
</table>
<text height="10.0" width="70.0" top="120.0" left="120.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(property)s{[/b]}
</text>
<table height="30.0" width="70.0" top="130.0" left="120.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
<columns width="10.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(num)s{[/b]}
</columns>
<columns width="15.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(value)s{[/b]}
</columns>
<columns width="12.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(ratio)s{[/b]}
</columns>
<columns width="33.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(description)s{[/b]}
</columns>
<rows data="propertylot_set">
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#num
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#value
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#ratio
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#description
</cell>
</rows>
</table>
<text height="10.0" width="110.0" top="120.0" left="00.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(partition)s{[/b]}
</text>
<table height="50.0" width="110.0" top="130.0" left="00.0" padding="1.0" spacing="0.0" border_color="black" border_style="" border_width="0.2">
<columns width="30.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(set)s{[/b]}
</columns>
<columns width="15.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="9" font_family="sans-serif" font_weight="" font_size="8">
{[b]}%(budget)s{[/b]}
</columns>
<columns width="15.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="9" font_family="sans-serif" font_weight="" font_size="8">
{[b]}%(expense)s{[/b]}
</columns>
<columns width="15.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="9" font_family="sans-serif" font_weight="" font_size="8">
{[b]}%(value)s{[/b]}
</columns>
<columns width="12.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="9" font_family="sans-serif" font_weight="" font_size="8">
{[b]}%(ratio)s{[/b]}
</columns>
<columns width="15.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="9" font_family="sans-serif" font_weight="" font_size="8">
{[b]}%(ventilated)s{[/b]}
</columns>
<columns width="15.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="9" font_family="sans-serif" font_weight="" font_size="8">
{[b]}%(recover_load)s{[/b]}
</columns>
<rows data="partition_set">
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#set
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#set.budget_txt
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#set.sumexpense_txt
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#value
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#ratio
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#ventilated_txt
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#recovery_load_txt
</cell>
</rows>
</table>
<text height="10.0" width="175.0" top="150.0" left="10.0" padding="0.5" spacing="1.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(call of funds)s{[/b]}
</text>
<table height="20.0" width="175.0" top="160.0" left="10.0" padding="1.0" spacing="0.1" border_color="black" border_style="" border_width="0.2">
<columns width="10.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(num)s{[/b]}
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(date)s{[/b]}
</columns>
<columns width="90.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(comment)s{[/b]}
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(total)s{[/b]}
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(rest_to_pay)s{[/b]}
</columns>
<rows data="callfunds_set">
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#num
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#date
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#comment
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#total
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#supporting.total_rest_topay
</cell>
</rows>
</table>
<text height="10.0" width="150.0" top="180.0" left="20.0" padding="1.0" spacing="1.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(payments)s{[/b]}
</text>
<table height="20.0" width="150.0" top="190.0" left="20.0" padding="1.0" spacing="0.1" border_color="black" border_style="" border_width="0.2">
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(date)s{[/b]}
</columns>
<columns width="20.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(amount)s{[/b]}
</columns>
<columns width="20.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(mode)s{[/b]}
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(bank_account)s{[/b]}
</columns>
<columns width="30.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(reference)s{[/b]}
</columns>
<columns width="30.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(assignment)s{[/b]}
</columns>
<rows data="payments_set">
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#date
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#amount
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#mode
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#bank_account
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#reference
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#assignment
</cell>
</rows>
</table>
<text height="10.0" width="150.0" top="210.0" left="20.0" padding="1.0" spacing="1.0" border_color="black" border_style="" border_width="0.2" text_align="left" line_height="11" font_family="sans-serif" font_weight="" font_size="11">
{[b]}%(payoffs)s{[/b]}
</text>
<table height="20.0" width="150.0" top="220.0" left="20.0" padding="1.0" spacing="0.1" border_color="black" border_style="" border_width="0.2">
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(date)s{[/b]}
</columns>
<columns width="20.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(amount)s{[/b]}
</columns>
<columns width="30.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(payer)s{[/b]}
</columns>
<columns width="20.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(mode)s{[/b]}
</columns>
<columns width="25.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(bank_account)s{[/b]}
</columns>
<columns width="30.0" display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="center" line_height="10" font_family="sans-serif" font_weight="" font_size="9">
{[b]}%(reference)s{[/b]}
</columns>
<rows data="payoff_set">
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#date
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#value
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#payer
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#mode
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#bank_account
</cell>
<cell display_align="center" border_color="black" border_style="solid" border_width="0.2" text_align="start" line_height="7" font_family="sans-serif" font_weight="" font_size="7">
#reference
</cell>
</rows>
</table>
</body>
</model>""" % {'title': _('Owner situation'), 'info': _('information'), 'call of funds': _('call of funds'), 'num': _('numeros'), 'date': _('date'), 'comment': _('comment'), 'total': _('total'),
'exceptional': _('exceptional'), 'current': _('current'),
'partition': _('partition'), 'set': _('set'), 'budget': _('budget'), 'expense': _('expense'), 'value': _('tantime'), 'ratio': _('ratio'),
'ventilated': _('ventilated'), 'recover_load': _('recover. load'), 'total_callfunds': _('total call for funds'), 'rest_to_pay': _('rest to pay'),
'property': _('property lot'), 'num': _('numeros'), 'value': _('tantime'), 'ratio': _("ratio"), 'description': _('description'),
'payments': _('payments'), 'assignment': _('assignment'),
'payoffs': _('additional payoffs'), 'amount': _('amount'), 'payer': _('payer'), 'mode': _('mode'), 'bank_account': _('bank account'), 'reference': _('reference'),
'total_current_initial': _('current initial state'),
'total_current_call': _('current total call for funds'),
'total_current_payoff': _('current total payoff'),
'total_current_regularization': _('estimated regularization'),
'total_current_ventilated': _('current total ventilated'),
'total_recoverable_load': _('total recoverable load'),
'total_current_owner': _('current total owner'),
'total_exceptional_initial': _('exceptional initial state'),
'total_exceptional_call': _('exceptional total call for funds'),
'total_exceptional_payoff': _('exceptional total payoff'),
'total_exceptional_owner': _('exceptional total owner'),
}
| Diacamma2/syndic | diacamma/condominium/printmodel/Owner_0001.py | Python | gpl-3.0 | 28,968 |
# sqlalchemy/pool.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
import weakref, time, threading
from sqlalchemy import exc, log
from sqlalchemy import queue as sqla_queue
from sqlalchemy.util import threading, pickle, as_interface, memoized_property
# Module-level registry mapping a DB-API module -> its _DBProxy wrapper;
# populated by manage() and torn down by clear_managers().
proxies = {}
def manage(module, **params):
    """Return a proxy for a DB-API module that automatically
    pools connections.

    Given a DB-API 2.0 module and pool management parameters, returns
    a proxy for the module that will automatically pool connections,
    creating new connection pools for each distinct set of connection
    arguments sent to the decorated module's connect() function.

    :param module: a DB-API 2.0 database module

    :param poolclass: the class used by the pool module to provide
      pooling.  Defaults to :class:`QueuePool`.

    :param \*\*params: will be passed through to *poolclass*
    """
    if module in proxies:
        return proxies[module]
    # setdefault keeps the first-registered proxy if another thread
    # slipped one in between the membership test and this call.
    return proxies.setdefault(module, _DBProxy(module, **params))
def clear_managers():
    """Remove all current DB-API 2.0 managers.

    All pools and connections are disposed.
    """
    # Snapshot the values before iterating: ``itervalues()`` is
    # Python-2-only, and iterating a live view while tearing managers
    # down is fragile if close() ever mutates the registry.
    for manager in list(proxies.values()):
        manager.close()
    proxies.clear()
class Pool(log.Identified):
    """Abstract base class for connection pools."""
    def __init__(self,
                    creator, recycle=-1, echo=None,
                    use_threadlocal=False,
                    logging_name=None,
                    reset_on_return=True, listeners=None):
        """
        Construct a Pool.
        :param creator: a callable function that returns a DB-API
          connection object.  The function will be called with
          parameters.
        :param recycle: If set to non -1, number of seconds between
          connection recycling, which means upon checkout, if this
          timeout is surpassed the connection will be closed and
          replaced with a newly opened connection. Defaults to -1.
        :param logging_name:  String identifier which will be used within
          the "name" field of logging records generated within the
          "sqlalchemy.pool" logger. Defaults to a hexstring of the object's
          id.
        :param echo: If True, connections being pulled and retrieved
          from the pool will be logged to the standard output, as well
          as pool sizing information.  Echoing can also be achieved by
          enabling logging for the "sqlalchemy.pool"
          namespace. Defaults to False.
        :param use_threadlocal: If set to True, repeated calls to
          :meth:`connect` within the same application thread will be
          guaranteed to return the same connection object, if one has
          already been retrieved from the pool and has not been
          returned yet.  Offers a slight performance advantage at the
          cost of individual transactions by default.  The
          :meth:`unique_connection` method is provided to bypass the
          threadlocal behavior installed into :meth:`connect`.
        :param reset_on_return: If true, reset the database state of
          connections returned to the pool.  This is typically a
          ROLLBACK to release locks and transaction resources.
          Disable at your own peril.  Defaults to True.
        :param listeners: A list of
          :class:`~sqlalchemy.interfaces.PoolListener`-like objects or
          dictionaries of callables that receive events when DB-API
          connections are created, checked out and checked in to the
          pool.
        """
        if logging_name:
            self.logging_name = self._orig_logging_name = logging_name
        else:
            self._orig_logging_name = None
        self.logger = log.instance_logger(self, echoflag=echo)
        # Holds a weakref to the calling thread's _ConnectionFairy when
        # use_threadlocal is enabled (see connect()).
        self._threadconns = threading.local()
        self._creator = creator
        self._recycle = recycle
        self._use_threadlocal = use_threadlocal
        self._reset_on_return = reset_on_return
        self.echo = echo
        # Listeners are partitioned per event type so dispatch sites can
        # test a single list for truthiness before iterating.
        self.listeners = []
        self._on_connect = []
        self._on_first_connect = []
        self._on_checkout = []
        self._on_checkin = []
        if listeners:
            for l in listeners:
                self.add_listener(l)
    def unique_connection(self):
        """Produce a DBAPI connection that is not referenced by any
        thread-local context.
        This method is different from :meth:`.Pool.connect` only if the
        ``use_threadlocal`` flag has been set to ``True``.
        """
        return _ConnectionFairy(self).checkout()
    def create_connection(self):
        """Called by subclasses to create a new ConnectionRecord."""
        return _ConnectionRecord(self)
    def recreate(self):
        """Return a new :class:`.Pool`, of the same class as this one
        and configured with identical creation arguments.
        This method is used in conjunection with :meth:`dispose`
        to close out an entire :class:`.Pool` and create a new one in
        its place.
        """
        raise NotImplementedError()
    def dispose(self):
        """Dispose of this pool.
        This method leaves the possibility of checked-out connections
        remaining open, It is advised to not reuse the pool once dispose()
        is called, and to instead use a new pool constructed by the
        recreate() method.
        """
        raise NotImplementedError()
    def connect(self):
        """Return a DBAPI connection from the pool.
        The connection is instrumented such that when its
        ``close()`` method is called, the connection will be returned to
        the pool.
        """
        if not self._use_threadlocal:
            return _ConnectionFairy(self).checkout()
        try:
            # Reuse this thread's live fairy if its weakref is still good.
            rec = self._threadconns.current()
            if rec:
                return rec.checkout()
        except AttributeError:
            # No fairy stored for this thread yet.
            pass
        agent = _ConnectionFairy(self)
        # Weakref only: the fairy must still be collectable so checkin
        # via _finalize_fairy can occur on dereference.
        self._threadconns.current = weakref.ref(agent)
        return agent.checkout()
    def return_conn(self, record):
        """Given a _ConnectionRecord, return it to the :class:`.Pool`.
        This method is called when an instrumented DBAPI connection
        has its ``close()`` method called.
        """
        if self._use_threadlocal and hasattr(self._threadconns, "current"):
            del self._threadconns.current
        self.do_return_conn(record)
    def get(self):
        """Return a non-instrumented DBAPI connection from this :class:`.Pool`.
        This is called by ConnectionRecord in order to get its DBAPI
        resource.
        """
        return self.do_get()
    def do_get(self):
        """Implementation for :meth:`get`, supplied by subclasses."""
        raise NotImplementedError()
    def do_return_conn(self, conn):
        """Implementation for :meth:`return_conn`, supplied by subclasses."""
        raise NotImplementedError()
    def status(self):
        # Subclasses return a human-readable summary string.
        raise NotImplementedError()
    def add_listener(self, listener):
        """Add a ``PoolListener``-like object to this pool.
        ``listener`` may be an object that implements some or all of
        PoolListener, or a dictionary of callables containing implementations
        of some or all of the named methods in PoolListener.
        """
        listener = as_interface(listener,
            methods=('connect', 'first_connect', 'checkout', 'checkin'))
        self.listeners.append(listener)
        # Register the listener only for the events it actually implements.
        if hasattr(listener, 'connect'):
            self._on_connect.append(listener)
        if hasattr(listener, 'first_connect'):
            self._on_first_connect.append(listener)
        if hasattr(listener, 'checkout'):
            self._on_checkout.append(listener)
        if hasattr(listener, 'checkin'):
            self._on_checkin.append(listener)
class _ConnectionRecord(object):
    # Owns one raw DB-API connection on behalf of a Pool, tracking its
    # creation time (for recycle) and a per-connection 'info' dict.
    def __init__(self, pool):
        self.__pool = pool
        self.connection = self.__connect()
        self.info = {}
        # 'first_connect' listeners are popped off the pool instance so
        # they fire only for the very first connection the pool creates.
        ls = pool.__dict__.pop('_on_first_connect', None)
        if ls is not None:
            for l in ls:
                l.first_connect(self.connection, self)
        if pool._on_connect:
            for l in pool._on_connect:
                l.connect(self.connection, self)
    def close(self):
        if self.connection is not None:
            self.__pool.logger.debug("Closing connection %r", self.connection)
            try:
                self.connection.close()
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                # Best effort: a failed close is logged, never raised.
                self.__pool.logger.debug("Exception closing connection %r",
                                self.connection)
    def invalidate(self, e=None):
        # Close the underlying connection and mark this record empty;
        # a replacement is created lazily by get_connection().
        if e is not None:
            self.__pool.logger.info(
                "Invalidate connection %r (reason: %s:%s)",
                self.connection, e.__class__.__name__, e)
        else:
            self.__pool.logger.info(
                "Invalidate connection %r", self.connection)
        self.__close()
        self.connection = None
    def get_connection(self):
        if self.connection is None:
            # Record was invalidated: reconnect and re-fire 'connect' events.
            self.connection = self.__connect()
            self.info.clear()
            if self.__pool._on_connect:
                for l in self.__pool._on_connect:
                    l.connect(self.connection, self)
        elif self.__pool._recycle > -1 and \
                time.time() - self.starttime > self.__pool._recycle:
            # Connection outlived its recycle interval: replace it.
            self.__pool.logger.info(
                    "Connection %r exceeded timeout; recycling",
                    self.connection)
            self.__close()
            self.connection = self.__connect()
            self.info.clear()
            if self.__pool._on_connect:
                for l in self.__pool._on_connect:
                    l.connect(self.connection, self)
        return self.connection
    def __close(self):
        try:
            self.__pool.logger.debug("Closing connection %r", self.connection)
            self.connection.close()
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception, e:
            self.__pool.logger.debug(
                        "Connection %r threw an error on close: %s",
                        self.connection, e)
    def __connect(self):
        try:
            # starttime drives the recycle check in get_connection().
            self.starttime = time.time()
            connection = self.__pool._creator()
            self.__pool.logger.debug("Created new connection %r", connection)
            return connection
        except Exception, e:
            self.__pool.logger.debug("Error on connect(): %s", e)
            raise
def _finalize_fairy(connection, connection_record, pool, ref=None):
    # Checkin handler: runs when a _ConnectionFairy is explicitly closed,
    # or garbage-collected (weakref callback path, where 'ref' is the
    # dead weakref).
    _refs.discard(connection_record)
    if ref is not None and \
                (connection_record.fairy is not ref or
                isinstance(pool, AssertionPool)):
        # Stale weakref callback: the record has already been handed to
        # a newer fairy, or the pool requires explicit checkin.
        return
    if connection is not None:
        try:
            if pool._reset_on_return:
                connection.rollback()
            # Immediately close detached instances
            if connection_record is None:
                connection.close()
        except Exception, e:
            if connection_record is not None:
                connection_record.invalidate(e=e)
            if isinstance(e, (SystemExit, KeyboardInterrupt)):
                raise
    if connection_record is not None:
        connection_record.fairy = None
        pool.logger.debug("Connection %r being returned to pool", connection)
        if pool._on_checkin:
            for l in pool._on_checkin:
                l.checkin(connection, connection_record)
        pool.return_conn(connection_record)
# Strong references to _ConnectionRecords currently checked out, keeping
# them alive while their fairy's weakref-finalizer is pending.
_refs = set()
class _ConnectionFairy(object):
    """Proxies a DB-API connection and provides return-on-dereference
    support."""
    __slots__ = '_pool', '__counter', 'connection', \
                '_connection_record', '__weakref__', '_detached_info'
    def __init__(self, pool):
        self._pool = pool
        # Checkout nesting depth; the connection goes back to the pool
        # when close() brings this back to zero.
        self.__counter = 0
        try:
            rec = self._connection_record = pool.get()
            conn = self.connection = self._connection_record.get_connection()
            # Weakref callback returns the connection to the pool if this
            # fairy is garbage-collected without an explicit close().
            rec.fairy = weakref.ref(
                            self,
                            lambda ref:_finalize_fairy(conn, rec, pool, ref)
                        )
            _refs.add(rec)
        except:
            # helps with endless __getattr__ loops later on
            self.connection = None
            self._connection_record = None
            raise
        self._pool.logger.debug("Connection %r checked out from pool" %
                       self.connection)
    @property
    def _logger(self):
        return self._pool.logger
    @property
    def is_valid(self):
        # False once closed, invalidated, or detached-and-closed.
        return self.connection is not None
    @property
    def info(self):
        """An info collection unique to this DB-API connection."""
        try:
            return self._connection_record.info
        except AttributeError:
            # No record: either closed, or detached (keeps its own copy).
            if self.connection is None:
                raise exc.InvalidRequestError("This connection is closed")
            try:
                return self._detached_info
            except AttributeError:
                self._detached_info = value = {}
                return value
    def invalidate(self, e=None):
        """Mark this connection as invalidated.
        The connection will be immediately closed.  The containing
        ConnectionRecord will create a new connection when next used.
        """
        if self.connection is None:
            raise exc.InvalidRequestError("This connection is closed")
        if self._connection_record is not None:
            self._connection_record.invalidate(e=e)
        self.connection = None
        self._close()
    def cursor(self, *args, **kwargs):
        # A failed cursor() usually means a dead connection, so the whole
        # connection is invalidated before re-raising.
        try:
            c = self.connection.cursor(*args, **kwargs)
            return _CursorFairy(self, c)
        except Exception, e:
            self.invalidate(e=e)
            raise
    def __getattr__(self, key):
        # Delegate everything else to the raw DB-API connection.
        return getattr(self.connection, key)
    def checkout(self):
        if self.connection is None:
            raise exc.InvalidRequestError("This connection is closed")
        self.__counter += 1
        # Checkout listeners run only on the outermost checkout.
        if not self._pool._on_checkout or self.__counter != 1:
            return self
        # Pool listeners can trigger a reconnection on checkout
        attempts = 2
        while attempts > 0:
            try:
                for l in self._pool._on_checkout:
                    l.checkout(self.connection, self._connection_record, self)
                return self
            except exc.DisconnectionError, e:
                self._pool.logger.info(
                "Disconnection detected on checkout: %s", e)
                self._connection_record.invalidate(e)
                self.connection = self._connection_record.get_connection()
                attempts -= 1
        self._pool.logger.info("Reconnection attempts exhausted on checkout")
        self.invalidate()
        raise exc.InvalidRequestError("This connection is closed")
    def detach(self):
        """Separate this connection from its Pool.
        This means that the connection will no longer be returned to the
        pool when closed, and will instead be literally closed.  The
        containing ConnectionRecord is separated from the DB-API connection,
        and will create a new connection when next used.
        Note that any overall connection limiting constraints imposed by a
        Pool implementation may be violated after a detach, as the detached
        connection is removed from the pool's knowledge and control.
        """
        if self._connection_record is not None:
            _refs.remove(self._connection_record)
            self._connection_record.fairy = None
            # The record gives up its connection; returning it lets the
            # pool create a replacement on next use.
            self._connection_record.connection = None
            self._pool.do_return_conn(self._connection_record)
            # Keep a private copy of 'info' for the detached connection.
            self._detached_info = \
              self._connection_record.info.copy()
            self._connection_record = None
    def close(self):
        self.__counter -= 1
        if self.__counter == 0:
            self._close()
    def _close(self):
        _finalize_fairy(self.connection, self._connection_record, self._pool)
        self.connection = None
        self._connection_record = None
class _CursorFairy(object):
    # Thin proxy around a DB-API cursor: logs (instead of raising) errors
    # on close, and routes invalidation back to the owning fairy.
    __slots__ = '_parent', 'cursor', 'execute'
    def __init__(self, parent, cursor):
        self._parent = parent
        self.cursor = cursor
        # Bind execute directly for speed; other attributes go through
        # __getattr__ delegation.
        self.execute = cursor.execute
    def invalidate(self, e=None):
        self._parent.invalidate(e=e)
    def __iter__(self):
        return iter(self.cursor)
    def close(self):
        try:
            self.cursor.close()
        except Exception, e:
            # str() can itself fail on exotic DBAPI exceptions.
            try:
                ex_text = str(e)
            except TypeError:
                ex_text = repr(e)
            self._parent._logger.warn("Error closing cursor: %s", ex_text)
            if isinstance(e, (SystemExit, KeyboardInterrupt)):
                raise
    def __setattr__(self, key, value):
        # Slotted attributes stay on the proxy; everything else is set on
        # the underlying cursor.
        if key in self.__slots__:
            object.__setattr__(self, key, value)
        else:
            setattr(self.cursor, key, value)
    def __getattr__(self, key):
        return getattr(self.cursor, key)
class SingletonThreadPool(Pool):
    """A Pool that maintains one connection per thread.
    Maintains one connection per each thread, never moving a connection to a
    thread other than the one which it was created in.
    This is used for SQLite, which both does not handle multithreading by
    default, and also requires a singleton connection if a :memory: database
    is being used.
    Options are the same as those of :class:`Pool`, as well as:
    :param pool_size: The number of threads in which to maintain connections
        at once.  Defaults to five.
    """
    def __init__(self, creator, pool_size=5, **kw):
        # Thread-local behavior is mandatory for this pool type.
        kw['use_threadlocal'] = True
        Pool.__init__(self, creator, **kw)
        # Per-thread weakref to that thread's _ConnectionRecord.
        self._conn = threading.local()
        # Strong refs to every live record, for dispose()/cleanup().
        self._all_conns = set()
        self.size = pool_size
    def recreate(self):
        self.logger.info("Pool recreating")
        return SingletonThreadPool(self._creator,
            pool_size=self.size,
            recycle=self._recycle,
            echo=self.echo,
            logging_name=self._orig_logging_name,
            use_threadlocal=self._use_threadlocal,
            listeners=self.listeners)
    def dispose(self):
        """Dispose of this pool."""
        for conn in self._all_conns:
            try:
                conn.close()
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                # pysqlite won't even let you close a conn from a thread
                # that didn't create it
                pass
        self._all_conns.clear()
    def dispose_local(self):
        # Drop only the calling thread's connection.
        if hasattr(self._conn, 'current'):
            conn = self._conn.current()
            self._all_conns.discard(conn)
            del self._conn.current
    def cleanup(self):
        # Trim the tracked set back down to pool_size; victims are
        # arbitrary since set.pop() is unordered.
        while len(self._all_conns) > self.size:
            c = self._all_conns.pop()
            c.close()
    def status(self):
        return "SingletonThreadPool id:%d size: %d" % \
                            (id(self), len(self._all_conns))
    def do_return_conn(self, conn):
        # Checkin is a no-op; the record stays bound to its thread.
        pass
    def do_get(self):
        try:
            # Reuse this thread's record if its weakref is still alive.
            c = self._conn.current()
            if c:
                return c
        except AttributeError:
            pass
        c = self.create_connection()
        self._conn.current = weakref.ref(c)
        self._all_conns.add(c)
        if len(self._all_conns) > self.size:
            self.cleanup()
        return c
class QueuePool(Pool):
    """A Pool that imposes a limit on the number of open connections."""
    def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30,
                 **kw):
        """
        Construct a QueuePool.
        :param creator: a callable function that returns a DB-API
          connection object.  The function will be called with
          parameters.
        :param pool_size: The size of the pool to be maintained,
          defaults to 5. This is the largest number of connections that
          will be kept persistently in the pool. Note that the pool
          begins with no connections; once this number of connections
          is requested, that number of connections will remain.
          ``pool_size`` can be set to 0 to indicate no size limit; to
          disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
          instead.
        :param max_overflow: The maximum overflow size of the
          pool. When the number of checked-out connections reaches the
          size set in pool_size, additional connections will be
          returned up to this limit. When those additional connections
          are returned to the pool, they are disconnected and
          discarded. It follows then that the total number of
          simultaneous connections the pool will allow is pool_size +
          `max_overflow`, and the total number of "sleeping"
          connections the pool will allow is pool_size. `max_overflow`
          can be set to -1 to indicate no overflow limit; no limit
          will be placed on the total number of concurrent
          connections. Defaults to 10.
        :param timeout: The number of seconds to wait before giving up
          on returning a connection. Defaults to 30.
        :param recycle: If set to non -1, number of seconds between
          connection recycling, which means upon checkout, if this
          timeout is surpassed the connection will be closed and
          replaced with a newly opened connection. Defaults to -1.
        :param echo: If True, connections being pulled and retrieved
          from the pool will be logged to the standard output, as well
          as pool sizing information.  Echoing can also be achieved by
          enabling logging for the "sqlalchemy.pool"
          namespace. Defaults to False.
        :param use_threadlocal: If set to True, repeated calls to
          :meth:`connect` within the same application thread will be
          guaranteed to return the same connection object, if one has
          already been retrieved from the pool and has not been
          returned yet.  Offers a slight performance advantage at the
          cost of individual transactions by default.  The
          :meth:`unique_connection` method is provided to bypass the
          threadlocal behavior installed into :meth:`connect`.
        :param reset_on_return: If true, reset the database state of
          connections returned to the pool.  This is typically a
          ROLLBACK to release locks and transaction resources.
          Disable at your own peril.  Defaults to True.
        :param listeners: A list of
          :class:`~sqlalchemy.interfaces.PoolListener`-like objects or
          dictionaries of callables that receive events when DB-API
          connections are created, checked out and checked in to the
          pool.
        """
        Pool.__init__(self, creator, **kw)
        self._pool = sqla_queue.Queue(pool_size)
        # _overflow counts checked-out connections minus pool_size; it
        # starts at -pool_size so overflow() reads 0 once exactly
        # pool_size connections exist.
        self._overflow = 0 - pool_size
        self._max_overflow = max_overflow
        self._timeout = timeout
        # The lock guards _overflow only when overflow is bounded.
        self._overflow_lock = self._max_overflow > -1 and \
                                    threading.Lock() or None
    def recreate(self):
        self.logger.info("Pool recreating")
        return QueuePool(self._creator, pool_size=self._pool.maxsize,
                          max_overflow=self._max_overflow,
                          timeout=self._timeout,
                          recycle=self._recycle, echo=self.echo,
                          logging_name=self._orig_logging_name,
                          use_threadlocal=self._use_threadlocal,
                          listeners=self.listeners)
    def do_return_conn(self, conn):
        try:
            self._pool.put(conn, False)
        except sqla_queue.Full:
            # Queue already holds pool_size idle records: this one was an
            # overflow connection, so close it and decrement the counter.
            conn.close()
            if self._overflow_lock is None:
                self._overflow -= 1
            else:
                self._overflow_lock.acquire()
                try:
                    self._overflow -= 1
                finally:
                    self._overflow_lock.release()
    def do_get(self):
        try:
            # Block for a queued connection only once overflow is
            # exhausted; otherwise poll and fall through to create one.
            wait = self._max_overflow > -1 and \
                        self._overflow >= self._max_overflow
            return self._pool.get(wait, self._timeout)
        except sqla_queue.Empty:
            if self._max_overflow > -1 and \
                        self._overflow >= self._max_overflow:
                if not wait:
                    # Raced: overflow filled up during the non-blocking
                    # poll; retry, this time willing to wait.
                    return self.do_get()
                else:
                    raise exc.TimeoutError(
                            "QueuePool limit of size %d overflow %d reached, "
                            "connection timed out, timeout %d" %
                            (self.size(), self.overflow(), self._timeout))
            # Re-check the overflow limit under the lock before creating.
            if self._overflow_lock is not None:
                self._overflow_lock.acquire()
            if self._max_overflow > -1 and \
                        self._overflow >= self._max_overflow:
                if self._overflow_lock is not None:
                    self._overflow_lock.release()
                return self.do_get()
            try:
                con = self.create_connection()
                self._overflow += 1
            finally:
                if self._overflow_lock is not None:
                    self._overflow_lock.release()
            return con
    def dispose(self):
        while True:
            try:
                conn = self._pool.get(False)
                conn.close()
            except sqla_queue.Empty:
                break
        # Reset accounting as if the pool were freshly constructed.
        self._overflow = 0 - self.size()
        self.logger.info("Pool disposed. %s", self.status())
    def status(self):
        return "Pool size: %d  Connections in pool: %d "\
                "Current Overflow: %d Current Checked out "\
                "connections: %d" % (self.size(),
                                    self.checkedin(),
                                    self.overflow(),
                                    self.checkedout())
    def size(self):
        return self._pool.maxsize
    def checkedin(self):
        return self._pool.qsize()
    def overflow(self):
        return self._overflow
    def checkedout(self):
        return self._pool.maxsize - self._pool.qsize() + self._overflow
class NullPool(Pool):
    """A Pool which does not pool connections.

    Each checkout literally opens a fresh DB-API connection and each
    checkin closes it outright.  Reconnect-related features such as
    ``recycle`` and connection invalidation are unsupported here, since
    no connections are ever held persistently.
    """
    def status(self):
        return "NullPool"
    def do_get(self):
        # Every checkout is a brand-new connection record.
        return self.create_connection()
    def do_return_conn(self, conn):
        # Nothing is retained; returned connections are closed outright.
        conn.close()
    def do_return_invalid(self, conn):
        # Invalid connections were never tracked, so there is nothing to do.
        pass
    def dispose(self):
        # No persistent state to release.
        pass
    def recreate(self):
        self.logger.info("Pool recreating")
        settings = dict(
            recycle=self._recycle,
            echo=self.echo,
            logging_name=self._orig_logging_name,
            use_threadlocal=self._use_threadlocal,
            listeners=self.listeners,
        )
        return NullPool(self._creator, **settings)
class StaticPool(Pool):
    """A Pool of exactly one connection, used for all requests.
    Reconnect-related functions such as ``recycle`` and connection
    invalidation (which is also used to support auto-reconnect) are not
    currently supported by this Pool implementation but may be implemented
    in a future release.
    """
    @memoized_property
    def _conn(self):
        # The single raw DB-API connection, created lazily on first access
        # (memoized_property replaces this descriptor with the value).
        return self._creator()
    @memoized_property
    def connection(self):
        # The single _ConnectionRecord wrapping _conn, also memoized.
        return _ConnectionRecord(self)
    def status(self):
        return "StaticPool"
    def dispose(self):
        # '_conn' appears in __dict__ only after the memoized property has
        # fired, i.e. only if a connection was actually created.
        if '_conn' in self.__dict__:
            self._conn.close()
            self._conn = None
    def recreate(self):
        self.logger.info("Pool recreating")
        return self.__class__(creator=self._creator,
                              recycle=self._recycle,
                              use_threadlocal=self._use_threadlocal,
                              reset_on_return=self._reset_on_return,
                              echo=self.echo,
                              logging_name=self._orig_logging_name,
                              listeners=self.listeners)
    def create_connection(self):
        return self._conn
    def do_return_conn(self, conn):
        pass
    def do_return_invalid(self, conn):
        pass
    def do_get(self):
        return self.connection
class AssertionPool(Pool):
    """A Pool that allows at most one checked out connection at any given
    time.

    Raises an exception if a second connection is requested while one is
    already outstanding, or if a connection is returned without having
    been checked out.  Useful for debugging code that uses more
    connections than desired.
    """
    def __init__(self, *args, **kw):
        # Single cached connection record plus a checked-out flag.
        self._conn = None
        self._checked_out = False
        Pool.__init__(self, *args, **kw)
    def status(self):
        return "AssertionPool"
    def do_get(self):
        if self._checked_out:
            raise AssertionError("connection is already checked out")
        if not self._conn:
            self._conn = self.create_connection()
        self._checked_out = True
        return self._conn
    def do_return_conn(self, conn):
        if not self._checked_out:
            raise AssertionError("connection is not checked out")
        self._checked_out = False
        assert conn is self._conn
    def do_return_invalid(self, conn):
        # Drop the cached record so the next checkout rebuilds it.
        self._conn = None
        self._checked_out = False
    def dispose(self):
        self._checked_out = False
        if self._conn:
            self._conn.close()
    def recreate(self):
        self.logger.info("Pool recreating")
        return AssertionPool(
            self._creator,
            echo=self.echo,
            logging_name=self._orig_logging_name,
            listeners=self.listeners,
        )
class _DBProxy(object):
    """Layers connection pooling behavior on top of a standard DB-API module.
    Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
    specific connect parameters. Other functions and attributes are delegated
    to the underlying DB-API module.
    """
    def __init__(self, module, poolclass=QueuePool, **kw):
        """Initializes a new proxy.
        module
          a DB-API 2.0 module
        poolclass
          a Pool class, defaulting to QueuePool
        Other parameters are sent to the Pool object's constructor.
        """
        self.module = module
        self.kw = kw
        self.poolclass = poolclass
        # One Pool per distinct set of connect() arguments, keyed by a
        # pickle of those arguments (see _serialize()).
        self.pools = {}
        # Guards lazy pool creation in get_pool().
        self._create_pool_mutex = threading.Lock()
    def close(self):
        # Drop all pools at once.  The previous implementation deleted
        # keys while iterating self.pools.keys(), which relies on the
        # Python 2 list snapshot and raises RuntimeError under Python 3;
        # clear() is equivalent and safe on both.
        self.pools.clear()
    def __del__(self):
        self.close()
    def __getattr__(self, key):
        # Anything not defined on the proxy is delegated to the module.
        return getattr(self.module, key)
    def get_pool(self, *args, **kw):
        """Return (creating if necessary) the Pool for the given
        connect arguments."""
        key = self._serialize(*args, **kw)
        try:
            return self.pools[key]
        except KeyError:
            # Double-checked creation: re-test under the mutex so two
            # threads racing on the same key build only one pool.
            self._create_pool_mutex.acquire()
            try:
                if key not in self.pools:
                    pool = self.poolclass(lambda:
                                self.module.connect(*args, **kw), **self.kw)
                    self.pools[key] = pool
                    return pool
                else:
                    return self.pools[key]
            finally:
                self._create_pool_mutex.release()
    def connect(self, *args, **kw):
        """Activate a connection to the database.
        Connect to the database using this DBProxy's module and the given
        connect arguments.  If the arguments match an existing pool, the
        connection will be returned from the pool's current thread-local
        connection instance, or if there is no thread-local connection
        instance it will be checked out from the set of pooled connections.
        If the pool has no available connections and allows new connections
        to be created, a new database connection will be made.
        """
        return self.get_pool(*args, **kw).connect()
    def dispose(self, *args, **kw):
        """Dispose the pool referenced by the given connect arguments."""
        key = self._serialize(*args, **kw)
        try:
            del self.pools[key]
        except KeyError:
            pass
    def _serialize(self, *args, **kw):
        # Pickling the argument list yields a hashable key for arbitrary
        # connect() argument values.
        return pickle.dumps([args, kw])
| jokajak/itweb | data/env/lib/python2.6/site-packages/SQLAlchemy-0.6.7-py2.6.egg/sqlalchemy/pool.py | Python | gpl-3.0 | 33,545 |
#!/usr/bin/env python3
#
# mmgen = Multi-Mode GENerator, command-line Bitcoin cold storage solution
# Copyright (C)2013-2022 The MMGen Project <mmgen@tuta.io>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
color.py: color handling for the MMGen suite
"""
# Color table: name -> (256-color spec, (ANSI SGR code, bold flag)).
# The first element is an xterm-256 foreground index, or a
# (foreground, background) pair for the *bg entries; the second element
# is the 8/16-color fallback used when 256 colors are unavailable.
_colors = {
    'black':   (  232,      (30,0) ),
    'red':     (  210,      (31,1) ),
    'green':   (  121,      (32,1) ),
    'yellow':  (  229,      (33,1) ),
    'blue':    (  75,       (34,1) ),
    'magenta': (  205,      (35,1) ),
    'cyan':    (  122,      (36,1) ),
    'pink':    (  218,      (35,1) ),
    'orange':  (  216,      (31,1) ),
    'gray':    (  246,      (30,1) ),
    'purple':  (  141,      (35,1) ),
    'brown':   (  208,      (33,0) ),
    'grndim':  (  108,      (32,0) ),
    'redbg':   ( (232,210), (30,101) ),
    'grnbg':   ( (232,121), (30,102) ),
    'blubg':   ( (232,75),  (30,104) ),
    'yelbg':   ( (232,229), (30,103) ),
}
def nocolor(s):
    """Identity colorizer: return *s* unchanged (used when color is off)."""
    return s
def set_vt100():
    'hack to put term into VT100 mode under MSWin'
    from .globalvars import g
    if g.platform != 'win':
        return
    from subprocess import run
    # Spawning any shell command is enough to flip the console mode.
    run([], shell=True)
def get_terminfo_colors(term=None):
    """Return the number of colors advertised by the terminfo entry for
    *term* (or the current terminal), or None if it cannot be determined.

    Runs ``infocmp -0`` and parses the ``colors#N`` capability, which may
    be decimal or 0x-prefixed hexadecimal.
    """
    from subprocess import run,PIPE
    cmd = ['infocmp','-0']
    if term:
        cmd.append(term)
    try:
        cmdout = run(cmd,stdout=PIPE,check=True).stdout.decode()
    except Exception:
        # infocmp missing, unknown terminal, or undecodable output.
        # (Narrowed from a bare except so KeyboardInterrupt propagates.)
        return None
    # Fix: guard against entries with no 'colors#N' capability (e.g. a
    # cancelled 'colors@'), which previously raised IndexError.
    matches = [e.split('#')[1] for e in cmdout.split(',')
                if e.startswith('colors') and '#' in e]
    if not matches:
        return None
    s = matches[0]
    from .util import is_hex_str
    if s.isdecimal():
        return int(s)
    elif s.startswith('0x') and is_hex_str(s[2:]):
        return int(s[2:],16)
    else:
        return None
def init_color(num_colors='auto'):
    # Patch the module-level color functions *in place* by swapping their
    # __code__ objects, so references already imported elsewhere keep
    # working after (re)initialization.
    assert num_colors in ('auto',8,16,256,0)
    import mmgen.color as self
    if num_colors == 'auto':
        # Prefer the TERM name; fall back to querying terminfo.
        import os
        t = os.getenv('TERM')
        num_colors = 256 if (t and t.endswith('256color')) or get_terminfo_colors() == 256 else 16
    reset = '\033[0m'
    if num_colors == 0:
        # Disable color entirely: every function becomes the identity.
        ncc = (lambda s: s).__code__
        for c in _colors:
            getattr(self,c).__code__ = ncc
    elif num_colors == 256:
        for c,e in _colors.items():
            # e[0] is either a single fg index or a (fg,bg) pair.
            start = (
                '\033[38;5;{};1m'.format(e[0]) if type(e[0]) == int else
                '\033[38;5;{};48;5;{};1m'.format(*e[0]) )
            getattr(self,c).__code__ = eval(f'(lambda s: "{start}" + s + "{reset}").__code__')
    elif num_colors in (8,16):
        for c,e in _colors.items():
            # e[1] is (SGR code, bold flag) for the 8/16-color fallback.
            start = (
                '\033[{}m'.format(e[1][0]) if e[1][1] == 0 else
                '\033[{};{}m'.format(*e[1]) )
            getattr(self,c).__code__ = eval(f'(lambda s: "{start}" + s + "{reset}").__code__')
    set_vt100()
# Define placeholder (no-op) color functions at import time; init_color()
# later swaps in real implementations by patching each function's __code__.
for _c in _colors:
    exec(f'{_c} = lambda s: s')
| mmgen/mmgen | mmgen/color.py | Python | gpl-3.0 | 3,151 |
# -*- coding: UTF-8 -*-
# Copyright (C) 2007, 2009, 2011-2012 J. David Ibáñez <jdavid.ibp@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from the Standard Library
import os
from datetime import datetime, timedelta, time
from heapq import heappush, heappop
from multiprocessing import Process
from os.path import abspath, dirname
from uuid import uuid4
# Import from pygit2
from pygit2 import TreeBuilder, GIT_FILEMODE_TREE, init_repository
# Import from itools
from itools.database import Metadata
from itools.database.magic_ import magic_from_buffer
from itools.database.git import open_worktree
from itools.fs import lfs
# Import from here
from catalog import Catalog, _get_xquery, SearchResults, make_catalog
from patchs import PatchsBackend
from registry import register_backend
TEST_DB_WITHOUT_COMMITS = bool(int(os.environ.get('TEST_DB_WITHOUT_COMMITS') or 0))
TEST_DB_DESACTIVATE_GIT = bool(int(os.environ.get('TEST_DB_DESACTIVATE_GIT') or 0))
class Heap(object):
    """A minimal, deepest-first sorted mapping of Git tree paths.

    Only a small subset of the dict API is exposed on purpose:

    >>> len(heap)
    >>> heap[path] = value
    >>> value = heap.get(path)
    >>> path, value = heap.popitem()

    Keys are relative tree paths such as 'a/b/c' ('' denotes the root).
    Deeper paths sort first, so 'popitem' always yields children before
    their parents; ordering between equally deep paths is unspecified.
    This lets RWDatabase._save_changes build child tree objects before
    the trees that contain them.
    """

    def __init__(self):
        # Path -> value mapping, plus a parallel heap of (depth, path)
        # keys that drives the pop order.
        self._values = {}
        self._order = []

    def __len__(self):
        return len(self._values)

    def get(self, path):
        return self._values.get(path)

    def __setitem__(self, path, value):
        # Register the path in the heap only on first insertion; later
        # assignments just overwrite the stored value.
        if path not in self._values:
            depth = -path.count('/') if path else 1
            heappush(self._order, (depth, path))
        self._values[path] = value

    def popitem(self):
        # Pop the deepest remaining path together with its value.
        depth, path = heappop(self._order)
        return path, self._values.pop(path)
class GitBackend(object):
    """Storage backend keeping '.metadata' files in a Git worktree and all
    other (static) files on a plain filesystem with a '.history' folder.

    NOTE(review): this module is Python 2 code (see the ``time(06, 00)``
    literal in do_transaction, which is a syntax error under Python 3).
    """

    def __init__(self, path, fields, read_only=False):
        self.nb_transactions = 0
        self.last_transaction_dtime = None
        self.path = abspath(path) + '/'
        self.fields = fields
        self.read_only = read_only
        # Open database
        self.path_data = '%s/database/' % self.path
        # NOTE(review): duplicate assignment of self.path_data kept as-is
        self.path_data = '%s/database/' % self.path
        if not lfs.is_folder(self.path_data):
            error = '"{0}" should be a folder, but it is not'.format(self.path_data)
            raise ValueError(error)
        # New interface to Git
        self.worktree = open_worktree(self.path_data)
        # Initialize the database, but chrooted
        self.fs = lfs.open(self.path_data)
        # Static FS: created lazily on first use
        database_static_path = '{0}/database_static'.format(path)
        if not lfs.exists(database_static_path):
            self.init_backend_static(path)
        self.static_fs = lfs.open(database_static_path)
        # Patchs backend
        self.patchs_backend = PatchsBackend(path, self.fs, read_only)
        # Catalog
        self.catalog = self.get_catalog()

    @classmethod
    def init_backend(cls, path, fields, init=False, soft=False):
        """Create a new on-disk layout: git metadata repo, static area, catalog."""
        # Metadata database
        init_repository('{0}/database'.format(path), bare=False)
        # Init backend static
        cls.init_backend_static(path)
        # Make catalog
        make_catalog('{0}/catalog'.format(path), fields)

    @classmethod
    def init_backend_static(cls, path):
        """Create the static file area and its '.history' versions folder."""
        # Static database
        lfs.make_folder('{0}/database_static'.format(path))
        lfs.make_folder('{0}/database_static/.history'.format(path))

    #######################################################################
    # Database API
    #######################################################################
    def normalize_key(self, path, __root=None):
        """Resolve *path* to a canonical 'a/b/c' key; deny '.git' access."""
        # Performance is critical so assume the path is already relative to
        # the repository.
        key = __root.resolve(path)
        if key and key[0] == '.git':
            err = "bad '{0}' path, access to the '.git' folder is denied"
            raise ValueError(err.format(path))
        return '/'.join(key)

    def handler_exists(self, key):
        """Return whether a file exists for *key* (in git or static area)."""
        fs = self.get_handler_fs_by_key(key)
        return fs.exists(key)

    def get_handler_names(self, key):
        """List the entries of the folder *key* in the metadata area."""
        return self.fs.get_names(key)

    def get_handler_data(self, key):
        """Return the raw bytes stored at *key*, or None for an empty key."""
        if not key:
            return None
        fs = self.get_handler_fs_by_key(key)
        with fs.open(key) as f:
            return f.read()

    def get_handler_mimetype(self, key):
        """Guess the mimetype of *key* from its content (libmagic)."""
        data = self.get_handler_data(key)
        return magic_from_buffer(data)

    def handler_is_file(self, key):
        fs = self.get_handler_fs_by_key(key)
        return fs.is_file(key)

    def handler_is_folder(self, key):
        fs = self.get_handler_fs_by_key(key)
        return fs.is_folder(key)

    def get_handler_mtime(self, key):
        fs = self.get_handler_fs_by_key(key)
        return fs.get_mtime(key)

    def save_handler(self, key, handler):
        """Serialize *handler* to *key* and reset its dirty state."""
        data = handler.to_str()
        # Save the file
        fs = self.get_handler_fs(handler)
        # Write and truncate (calls to "_save_state" must be done with the
        # pointer pointing to the beginning)
        if not fs.exists(key):
            with fs.make_file(key) as f:
                f.write(data)
                f.truncate(f.tell())
        else:
            with fs.open(key, 'w') as f:
                f.write(data)
                f.truncate(f.tell())
        # Set dirty = None
        handler.timestamp = self.get_handler_mtime(key)
        handler.dirty = None

    def traverse_resources(self):
        raise NotImplementedError

    def get_handler_fs(self, handler):
        """Pick the filesystem for a handler: git for Metadata, else static."""
        if isinstance(handler, Metadata):
            return self.fs
        return self.static_fs

    def get_handler_fs_by_key(self, key):
        """Pick the filesystem for a key: git for '*metadata', else static."""
        if key.endswith('metadata'):
            return self.fs
        return self.static_fs

    def add_handler_into_static_history(self, key):
        """Copy the static file *key* into '.history' with a timestamped name."""
        the_time = datetime.now().strftime('%Y%m%d%H%M%S')
        new_key = '.history/{0}.{1}.{2}'.format(key, the_time, uuid4())
        parent_path = dirname(new_key)
        if not self.static_fs.exists(parent_path):
            self.static_fs.make_folder(parent_path)
        self.static_fs.copy(key, new_key)

    def do_transaction(self, commit_message, data, added, changed, removed, handlers,
                       docs_to_index, docs_to_unindex):
        """Persist one transaction: history copies, patch, files, git, catalog.

        NOTE(review): the docs_to_index/docs_to_unindex parameters are
        immediately shadowed by the unpacking of *data* below.
        """
        git_author, git_date, git_msg, docs_to_index, docs_to_unindex = data
        # Statistics
        self.nb_transactions += 1
        # Add static changed & removed files to ~/database_static/.history/
        changed_and_removed = list(changed) + list(removed)
        for key in changed_and_removed:
            if not key.endswith('metadata'):
                self.add_handler_into_static_history(key)
        # Create patch if there's changed
        if added or changed or removed:
            self.patchs_backend.create_patch(added, changed, removed, handlers, git_author)
        else:
            # it's a catalog transaction, we have to do nothing
            pass
        # Added and changed
        added_and_changed = list(added) + list(changed)
        for key in added_and_changed:
            handler = handlers.get(key)
            parent_path = dirname(key)
            fs = self.get_handler_fs(handler)
            if not fs.exists(parent_path):
                fs.make_folder(parent_path)
            self.save_handler(key, handler)
        # Remove files (if not removed via git-rm)
        for key in removed:
            if not key.endswith('metadata') or TEST_DB_WITHOUT_COMMITS:
                fs = self.get_handler_fs_by_key(key)
                fs.remove(key)
        # Do git transaction for metadata
        if not TEST_DB_WITHOUT_COMMITS:
            self.do_git_transaction(commit_message, data, added, changed, removed, handlers)
        else:
            # Commit at start
            if not self.last_transaction_dtime:
                self.do_git_big_commit()
            else:
                # Batch mode: only re-commit at night and at most every 2 hours.
                now = datetime.now()
                t = now.time()
                # NOTE(review): '06' is a Python-2-only literal (py3 syntax error)
                is_night = time(21, 00) < t or t < time(06, 00)
                done_recently = now - self.last_transaction_dtime < timedelta(minutes=120)
                if is_night and not done_recently:
                    self.do_git_big_commit()
        # Catalog
        for path in docs_to_unindex:
            self.catalog.unindex_document(path)
        for resource, values in docs_to_index:
            self.catalog.index_document(values)
        self.catalog.save_changes()

    def do_git_big_commit(self):
        """ Some databases are really bigs (1 millions files). GIT is too slow in this cases.
        So we don't commit at each transaction, but at each N transactions.
        """
        if TEST_DB_DESACTIVATE_GIT is True:
            return
        # Run 'git add -A && git commit' in a child process so the caller
        # does not block on the (slow) whole-tree commit.
        p1 = Process(target=self._do_git_big_commit)
        p1.start()
        self.last_transaction_dtime = datetime.now()

    def _do_git_big_commit(self):
        # Executed in the child process: stage everything and commit.
        worktree = self.worktree
        worktree._call(['git', 'add', '-A'])
        worktree._call(['git', 'commit', '-m', 'Autocommit'])

    def do_git_transaction(self, commit_message, data, added, changed, removed, handlers):
        """Stage metadata changes and build the commit tree by hand (pygit2)."""
        worktree = self.worktree
        # 3. Git add
        git_add = list(added) + list(changed)
        git_add = [x for x in git_add if x.endswith('metadata')]
        worktree.git_add(*git_add)
        # 3. Git rm
        git_rm = list(removed)
        git_rm = [x for x in git_rm if x.endswith('metadata')]
        worktree.git_rm(*git_rm)
        # 2. Build the 'git commit' command
        git_author, git_date, git_msg, docs_to_index, docs_to_unindex = data
        git_msg = git_msg or 'no comment'
        # 4. Create the tree
        repo = worktree.repo
        index = repo.index
        try:
            head = repo.revparse_single('HEAD')
        except KeyError:
            # First commit: let git build the tree from the index.
            git_tree = None
        else:
            root = head.tree
            # Initialize the heap
            heap = Heap()
            heap[''] = repo.TreeBuilder(root)
            for key in git_add:
                entry = index[key]
                heap[key] = (entry.oid, entry.mode)
            for key in git_rm:
                heap[key] = None
            # Pop paths deepest-first so child trees are written before
            # the parent trees that reference them.
            while heap:
                path, value = heap.popitem()
                # Stop condition
                if path == '':
                    git_tree = value.write()
                    break

                if type(value) is TreeBuilder:
                    if len(value) == 0:
                        value = None
                    else:
                        oid = value.write()
                        value = (oid, GIT_FILEMODE_TREE)

                # Split the path
                if '/' in path:
                    parent, name = path.rsplit('/', 1)
                else:
                    parent = ''
                    name = path

                # Get the tree builder
                tb = heap.get(parent)
                if tb is None:
                    try:
                        tentry = root[parent]
                    except KeyError:
                        tb = repo.TreeBuilder()
                    else:
                        tree = repo[tentry.oid]
                        tb = repo.TreeBuilder(tree)
                    heap[parent] = tb

                # Modify
                if value is None:
                    # Sometimes there are empty folders left in the
                    # filesystem, but not in the tree, then we get a
                    # "Failed to remove entry" error. Be robust.
                    if tb.get(name) is not None:
                        tb.remove(name)
                else:
                    tb.insert(name, value[0], value[1])

        # 5. Git commit
        worktree.git_commit(git_msg, git_author, git_date, tree=git_tree)

    def abort_transaction(self):
        """Throw away uncommitted catalog changes (git needs no rollback)."""
        self.catalog.abort_changes()
        #from pygit2 import GIT_CHECKOUT_FORCE, GIT_CHECKOUT_REMOVE_UNTRACKED
        # Don't need to abort since git add is made à last minute
        #strategy = GIT_CHECKOUT_FORCE | GIT_CHECKOUT_REMOVE_UNTRACKED
        #if pygit2.__version__ >= '0.21.1':
        #    self.worktree.repo.checkout_head(strategy=strategy)
        #else:
        #    self.worktree.repo.checkout_head(strategy)

    def flush_catalog(self, docs_to_unindex, docs_to_index):
        """Apply pending catalog (un)index operations without saving."""
        for path in docs_to_unindex:
            self.catalog.unindex_document(path)
        for resource, values in docs_to_index:
            self.catalog.index_document(values)

    def get_catalog(self):
        """Open the xapian catalog next to the database, or None if missing."""
        path = '{0}/catalog'.format(self.path)
        if not lfs.is_folder(path):
            return None
        return Catalog(path, self.fields, read_only=self.read_only)

    def search(self, query=None, **kw):
        """Launch a search in the catalog.
        """
        catalog = self.catalog
        xquery = _get_xquery(catalog, query, **kw)
        return SearchResults(catalog, xquery)

    def close(self):
        self.catalog.close()
register_backend('git', GitBackend)
| hforge/itools | itools/database/backends/git.py | Python | gpl-3.0 | 14,062 |
import nltk
PLUS = 'plus'
MUL = 'multiplied by'
DIV = 'divided by'
MIN = 'minus'
OPENB = 'open bracket'
CLOSEB = 'close bracket'
with open('../grammars/math.cfg', 'r') as file:
grammar_str = file.read()
def validate(text):
    """Parse *text* (spoken arithmetic) against the math CFG and evaluate it.

    The grammar is character based, so the text is parsed as a list of
    characters. Only the first parse tree is considered.

    Returns:
        (valid, math_form, answer) where *valid* says whether the text is a
        well formed math question, *math_form* is the symbolic expression
        (e.g. '2+2') and *answer* its evaluated value. The last two are
        None when the text does not parse.
    """
    grammar = nltk.CFG.fromstring(grammar_str)
    parser = nltk.ChartParser(grammar)
    valid = False
    answer = math_form = None
    for tree in parser.parse(list(text)):
        # Subtree 4 holds the arithmetic phrase; join its leaves back into
        # one string (the grammar works on single characters).
        phrase = ''.join(tree[4].leaves())
        # Rewrite the spoken operators into their math symbols, in the same
        # order as before (the words do not overlap, so order is safe).
        for spoken, symbol in ((PLUS, '+'), (MUL, '*'), (DIV, '/'),
                               (MIN, '-'), (OPENB, '('), (CLOSEB, ')')):
            phrase = phrase.replace(spoken, symbol)
        math_form = phrase
        # HACK: eval() is acceptable only because the CFG restricts the
        # input alphabet; never reuse this on arbitrary user input.
        answer = eval(phrase)
        valid = True
        # Only the first parse is needed.
        break
    return (valid, math_form, answer)
def print_validity(text, state, marker=None):
    """Print a one-line verdict for *text*.

    *state* is 'Valid' or 'Invalid'; *marker* is an optional tag printed
    just before the state.
    """
    tag = '' if marker is None else marker + ' '
    print('Input text: ' + text + ' --> ' + tag + state + ' Math question ')
# Driver: run every line of the test input file through the validator and
# print a verdict (plus expression and answer for the valid ones).
lines = [line.rstrip('\n') for line in open('../test/inputs/math')]
for line in lines:
    print('##################################################################\n')
    invalid_marker = '??????????'
    try:
        result = validate(line)
        if result[0]:
            print_validity(line, "Valid")
            print('    math form : ' + result[1])
            print('    answer    : ' + str(result[2]))
        else:
            print_validity(line, "Invalid", invalid_marker)
    except ValueError as ve:
        # nltk raises ValueError for tokens outside the grammar's alphabet.
        print(ve)
        print_validity(line, "Invalid", invalid_marker)
        continue
| HarishAtGitHub/Sentence-Understanding---Natural-Language-Understanding | sentance-understanding/core/understander/math.py | Python | gpl-3.0 | 1,682 |
class AbortAction(RuntimeError):
    """Raised to abort the current action; carries no extra state."""
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE(review): auto-generated pkg-config context for the catkin build;
# regenerated on every build -- change the template, not this file.
CATKIN_PACKAGE_PREFIX = ""
# ';'-separated lists in the template; an empty string means no entries.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "r2_description"
PROJECT_SPACE_DIR = "/home/mkhuthir/learnROS/src/chessbot/install"
PROJECT_VERSION = "0.0.0"
# Package entry point: run with ``python -m bot``.
from bot.server import main

if __name__ == '__main__':
    # Guard so that importing bot.__main__ (e.g. by tooling) has no side
    # effect; ``python -m bot`` still runs, since the module executes as
    # '__main__' in that case.
    main()
| fedorlol/Tolyan | bot/__main__.py | Python | gpl-3.0 | 36 |
from rambutan3.check_args.annotation.NUMBER import NUMBER
def test():
    """NUMBER must reject strings and booleans but accept real numbers."""
    for rejected in ("abc", True):
        assert not NUMBER.matches(rejected)
    for accepted in (-1.234, -1, 0, 0.234, 1, 1.234, 2):
        assert NUMBER.matches(accepted)
| kevinarpe/kevinarpe-rambutan3 | tests/check_args/annotation/test_NUMBER.py | Python | gpl-3.0 | 362 |
#!/usr/bin/python3
#
# Copyright: Conor O'Callghan 2016
# Version: v1.1.3
#
# Please feel free to fork this project, modify the code and improve
# it on the github repo https://github.com/brioscaibriste/iarnrod
#
# Powered by TfL Open Data
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import json
import sys
import tempfile
import time
import os
from urllib.request import urlopen
'''
ParseArgs
A simple function to parse the command line arguments passed to the function.
The function does very little sanitisation on the input variables. The
argument passed is then returned from the function.
'''
def ParseArgs():
    """Parse and validate the --line command line argument.

    Prints a usage message and exits with status 1 when the argument is
    missing or names an unknown line.

    Returns:
        The validated line name, upper-cased (e.g. 'DISTRICT').
    """
    # Parse our command line argument for the line name
    parser = argparse.ArgumentParser()
    parser.add_argument('--line', dest='LineName',
                        help='Specify the London line you want to report on')
    args = parser.parse_args()

    # Check if the value is blank
    Line = (args.LineName)
    if not Line:
        print("\nError, you must specify a line name! e.g. --line district\n")
        sys.exit(1)

    # Convert the line name to lower case for easy comparison
    Line = Line.lower()

    # If the line isn't in the line list, fail badly
    if Line not in ('district', 'circle', 'victoria', 'central', 'northern',
                    'bakerloo', 'hammersmith-city', 'jubilee', 'metropolitan',
                    'piccadilly', 'waterloo-city', 'dlr',):
        print("\nError, you have specified " + Line + " as your line. You must specify one of the following: "
              "\n\tDistrict"
              "\n\tCircle"
              "\n\tVictoria"
              "\n\tCentral"
              "\n\tNorthern"
              "\n\tPiccadilly"
              "\n\tBakerloo"
              "\n\thammersmith-city"
              "\n\twaterloo-city"
              "\n\tDLR"
              "\n\tMetropolitan"
              "\n\tJubilee\n")
        sys.exit(1)

    # Convert the tube line back to upper case for nice display
    Line = Line.upper()
    return Line
'''
RetrieveTFLData
Inputs:
Line - Which line to retrieve information on
Run - Should the data retrieval be run or should the cache file be used
SFileName - The file in which to store the line status cache
This function takes the Line variable (a name of a Transport For London line
name) and polls the TFL API. The function then returns the current line
status for the specified line.
'''
def RetrieveTFLData(Line, Run, SFileName):
    """Return the current status of *Line*, from TfL or the local cache.

    Args:
        Line: TfL line name, e.g. 'DISTRICT'.
        Run: truthy to poll the TfL API (and refresh the cache file),
            falsy to read the previously cached status instead.
        SFileName: path of the line status cache file.

    Returns:
        The status description string, e.g. 'Good Service'.
    """
    # TFL Unified API URL
    TFLDataURL = ("https://api.tfl.gov.uk/Line/" + Line +
                  "/Status?detail=False&app_id=&app_key=")

    if Run:
        # Fetch and decode the JSON status document. The previous code
        # called the non-existent .readall() and fell back to an undefined
        # die() helper; .read() is the correct response API.
        try:
            RawData = urlopen(TFLDataURL).read().decode('utf8')
        except Exception as err:
            sys.exit("Error, failed to retrieve the data from the TFL "
                     "website: {0}".format(err))
        TFLData = json.loads(RawData)

        # Sanitize the data to get the line status
        Scratch = (TFLData[0]['lineStatuses'])
        LineStatusData = (Scratch[0]['statusSeverityDescription'])

        # Cache the status in a file
        with open(SFileName, 'w+') as SFile:
            SFile.write(LineStatusData)
    else:
        # Use the cached status from the last successful poll.
        with open(SFileName, 'r+') as SFile:
            LineStatusData = SFile.read()

    return LineStatusData
'''
Throttle
Inputs
PollIntervalMinutes - Polling interval in minutes
Throttle - Should we throttle the connection or not?
TFileName - The file where the timestamp for throttling usage is stored
This function is used to determine whether or not the next run of the retrieval of data should run.
It retrieves the previously run time from a file in /tmp if it exists, if the file does not exist
the run status will return as 1 and the current time stamp will be written into a new file.
If throttling is disabled, the file will be removed from /tmp and run will be set to 1.
'''
def Throttle(PollIntervalMinutes, Throttling, TFileName):
    """Decide whether the data retrieval should run on this invocation.

    When *Throttling* is the string "True", a timestamp stored in
    *TFileName* limits runs to once per *PollIntervalMinutes*; the very
    first call (no timestamp file yet) always runs. When throttling is
    disabled the timestamp file is removed and every call runs.

    Returns 1 when the retrieval should run, 0 otherwise.
    """
    if Throttling != "True":
        # Throttling disabled: drop any stale timestamp file and run.
        try:
            os.remove(TFileName)
        except OSError:
            pass
        return 1

    # Current epoch time
    now_stamp = int(time.time())

    if os.path.isfile(TFileName):
        # Read the previous run time and work out how long ago it was.
        with open(TFileName, 'r+') as stamp_file:
            elapsed = now_stamp - int(stamp_file.read())
    else:
        # No record yet: store the current time and force the first run.
        with open(TFileName, 'w') as stamp_file:
            stamp_file.write(str(now_stamp))
        elapsed = 1000000

    if elapsed < (PollIntervalMinutes * 60):
        # Too soon since the last run.
        return 0

    # Due to run: refresh the stored timestamp first.
    with open(TFileName, 'w') as stamp_file:
        stamp_file.write(str(now_stamp))
    return 1
| brioscaibriste/iarnrod | coire.py | Python | gpl-3.0 | 6,003 |
#!/usr/bin/python
# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
short_description: Manage HPE StoreServ 3PAR CPG
author:
- Farhan Nomani (@farhan7500)
- Gautham P Hegde (@gautamphegde)
description:
- Create and delete CPG on HPE 3PAR.
module: ss_3par_cpg
options:
cpg_name:
description:
- Name of the CPG.
required: true
disk_type:
choices:
- FC
- NL
- SSD
description:
- Specifies that physical disks must have the specified device type.
domain:
description:
- Specifies the name of the domain in which the object will reside.
growth_increment:
description:
- Specifies the growth increment(in MiB, GiB or TiB) the amount of logical disk storage
created on each auto-grow operation.
growth_limit:
description:
- Specifies that the autogrow operation is limited to the specified
storage amount that sets the growth limit(in MiB, GiB or TiB).
growth_warning:
description:
- Specifies that the threshold(in MiB, GiB or TiB) of used logical disk space when exceeded
results in a warning alert.
high_availability:
choices:
- PORT
- CAGE
- MAG
description:
- Specifies that the layout must support the failure of one port pair,
one cage, or one magazine.
raid_type:
choices:
- R0
- R1
- R5
- R6
description:
- Specifies the RAID type for the logical disk.
set_size:
description:
- Specifies the set size in the number of chunklets.
state:
choices:
- present
- absent
description:
- Whether the specified CPG should exist or not.
required: true
secure:
description:
- Specifies whether the certificate needs to be validated while communicating.
type: bool
default: no
extends_documentation_fragment: hpe3par
version_added: 2.8
'''
EXAMPLES = r'''
- name: Create CPG sample_cpg
ss_3par_cpg:
storage_system_ip: 10.10.10.1
storage_system_username: username
storage_system_password: password
state: present
cpg_name: sample_cpg
domain: sample_domain
growth_increment: 32000 MiB
growth_limit: 64000 MiB
growth_warning: 48000 MiB
raid_type: R6
set_size: 8
high_availability: MAG
disk_type: FC
secure: no
- name: Delete CPG sample_cpg
ss_3par_cpg:
storage_system_ip: 10.10.10.1
storage_system_username: username
storage_system_password: password
state: absent
cpg_name: sample_cpg
secure: no
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.storage.hpe3par import hpe3par
try:
from hpe3par_sdk import client
from hpe3parclient import exceptions
HAS_3PARCLIENT = True
except ImportError:
HAS_3PARCLIENT = False
def validate_set_size(raid_type, set_size):
    """Return True when *set_size* is a legal set size for *raid_type*."""
    if not raid_type:
        return False
    allowed_sizes = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes']
    return set_size in allowed_sizes
def cpg_ldlayout_map(ldlayout_dict):
    """Translate symbolic RAID type and HA names into hpe3par_sdk constants.

    Mutates and returns *ldlayout_dict*; falsy/None entries are left as-is.
    """
    raid = ldlayout_dict['RAIDType']
    if raid is not None and raid:
        ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[raid]['raid_value']
    ha = ldlayout_dict['HA']
    if ha is not None and ha:
        ldlayout_dict['HA'] = getattr(client.HPE3ParClient, ha)
    return ldlayout_dict
def create_cpg(
        client_obj,
        cpg_name,
        domain,
        growth_increment,
        growth_limit,
        growth_warning,
        raid_type,
        set_size,
        high_availability,
        disk_type):
    """Create the CPG *cpg_name* on the array if it does not exist.

    Returns a (return_status, changed, msg) tuple as expected by main().
    NOTE(review): when raid_type is None, validate_set_size returns False
    and creation is refused -- confirm raid_type is effectively mandatory.
    """
    try:
        if not validate_set_size(raid_type, set_size):
            return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type))
        if not client_obj.cpgExists(cpg_name):

            # Build the logical-disk layout from the symbolic parameters.
            ld_layout = dict()
            disk_patterns = []
            if disk_type:
                disk_type = getattr(client.HPE3ParClient, disk_type)
                disk_patterns = [{'diskType': disk_type}]
            ld_layout = {
                'RAIDType': raid_type,
                'setSize': set_size,
                'HA': high_availability,
                'diskPatterns': disk_patterns}
            ld_layout = cpg_ldlayout_map(ld_layout)
            # Growth values come in as '<n> MiB|GiB|TiB' strings; convert
            # them to MiB numbers for the WSAPI.
            if growth_increment is not None:
                growth_increment = hpe3par.convert_to_binary_multiple(
                    growth_increment)
            if growth_limit is not None:
                growth_limit = hpe3par.convert_to_binary_multiple(
                    growth_limit)
            if growth_warning is not None:
                growth_warning = hpe3par.convert_to_binary_multiple(
                    growth_warning)
            optional = {
                'domain': domain,
                'growthIncrementMiB': growth_increment,
                'growthLimitMiB': growth_limit,
                'usedLDWarningAlertMiB': growth_warning,
                'LDLayout': ld_layout}
            client_obj.createCPG(cpg_name, optional)
        else:
            # Idempotent success: nothing changed.
            return (True, False, "CPG already present")
    except exceptions.ClientException as e:
        return (False, False, "CPG creation failed | %s" % (e))
    return (True, True, "Created CPG %s successfully." % cpg_name)
def delete_cpg(
        client_obj,
        cpg_name):
    """Delete the CPG *cpg_name* if it exists.

    Returns a (return_status, changed, msg) tuple as expected by main().
    """
    try:
        if client_obj.cpgExists(cpg_name):
            client_obj.deleteCPG(cpg_name)
        else:
            # Idempotent success: nothing to remove.
            return (True, False, "CPG does not exist")
    except exceptions.ClientException as e:
        return (False, False, "CPG delete failed | %s" % e)
    return (True, True, "Deleted CPG %s successfully." % cpg_name)
def main():
    """Ansible module entry point: create or delete a 3PAR CPG.

    Reads the module parameters, opens a WSAPI session and dispatches to
    create_cpg/delete_cpg, reporting the result via exit_json/fail_json.
    """
    module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(),
                           required_together=[['raid_type', 'set_size']])
    if not HAS_3PARCLIENT:
        module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)')

    if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31:
        module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters")

    storage_system_ip = module.params["storage_system_ip"]
    storage_system_username = module.params["storage_system_username"]
    storage_system_password = module.params["storage_system_password"]
    cpg_name = module.params["cpg_name"]
    domain = module.params["domain"]
    growth_increment = module.params["growth_increment"]
    growth_limit = module.params["growth_limit"]
    growth_warning = module.params["growth_warning"]
    raid_type = module.params["raid_type"]
    set_size = module.params["set_size"]
    high_availability = module.params["high_availability"]
    disk_type = module.params["disk_type"]
    secure = module.params["secure"]

    wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip
    try:
        client_obj = client.HPE3ParClient(wsapi_url, secure)
    except exceptions.SSLCertFailed:
        module.fail_json(msg="SSL Certificate Failed")
    except exceptions.ConnectionError:
        module.fail_json(msg="Connection Error")
    except exceptions.UnsupportedVersion:
        module.fail_json(msg="Unsupported WSAPI version")
    except Exception as e:
        module.fail_json(msg="Initializing client failed. %s" % e)

    if storage_system_username is None or storage_system_password is None:
        module.fail_json(msg="Storage system username or password is None")
    if cpg_name is None:
        module.fail_json(msg="CPG Name is None")

    # States (the argument spec restricts state to these two values)
    if module.params["state"] == "present":
        try:
            client_obj.login(storage_system_username, storage_system_password)
            return_status, changed, msg = create_cpg(
                client_obj,
                cpg_name,
                domain,
                growth_increment,
                growth_limit,
                growth_warning,
                raid_type,
                set_size,
                high_availability,
                disk_type
            )
        except Exception as e:
            module.fail_json(msg="CPG create failed | %s" % e)
        finally:
            client_obj.logout()
    elif module.params["state"] == "absent":
        try:
            client_obj.login(storage_system_username, storage_system_password)
            return_status, changed, msg = delete_cpg(
                client_obj,
                cpg_name
            )
        except Exception as e:
            # Fixed copy-paste bug: this branch deletes, it does not create.
            module.fail_json(msg="CPG delete failed | %s" % e)
        finally:
            client_obj.logout()

    if return_status:
        module.exit_json(changed=changed, msg=msg)
    else:
        module.fail_json(msg=msg)
| brandond/ansible | lib/ansible/modules/storage/hpe3par/ss_3par_cpg.py | Python | gpl-3.0 | 9,304 |
# painttheworld/game.py
#
# Represent and track the current game state.
import numpy as np
import datetime
import math
from painttheworld import constants
from painttheworld.constants import m1, m2, m3, m4, p1, p2, p3
''' Note that Latitude is North/South and Longitude is West/East'''
class GameState:
    """Keeps track of which teams have colored which areas of the map.

    The map is a grid that's represented by a 2D array containing values
    corresponding to which team controls that area/block of the map. Clients
    perform the necessary GPS -> grid coordinate calculations and send their
    game state updates in grid coordinates via the update() method.

    TODO: might have to add coordinate transformations to our methods since
    (0,0) is technically the center of our grid.
    """

    def __init__(self, radius, gridsize):
        """Create a GameState object.

        Args:
            radius: the number of grid blocks from the center block in the
                vertical/horizontal direction.
            gridsize: The dimensions of a grid tile, in feet. This should be the
                edge length
        """
        size = 2*radius + 1
        self.grid = np.zeros((size, size), dtype=np.int8)
        self.radius = radius
        self.gridsize = gridsize
        self.user_count = 0
        self.user_coords = []
        # One per-user snapshot grid so update_user can compute deltas.
        self.user_grid = []
        self.user_grid.extend([np.zeros((size, size), dtype=np.int8) for i in range(constants.lobby_size)])

    def start_game(self):
        """Initialize the starting position of the grid.

        Averages the players' latitudes/longitudes to pick the grid center
        (approximate; degrees are not equal distances everywhere) and sets
        the start time 3 seconds from now and the end time 3 minutes later.
        """
        self.center_coord = np.mean(self.user_coords, axis=0)
        # NOTE(review): this rebinds the *method* name conversion_rates on
        # the instance to its result -- works once, but fragile.
        self.conversion_rates = self.conversion_rates(self.center_coord)
        self.start_time = datetime.datetime.now() + datetime.timedelta(seconds=3)
        self.end_time = self.start_time + datetime.timedelta(minutes=3)

    def update(self, coord, team):
        """Update the game state array."""
        x, y = coord
        self.grid[x][y] = team

    def project(self, lon, lat):
        """ Casts a GPS coordinate onto the grid, which has it's central
        locations defined by center_coord.
        """
        # Great-circle distances from the center, along each axis
        # (latitude is North/South, longitude is West/East).
        vert = GameState.haversine(self.center_coord[1], self.center_coord[0], self.center_coord[1], lat) # vary latitude only
        horiz = GameState.haversine(self.center_coord[1], self.center_coord[0], lon, self.center_coord[0])
        """ Vectorizes the latitude. The degree ranges from -90 to 90.
        This latitude conversion doesn't handle poles.
        I'm not sure how to handle you playing the game at the north and south pole.
        """
        if lat > self.center_coord[0]:
            vert = -vert
        """ Vectorizes the longitude. The degree ranges from -180 to 180.
        There's three cases:
            1. They're both in the same hemisphere (east/west)
            2. They cross over the 0 degree line
            3. They cross over the 180 degree line
        Case (1):
        Check for case 1 by ensuring that the signs are identical.
        If the longitude of the location is less than the longitude of the cenral
        location, that means that we need to move left in the array.
        We change the sign to be negative.
        Case (2) + (3):
        There's two cases here, where the signs are differing.
        To determine which line we're crossing, the absolute value of the difference
        in Longitudes is taken. If the difference >180,
        that implies that the 180 degree is being crossed. Otherwise, it's the 0 degree line.
        Case (2):
        In case (2), if the longitude of the central point is negative, the distance must be positive.
        If the longitude of the central point is positive, the distance must be negative.
        Case (3):
        In case (3), if the longitude of the central point is negative, the distance must be negative.
        If the longitude of the central point is positive, the distance must be positive.
        """
        # NOTE(review): the 'Case 2/3' checks below are plain 'if', not
        # 'elif' of Case 1, so they also run for same-hemisphere points and
        # can flip the sign twice -- verify against the intent above.
        if np.sign(self.center_coord[1]) == np.sign(lon): # Case 1
            if lon > self.center_coord[1]:
                horiz = -horiz
        if math.fabs(self.center_coord[1] - lon) < 180: # Case 2
            if self.center_coord[1] >= 0:
                horiz = -horiz
        elif self.center_coord[1] < 0: # Case 3
            horiz = -horiz
        # Convert km distances to whole grid tiles (gridsize is in meters
        # here -- presumably; confirm units against constants.gridsize).
        horiz = math.floor(horiz * 1000 / constants.gridsize)
        vert = math.floor(vert * 1000 / constants.gridsize)
        return np.add((self.radius + 1, self.radius + 1), (horiz, vert))

    def add_user(self, lat, lon):
        """ Adds a user and their starting location to the grid.

        Returns the user id number assosciated with that user, or -1 when
        the lobby is full. When the lobby fills, the game is started.
        """
        if self.user_count < constants.lobby_size:
            self.user_count += 1
            self.user_coords.append((float(lat), float(lon)))
            if self.user_count == constants.lobby_size:
                self.start_game()
            return self.user_count-1
        else:
            return -1

    def update_user(self, id, lon, lat):
        """Record user *id*'s position; return (delta-grid, out_of_bounds).

        Raises RuntimeError when called before the game starts or after it
        ends.
        """
        currtime = datetime.datetime.now()
        if self.start_time < currtime < self.end_time:
            gridloc = self.project(lon, lat)
            out_of_bounds = not self.inside_grid(gridloc)
            if not out_of_bounds:
                self.grid[gridloc[0]][gridloc[1]] = constants.Team.findTeam(id)
            # Send only the tiles that changed since this user's last poll.
            returngrid = self.diff(self.user_grid[id], self.grid)
            np.copyto(self.user_grid[id], self.grid)
            return returngrid, out_of_bounds
        else:
            if self.start_time > currtime:
                raise RuntimeError('Game hasn\'t started.')
            else:
                raise RuntimeError('Game over.')

    def inside_grid(self, coord):
        """Return True when *coord* lies within the grid bounds (inclusive)."""
        lowest_coord = (0,0)
        highest_coord = (constants.radius*2 + 1, constants.radius*2 + 1)
        lower_bound = np.all(np.greater_equal(coord, lowest_coord))
        upper_bound = np.all(np.less_equal(coord, highest_coord))
        return lower_bound and upper_bound

    @staticmethod
    def diff(a, b):
        """Calculate the deltas of two GameState objects.

        a is the "older" grid array
        b is the "updated" grid array

        Returns:
            List of coordinate/team pairings of the form ((x,y), team_color).
        """
        diff = np.absolute(a - b)
        coord = np.nonzero(diff)
        val = diff[coord]
        coord = map(tuple, np.transpose(coord)) # turn coord into (x,y) tuples
        return list(zip(coord, val))

    @staticmethod
    def conversion_rates(coord):
        """Calculates the conversion rate for 1 degree of longitude to a variety
        of measurements, returned in a dict.

        Args:
            coord: a tuple (longitude, latitude)
        Returns:
            Dict of meter/feet/mile lengths of one degree of latitude and
            longitude at coord's latitude (series-expansion approximation).
        """
        latitude = math.radians(coord[1])
        # NOTE(review): 'dict' shadows the builtin inside this method.
        dict = {}
        latlen = m1 + ( m2 * math.cos(2 * latitude) + \
                    m3 * math.cos(4 * latitude) + \
                    m4 * math.cos(6 * latitude) \
                  )
        longlen = (p1 * math.cos(1 * latitude)) + \
                    (p2 * math.cos(3 * latitude)) + \
                    (p3 * math.cos(5 * latitude))
        dict['lat_meters'] = latlen
        dict['lat_feet'] = latlen * 3.28083333
        dict['lat_miles'] = dict['lat_feet'] / 5280
        dict['long_meters'] = longlen
        dict['long_feet'] = longlen * 3.28083333
        dict['long_miles'] = dict['long_feet'] / 5280
        return dict

    @staticmethod
    def haversine(lon1, lat1, lon2, lat2):
        """
        Calculate the great circle distance (km) between two points
        on the earth (specified in decimal degrees)

        Source code from: http://stackoverflow.com/questions/15736995/how-can-i-quickly-estimate-the-distance-between-two-latitude-longitude-points
        """
        # convert decimal degrees to radians
        lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])
        # haversine formula
        dlon = lon2 - lon1
        dlat = lat2 - lat1
        a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
        c = 2 * math.asin(math.sqrt(a))
        km = 6367 * c
        return km
| richardmin97/PaintTheWorld | Server/painttheworld/game.py | Python | gpl-3.0 | 8,968 |
from functools import reduce
from itertools import chain, combinations, product, permutations
# This class is used to represent and examine algebras on atom tables.
# It is intended to be used for nonassociative algebras, but this is not assumed.
class AtomicAlgebra:
    """Represents and examines an algebra given by an atom table.

    Intended for nonassociative algebras, but this is not assumed: the
    is_NA / is_WA / is_SA / is_RA methods test which axioms actually hold.
    Atoms are single characters 'a', 'b', 'c', ...; an element of the
    algebra is a set of atoms, interpreted as their union.
    """
    # Create an algebra from a table of atoms, which gives compositions, and a converse structure.
    # An atom table is a list of lists, with each entry a set of atoms
    # (as distinct from a list); the set is interpreted as a union.
    # The converse structure is a list of 2-tuples of atoms.
    # If 'a' is converse to 'b', write as ('a','b').
    # If 'a' is symmetric, write as ('a', 'a').
    # Can also give converses as a dictionary.
    # Algebra may not necessarily meet all the axioms.
    def __init__(self, atom_table, converse = None):
        if type(atom_table) == str:
            atom_table = self._string_to_atom_table(atom_table)
        self.n_atoms = len(atom_table[0])
        self.atoms = [set([chr(i + 97)]) for i in range(self.n_atoms)]
        self.atom_table = atom_table
        # If no converses given assume all atoms are symmetric.
        # Bug fix: the original assigned this default to self.converse, which
        # both shadowed the converse() method and left the local `converse`
        # as None, so converse_pairs_to_dict(None) raised TypeError below.
        if converse is None:
            converse = [(x, x) for x in [chr(i + 97) for i in range(self.n_atoms)]]
        # Can give converses as a dictionary on atoms...
        if type(converse) is dict:
            self.converse_pairs = self.converse_dict_to_pairs(converse)
            self.converse_dict = converse
        # ... or as a list of tuples.
        else:
            self.converse_pairs = converse
            self.converse_dict = self.converse_pairs_to_dict(converse)
        # Set up the basic properties of the algebra.
        self._non_identity_atoms = None
        self.top = reduce(lambda x, y : x | y, self.atoms)
        self.zero = set()
        # The elements are the power set of the atoms.
        self.elements = [combinations(list(self.top), n) for n in range(self.n_atoms + 1)]
        self.elements = list(chain.from_iterable(self.elements))
        self.elements = [set(element) for element in self.elements]
        self.n_elements = 2**self.n_atoms
        self.n_non_zero_elements = self.n_elements - 1
        self.symmetric_atoms = [x[0] for x in self.converse_pairs if x[0] == x[1]]
        self.non_symmetric_pairs = [x for x in self.converse_pairs if x[0] != x[1]]
        self._cyclePartition = self.cycle_partition(self.converse_dict, self.n_atoms)
        self._identity = None
        self._semigroup = None
        # Properties are computed lazily and cached in these slots.
        self._is_NA = None
        self._satisfies_WA_axiom = None
        self._is_WA = None
        self._satisfies_SA_axiom = None
        self._is_SA = None
        self._is_associative = None
        self._is_RA = None

    # A human-readable description of each relation algebra axiom.
    AXIOMS = {
        "R01": "+-commutativity: x + y = y + x",
        "R02": "+-associativity: x + (y + z) = (x + y) + z",
        "R03": "Huntington's axiom: -(-x + -y) + -(-x + y) = x",
        "R04": ";-associativity: x;(y;z) = (x;y);z",
        "R05": ";-distributivity: (x + y);z = x;z + y;z",
        "R06": "identity law: x;1' = x",
        "R07": "converse-involution: con(con(x)) = x",
        "R08": "converse-distributivity: con(x + y) = con(x) + con(y)",
        "R09": "converse-involutive distributivity: con(x;y) = con(y);con(x)",
        "R10": "Tarski/De Morgan axiom: con(x); -(x;y) + -y = y",
        "WA" : "((id . x) . top) . top = (id . x) . (top . top)",
        "SA" : "(x . top) . top = x . (top . top)"
    }

    # Given an atom table as a string, convert it to a matrix (list of lists).
    # Example input: "[[a,b],[b,a+b]]"; entries are atoms joined by '+',
    # with '0' denoting the empty set.
    @staticmethod
    def _string_to_atom_table(matrix_string):
        M0 = matrix_string.replace(" ", "")       # drop whitespace
        M1 = M0.strip()[1:-1]                     # strip outer brackets
        M2 = M1.strip()[1:-1]                     # strip first/last row brackets
        M3 = [line.split(',') for line in M2.split('],[')]
        M4 = [[set(entry.split("+"))-set(['0']) for entry in line] for line in M3]
        return M4

    # Converses can be given as a list of tuples [('a', 'a'), ('b', 'c')] or a
    # dictionary on atoms {'a': 'a', 'b': 'c', 'c': 'b'}. The following
    # methods convert between the two.
    @staticmethod
    def converse_pairs_to_dict(converse_pairs):
        converse_dict = dict()
        for converse_pair in converse_pairs:
            if converse_pair[0] == converse_pair[1]: # symmetric atom
                converse_dict[converse_pair[0]] = converse_pair[0]
            else: # non-symmetric atoms
                converse_dict[converse_pair[0]] = converse_pair[1]
                converse_dict[converse_pair[1]] = converse_pair[0]
        return converse_dict

    @staticmethod
    def converse_dict_to_pairs(converse_dict):
        converse_pairs = []
        for pair in converse_dict.items():
            if pair not in converse_pairs and pair[::-1] not in converse_pairs:
                converse_pairs.append(pair)
        return converse_pairs

    # Given a triple and a converse structure, generate the cycle including that triple.
    # This is an implementation of the relation algebra concept of a Peircean transform.
    # Cycle generated by (x,y,z) is:
    # [ (x,y,z), (con(x),z,y), (y,con(z),con(x)),
    #   (con(y),con(x),con(z)),(con(z),x,con(y)), (z,con(y),x) ]
    # A triple in a cycle is consistent if and only if all triples in the cycle are consistent.
    @staticmethod
    def cycle(triple, converse_dict):
        if type(converse_dict) is not dict:
            converse_dict = AtomicAlgebra.converse_pairs_to_dict(converse_dict)
        x, y, z = triple
        cycle = []
        cycle.append(triple)
        cycle.append((converse_dict[x], z, y))
        cycle.append((y, converse_dict[z], converse_dict[x]))
        cycle.append((converse_dict[y], converse_dict[x], converse_dict[z]))
        cycle.append((converse_dict[z], x, converse_dict[y]))
        cycle.append((z, converse_dict[y], x))
        cycle.sort() # Prevents duplicates when using cycle_partition
        return list(set(cycle)) # Remove duplicates.

    # Given a converse structure, partition the triples of atoms into cycles.
    @staticmethod
    def cycle_partition(converse_dict, n_atoms):
        if type(converse_dict) is not dict:
            converse_dict = AtomicAlgebra.converse_pairs_to_dict(converse_dict)
        atoms = [chr(i + 97) for i in range(n_atoms)]
        parts = []
        for triple in product(atoms, repeat = 3):
            cycle = AtomicAlgebra.cycle(triple, converse_dict)
            if cycle not in parts: parts.append(cycle)
        return parts

    # Give a human readable report on a list of failed axioms, eg. ["R01", "R02", "R07"].
    @staticmethod
    def report_failed_axioms(failed_axioms):
        if type(failed_axioms) is not list: failed_axioms = [failed_axioms]
        for axiom in failed_axioms:
            print("Fails axiom " + axiom + ": " + AtomicAlgebra.AXIOMS[axiom] + ".")

    # Through unions, we can extend any map between atoms to a map between
    # elements of algebras. For example, if 'a' -> 'b' and 'c' -> 'd', then
    # {'a', 'b'} -> {'c', 'd'}. Thus, every map between atoms uniquely defines
    # a map between elements. In practice we always define maps on atoms only.
    # We use the term "function" in reference to a map between elements.
    @staticmethod
    def atom_function(atom_map, element):
        if type(element) is str:
            return atom_map[element]
        else:
            return set([AtomicAlgebra.atom_function(atom_map, x) for x in element])

    # Turns a single atom 'a' into a set(['a']).
    @staticmethod
    def make_set(x):
        if type(x) == str:
            x = set([x])
        if type(x) != type(set()):
            raise TypeError('An element of the algebra needs to be either a set of atoms or a string representing a single atom.')
        return x

    # Check if a map between atom structures preserves composition.
    # This is a necessary, but not sufficient condition, for an atom_map or
    # atom_function to be an isomorphism.
    def preserves_composition(self, other, atom_map):
        preserves_composition = True
        for x, y in product(self.atoms, repeat = 2):
            if AtomicAlgebra.atom_function(atom_map, self.compose(x, y)) != other.compose(AtomicAlgebra.atom_function(atom_map, x), AtomicAlgebra.atom_function(atom_map, y)):
                preserves_composition = False
                break
        return preserves_composition

    # Checks if a given algebra is isomorphic to the instance being called from.
    # Can also return an isomorphism, if one exists.
    def is_isomorphic(self, other, return_isomorphism = False):
        # First we check that the algebras are the same size, and that the
        # number of atoms in the identity is the same.
        # These are necessary conditions for an isomorphism, so can save some time.
        if self.n_atoms != other.n_atoms: return False
        if len(self.identity) != len(other.identity): return False
        # Next we check that the converse pairs match in number and structure.
        # This is a necessary condition for isomorphism, so can save some time.
        if len(self.symmetric_atoms) != len(other.symmetric_atoms):
            return False
        # Enumerate all possible functions respecting converse.
        # First enumerate all possible ways to map symmetric atoms from
        # the first algebra to symmetric atoms from the second algebra.
        possible_symmetric_maps = []
        for perm in permutations(other.symmetric_atoms):
            possible_symmetric_maps.append(zip(self.symmetric_atoms, perm))
        possible_symmetric_maps = [list(p) for p in possible_symmetric_maps]
        # Now enumerate all possible ways to map converse pairs from the
        # first algebra to converse pairs from the second algebra.
        possible_converse_pair_maps = []
        for perm1 in list(product(*[[x,x[::-1]] for x in other.non_symmetric_pairs])):
            for perm2 in permutations(perm1):
                map = []
                pairing = zip(self.non_symmetric_pairs, perm2)
                for pair in pairing:
                    map.append((pair[0][0], pair[1][0]))
                    map.append((pair[0][1], pair[1][1]))
                possible_converse_pair_maps.append(map)
        # Now combine them to generate all maps respecting the converse structure.
        possible_isomorphisms = []
        for symmetric_map, converse_pair_map in product(possible_symmetric_maps, possible_converse_pair_maps):
            possible_isomorphisms.append(symmetric_map + converse_pair_map)
        possible_isomorphisms = [dict(x) for x in possible_isomorphisms]
        # We can reduce the search space by exploiting the fact that an
        # isomorphism will always map the identity of one algebra to the identity
        # of the target algebra. We generate all possible maps from atoms in the
        # identity of the first algebra to atoms in the identity of the second
        # algebra, and then restrict the possible_isomorphisms to those that
        # "agree" with one of the identity-preserving maps.
        other_identity_permutations = [p for p in permutations(list(other.identity))]
        possible_identity_maps = [dict((list(self.identity)[i], y[i])
                                       for i in range(len(self.identity)))
                                  for y in other_identity_permutations]
        possible_isomorphisms = [iso for iso in possible_isomorphisms
                                 if {k: iso[k] for k in list(self.identity)} in possible_identity_maps]
        # Now we search through the possible isomorphisms.
        # Our final search space includes only those that respect converse and
        # identity. We now need to search through these for maps that respect
        # composition. Break if an isomorphism is found, to save time.
        is_isomorphic = False
        for possible_isomorphism in possible_isomorphisms:
            if self.preserves_composition(other, possible_isomorphism):
                is_isomorphic = True
                isomorphism = possible_isomorphism
                break
        if is_isomorphic and return_isomorphism:
            return is_isomorphic, isomorphism
        else:
            return is_isomorphic

    # Define composition of atoms or sets of atoms using the atom table.
    # We allow for inputs of single atoms, but every element is properly
    # viewed as a set of atoms.
    def compose(self, x, y):
        x = self.make_set(x)
        y = self.make_set(y)
        # Composition with the 0 element
        if x == set() or y == set():
            output = set()
        else:
            output = set()
            for i, j in product(x, y):
                row_pos = ord(i) - 97
                col_pos = ord(j) - 97
                try:
                    output = output.union(self.atom_table[row_pos][col_pos])
                except IndexError:
                    # Bug fix: the original built this message as a bare
                    # string expression (a no-op) and silently swallowed
                    # the error; surface it instead.
                    raise IndexError(
                        "Out of bounds: composition " + str(x) + "*" + str(y)
                        + " contains a non-atomic element.")
        return output

    # Define intersection as set intersection.
    def intersection(self, x, y):
        x = self.make_set(x)
        y = self.make_set(y)
        return x.intersection(y)

    # Define union as set union.
    def union(self, x, y):
        x = self.make_set(x)
        y = self.make_set(y)
        return x.union(y)

    # Define converse using the converse dictionary we made earlier.
    def converse(self, x):
        x = self.make_set(x)
        return set([self.converse_dict[atom] for atom in x])

    # Define complement as set complement relative to the top element (set of all atoms).
    def complement(self, x):
        x = self.make_set(x)
        return self.top.difference(x)

    # Return the identity of an algebra if it exists, otherwise returns None
    # If the identity element is not already recorded, will run through all
    # elements and check for the identity property.
    @property
    def identity(self):
        if self._identity is None:
            for candidate_identity in self.elements:
                isId = True
                for atom in self.atoms:
                    if self.compose(candidate_identity, atom) != atom or self.compose(atom, candidate_identity) != atom:
                        isId = False
                        break
                if isId:
                    self._identity = candidate_identity
                    break
        return self._identity

    # All non-identity atoms.
    @property
    # Return a list of atoms which are not the identity atom.
    def non_identity_atoms(self):
        if self._non_identity_atoms is None:
            if self.identity is None:
                return self.atoms
            else:
                self._non_identity_atoms = [x for x in self.atoms if x != self.identity]
        return self._non_identity_atoms

    # Determines if the algebra generated by the atom table is a nonassociative algebra.
    # Due to the construction, not all axioms need to be checked.
    # Can control the amount of reporting done on failed axioms, if any.
    def is_NA(self, what_fails = False, report = False):
        if report:
            what_fails = True
        if self._is_NA is None or what_fails == True:
            self._is_NA = True
            failed_axioms = []
            # Axiom R01: +-commutativity: x + y = y + x
            #   - holds trivially for set union.
            # Axiom R02: +-associativity: x + (y + z) = (x + y) + z
            #   - holds trivially for set union.
            # Axiom R03: Huntington's axiom: -(-x + -y) + -(-x + y) = x
            for x,y in product(self.atoms, repeat = 2):
                first_term = self.complement(self.union(self.complement(x), self.complement(y)))
                second_term = self.complement(self.union(self.complement(x), y))
                if self.union(first_term, second_term) != x:
                    failed_axioms.append("R03")
                    break
            # Axiom R05: ;-distributivity: (x + y);z = x;z + y;z
            #   - holds by construction (composition is defined atomwise).
            # Axiom R06: identity law: x;1' = x
            if self.identity is None:
                failed_axioms.append("R06")
            # Axiom R07: converse-involution: con(con(x)) = x
            #   - should not fail if converse pairs are correctly defined.
            for x in self.atoms:
                if self.converse(self.converse(x)) != x:
                    failed_axioms.append("R07")
                    break
            # Axiom R08: converse-distributivity: con(x + y) = con(x) + con(y)
            for x,y in product(self.atoms, repeat = 2):
                if self.converse(self.union(x,y)) != self.union(self.converse(x), self.converse(y)):
                    # Bug fix: this failure was mislabelled "R09" originally.
                    failed_axioms.append("R08")
                    break
            # Axiom R09: converse-involutive distributivity: con(x;y) = con(y);con(x)
            for x,y in product(self.atoms, repeat = 2):
                if self.converse(self.compose(x,y)) != self.compose(self.converse(y), self.converse(x)):
                    failed_axioms.append("R09")
                    break
            # Axiom R10: Tarski/De Morgan axiom: con(x); -(x;y) + -y = y
            for x,y in product(self.atoms, repeat = 2):
                if self.union(self.compose(self.converse(x), self.complement(self.compose(x,y))), self.complement(y)) != self.complement(y):
                    failed_axioms.append("R10")
                    break
            if len(failed_axioms) > 0:
                self._is_NA = False
        if report:
            self.report_failed_axioms(failed_axioms)
            return self._is_NA
        elif what_fails and not report:
            return (self._is_NA, failed_axioms)
        else:
            return self._is_NA

    # Determines if the algebra generated by the atom table satisfies the weakly associative axiom.
    # Axiom WA: ((id . x) . top) . top = (id . x) . (top . top)
    @property
    def satisfies_WA_axiom(self):
        if self._satisfies_WA_axiom is None:
            if self.identity is None:
                self._satisfies_WA_axiom = False
            else:
                self._satisfies_WA_axiom = True
                for x in self.atoms:
                    LHS = self.compose(self.compose(
                        self.intersection(self.identity, x), self.top), self.top)
                    RHS = self.compose(self.compose(
                        self.intersection(self.identity, x), self.top), self.compose(self.top, self.top))
                    if LHS != RHS:
                        self._satisfies_WA_axiom = False
                        break
        return self._satisfies_WA_axiom

    # Determines if the algebra generated by the atom table is a weakly associative algebra.
    # The algebra must be a nonassociative algebra and satisfy the weakly associative axiom.
    def is_WA(self, what_fails = False, report = False):
        if report:
            what_fails = True
        if what_fails == True:
            self._is_WA = True
            failed_axioms = []
            failed_axioms.extend(self.is_NA(True,False)[1])
            if self.satisfies_WA_axiom == False:
                failed_axioms.append("WA")
            if len(failed_axioms) > 0:
                self._is_WA = False
        elif self._is_WA is None:
            self._is_WA = (self.is_NA() and self.satisfies_WA_axiom)
        if report:
            self.report_failed_axioms(failed_axioms)
            return self._is_WA
        elif what_fails and not report:
            return (self._is_WA, failed_axioms)
        else:
            return self._is_WA

    # Determines if the algebra generated by the atom table satisfies the semiassociative axiom.
    # Axiom SA: (x . top) . top = x . (top . top)
    @property
    def satisfies_SA_axiom(self):
        if self._satisfies_SA_axiom is None:
            self._satisfies_SA_axiom = True
            for x in self.atoms:
                if self.compose(self.compose(x, self.top), self.top) != self.compose(self.compose(x, self.top), self.compose(self.top, self.top)):
                    self._satisfies_SA_axiom = False
                    break
        return self._satisfies_SA_axiom

    # Determines if the algebra generated by the atom table is a semiassociative algebra.
    # The algebra must be a nonassociative algebra and satisfy the semiassociative axiom.
    def is_SA(self, what_fails = False, report = False):
        if report:
            what_fails = True
        if what_fails == True:
            self._is_SA = True
            failed_axioms = []
            failed_axioms.extend(self.is_WA(True,False)[1])
            if self.satisfies_SA_axiom == False:
                failed_axioms.append("SA")
            if len(failed_axioms) > 0:
                self._is_SA = False
        elif self._is_SA is None:
            self._is_SA = (self.is_NA() and self.satisfies_SA_axiom)
        if report:
            self.report_failed_axioms(failed_axioms)
            return self._is_SA
        elif what_fails and not report:
            return (self._is_SA, failed_axioms)
        else:
            return self._is_SA

    # Determines if the algebra generated by the atom table has an associative composition operation.
    # Axiom R04: ;-associativity: x;(y;z) = (x;y);z.
    @property
    def is_associative(self):
        if self._is_associative is None:
            self._is_associative = True
            # Associativity must hold on all elements, not just atoms.
            for i, j, k in product(self.elements, repeat = 3):
                if self.compose(self.compose(i,j), k) != self.compose(i, self.compose(j,k)):
                    self._is_associative = False
                    break
        return self._is_associative

    # Determines if the algebra generated by the atom table is a relation algebra.
    # Must be an associative nonassociative algebra.
    # If what_fails = True, will return a list of RA axioms that are not
    # satisfied by the algebra.
    # If report = True, a human-readable version of the failed axioms will
    # instead be printed.
    def is_RA(self, what_fails = False, report = False):
        if report:
            what_fails = True
        if what_fails == True:
            self._is_RA = True
            failed_axioms = []
            failed_axioms.extend(self.is_SA(True, False)[1])
            if self.is_associative == False:
                failed_axioms.append("R04")
            if len(failed_axioms) > 0:
                self._is_RA = False
        elif self._is_RA is None:
            self._is_RA = (self.is_NA() and self.is_associative)
        if report:
            self.report_failed_axioms(failed_axioms)
            return self._is_RA
        elif what_fails and not report:
            return (self._is_RA, failed_axioms)
        else:
            return self._is_RA
| mdneuzerling/AtomicAlgebra | AtomicAlgebra.py | Python | gpl-3.0 | 23,406 |
"""Routines related to PyPI, indexes"""
import sys
import os
import re
import mimetypes
import posixpath
from pip.log import logger
from pip.util import Inf, normalize_name, splitext, is_prerelease
from pip.exceptions import (DistributionNotFound, BestVersionAlreadyInstalled,
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.backwardcompat import urlparse, url2pathname
from pip.download import PipSession, url_to_path, path_to_url
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip._vendor import html5lib, requests, pkg_resources
from pip._vendor.requests.exceptions import SSLError
__all__ = ['PackageFinder']
DEFAULT_MIRROR_HOSTNAME = "last.pypi.python.org"
INSECURE_SCHEMES = {
"http": ["https"],
}
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
def __init__(self, find_links, index_urls,
use_wheel=True, allow_external=[], allow_unverified=[],
allow_all_external=False, allow_all_prereleases=False,
process_dependency_links=False, session=None):
self.find_links = find_links
self.index_urls = index_urls
self.dependency_links = []
self.cache = PageCache()
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.use_wheel = use_wheel
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_unverified = set(
normalize_name(n) for n in allow_unverified
)
# Anything that is allowed unverified is also allowed external
self.allow_external |= self.allow_unverified
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_unverified = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
self._have_warned_dependency_links = False
# The Session we'll use to make requests
self.session = session or PipSession()
def add_dependency_links(self, links):
## FIXME: this shouldn't be global list this, it should only
## apply to requirements of the package that specifies the
## dependency_links value
## FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
if not self._have_warned_dependency_links:
logger.deprecated(
"1.6",
"Dependency Links processing has been deprecated with an "
"accelerated time schedule and will be removed in pip 1.6",
)
self._have_warned_dependency_links = True
self.dependency_links.extend(links)
def _sort_locations(self, locations):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
is_find_link = url in self.find_links
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if is_find_link and os.path.isdir(path):
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url and os.path.isdir(path):
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _link_sort_key(self, link_tuple):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
parsed_version, link, _ = link_tuple
if self.use_wheel:
support_num = len(supported_tags)
if link == INSTALLED_VERSION:
pri = 1
elif link.ext == wheel_ext:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel("%s is not a supported wheel for this platform. It can't be sorted." % wheel.filename)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (parsed_version, pri)
else:
return parsed_version
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the existing ordering as secondary.
See the docstring for `_link_sort_key` for details.
This function is isolated for easier unit testing.
"""
return sorted(applicable_versions, key=self._link_sort_key, reverse=True)
    def find_requirement(self, req, upgrade):
        """Locate the best candidate link satisfying *req*.

        Searches the configured indexes, find-links and dependency links,
        filters candidates by the requirement's specifiers and pre-release
        policy, then orders them (see `_link_sort_key`).

        Returns None when an already-installed version satisfies the
        requirement and no upgrade was requested; otherwise returns the
        selected link. Raises DistributionNotFound when nothing matches,
        or BestVersionAlreadyInstalled when the installed version is best.
        """
        def mkurl_pypi_url(url):
            loc = posixpath.join(url, url_name)
            # For maximum compatibility with easy_install, ensure the path
            # ends in a trailing slash. Although this isn't in the spec
            # (and PyPI can handle it without the slash) some other index
            # implementations might break if they relied on easy_install's behavior.
            if not loc.endswith('/'):
                loc = loc + '/'
            return loc
        url_name = req.url_name
        # Only check main index if index URL is given:
        main_index_url = None
        if self.index_urls:
            # Check that we have the url_name correctly spelled:
            main_index_url = Link(mkurl_pypi_url(self.index_urls[0]), trusted=True)
            # This will also cache the page, so it's okay that we get it again later:
            page = self._get_page(main_index_url, req)
            if page is None:
                url_name = self._find_url_name(Link(self.index_urls[0], trusted=True), url_name, req) or req.url_name
        # Build the list of index/find-links locations to search.
        if url_name is not None:
            locations = [
                mkurl_pypi_url(url)
                for url in self.index_urls] + self.find_links
        else:
            locations = list(self.find_links)
        # Pinned requirements also get per-version index pages, searched first.
        for version in req.absolute_versions:
            if url_name is not None and main_index_url is not None:
                locations = [
                    posixpath.join(main_index_url.url, version)] + locations
        file_locations, url_locations = self._sort_locations(locations)
        _flocations, _ulocations = self._sort_locations(self.dependency_links)
        file_locations.extend(_flocations)
        # We trust every url that the user has given us whether it was given
        #   via --index-url or --find-links
        locations = [Link(url, trusted=True) for url in url_locations]
        # We explicitly do not trust links that came from dependency_links
        locations.extend([Link(url) for url in _ulocations])
        logger.debug('URLs to search for versions for %s:' % req)
        for location in locations:
            logger.debug('* %s' % location)
            # Determine if this url used a secure transport mechanism
            parsed = urlparse.urlparse(str(location))
            if parsed.scheme in INSECURE_SCHEMES:
                secure_schemes = INSECURE_SCHEMES[parsed.scheme]
                # Warn once per insecure location, suggesting alternatives.
                if len(secure_schemes) == 1:
                    ctx = (location, parsed.scheme, secure_schemes[0],
                           parsed.netloc)
                    logger.warn("%s uses an insecure transport scheme (%s). "
                                "Consider using %s if %s has it available" %
                                ctx)
                elif len(secure_schemes) > 1:
                    ctx = (location, parsed.scheme, ", ".join(secure_schemes),
                           parsed.netloc)
                    logger.warn("%s uses an insecure transport scheme (%s). "
                                "Consider using one of %s if %s has any of "
                                "them available" % ctx)
                else:
                    ctx = (location, parsed.scheme)
                    logger.warn("%s uses an insecure transport scheme (%s)." %
                                ctx)
        # Collect candidate (parsed_version, link, version) tuples from each
        # source: find-links archives, index pages, dependency links, files.
        found_versions = []
        found_versions.extend(
            self._package_versions(
                # We trust every directly linked archive in find_links
                [Link(url, '-f', trusted=True) for url in self.find_links], req.name.lower()))
        page_versions = []
        for page in self._get_pages(locations, req):
            logger.debug('Analyzing links from page %s' % page.url)
            logger.indent += 2
            try:
                page_versions.extend(self._package_versions(page.links, req.name.lower()))
            finally:
                logger.indent -= 2
        dependency_versions = list(self._package_versions(
            [Link(url) for url in self.dependency_links], req.name.lower()))
        if dependency_versions:
            logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions]))
        file_versions = list(self._package_versions(
            [Link(url) for url in file_locations], req.name.lower()))
        if not found_versions and not page_versions and not dependency_versions and not file_versions:
            # Nothing at all was found; explain what was skipped and why.
            logger.fatal('Could not find any downloads that satisfy the requirement %s' % req)
            if self.need_warn_external:
                logger.warn("Some externally hosted files were ignored (use "
                            "--allow-external %s to allow)." % req.name)
            if self.need_warn_unverified:
                logger.warn("Some insecure and unverifiable files were ignored"
                            " (use --allow-unverified %s to allow)." %
                            req.name)
            raise DistributionNotFound('No distributions at all found for %s' % req)
        installed_version = []
        if req.satisfied_by is not None:
            installed_version = [(req.satisfied_by.parsed_version, INSTALLED_VERSION, req.satisfied_by.version)]
        if file_versions:
            file_versions.sort(reverse=True)
            logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions]))
        #this is an intentional priority ordering
        all_versions = installed_version + file_versions + found_versions + page_versions + dependency_versions
        applicable_versions = []
        for (parsed_version, link, version) in all_versions:
            # Drop candidates outside the requirement's specifier set.
            if version not in req.req:
                logger.info("Ignoring link %s, version %s doesn't match %s"
                            % (link, version, ','.join([''.join(s) for s in req.req.specs])))
                continue
            elif is_prerelease(version) and not (self.allow_all_prereleases or req.prereleases):
                # If this version isn't the already installed one, then
                # ignore it if it's a pre-release.
                if link is not INSTALLED_VERSION:
                    logger.info("Ignoring link %s, version %s is a pre-release (use --pre to allow)." % (link, version))
                    continue
            applicable_versions.append((parsed_version, link, version))
        applicable_versions = self._sort_versions(applicable_versions)
        existing_applicable = bool([link for parsed_version, link, version in applicable_versions if link is INSTALLED_VERSION])
        if not upgrade and existing_applicable:
            # Installed version is acceptable and no upgrade requested.
            if applicable_versions[0][1] is INSTALLED_VERSION:
                logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement'
                            % req.satisfied_by.version)
            else:
                logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
                            % (req.satisfied_by.version, applicable_versions[0][2]))
            return None
        if not applicable_versions:
            logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)'
                         % (req, ', '.join([version for parsed_version, link, version in all_versions])))
            if self.need_warn_external:
                logger.warn("Some externally hosted files were ignored (use "
                            "--allow-external to allow).")
            if self.need_warn_unverified:
                logger.warn("Some insecure and unverifiable files were ignored"
                            " (use --allow-unverified %s to allow)." %
                            req.name)
            raise DistributionNotFound('No distributions matching the version for %s' % req)
        if applicable_versions[0][1] is INSTALLED_VERSION:
            # We have an existing version, and its the best version
            logger.info('Installed version (%s) is most up-to-date (past versions: %s)'
                        % (req.satisfied_by.version, ', '.join([version for parsed_version, link, version in applicable_versions[1:]]) or 'none'))
            raise BestVersionAlreadyInstalled
        if len(applicable_versions) > 1:
            logger.info('Using version %s (newest of versions: %s)' %
                        (applicable_versions[0][2], ', '.join([version for parsed_version, link, version in applicable_versions])))
        selected_version = applicable_versions[0][1]
        # Surface hosting/verifiability caveats for the chosen link.
        if (selected_version.internal is not None
                and not selected_version.internal):
            logger.warn("%s an externally hosted file and may be "
                        "unreliable" % req.name)
        if (selected_version.verifiable is not None
                and not selected_version.verifiable):
            logger.warn("%s is potentially insecure and "
                        "unverifiable." % req.name)
        if selected_version._deprecated_regex:
            logger.deprecated(
                "1.7",
                "%s discovered using a deprecated method of parsing, "
                "in the future it will no longer be discovered" % req.name
            )
        return selected_version
def _find_url_name(self, index_url, url_name, req):
"""Finds the true URL name of a package, when the given name isn't quite correct.
This is usually used to implement case-insensitivity."""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
## FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.fatal('Cannot fetch index base URL %s' % index_url)
return
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.notify('Real name of requirement %s is %s' % (url_name, base))
return base
return None
    def _get_pages(self, locations, req):
        """
        Yields (page, page_url) from the given locations, skipping
        locations that have errors, and adding download/homepage links
        """
        # Breadth-first crawl: start from the given locations and also follow
        # any rel="homepage"/rel="download" links found on each fetched page.
        all_locations = list(locations)
        seen = set()
        while all_locations:
            location = all_locations.pop(0)
            if location in seen:
                continue
            seen.add(location)
            page = self._get_page(location, req)
            if page is None:
                continue
            yield page
            for link in page.rel_links():
                normalized = normalize_name(req.name).lower()
                # Skip externally hosted pages unless explicitly allowed;
                # remember that we skipped some so a warning can be shown.
                if (not normalized in self.allow_external
                        and not self.allow_all_external):
                    self.need_warn_external = True
                    logger.debug("Not searching %s for files because external "
                                 "urls are disallowed." % link)
                    continue
                # Skip untrusted (scraped) links unless the user opted in
                # via --allow-unverified for this project.
                if (link.trusted is not None
                        and not link.trusted
                        and not normalized in self.allow_unverified):
                    logger.debug("Not searching %s for urls, it is an "
                                 "untrusted link and cannot produce safe or "
                                 "verifiable files." % link)
                    self.need_warn_unverified = True
                    continue
                all_locations.append(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates"
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
    def _package_versions(self, links, search_name):
        """Yield (parsed_version, link, version) triples for all *links*.

        Links are visited non-egg first (see _sort_links) and each is
        expanded through _link_package_versions.
        """
        for link in self._sort_links(links):
            for v in self._link_package_versions(link, search_name):
                yield v
def _known_extensions(self):
extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip')
if self.use_wheel:
return extensions + (wheel_ext,)
return extensions
    def _link_package_versions(self, link, search_name):
        """
        Return an iterable of triples (pkg_resources_version_key,
        link, python_version) that can be extracted from the given
        link.

        Meant to be overridden by subclasses, not called by clients.
        """
        platform = get_platform()
        version = None
        if link.egg_fragment:
            # An explicit "#egg=name-version" fragment names the candidate.
            egg_info = link.egg_fragment
        else:
            egg_info, ext = link.splitext()
            if not ext:
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; not a file' % link)
                    self.logged_links.add(link)
                return []
            if egg_info.endswith('.tar'):
                # Special double-extension case:
                egg_info = egg_info[:-4]
                ext = '.tar' + ext
            if ext not in self._known_extensions():
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
                    self.logged_links.add(link)
                return []
            if "macosx10" in link.path and ext == '.zip':
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; macosx10 one' % (link))
                    self.logged_links.add(link)
                return []
            if ext == wheel_ext:
                # Wheels carry their own name/version/compatibility metadata
                # in the filename; validate all of it before accepting.
                try:
                    wheel = Wheel(link.filename)
                except InvalidWheelFilename:
                    logger.debug('Skipping %s because the wheel filename is invalid' % link)
                    return []
                if wheel.name.lower() != search_name.lower():
                    logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
                    return []
                if not wheel.supported():
                    logger.debug('Skipping %s because it is not compatible with this Python' % link)
                    return []
                # This is a dirty hack to prevent installing Binary Wheels from
                # PyPI unless it is a Windows or Mac Binary Wheel. This is
                # paired with a change to PyPI disabling uploads for the
                # same. Once we have a mechanism for enabling support for binary
                # wheels on linux that deals with the inherent problems of
                # binary distribution this can be removed.
                comes_from = getattr(link, "comes_from", None)
                if ((
                        not platform.startswith('win')
                        and not platform.startswith('macosx')
                        )
                        and comes_from is not None
                        and urlparse.urlparse(comes_from.url).netloc.endswith(
                            "pypi.python.org")):
                    if not wheel.supported(tags=supported_tags_noarch):
                        logger.debug(
                            "Skipping %s because it is a pypi-hosted binary "
                            "Wheel on an unsupported platform" % link
                        )
                        return []
                version = wheel.version
        if not version:
            # Fall back to parsing "<name>-<version>" out of the filename stem.
            version = self._egg_info_matches(egg_info, search_name, link)
        if version is None:
            logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
            return []
        if (link.internal is not None
                and not link.internal
                and not normalize_name(search_name).lower() in self.allow_external
                and not self.allow_all_external):
            # We have a link that we are sure is external, so we should skip
            # it unless we are allowing externals
            logger.debug("Skipping %s because it is externally hosted." % link)
            self.need_warn_external = True
            return []
        if (link.verifiable is not None
                and not link.verifiable
                and not (normalize_name(search_name).lower()
                         in self.allow_unverified)):
            # We have a link that we are sure we cannot verify it's integrity,
            # so we should skip it unless we are allowing unsafe installs
            # for this requirement.
            logger.debug("Skipping %s because it is an insecure and "
                         "unverifiable file." % link)
            self.need_warn_unverified = True
            return []
        match = self._py_version_re.search(version)
        if match:
            # A trailing "-pyX.Y" restricts the file to one Python version.
            version = version[:match.start()]
            py_version = match.group(1)
            if py_version != sys.version[:3]:
                logger.debug('Skipping %s because Python version is incorrect' % link)
                return []
        logger.debug('Found link %s, version: %s' % (link, version))
        return [(pkg_resources.parse_version(version),
                 link,
                 version)]
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s' % link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
    def _get_page(self, link, req):
        """Fetch *link* as an HTMLPage, reusing this finder's cache and session."""
        return HTMLPage.get_page(link, req,
                                 cache=self.cache,
                                 session=self.session,
                                 )
class PageCache(object):
    """In-memory memo of fetched HTML pages, fetch failures, and archive URLs."""

    # A URL is given up on once its accumulated failure score reaches this.
    failure_limit = 3

    def __init__(self):
        self._failures = {}  # url -> accumulated failure score
        self._pages = {}     # url -> HTMLPage instance
        self._archives = {}  # url -> True if known to be an archive, not HTML

    def too_many_failures(self, url):
        """Has this URL failed often enough that we should stop retrying?"""
        score = self._failures.get(url, 0)
        return score >= self.failure_limit

    def get_page(self, url):
        """Return the cached page for *url*, or None when not cached."""
        return self._pages.get(url)

    def is_archive(self, url):
        """True when *url* was previously identified as a non-HTML archive."""
        return self._archives.get(url, False)

    def set_is_archive(self, url, value=True):
        """Record whether *url* points at an archive rather than a page."""
        self._archives[url] = value

    def add_page_failure(self, url, level):
        """Bump the failure score for *url* by *level*."""
        previous = self._failures.get(url, 0)
        self._failures[url] = previous + level

    def add_page(self, urls, page):
        """Cache *page* under every URL it was reachable from."""
        self._pages.update(dict.fromkeys(urls, page))
class HTMLPage(object):
    """One fetched HTML page (an index or project page) plus its URL.

    Provides iteration over the links it contains and discovery of
    rel="homepage"/rel="download" links for the crawler in PackageFinder.
    """

    ## FIXME: these regexes are horrible hacks:
    # Match the "Home Page" / "Download URL" table headers on legacy PyPI
    # project pages; used only by the deprecated scraped_rel_links().
    # BUGFIX: the homepage pattern previously read r'<th>\s*home.html\s*page',
    # which can never match a "Home Page" header (compare _download_re).
    _homepage_re = re.compile(r'<th>\s*home\s*page', re.I)
    _download_re = re.compile(r'<th>\s*download\s+url', re.I)
    _href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S)

    def __init__(self, content, url, headers=None, trusted=None):
        self.content = content
        self.parsed = html5lib.parse(self.content, namespaceHTMLElements=False)
        self.url = url
        self.headers = headers
        self.trusted = trusted

    def __str__(self):
        return self.url

    @classmethod
    def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
        """Fetch *link* and return an HTMLPage for it, or None on any failure.

        Results and failures are memoized in *cache* when one is given; URLs
        that look like (or are served as) archives are remembered so they are
        not re-fetched.
        """
        if session is None:
            session = PipSession()
        url = link.url
        url = url.split('#', 1)[0]
        # BUGFIX: guard against the default cache=None before dereferencing;
        # every other use of `cache` in this method already checks for None.
        if cache is not None and cache.too_many_failures(url):
            return None
        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals())
                return None
        if cache is not None:
            inst = cache.get_page(url)
            if inst is not None:
                return inst
        try:
            if skip_archives:
                if cache is not None:
                    if cache.is_archive(url):
                        return None
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(url,
                                                             session=session,
                                                             )
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
                            if cache is not None:
                                cache.set_is_archive(url)
                            return None
            logger.debug('Getting page %s' % url)
            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
            if scheme == 'file' and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim final segment
                if not url.endswith('/'):
                    url += '/'
                url = urlparse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s' % url)
            resp = session.get(url, headers={"Accept": "text/html"})
            resp.raise_for_status()
            # The check for archives above only works if the url ends with
            # something that looks like an archive. However that is not a
            # requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
            # redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
            # Unless we issue a HEAD request on every url we cannot know
            # ahead of time for sure if something is HTML or not. However we
            # can check after we've downloaded it.
            content_type = resp.headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug('Skipping page %s because of Content-Type: %s' %
                             (link, content_type))
                if cache is not None:
                    cache.set_is_archive(url)
                return None
            inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
        except requests.HTTPError as exc:
            # 404s are scored more heavily toward the cache's failure limit.
            level = 2 if exc.response.status_code == 404 else 1
            cls._handle_fail(req, link, exc, url, cache=cache, level=level)
        except requests.ConnectionError as exc:
            cls._handle_fail(
                req, link, "connection error: %s" % exc, url,
                cache=cache,
            )
        except requests.Timeout:
            cls._handle_fail(req, link, "timed out", url, cache=cache)
        except SSLError as exc:
            reason = ("There was a problem confirming the ssl certificate: "
                      "%s" % exc)
            cls._handle_fail(req, link, reason, url,
                             cache=cache,
                             level=2,
                             meth=logger.notify,
                             )
        else:
            # Cache under both the requested and the final (post-redirect) URL.
            if cache is not None:
                cache.add_page([url, resp.url], inst)
            return inst

    @staticmethod
    def _handle_fail(req, link, reason, url, cache=None, level=1, meth=None):
        """Log a fetch failure and record it against *url* in the cache."""
        if meth is None:
            meth = logger.info
        meth("Could not fetch URL %s: %s", link, reason)
        meth("Will skip URL %s when looking for download links for %s" %
             (link.url, req))
        if cache is not None:
            cache.add_page_failure(url, level)

    @staticmethod
    def _get_content_type(url, session=None):
        """Get the Content-Type of the given url, using a HEAD request"""
        if session is None:
            session = PipSession()
        scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
        if not scheme in ('http', 'https', 'ftp', 'ftps'):
            ## FIXME: some warning or something?
            ## assertion error?
            return ''
        resp = session.head(url, allow_redirects=True)
        resp.raise_for_status()
        return resp.headers.get("Content-Type", "")

    @property
    def api_version(self):
        """Integer value of the page's <meta name="api-version"> tag, or None.

        Computed once and memoized on the instance.
        """
        if not hasattr(self, "_api_version"):
            _api_version = None
            metas = [x for x in self.parsed.findall(".//meta")
                     if x.get("name", "").lower() == "api-version"]
            if metas:
                try:
                    _api_version = int(metas[0].get("value", None))
                except (TypeError, ValueError):
                    _api_version = None
            self._api_version = _api_version
        return self._api_version

    @property
    def base_url(self):
        """The page's <base href>, falling back to the page URL (memoized)."""
        if not hasattr(self, "_base_url"):
            base = self.parsed.find(".//base")
            if base is not None and base.get("href"):
                self._base_url = base.get("href")
            else:
                self._base_url = self.url
        return self._base_url

    @property
    def links(self):
        """Yields all links in the page"""
        for anchor in self.parsed.findall(".//a"):
            if anchor.get("href"):
                href = anchor.get("href")
                url = self.clean_link(urlparse.urljoin(self.base_url, href))
                # Determine if this link is internal. If that distinction
                # doesn't make sense in this context, then we don't make
                # any distinction.
                internal = None
                if self.api_version and self.api_version >= 2:
                    # Only api_versions >= 2 have a distinction between
                    # external and internal links
                    internal = bool(anchor.get("rel")
                                    and "internal" in anchor.get("rel").split())
                yield Link(url, self, internal=internal)

    def rel_links(self):
        """Yield homepage/download links, declared (rel=) then scraped."""
        for url in self.explicit_rel_links():
            yield url
        for url in self.scraped_rel_links():
            yield url

    def explicit_rel_links(self, rels=('homepage', 'download')):
        """Yields all links with the given relations"""
        rels = set(rels)
        for anchor in self.parsed.findall(".//a"):
            if anchor.get("rel") and anchor.get("href"):
                found_rels = set(anchor.get("rel").split())
                # Determine the intersection between what rels were found and
                # what rels were being looked for
                if found_rels & rels:
                    href = anchor.get("href")
                    url = self.clean_link(urlparse.urljoin(self.base_url, href))
                    yield Link(url, self, trusted=False)

    def scraped_rel_links(self):
        """Regex-scrape Home Page / Download URL links from legacy pages."""
        # Can we get rid of this horrible horrible method?
        for regex in (self._homepage_re, self._download_re):
            match = regex.search(self.content)
            if not match:
                continue
            href_match = self._href_re.search(self.content, pos=match.end())
            if not href_match:
                continue
            url = href_match.group(1) or href_match.group(2) or href_match.group(3)
            if not url:
                continue
            url = self.clean_link(urlparse.urljoin(self.base_url, url))
            yield Link(url, self, trusted=False, _deprecated_regex=True)

    # Any character outside this set gets percent-encoded by clean_link().
    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)

    def clean_link(self, url):
        """Makes sure a link is fully encoded. That is, if a ' ' shows up in
        the link, it will be rewritten to %20 (while not over-quoting
        % or other characters)."""
        return self._clean_re.sub(
            lambda match: '%%%2x' % ord(match.group(0)), url)
class Link(object):
    """A candidate download URL for a package, plus trust metadata.

    ``comes_from`` is the page (or requirement) the link was found on;
    ``internal``/``trusted`` record what the index said about hosting and
    hash trustworthiness; ``_deprecated_regex`` flags links found via the
    legacy page-scraping regexes.
    """

    def __init__(self, url, comes_from=None, internal=None, trusted=None,
                 _deprecated_regex=False):
        self.url = url
        self.comes_from = comes_from
        self.internal = internal
        self.trusted = trusted
        self._deprecated_regex = _deprecated_regex

    def __str__(self):
        if not self.comes_from:
            return str(self.url)
        return '%s (from %s)' % (self.url, self.comes_from)

    def __repr__(self):
        return '<Link %s>' % self

    # Links compare, order, and hash by URL alone.
    def __eq__(self, other):
        return self.url == other.url

    def __ne__(self, other):
        return self.url != other.url

    def __lt__(self, other):
        return self.url < other.url

    def __le__(self, other):
        return self.url <= other.url

    def __gt__(self, other):
        return self.url > other.url

    def __ge__(self, other):
        return self.url >= other.url

    def __hash__(self):
        return hash(self.url)

    @property
    def filename(self):
        """Last path segment of the URL (or the netloc for bare hosts)."""
        parts = urlparse.urlsplit(self.url)
        name = posixpath.basename(parts[2].rstrip('/')) or parts[1]
        assert name, ('URL %r produced no filename' % self.url)
        return name

    @property
    def scheme(self):
        return urlparse.urlsplit(self.url).scheme

    @property
    def path(self):
        return urlparse.urlsplit(self.url).path

    def splitext(self):
        """Split the URL's basename into (stem, extension)."""
        base = posixpath.basename(self.path.rstrip('/'))
        return splitext(base)

    @property
    def ext(self):
        _, extension = self.splitext()
        return extension

    @property
    def url_without_fragment(self):
        parts = urlparse.urlsplit(self.url)
        return urlparse.urlunsplit(
            (parts.scheme, parts.netloc, parts.path, parts.query, None))

    _egg_fragment_re = re.compile(r'#egg=([^&]*)')

    @property
    def egg_fragment(self):
        """The "#egg=" name embedded in the URL, or None."""
        match = self._egg_fragment_re.search(self.url)
        return match.group(1) if match else None

    _hash_re = re.compile(r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)')

    @property
    def hash(self):
        """Hex digest embedded in the URL fragment, or None."""
        match = self._hash_re.search(self.url)
        return match.group(2) if match else None

    @property
    def hash_name(self):
        """Name of the hash algorithm embedded in the URL, or None."""
        match = self._hash_re.search(self.url)
        return match.group(1) if match else None

    @property
    def show_url(self):
        base = self.url.split('#', 1)[0]
        return posixpath.basename(base.split('?', 1)[0])

    @property
    def verifiable(self):
        """
        Returns True if this link can be verified after download, False if it
        cannot, and None if we cannot determine.
        """
        trusted = self.trusted or getattr(self.comes_from, "trusted", None)
        if trusted is None:
            # No hosting information at all: undetermined.
            return None
        if not trusted:
            # Known-untrusted source: never verifiable.
            return False
        try:
            api_version = int(getattr(self.comes_from, "api_version", None))
        except (ValueError, TypeError):
            api_version = None
        if api_version is None or api_version <= 1:
            # Trusted, but the page predates API version 2, so no claim
            # about safety can be made either way.
            return None
        # Trusted and API v2: verifiable exactly when a hash is present.
        return bool(self.hash)
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
# (Link instances compare by their url attribute, so Inf always wins.)
INSTALLED_VERSION = Link(Inf)
def get_requirement_from_url(url):
    """Get a requirement from the URL, if possible. This looks for #egg
    in the URL"""
    link = Link(url)
    # Prefer the explicit "#egg=" name; otherwise fall back to the
    # link's filename with its extension stripped.
    name_hint = link.egg_fragment or splitext(link.filename)[0]
    return package_to_requirement(name_hint)
def package_to_requirement(package_name):
    """Translate a name like "Foo-1.2" to the requirement "Foo==1.2".

    The version part is everything after the first dash whose remainder is
    either "dev" or starts with a digit; names without a version ("Foo")
    are returned unchanged.  (The previous docstring incorrectly gave
    "Foo==1.3" as the translation of "Foo-1.2".)
    """
    match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
    if match:
        name = match.group(1)
        version = match.group(2)
    else:
        name = package_name
        version = ''
    if version:
        return '%s==%s' % (name, version)
    else:
        return name
| ncdesouza/bookworm | env/lib/python2.7/site-packages/pip/index.py | Python | gpl-3.0 | 40,408 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-30 12:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the "added" and "changed" fields from the core User model.
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='user',
            name='added',
        ),
        migrations.RemoveField(
            model_name='user',
            name='changed',
        ),
    ]
| pashinin-com/pashinin.com | src/core/migrations/0002_auto_20161030_1553.py | Python | gpl-3.0 | 478 |
# -*- coding: utf-8 -*-
############################################################################
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
############################################################################
from module.plugins.internal.SimpleCrypter import SimpleCrypter
class PastebinCom(SimpleCrypter):
    """Decrypts pastebin.com pastes into the download links they contain."""
    __name__ = "PastebinCom"
    __type__ = "crypter"
    __pattern__ = r"http://(?:w{3}.)?pastebin\.com/\w+"
    __version__ = "0.01"
    __description__ = """Pastebin.com Plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("l.stickell@yahoo.it")
    # Captures each http(s) URL from the paste's syntax-highlighted line divs.
    LINK_PATTERN = r'<div class="de\d+">(https?://[^ <]+)(?:[^<]*)</div>'
    # Captures the paste title (used by SimpleCrypter to name the package).
    TITLE_PATTERN = r'<div class="paste_box_line1" title="(?P<title>[^"]+)">'
| wangjun/pyload | module/plugins/crypter/PastebinCom.py | Python | gpl-3.0 | 1,621 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Default to this project's settings module unless the caller already
    # set DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "projects_morelab.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| OscarPDR/projects_morelab | manage.py | Python | gpl-3.0 | 259 |
from unittest import TestCase, main
from io import StringIO
from brocclib.parse import (
read_blast, iter_fasta, parse_accession,
)
class AccessionTests(TestCase):
    """Tests for parse_accession."""
    def test_parse_accession_old_format(self):
        # Pipe-delimited "gi|<gi>|<db>|<accession>|..." identifiers.
        self.assertEqual(
            parse_accession('gi|259100874|gb|GQ513762.1|'),
            "GQ513762.1")
        self.assertEqual(
            parse_accession("gi|1857499|gb|U83468.1|TSU83468"),
            "U83468.1")
        self.assertEqual(
            parse_accession("gi|163263088|emb|AM922223.1|"),
            "AM922223.1")
    def test_parse_accession_new_format(self):
        # Bare accession.version strings pass through unchanged.
        self.assertEqual(parse_accession('GQ513762.1'), "GQ513762.1")
class FastaTests(TestCase):
    """Tests for iter_fasta."""
    def test_basic(self):
        lines = [
            ">lab1",
            "TTTTCCC",
            ">lab2",
            "CCAAAA",
        ]
        seqs = iter_fasta(lines)
        # Records come back lazily as (label, sequence) pairs, then the
        # generator is exhausted.
        self.assertEqual(next(seqs), ("lab1", "TTTTCCC"))
        self.assertEqual(next(seqs), ("lab2", "CCAAAA"))
        self.assertRaises(StopIteration, next, seqs)
class BlastOutputTests(TestCase):
    """Tests for read_blast on tabular (comment-annotated) BLAST output."""
    def test_normal_output(self):
        obs = read_blast(StringIO(normal_output))
        h = obs['0 E7_168192'][0]
        self.assertEqual(h.accession, "GQ513762.1")
        self.assertEqual(h.pct_id, 98.74)
        self.assertEqual(h.length, 159)
    def test_malformed_output(self):
        # Same parse must succeed when the "# BLAST processed" footer is absent.
        obs = read_blast(StringIO(malformed_output))
        h = obs['0 E7_168192'][0]
        self.assertEqual(h.accession, "GQ513762.1")
        self.assertEqual(h.pct_id, 98.74)
        self.assertEqual(h.length, 159)
    def test_missing_read(self):
        # Unknown query ids yield an empty hit list rather than a KeyError.
        obs = read_blast(StringIO(normal_output))
        self.assertEqual(obs['sdlkj'], [])
normal_output = """\
# BLASTN 2.2.25+
# Query: 0 E7_168192
# Database: /home/rohinis/blastdb/blast_nt/nt
# Fields: query id, subject id, % identity, alignment length, mismatches, gap opens, q. start, q. end, s. start, s. end, evalue, bit score
# 100 hits found
0 gi|259100874|gb|GQ513762.1| 98.74 159 1 1 407 564 1 159 2e-70 275
0 gi|259098555|gb|GQ520853.1| 98.74 159 1 1 407 564 1 159 2e-70 275
0 gi|259098210|gb|GQ520508.1| 98.11 159 2 1 407 564 1 159 1e-68 269
0 gi|259092808|gb|GQ524514.1| 98.11 159 1 2 407 564 1 158 1e-67 266
0 gi|259107208|gb|GQ510686.1| 98.68 152 1 1 414 564 1 152 2e-66 262
0 gi|259103360|gb|GQ516248.1| 98.68 152 1 1 414 564 1 152 2e-66 262
0 gi|259101730|gb|GQ514618.1| 98.68 152 1 1 414 564 1 152 2e-66 262
# BLAST processed 608 queries
"""
malformed_output = """\
# BLASTN 2.2.25+
# Query: 0 E7_168192
# Database: /home/rohinis/blastdb/blast_nt/nt
# Fields: query id, subject id, % identity, alignment length, mismatches, gap opens, q. start, q. end, s. start, s. end, evalue, bit score
# 100 hits found
0 gi|259100874|gb|GQ513762.1| 98.74 159 1 1 407 564 1 159 2e-70 275
0 gi|259098555|gb|GQ520853.1| 98.74 159 1 1 407 564 1 159 2e-70 275
0 gi|259098210|gb|GQ520508.1| 98.11 159 2 1 407 564 1 159 1e-68 269
0 gi|259092808|gb|GQ524514.1| 98.11 159 1 2 407 564 1 158 1e-67 266
0 gi|259107208|gb|GQ510686.1| 98.68 152 1 1 414 564 1 152 2e-66 262
0 gi|259103360|gb|GQ516248.1| 98.68 152 1 1 414 564 1 152 2e-66 262
0 gi|259101730|gb|GQ514618.1| 98.68 152 1 1 414 564 1 152 2e-66 262
0 gi|259093119|gb|GQ524825.1| 98.68 152 1 1 414 564 1 152 2e-66 262
0 gi|259100068|gb|GQ522366.1| 98.67 150 1 1 416 564 1 150 2e-65 259
0 gi|259099396|gb|GQ521694.1| 98.67 150 1 1 416 564 1 150 2e-65 259
"""
if __name__ == '__main__':
main()
| kylebittinger/brocc | tests/test_parse.py | Python | gpl-3.0 | 3,541 |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fcntl
import os
import pipes
import pty
import select
import shlex
import subprocess
import time
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.plugins.connection import ConnectionBase
from ansible.utils.path import unfrackpath, makedirs_safe
from ansible.utils.unicode import to_bytes, to_unicode
from ansible.compat.six import text_type, binary_type
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
SSHPASS_AVAILABLE = None
class Connection(ConnectionBase):
''' ssh based connections '''
transport = 'ssh'
has_pipelining = True
become_methods = frozenset(C.BECOME_METHODS).difference(['runas'])
    def __init__(self, *args, **kwargs):
        """Initialize the base connection and remember the target address."""
        super(Connection, self).__init__(*args, **kwargs)
        self.host = self._play_context.remote_addr
    # The connection is created by running ssh/scp/sftp from the exec_command,
    # put_file, and fetch_file methods, so we don't need to do any connection
    # management here.
    def _connect(self):
        # Nothing to set up; each operation spawns its own ssh process.
        return self
    @staticmethod
    def _sshpass_available():
        """Return True when the sshpass helper binary can be spawned.

        The probe result is cached in the module-level SSHPASS_AVAILABLE so
        the subprocess is only started once per process.
        """
        global SSHPASS_AVAILABLE
        # We test once if sshpass is available, and remember the result. It
        # would be nice to use distutils.spawn.find_executable for this, but
        # distutils isn't always available; shutils.which() is Python3-only.
        if SSHPASS_AVAILABLE is None:
            try:
                p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                p.communicate()
                SSHPASS_AVAILABLE = True
            except OSError:
                SSHPASS_AVAILABLE = False
        return SSHPASS_AVAILABLE
@staticmethod
def _persistence_controls(command):
'''
Takes a command array and scans it for ControlPersist and ControlPath
settings and returns two booleans indicating whether either was found.
This could be smarter, e.g. returning false if ControlPersist is 'no',
but for now we do it simple way.
'''
controlpersist = False
controlpath = False
for arg in command:
if 'controlpersist' in arg.lower():
controlpersist = True
elif 'controlpath' in arg.lower():
controlpath = True
return controlpersist, controlpath
@staticmethod
def _split_args(argstring):
"""
Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
the argument list. The list will not contain any empty elements.
"""
return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()]
    def _add_args(self, explanation, args):
        """
        Adds the given args to self._command and displays a caller-supplied
        explanation of why they were added.
        """
        # Extend the in-progress command line and trace the addition at -vvvvv.
        self._command += args
        display.vvvvv('SSH: ' + explanation + ': (%s)' % ')('.join(args), host=self._play_context.remote_addr)
    def _build_command(self, binary, *other_args):
        '''
        Takes a binary (ssh, scp, sftp) and optional extra arguments and returns
        a command line as an array that can be passed to subprocess.Popen.

        Side effects: stores the result in self._command, may create
        self.sshpass_pipe, and sets self._persistent when ControlPersist is
        in use.
        '''
        self._command = []
        ## First, the command name.
        # If we want to use password authentication, we have to set up a pipe to
        # write the password to sshpass.
        if self._play_context.password:
            if not self._sshpass_available():
                raise AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
            self.sshpass_pipe = os.pipe()
            self._command += ['sshpass', '-d{0}'.format(self.sshpass_pipe[0])]
        self._command += [binary]
        ## Next, additional arguments based on the configuration.
        # sftp batch mode allows us to correctly catch failed transfers, but can
        # be disabled if the client side doesn't support the option.
        if binary == 'sftp' and C.DEFAULT_SFTP_BATCH_MODE:
            self._command += ['-b', '-']
        self._command += ['-C']
        if self._play_context.verbosity > 3:
            self._command += ['-vvv']
        elif binary == 'ssh':
            # Older versions of ssh (e.g. in RHEL 6) don't accept sftp -q.
            self._command += ['-q']
        # Next, we add [ssh_connection]ssh_args from ansible.cfg.
        if self._play_context.ssh_args:
            args = self._split_args(self._play_context.ssh_args)
            self._add_args("ansible.cfg set ssh_args", args)
        # Now we add various arguments controlled by configuration file settings
        # (e.g. host_key_checking) or inventory variables (ansible_ssh_port) or
        # a combination thereof.
        if not C.HOST_KEY_CHECKING:
            self._add_args(
                "ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled",
                ("-o", "StrictHostKeyChecking=no")
            )
        if self._play_context.port is not None:
            self._add_args(
                "ANSIBLE_REMOTE_PORT/remote_port/ansible_port set",
                ("-o", "Port={0}".format(self._play_context.port))
            )
        key = self._play_context.private_key_file
        if key:
            self._add_args(
                "ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set",
                ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(key)))
            )
        if not self._play_context.password:
            # Without a password, forbid password-based auth methods so ssh
            # fails fast instead of prompting interactively.
            self._add_args(
                "ansible_password/ansible_ssh_pass not set", (
                    "-o", "KbdInteractiveAuthentication=no",
                    "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
                    "-o", "PasswordAuthentication=no"
                )
            )
        user = self._play_context.remote_user
        if user:
            self._add_args(
                "ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set",
                ("-o", "User={0}".format(self._play_context.remote_user))
            )
        self._add_args(
            "ANSIBLE_TIMEOUT/timeout set",
            ("-o", "ConnectTimeout={0}".format(self._play_context.timeout))
        )
        # Add in any common or binary-specific arguments from the PlayContext
        # (i.e. inventory or task settings or overrides on the command line).
        for opt in ['ssh_common_args', binary + '_extra_args']:
            attr = getattr(self._play_context, opt, None)
            if attr is not None:
                args = self._split_args(attr)
                self._add_args("PlayContext set %s" % opt, args)
        # Check if ControlPersist is enabled and add a ControlPath if one hasn't
        # already been set.
        controlpersist, controlpath = self._persistence_controls(self._command)
        if controlpersist:
            self._persistent = True
            if not controlpath:
                cpdir = unfrackpath('$HOME/.ansible/cp')
                # The directory must exist and be writable.
                makedirs_safe(cpdir, 0o700)
                if not os.access(cpdir, os.W_OK):
                    raise AnsibleError("Cannot write to ControlPath %s" % cpdir)
                args = ("-o", "ControlPath={0}".format(
                    C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=cpdir))
                )
                self._add_args("found only ControlPersist; added ControlPath", args)
        ## Finally, we add any caller-supplied extras.
        if other_args:
            self._command += other_args
        return self._command
    def _send_initial_data(self, fh, in_data):
        '''
        Writes initial data to the stdin filehandle of the subprocess and closes
        it. (The handle must be closed; otherwise, for example, "sftp -b -" will
        just hang forever waiting for more commands.)

        Raises AnsibleConnectionFailure when the pipe cannot be written,
        which typically means the ssh process died early.
        '''
        display.debug('Sending initial data')
        try:
            fh.write(in_data)
            # Closing stdin is what signals end-of-input to the child.
            fh.close()
        except (OSError, IOError):
            raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
        display.debug('Sent initial data (%d bytes)' % len(in_data))
# Used by _run() to kill processes on failures
@staticmethod
def _terminate_process(p):
""" Terminate a process, ignoring errors """
try:
p.terminate()
except (OSError, IOError):
pass
    # This is separate from _run() because we need to do the same thing for stdout
    # and stderr.
    def _examine_output(self, source, state, chunk, sudoable):
        '''
        Takes a string, extracts complete lines from it, tests to see if they
        are a prompt, error message, etc., and sets appropriate flags in self.
        Prompt and success lines are removed.

        Returns the processed (i.e. possibly-edited) output and the unprocessed
        remainder (to be processed with the next chunk) as strings.
        '''
        output = []
        # splitlines(True) keeps the line endings so output can be rejoined
        # byte-for-byte (minus any suppressed lines).
        for l in chunk.splitlines(True):
            suppress_output = False
            #display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
            if self._play_context.prompt and self.check_password_prompt(l):
                display.debug("become_prompt: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
                self._flags['become_prompt'] = True
                suppress_output = True
            elif self._play_context.success_key and self.check_become_success(l):
                display.debug("become_success: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
                self._flags['become_success'] = True
                suppress_output = True
            elif sudoable and self.check_incorrect_password(l):
                display.debug("become_error: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
                self._flags['become_error'] = True
            elif sudoable and self.check_missing_password(l):
                display.debug("become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
                self._flags['become_nopasswd_error'] = True
            if not suppress_output:
                output.append(l)
        # The chunk we read was most likely a series of complete lines, but just
        # in case the last line was incomplete (and not a prompt, which we would
        # have removed from the output), we retain it to be processed with the
        # next chunk.
        remainder = ''
        if output and not output[-1].endswith('\n'):
            remainder = output[-1]
            output = output[:-1]
        return ''.join(output), remainder
    def _run(self, cmd, in_data, sudoable=True):
        '''
        Starts the command and communicates with it until it ends.

        :param cmd: command to execute, either a single string or (more
            commonly) a list of arguments; converted to bytes before exec
        :param in_data: optional payload to write to the child's stdin
        :param sudoable: whether privilege-escalation prompts may appear
        :returns: tuple of (returncode, accumulated stdout, accumulated stderr)
        :raises AnsibleError: on escalation timeout/failure or known ssh issues
        '''
        display_cmd = map(pipes.quote, cmd)
        display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)), host=self.host)
        # Start the given command. If we don't need to pipeline data, we can try
        # to use a pseudo-tty (ssh will have been invoked with -tt). If we are
        # pipelining data, or can't create a pty, we fall back to using plain
        # old pipes.
        p = None
        if isinstance(cmd, (text_type, binary_type)):
            cmd = to_bytes(cmd)
        else:
            cmd = map(to_bytes, cmd)
        if not in_data:
            try:
                # Make sure stdin is a proper pty to avoid tcgetattr errors
                master, slave = pty.openpty()
                p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                stdin = os.fdopen(master, 'w', 0)
                os.close(slave)
            except (OSError, IOError):
                p = None
        if not p:
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdin = p.stdin
        # If we are using SSH password authentication, write the password into
        # the pipe we opened in _build_command.
        if self._play_context.password:
            os.close(self.sshpass_pipe[0])
            os.write(self.sshpass_pipe[1], "{0}\n".format(self._play_context.password))
            os.close(self.sshpass_pipe[1])
        ## SSH state machine
        #
        # Now we read and accumulate output from the running process until it
        # exits. Depending on the circumstances, we may also need to write an
        # escalation password and/or pipelined input to the process.
        states = [
            'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
        ]
        # Are we requesting privilege escalation? Right now, we may be invoked
        # to execute sftp/scp with sudoable=True, but we can request escalation
        # only when using ssh. Otherwise we can send initial data straightaway.
        state = states.index('ready_to_send')
        # cmd is a list of byte strings at this point, so this membership test
        # matches an exact b'ssh' argument, not a substring.
        if b'ssh' in cmd:
            if self._play_context.prompt:
                # We're requesting escalation with a password, so we have to
                # wait for a password prompt.
                state = states.index('awaiting_prompt')
                display.debug('Initial state: %s: %s' % (states[state], self._play_context.prompt))
            elif self._play_context.become and self._play_context.success_key:
                # We're requesting escalation without a password, so we have to
                # detect success/failure before sending any initial data.
                state = states.index('awaiting_escalation')
                display.debug('Initial state: %s: %s' % (states[state], self._play_context.success_key))
        # We store accumulated stdout and stderr output from the process here,
        # but strip any privilege escalation prompt/confirmation lines first.
        # Output is accumulated into tmp_*, complete lines are extracted into
        # an array, then checked and removed or copied to stdout or stderr. We
        # set any flags based on examining the output in self._flags.
        stdout = stderr = ''
        tmp_stdout = tmp_stderr = ''
        self._flags = dict(
            become_prompt=False, become_success=False,
            become_error=False, become_nopasswd_error=False
        )
        # select timeout should be longer than the connect timeout, otherwise
        # they will race each other when we can't connect, and the connect
        # timeout usually fails
        timeout = 2 + self._play_context.timeout
        rpipes = [p.stdout, p.stderr]
        for fd in rpipes:
            fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        # If we can send initial data without waiting for anything, we do so
        # before we call select.
        if states[state] == 'ready_to_send' and in_data:
            self._send_initial_data(stdin, in_data)
            state += 1
        while True:
            rfd, wfd, efd = select.select(rpipes, [], [], timeout)
            # We pay attention to timeouts only while negotiating a prompt.
            if not rfd:
                if state <= states.index('awaiting_escalation'):
                    # If the process has already exited, then it's not really a
                    # timeout; we'll let the normal error handling deal with it.
                    if p.poll() is not None:
                        break
                    self._terminate_process(p)
                    raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, stdout))
            # Read whatever output is available on stdout and stderr, and stop
            # listening to the pipe if it's been closed.
            if p.stdout in rfd:
                chunk = p.stdout.read()
                if chunk == '':
                    rpipes.remove(p.stdout)
                tmp_stdout += chunk
                display.debug("stdout chunk (state=%s):\n>>>%s<<<\n" % (state, chunk))
            if p.stderr in rfd:
                chunk = p.stderr.read()
                if chunk == '':
                    rpipes.remove(p.stderr)
                tmp_stderr += chunk
                display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, chunk))
            # We examine the output line-by-line until we have negotiated any
            # privilege escalation prompt and subsequent success/error message.
            # Afterwards, we can accumulate output without looking at it.
            if state < states.index('ready_to_send'):
                if tmp_stdout:
                    output, unprocessed = self._examine_output('stdout', states[state], tmp_stdout, sudoable)
                    stdout += output
                    tmp_stdout = unprocessed
                if tmp_stderr:
                    output, unprocessed = self._examine_output('stderr', states[state], tmp_stderr, sudoable)
                    stderr += output
                    tmp_stderr = unprocessed
            else:
                stdout += tmp_stdout
                stderr += tmp_stderr
                tmp_stdout = tmp_stderr = ''
            # If we see a privilege escalation prompt, we send the password.
            # (If we're expecting a prompt but the escalation succeeds, we
            # didn't need the password and can carry on regardless.)
            if states[state] == 'awaiting_prompt':
                if self._flags['become_prompt']:
                    display.debug('Sending become_pass in response to prompt')
                    stdin.write('{0}\n'.format(to_bytes(self._play_context.become_pass )))
                    self._flags['become_prompt'] = False
                    state += 1
                elif self._flags['become_success']:
                    state += 1
            # We've requested escalation (with or without a password), now we
            # wait for an error message or a successful escalation.
            if states[state] == 'awaiting_escalation':
                if self._flags['become_success']:
                    display.debug('Escalation succeeded')
                    self._flags['become_success'] = False
                    state += 1
                elif self._flags['become_error']:
                    display.debug('Escalation failed')
                    self._terminate_process(p)
                    self._flags['become_error'] = False
                    raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
                elif self._flags['become_nopasswd_error']:
                    display.debug('Escalation requires password')
                    self._terminate_process(p)
                    self._flags['become_nopasswd_error'] = False
                    raise AnsibleError('Missing %s password' % self._play_context.become_method)
                elif self._flags['become_prompt']:
                    # This shouldn't happen, because we should see the "Sorry,
                    # try again" message first.
                    display.debug('Escalation prompt repeated')
                    self._terminate_process(p)
                    self._flags['become_prompt'] = False
                    raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
            # Once we're sure that the privilege escalation prompt, if any, has
            # been dealt with, we can send any initial data and start waiting
            # for output.
            if states[state] == 'ready_to_send':
                if in_data:
                    self._send_initial_data(stdin, in_data)
                state += 1
            # Now we're awaiting_exit: has the child process exited? If it has,
            # and we've read all available output from it, we're done.
            if p.poll() is not None:
                if not rpipes or not rfd:
                    break
                # When ssh has ControlMaster (+ControlPath/Persist) enabled, the
                # first connection goes into the background and we never see EOF
                # on stderr. If we see EOF on stdout and the process has exited,
                # we're probably done. We call select again with a zero timeout,
                # just to make certain we don't miss anything that may have been
                # written to stderr between the time we called select() and when
                # we learned that the process had finished.
                if p.stdout not in rpipes:
                    timeout = 0
                    continue
            # If the process has not yet exited, but we've already read EOF from
            # its stdout and stderr (and thus removed both from rpipes), we can
            # just wait for it to exit.
            elif not rpipes:
                p.wait()
                break
            # Otherwise there may still be outstanding data to read.
        # close stdin after process is terminated and stdout/stderr are read
        # completely (see also issue #848)
        stdin.close()
        if C.HOST_KEY_CHECKING:
            # sshpass exits with code 6 when the host key is unknown.
            if cmd[0] == b"sshpass" and p.returncode == 6:
                raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
        controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr
        if p.returncode != 0 and controlpersisterror:
            raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
        if p.returncode == 255 and in_data:
            raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
        return (p.returncode, stdout, stderr)
def _exec_command(self, cmd, in_data=None, sudoable=True):
''' run a command on the remote host '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.vvv("ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
# we can only use tty when we are not pipelining the modules. piping
# data into /usr/bin/python inside a tty automatically invokes the
# python interactive-mode but the modules are not compatible with the
# interactive-mode ("unexpected indent" mainly because of empty lines)
if in_data:
cmd = self._build_command('ssh', self.host, cmd)
else:
cmd = self._build_command('ssh', '-tt', self.host, cmd)
(returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
return (returncode, stdout, stderr)
#
# Main public methods
#
    def exec_command(self, *args, **kwargs):
        """
        Wrapper around _exec_command to retry in the case of an ssh failure
        Will retry if:
        * an exception is caught
        * ssh returns 255
        Will not retry if
        * remaining_tries is <2
        * retries limit reached
        """
        remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1
        cmd_summary = "%s..." % args[0]
        for attempt in xrange(remaining_tries):
            try:
                return_tuple = self._exec_command(*args, **kwargs)
                # 0 = success
                # 1-254 = remote command return code
                # 255 = failure from the ssh command itself
                if return_tuple[0] != 255 or attempt == (remaining_tries - 1):
                    break
                else:
                    # Raising here routes a retryable 255 through the except
                    # branch below, which handles the backoff and retry.
                    raise AnsibleConnectionFailure("Failed to connect to the host via ssh.")
            except (AnsibleConnectionFailure, Exception) as e:
                if attempt == remaining_tries - 1:
                    # Out of retries: propagate the last failure unchanged.
                    raise
                else:
                    # Exponential backoff (1, 3, 7, ... seconds), capped at 30.
                    pause = 2 ** attempt - 1
                    if pause > 30:
                        pause = 30
                    if isinstance(e, AnsibleConnectionFailure):
                        msg = "ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt, cmd_summary, pause)
                    else:
                        msg = "ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds" % (attempt, e, cmd_summary, pause)
                    display.vv(msg)
                    time.sleep(pause)
                    continue
        return return_tuple
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
# scp and sftp require square brackets for IPv6 addresses, but
# accept them for hostnames and IPv4 addresses too.
host = '[%s]' % self.host
if C.DEFAULT_SCP_IF_SSH:
cmd = self._build_command('scp', in_path, '{0}:{1}'.format(host, pipes.quote(out_path)))
in_data = None
else:
cmd = self._build_command('sftp', host)
in_data = "put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
(returncode, stdout, stderr) = self._run(cmd, in_data)
if returncode != 0:
raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(out_path, stdout, stderr))
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
# scp and sftp require square brackets for IPv6 addresses, but
# accept them for hostnames and IPv4 addresses too.
host = '[%s]' % self.host
if C.DEFAULT_SCP_IF_SSH:
cmd = self._build_command('scp', '{0}:{1}'.format(host, pipes.quote(in_path)), out_path)
in_data = None
else:
cmd = self._build_command('sftp', host)
in_data = "get {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
(returncode, stdout, stderr) = self._run(cmd, in_data)
if returncode != 0:
raise AnsibleError("failed to transfer file from {0}:\n{1}\n{2}".format(in_path, stdout, stderr))
def close(self):
# If we have a persistent ssh connection (ControlPersist), we can ask it
# to stop listening. Otherwise, there's nothing to do here.
# TODO: reenable once winrm issues are fixed
# temporarily disabled as we are forced to currently close connections after every task because of winrm
# if self._connected and self._persistent:
# cmd = self._build_command('ssh', '-O', 'stop', self.host)
#
# cmd = map(to_bytes, cmd)
# p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# stdout, stderr = p.communicate()
self._connected = False
| ngpestelos/ansible | lib/ansible/plugins/connection/ssh.py | Python | gpl-3.0 | 28,684 |
import logging

logger = logging.getLogger(__name__)


class SyncSectors:
    """Keep the rows of ``sector_model`` in step with ``sectors``.

    ``sectors`` is an iterable of ``(id, name)`` pairs. New sectors are
    created, renamed sectors are updated and, when ``disable_on`` is set,
    sectors missing from the list are marked disabled. With ``simulate``
    the intended actions are only logged, never written to the database.
    """

    def __init__(self, sector_model, sectors, disable_on=None, simulate=False):
        self.sector_model = sector_model
        self.sectors = sectors
        self.disable_on = disable_on
        self.simulate = simulate

    def log(self, msg, level=logging.DEBUG):
        # Single funnel for all progress messages.
        logger.log(level, msg)

    def __call__(self, *args, **kwargs):
        # Allow the instance itself to be used as a task callable.
        self.process()

    def process(self):
        """Run the full synchronisation: create, rename, then disable."""
        self.add_new_sectors()
        self.update_existing_sectors()
        if self.disable_on:
            self.disable_sectors()

    def _get_sector(self, sector_id):
        # Return the stored sector or None when it does not exist.
        try:
            return self.sector_model.objects.get(id=sector_id)
        except self.sector_model.DoesNotExist:
            return None

    def _update_sector_name(self, sector, sector_name):
        if sector.name == sector_name:
            return
        self.log(f'Updating Sector {sector.id}: [{sector.name} to {sector_name}]')
        if not self.simulate:
            sector.name = sector_name
            sector.save()

    def _create_sector(self, sector_id, sector_name):
        self.log(f'Creating Sector {sector_id}: [{sector_name}]')
        if not self.simulate:
            self.sector_model.objects.create(id=sector_id, name=sector_name)

    def _disable_sector(self, sector):
        self.log(f'Disabling Sector {sector.id}: [{sector.name}]')
        if not self.simulate:
            sector.disabled_on = self.disable_on
            sector.save()

    def add_new_sectors(self):
        """Create every sector in the input list that is not stored yet."""
        for sector_id, sector_name in self.sectors:
            if self._get_sector(sector_id) is None:
                self._create_sector(sector_id, sector_name)

    def update_existing_sectors(self):
        """Rename stored sectors whose name differs from the input list."""
        for sector_id, sector_name in self.sectors:
            sector = self._get_sector(sector_id)
            if sector is None:
                self.log(f'Sector {sector_id}: DOES NOT EXIST [{sector_name}]')
            else:
                self._update_sector_name(sector, sector_name)

    def disable_sectors(self):
        """Disable enabled sectors that no longer appear in the input list."""
        current_ids = list(dict(self.sectors).keys())
        stale_sectors = self.sector_model.objects.exclude(id__in=current_ids).filter(
            disabled_on__isnull=True
        )
        for sector in stale_sectors:
            self._disable_sector(sector)
| UKTradeInvestment/export-wins-data | mi/sync_sectors.py | Python | gpl-3.0 | 2,373 |
#!/usr/bin/env python3
# Copyright 2016 - 2021 Bas van Meerten and Wouter Franssen
# This file is part of ssNake.
#
# ssNake is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ssNake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ssNake. If not, see <http://www.gnu.org/licenses/>.
import re

import numpy as np
import scipy.integrate
import scipy.special

import hypercomplex as hc
def safeEval(inp, length=None, Type='All', x=None):
    """
    Creates a more restricted eval environment.
    Note that this method is still not acceptable to process strings from untrusted sources.

    Parameters
    ----------
    inp : str
        String to evaluate.
    length : int or float, optional
        The variable length will be set to this value.
        By default the variable length is not set.
    Type : {'All', 'FI', 'C'}, optional
        Type of expected output. 'All' will return all types, 'FI' will return a float or int, and 'C' will return a complex number.
        By default Type is set to 'All'
    x : array_like, optional
        The variable x is set to this variable,
        By default the variable x is not used.

    Returns
    -------
    Object
        The result of the evaluated string, or None when evaluation fails or
        the result does not match the requested Type.
    """
    # Build the evaluation namespace from numpy, hypercomplex and scipy
    # helpers so expressions like 'sin(pi/2)' or 'gamma(3)' work directly.
    env = vars(np).copy()
    env.update(vars(hc).copy())
    env.update(vars(scipy.special).copy())
    env.update(vars(scipy.integrate).copy())
    # Remove introspection hooks and expose only a minimal set of builtins.
    env["locals"] = None
    env["globals"] = None
    env["__name__"] = None
    env["__file__"] = None
    env["__builtins__"] = {'None': None, 'False': False, 'True': True}  # None
    env["slice"] = slice
    if length is not None:
        env["length"] = length
    if x is not None:
        env["x"] = x
    # Expand the '16k'/'16K' shorthand to multiples of 1024 before evaluating.
    # Raw strings avoid the invalid '\g' escape warning of the original code.
    inp = re.sub(r'([0-9]+)[kK]', r'\g<1>*1024', str(inp))
    try:
        # SECURITY: eval on a user-supplied string; the restricted env limits
        # but does not eliminate the risk (see docstring).
        val = eval(inp, env)
        if isinstance(val, str):
            return None
        if Type == 'All':
            return val
        if Type == 'FI':  # single float/int type
            if isinstance(val, (float, int)) and not np.isnan(val) and not np.isinf(val):
                return val
            return None
        if Type == 'C':  # single complex number
            if isinstance(val, (float, int, complex)) and not np.isnan(val) and not np.isinf(val):
                return val
            return None
    except Exception:
        # Any evaluation error (syntax, name, math) maps to None by contract.
        return None
| smeerten/ssnake | src/safeEval.py | Python | gpl-3.0 | 2,736 |
# -*- coding: utf-8 -*-
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
import os
from os import listdir
from os.path import isfile, join
import random
import datetime
def files_in_dir(directory):
    """
    :param directory: The directory to scan (non-recursive)
    :return: List of the names of all regular files directly inside it
    """
    names = []
    for entry in listdir(directory):
        if isfile(join(directory, entry)):
            names.append(entry)
    return names
def random_string(length):
    """
    :param length: Length of the returned string
    :return: String of random characters from an unambiguous alphabet
    """
    # Alphabet deliberately omits easily-confused characters (0/O, 1/I, ...).
    alphabet = 'QAZWSXEDCRFVTGBYHNUJMKLP23456789'
    return ''.join([random.choice(alphabet) for _ in range(length)])
def is_christmas():
    """Return True during the Christmas season (11-29 December)."""
    today = datetime.datetime.now()
    return today.month == 12 and 10 < today.day < 30
# Absolute path of the package directory; also where the SQLite file lives.
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
#app.config['CSRF_ENABLED'] = True
# NOTE(review): the secret key is hard-coded in the repository; it should be
# loaded from the environment or an uncommitted config file instead.
app.config['SECRET_KEY'] = '5d6d3e2u8d5g2D4S5DSF2sdf5s1df531sef'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'database.db')
db = SQLAlchemy(app)
# Make these helpers available inside all Jinja templates.
app.jinja_env.globals.update(enumerate=enumerate, is_christmas=is_christmas)
# Imported late on purpose: views and models need the configured app/db above.
from . import views, models
# Create database if it's not there
for file in files_in_dir(basedir):
    if 'database.db' in file:
        break
else:
    # for/else: this branch runs only when no database file was found above.
    db.create_all()
    print('No DB. Creating....')
| wernersa/Streprogen_web | app/__init__.py | Python | gpl-3.0 | 1,393 |
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: Romain Gauthier <romain@itaapy.com>
# Hervé Cauwelier <herve@itaapy.com>
# David Versmisse <david.versmisse@itaapy.com>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option)
# any later version.
# Lpod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Import from the Standard Library
from unittest import TestCase, main
# Import from lpod
from lpod.document import odf_get_document
class ContentTestCase(TestCase):
    """Checks that the text body of samples/base_text.odt serializes to the
    expected XML."""
    def setUp(self):
        # Re-open the sample document before each test.
        self.document = document = odf_get_document('samples/base_text.odt')
    def tearDown(self):
        # Release the document loaded in setUp().
        del self.document
    def test_get_body(self):
        """get_body() must return the <office:text> element, serialized
        byte-for-byte as in the reference sample."""
        body = self.document.get_body()
        expected = ('<office:text>\n'
                    '  <text:sequence-decls>\n'
                    '    <text:sequence-decl text:display-outline-level="0" '
                    'text:name="Illustration"/>\n'
                    '    <text:sequence-decl text:display-outline-level="0" '
                    'text:name="Table"/>\n'
                    '    <text:sequence-decl text:display-outline-level="0" '
                    'text:name="Text"/>\n'
                    '    <text:sequence-decl text:display-outline-level="0" '
                    'text:name="Drawing"/>\n'
                    '  </text:sequence-decls>\n'
                    '  <text:section text:style-name="Sect1" '
                    'text:name="Section1">\n'
                    '    <text:h text:style-name="Heading_20_1" '
                    'text:outline-level="1">LpOD Test Case Document</text:h>\n'
                    '    <text:p text:style-name="Text_20_body">This is the '
                    'first paragraph.</text:p>\n'
                    '    <text:p text:style-name="Text_20_body">This is the '
                    'second paragraph.</text:p>\n'
                    '    <text:p text:style-name="Hanging_20_indent">This is '
                    'a paragraph with a named style.</text:p>\n'
                    '    <text:h text:style-name="Heading_20_2" '
                    'text:outline-level="2">Level 2 Title</text:h>\n'
                    '    <text:p text:style-name="Text_20_body">This is the '
                    'first paragraph of the second title.</text:p>\n'
                    '    <text:p text:style-name="Text_20_body">This is the '
                    'last paragraph with diacritical signs: '
                    'éè</text:p>\n'
                    '  </text:section>\n'
                    '  <text:section text:style-name="Sect1" '
                    'text:name="Section2">\n'
                    '    <text:h text:style-name="Heading_20_1" '
                    'text:outline-level="1" text:restart-numbering="true" '
                    'text:start-value="-1">First Title of the '
                    'Second Section</text:h>\n'
                    '    <text:p text:style-name="Text_20_body">First '
                    'paragraph of the second section.</text:p>\n'
                    '    <text:p text:style-name="Text_20_body">This is '
                    'the second paragraph with <text:a xlink:type="simple" '
                    'xlink:href="http://lpod-project.org/" office:name="Link '
                    'to the lpod project">an external link</text:a> inside.'
                    '</text:p>\n'
                    '  </text:section>\n'
                    '</office:text>\n')
        self.assertEqual(body.serialize(pretty=True), expected)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    main()
| kiniou/blender-smooth-slides | tools/lpod/test/test_content.py | Python | gpl-3.0 | 4,392 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: alter the ts and review_status columns.

        The ts fields become auto_now_add and review_status drops its
        nullability (compare with backwards(), which restores null=True).
        """
        # Changing field 'Response.ts'
        db.alter_column(u'survey_response', 'ts', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
        # Changing field 'Respondant.review_status'
        db.alter_column(u'survey_respondant', 'review_status', self.gf('django.db.models.fields.CharField')(max_length=20))
        # Changing field 'Respondant.ts'
        db.alter_column(u'survey_respondant', 'ts', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
    def backwards(self, orm):
        """Revert the migration: restore the previous column definitions
        (plain DateTimeField for ts, nullable review_status)."""
        # Changing field 'Response.ts'
        db.alter_column(u'survey_response', 'ts', self.gf('django.db.models.fields.DateTimeField')())
        # Changing field 'Respondant.review_status'
        db.alter_column(u'survey_respondant', 'review_status', self.gf('django.db.models.fields.CharField')(max_length=20, null=True))
        # Changing field 'Respondant.ts'
        db.alter_column(u'survey_respondant', 'ts', self.gf('django.db.models.fields.DateTimeField')())
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'survey.block': {
'Meta': {'object_name': 'Block'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'skip_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'skip_question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'})
},
u'survey.gridanswer': {
'Meta': {'object_name': 'GridAnswer'},
'answer_number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'col_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'col_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"}),
'row_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'row_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'survey.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}),
'lng': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}),
'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']", 'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"})
},
u'survey.locationanswer': {
'Meta': {'object_name': 'LocationAnswer'},
'answer': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Location']"})
},
u'survey.multianswer': {
'Meta': {'object_name': 'MultiAnswer'},
'answer_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'answer_text': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"})
},
u'survey.option': {
'Meta': {'object_name': 'Option'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'max': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'min': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rows': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'integer'", 'max_length': '20'})
},
u'survey.page': {
'Meta': {'ordering': "['survey', 'question__order']", 'object_name': 'Page'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']", 'null': 'True', 'blank': 'True'})
},
u'survey.question': {
'Meta': {'ordering': "['order']", 'object_name': 'Question'},
'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'blocks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Block']", 'null': 'True', 'blank': 'True'}),
'cols': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'filterBy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_questions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'filter_questions_rel_+'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'foreach_question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'foreach'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'grid_cols': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'grid_cols'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['survey.Option']"}),
'hoist_answers': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'hoisted'", 'null': 'True', 'to': u"orm['survey.Question']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'integer_max': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'integer_min': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'lat': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'lng': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'min_zoom': ('django.db.models.fields.IntegerField', [], {'default': '10', 'null': 'True', 'blank': 'True'}),
'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modal_question'", 'null': 'True', 'to': u"orm['survey.Question']"}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}),
'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'options_json': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'report_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '20', 'null': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rows': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'skip_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'skip_question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'term_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}),
'visualize': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'zoom': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'survey.respondant': {
'Meta': {'object_name': 'Respondant'},
'buy_or_catch': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email': ('django.db.models.fields.EmailField', [], {'default': 'None', 'max_length': '254', 'null': 'True', 'blank': 'True'}),
'how_sold': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'last_question': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'locations': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'responses': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'responses'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['survey.Response']"}),
'review_status': ('django.db.models.fields.CharField', [], {'default': "'needs review'", 'max_length': '20'}),
'status': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '20', 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}),
'survey_site': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}),
'surveyor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'test_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'cf606b7a-d24b-4089-8b49-3f2078b5060c'", 'max_length': '36', 'primary_key': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'})
},
u'survey.response': {
'Meta': {'object_name': 'Response'},
'answer': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'answer_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'answer_raw': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']", 'null': 'True', 'blank': 'True'}),
'ts': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'anon': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'}),
'states': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['survey'] | point97/hapifis | server/apps/survey/migrations/0077_auto__chg_field_response_ts__chg_field_respondant_review_status__chg_f.py | Python | gpl-3.0 | 17,288 |
#!/usr/bin/env python
# vim: set fileencoding=utf-8
import os
import argparse
from gff3 import genes, get_gff3_id, get_rbs_from, feature_test_true, feature_lambda, feature_test_type
from cpt_gffParser import gffParse, gffWrite
from Bio import SeqIO
from jinja2 import Environment, FileSystemLoader
import logging
from math import floor
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(name="pat")
# Path to script, required because of Galaxy.
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
# Path to the HTML template for the report
def genes_all(feature_list, feature_type=("gene",), sort=False):
    """
    Simple filter to extract features of the requested types from the
    feature set.

    feature_list: iterable of features to search (sub-features included).
    feature_type: collection of feature type names to keep.
    sort: when True, yield features ordered by their start coordinate.
    """
    # BUGFIX (best practice): the default used to be the mutable list
    # ["gene"]; a tuple avoids the shared-mutable-default pitfall while
    # behaving identically for membership tests.
    if sort:
        ordered = sorted(
            genes_all(feature_list, feature_type, sort=False),
            key=lambda feat: feat.location.start,
        )
        yield from ordered
    else:
        yield from feature_lambda(
            feature_list, feature_test_type, {"types": feature_type}, subfeatures=True
        )
def checkSubs(feature, qualName):
    """
    Breadth-first search of a feature's sub-feature tree for qualifiers.

    Walks the sub-features level by level; the first sub-feature carrying any
    qualifier whose key appears in ``qualName`` wins, and all of its matching
    qualifier values are joined with "; ".  Returns "" when nothing matches.
    """
    found = ""
    level = feature.sub_features
    while level:
        for feat in level:
            for key in feat.qualifiers.keys():
                for wanted in qualName:
                    if key == wanted:
                        if found:
                            found += "; " + feat.qualifiers[key][0]
                        else:
                            found = feat.qualifiers[key][0]
            # Stop at the first feature (in BFS order) that produced a value.
            if found:
                return found
        # Descend one level: gather every child of the current level.
        next_level = []
        for feat in level:
            next_level.extend(feat.sub_features)
        level = next_level
    return found
def annotation_table_report(record, types, wanted_cols, gaf_data, searchSubs):
getTypes = []
for x in [y.strip() for y in types.split(",")]:
getTypes.append(x)
getTypes.append("gene")
sorted_features = list(genes_all(record.features, getTypes, sort=True))
if wanted_cols is None or len(wanted_cols.strip()) == 0:
return [], []
useSubs = searchSubs
def rid(record, feature):
"""Organism ID
"""
return record.id
def id(record, feature):
"""ID
"""
return feature.id
def featureType(record, feature):
"""Type
"""
return feature.type
def name(record, feature):
"""Name
"""
for x in ["Name", "name"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["Name", "name"])
if res != "":
return res
return "None"
def start(record, feature):
"""Boundary
"""
return str(feature.location.start + 1)
def end(record, feature):
"""Boundary
"""
return str(feature.location.end)
def location(record, feature):
"""Location
"""
return str(feature.location.start + 1) + "..{0.end}".format(feature.location)
def length(record, feature):
"""CDS Length (AA)
"""
if feature.type == "CDS":
cdss = [feature]
else:
cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
if cdss == []:
return "None"
res = (sum([len(cds) for cds in cdss]) / 3) - 1
if floor(res) == res:
res = int(res)
return str(res)
def notes(record, feature):
"""User entered Notes"""
for x in ["Note", "note", "Notes", "notes"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["Note", "note", "Notes", "notes"])
if res != "":
return res
return "None"
def date_created(record, feature):
"""Created"""
return feature.qualifiers.get("date_creation", ["None"])[0]
def date_last_modified(record, feature):
"""Last Modified"""
res = feature.qualifiers.get("date_last_modified", ["None"])[0]
if res != "None":
return res
if useSubs:
res = checkSubs(feature, ["date_last_modified"])
if res != "":
return res
return "None"
def description(record, feature):
"""Description"""
res = feature.qualifiers.get("description", ["None"])[0]
if res != "None":
return res
if useSubs:
res = checkSubs(feature, ["description"])
if res != "":
return res
return "None"
def owner(record, feature):
"""Owner
User who created the feature. In a 464 scenario this may be one of
the TAs."""
for x in ["Owner", "owner"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["Owner", "owner"])
if res != "":
return res
return "None"
def product(record, feature):
"""Product
User entered product qualifier (collects "Product" and "product"
entries)"""
"""User entered Notes"""
for x in ["product", "Product"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["product", "Product"])
if res != "":
return res
return "None"
def note(record, feature):
"""Note
User entered Note qualifier(s)"""
return feature.qualifiers.get("Note", [])
def strand(record, feature):
"""Strand
"""
return "+" if feature.location.strand > 0 else "-"
def sd_spacing(record, feature):
"""Shine-Dalgarno spacing
"""
rbss = get_rbs_from(gene)
if len(rbss) == 0:
return "None"
else:
resp = []
for rbs in rbss:
cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
if len(cdss) == 0:
return "No CDS"
if rbs.location.strand > 0:
distance = min(
cdss, key=lambda x: x.location.start - rbs.location.end
)
distance_val = str(distance.location.start - rbs.location.end)
resp.append(distance_val)
else:
distance = min(
cdss, key=lambda x: x.location.end - rbs.location.start
)
distance_val = str(rbs.location.start - distance.location.end)
resp.append(distance_val)
if len(resp) == 1:
return str(resp[0])
return resp
def sd_seq(record, feature):
"""Shine-Dalgarno sequence
"""
rbss = get_rbs_from(gene)
if len(rbss) == 0:
return "None"
else:
resp = []
for rbs in rbss:
resp.append(str(rbs.extract(record).seq))
if len(resp) == 1:
return str(resp[0])
else:
return resp
def start_codon(record, feature):
"""Start Codon
"""
if feature.type == "CDS":
cdss = [feature]
else:
cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
data = [x for x in cdss]
if len(data) == 1:
return str(data[0].extract(record).seq[0:3])
else:
return [
"{0} ({1.location.start}..{1.location.end}:{1.location.strand})".format(
x.extract(record).seq[0:3], x
)
for x in data
]
def stop_codon(record, feature):
"""Stop Codon
"""
return str(feature.extract(record).seq[-3:])
def dbxrefs(record, feature):
"""DBxrefs
"""
"""User entered Notes"""
for x in ["Dbxref", "db_xref", "DB_xref", "DBxref", "DB_Xref", "DBXref"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
return "None"
def upstream_feature(record, feature):
"""Next gene upstream"""
if feature.strand > 0:
upstream_features = [
x for x in sorted_features if (x.location.start < feature.location.start and x.type == "gene" and x.strand == feature.strand)
]
if len(upstream_features) > 0:
foundSelf = False
featCheck = upstream_features[-1].sub_features
for x in featCheck:
if x == feature:
foundSelf = True
break
featCheck = featCheck + x.sub_features
if foundSelf:
if len(upstream_features) > 1:
return upstream_features[-2]
return None
return upstream_features[-1]
else:
return None
else:
upstream_features = [
x for x in sorted_features if (x.location.end > feature.location.end and x.type == "gene" and x.strand == feature.strand)
]
if len(upstream_features) > 0:
foundSelf = False
featCheck = upstream_features[0].sub_features
for x in featCheck:
if x == feature:
foundSelf = True
break
featCheck = featCheck + x.sub_features
if foundSelf:
if len(upstream_features) > 1:
return upstream_features[1]
return None
return upstream_features[0]
else:
return None
def upstream_feature__name(record, feature):
"""Next gene upstream"""
up = upstream_feature(record, feature)
if up:
return str(up.id)
return "None"
def ig_dist(record, feature):
"""Distance to next upstream gene on same strand"""
up = upstream_feature(record, feature)
if up:
dist = None
if feature.strand > 0:
dist = feature.location.start - up.location.end
else:
dist = up.location.start - feature.location.end
return str(dist)
else:
return "None"
def _main_gaf_func(record, feature, gaf_data, attr):
if feature.id in gaf_data:
return [x[attr] for x in gaf_data[feature.id]]
return []
def gaf_annotation_extension(record, feature, gaf_data):
"""GAF Annotation Extension
Contains cross references to other ontologies that can be used
to qualify or enhance the annotation. The cross-reference is
prefaced by an appropriate GO relationship; references to
multiple ontologies can be entered. For example, if a gene
product is localized to the mitochondria of lymphocytes, the GO
ID (column 5) would be mitochondrion ; GO:0005439, and the
annotation extension column would contain a cross-reference to
the term lymphocyte from the Cell Type Ontology.
"""
return _main_gaf_func(record, feature, gaf_data, "annotation_extension")
def gaf_aspect(record, feature, gaf_data):
"""GAF Aspect code
E.g. P (biological process), F (molecular function) or C (cellular component)
"""
return _main_gaf_func(record, feature, gaf_data, "aspect")
def gaf_assigned_by(record, feature, gaf_data):
"""GAF Creating Organisation
"""
return _main_gaf_func(record, feature, gaf_data, "assigned_by")
def gaf_date(record, feature, gaf_data):
"""GAF Creation Date
"""
return _main_gaf_func(record, feature, gaf_data, "date")
def gaf_db(record, feature, gaf_data):
"""GAF DB
"""
return _main_gaf_func(record, feature, gaf_data, "db")
def gaf_db_reference(record, feature, gaf_data):
"""GAF DB Reference
"""
return _main_gaf_func(record, feature, gaf_data, "db_reference")
def gaf_evidence_code(record, feature, gaf_data):
"""GAF Evidence Code
"""
return _main_gaf_func(record, feature, gaf_data, "evidence_code")
def gaf_go_id(record, feature, gaf_data):
"""GAF GO ID
"""
return _main_gaf_func(record, feature, gaf_data, "go_id")
def gaf_go_term(record, feature, gaf_data):
"""GAF GO Term
"""
return _main_gaf_func(record, feature, gaf_data, "go_term")
def gaf_id(record, feature, gaf_data):
"""GAF ID
"""
return _main_gaf_func(record, feature, gaf_data, "id")
def gaf_notes(record, feature, gaf_data):
"""GAF Notes
"""
return _main_gaf_func(record, feature, gaf_data, "notes")
def gaf_owner(record, feature, gaf_data):
"""GAF Creator
"""
return _main_gaf_func(record, feature, gaf_data, "owner")
def gaf_with_or_from(record, feature, gaf_data):
"""GAF With/From
"""
return _main_gaf_func(record, feature, gaf_data, "with_or_from")
cols = []
data = []
funcs = []
lcl = locals()
for x in [y.strip().lower() for y in wanted_cols.split(",")]:
if not x:
continue
if x == "type":
x = "featureType"
if x in lcl:
funcs.append(lcl[x])
# Keep track of docs
func_doc = lcl[x].__doc__.strip().split("\n\n")
# If there's a double newline, assume following text is the
# "help" and the first part is the "name". Generate empty help
# if not provided
if len(func_doc) == 1:
func_doc += [""]
cols.append(func_doc)
elif "__" in x:
chosen_funcs = [lcl[y] for y in x.split("__")]
func_doc = [
" of ".join(
[y.__doc__.strip().split("\n\n")[0] for y in chosen_funcs[::-1]]
)
]
cols.append(func_doc)
funcs.append(chosen_funcs)
for gene in genes_all(record.features, getTypes, sort=True):
row = []
for func in funcs:
if isinstance(func, list):
# If we have a list of functions, repeatedly apply them
value = gene
for f in func:
if value is None:
value = "None"
break
value = f(record, value)
else:
# Otherwise just apply the lone function
if func.__name__.startswith("gaf_"):
value = func(record, gene, gaf_data)
else:
value = func(record, gene)
if isinstance(value, list):
collapsed_value = ", ".join(value)
value = [str(collapsed_value)]#.encode("unicode_escape")]
else:
value = str(value)#.encode("unicode_escape")
row.append(value)
# print row
data.append(row)
return data, cols
def parseGafData(file):
    """
    Parse a CPT GAF-like tab-separated table into a dict keyed by gene ID.

    Lines starting with "#" are header lines: they define the column names
    (the "GO Term" column is normalised to "go_term").  Each data row is
    turned into a {column: value} dict; rows without a "gene" column are
    skipped.  Returns {gene_id: [row_dict, ...]}.
    """
    columns = []
    by_gene = {}
    for line in file:
        if line.startswith("#"):
            # Header row: drop the leading "# " marker and normalise names.
            columns = (
                line.strip().replace("# ", "").replace("GO Term", "go_term").split("\t")
            )
        else:
            values = line.strip().split("\t")
            row = dict(zip(columns, values))
            if "gene" not in row.keys():
                continue
            by_gene.setdefault(row["gene"], []).append(row)
    return by_gene
def evaluate_and_report(
    annotations,
    genome,
    types="gene",
    reportTemplateName="phage_annotation_validator.html",
    annotationTableCols="",
    gafData=None,
    searchSubs=False,
):
    """
    Generate our HTML (or text) evaluation of the genome.

    annotations: open handle to the GFF3 annotation file.
    genome: open handle to the matching FASTA genome.
    types: comma-separated feature types to report (gene is always added).
    reportTemplateName: Jinja2 template name; ".html" templates get
        HTML-safe (dot-free) identifiers.
    annotationTableCols: comma-separated column selectors for the table.
    gafData: optional open handle to a CPT GAF-like table.
    searchSubs: also search sub-features for missing qualifiers.

    Returns the rendered report as UTF-8 encoded bytes.
    """
    # Robustness: argparse forwards None when --types is omitted, which would
    # crash types.split(",") downstream; fall back to the documented default.
    if not types:
        types = "gene"
    # Get features from GFF file; map sequence ids to records so gffParse can
    # attach the sequences.
    seq_dict = SeqIO.to_dict(SeqIO.parse(genome, "fasta"))
    # TODO: support multiple GFF3 files.
    at_table_data = []
    annotation_table_col_names = []  # stays empty when there are no records
    gaf = {}
    if gafData:
        gaf = parseGafData(gafData)
    for record in gffParse(annotations, base_dict=seq_dict):
        if reportTemplateName.endswith(".html"):
            # Dots break CSS/anchor selectors in the HTML report.
            record.id = record.id.replace(".", "-")
        log.info("Producing an annotation table for %s" % record.id)
        annotation_table_data, annotation_table_col_names = annotation_table_report(
            record, types, annotationTableCols, gaf, searchSubs
        )
        at_table_data.append((record, annotation_table_data))
    # This is data that will go into our HTML template
    kwargs = {
        "annotation_table_data": at_table_data,
        "annotation_table_col_names": annotation_table_col_names,
    }
    env = Environment(
        loader=FileSystemLoader(SCRIPT_PATH), trim_blocks=True, lstrip_blocks=True
    )
    if reportTemplateName.endswith(".html"):
        # BUGFIX: previously `str(get_gff3_id).replace(".", "-")`, which turned
        # the *function object's repr* into a string and registered that string
        # as the Jinja filter. Register a callable that sanitises each id.
        env.filters["nice_id"] = lambda feature: str(get_gff3_id(feature)).replace(
            ".", "-"
        )
    else:
        env.filters["nice_id"] = get_gff3_id

    def join(listy):
        return "\n".join(listy)

    env.filters.update({"join": join})
    tpl = env.get_template(reportTemplateName)
    return tpl.render(**kwargs).encode("utf-8")
if __name__ == "__main__":
    # Command-line entry point: parse arguments, render the report, print it.
    # NOTE(review): the description below looks copied from another tool;
    # this script produces an annotation table, it does not rebase features.
    parser = argparse.ArgumentParser(
        description="rebase gff3 features against parent locations", epilog=""
    )
    parser.add_argument(
        "annotations", type=argparse.FileType("r"), help="Parent GFF3 annotations"
    )
    parser.add_argument("genome", type=argparse.FileType("r"), help="Genome Sequence")
    # NOTE(review): no default here, so omitting --types forwards None to
    # evaluate_and_report, overriding its "gene" default — confirm intended.
    parser.add_argument(
        "--types",
        help="Select extra types to display in output (Will always include gene)",
    )
    parser.add_argument(
        "--reportTemplateName",
        help="Report template file name",
        default="phageqc_report_full.html",
    )
    parser.add_argument(
        "--annotationTableCols",
        help="Select columns to report in the annotation table output format",
    )
    parser.add_argument(
        "--gafData", help="CPT GAF-like table", type=argparse.FileType("r")
    )
    parser.add_argument(
        "--searchSubs", help="Attempt to populate fields from sub-features if qualifier is empty", action="store_true"
    )
    args = parser.parse_args()
    # Argument names match evaluate_and_report's keyword parameters exactly.
    print(evaluate_and_report(**vars(args)).decode("utf-8"))
| TAMU-CPT/galaxy-tools | tools/phage/phage_annotation_table.py | Python | gpl-3.0 | 19,472 |
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
''' Post installation script for linux '''
import sys, os, cPickle, textwrap, stat, errno
from subprocess import check_call, check_output
from functools import partial
from calibre import __appname__, prints, guess_type
from calibre.constants import islinux, isbsd
from calibre.customize.ui import all_input_formats
from calibre.ptempfile import TemporaryDirectory
from calibre import CurrentDir
# Command-line and GUI launchers installed by the linux installer; each entry
# is a setuptools-style "name = module:callable" specification.
entry_points = {
        'console_scripts': [
                'ebook-device = calibre.devices.cli:main',
                'ebook-meta = calibre.ebooks.metadata.cli:main',
                'ebook-convert = calibre.ebooks.conversion.cli:main',
                'ebook-polish = calibre.ebooks.oeb.polish.main:main',
                'markdown-calibre = calibre.ebooks.markdown.__main__:run',
                'web2disk = calibre.web.fetch.simple:main',
                'calibre-server = calibre.srv.standalone:main',
                'lrf2lrs = calibre.ebooks.lrf.lrfparser:main',
                'lrs2lrf = calibre.ebooks.lrf.lrs.convert_from:main',
                'calibre-debug = calibre.debug:main',
                'calibredb = calibre.db.cli.main:main',
                'calibre-parallel = calibre.utils.ipc.worker:main',
                'calibre-customize = calibre.customize.ui:main',
                'calibre-complete = calibre.utils.complete:main',
                'fetch-ebook-metadata = calibre.ebooks.metadata.sources.cli:main',
                'calibre-smtp = calibre.utils.smtp:main',
        ],
        'gui_scripts' : [
            __appname__+' = calibre.gui_launch:calibre',
            'lrfviewer = calibre.gui2.lrf_renderer.main:main',
            'ebook-viewer = calibre.gui_launch:ebook_viewer',
            'ebook-edit = calibre.gui_launch:ebook_edit',
        ],
        }
class PreserveMIMEDefaults(object):
    """Context manager that snapshots the XDG ``applications/defaults.list``
    files on entry and restores their original contents (or removes files
    that did not exist before) on exit, so registering calibre's MIME data
    does not permanently clobber the user's application defaults."""

    def __init__(self):
        # Maps defaults.list path -> original bytes, or None if it was absent.
        self.initial_values = {}

    def __enter__(self):
        def_data_dirs = '/usr/local/share:/usr/share'
        paths = os.environ.get('XDG_DATA_DIRS', def_data_dirs)
        paths = paths.split(':')
        paths.append(os.environ.get('XDG_DATA_HOME', os.path.expanduser(
            '~/.local/share')))
        paths = list(filter(os.path.isdir, paths))
        if not paths:
            # Env var had garbage in it, ignore it
            paths = def_data_dirs.split(':')
            paths = list(filter(os.path.isdir, paths))
        self.paths = {os.path.join(x, 'applications/defaults.list') for x in
                paths}
        self.initial_values = {}
        for x in self.paths:
            try:
                with open(x, 'rb') as f:
                    self.initial_values[x] = f.read()
            except:
                # Best-effort snapshot: unreadable or missing file is recorded
                # as None so __exit__ knows to remove it if it appears.
                self.initial_values[x] = None

    def __exit__(self, *args):
        # Restore every file to its snapshotted state. (Py2-style iteritems.)
        for path, val in self.initial_values.iteritems():
            if val is None:
                try:
                    os.remove(path)
                except:
                    pass
            elif os.path.exists(path):
                try:
                    with open(path, 'r+b') as f:
                        if f.read() != val:
                            f.seek(0)
                            f.truncate()
                            f.write(val)
                except EnvironmentError as e:
                    # Tolerate read-only locations; anything else is a bug.
                    if e.errno != errno.EACCES:
                        raise
# Uninstall script {{{
UNINSTALL = '''\
#!{python}
from __future__ import print_function, unicode_literals
euid = {euid}
import os, subprocess, shutil
try:
raw_input
except NameError:
raw_input = input
if os.geteuid() != euid:
print ('The installer was last run as user id:', euid, 'To remove all files you must run the uninstaller as the same user')
if raw_input('Proceed anyway? [y/n]:').lower() != 'y':
raise SystemExit(1)
frozen_path = {frozen_path!r}
if not frozen_path or not os.path.exists(os.path.join(frozen_path, 'resources', 'calibre-mimetypes.xml')):
frozen_path = None
for f in {mime_resources!r}:
cmd = ['xdg-mime', 'uninstall', f]
print ('Removing mime resource:', os.path.basename(f))
ret = subprocess.call(cmd, shell=False)
if ret != 0:
print ('WARNING: Failed to remove mime resource', f)
for x in tuple({manifest!r}) + tuple({appdata_resources!r}) + (os.path.abspath(__file__), __file__, frozen_path):
if not x or not os.path.exists(x):
continue
print ('Removing', x)
try:
if os.path.isdir(x):
shutil.rmtree(x)
else:
os.unlink(x)
except Exception as e:
print ('Failed to delete', x)
print ('\t', e)
icr = {icon_resources!r}
mimetype_icons = []
def remove_icon(context, name, size, update=False):
cmd = ['xdg-icon-resource', 'uninstall', '--context', context, '--size', size, name]
if not update:
cmd.insert(2, '--noupdate')
print ('Removing icon:', name, 'from context:', context, 'at size:', size)
ret = subprocess.call(cmd, shell=False)
if ret != 0:
print ('WARNING: Failed to remove icon', name)
for i, (context, name, size) in enumerate(icr):
if context == 'mimetypes':
mimetype_icons.append((name, size))
continue
remove_icon(context, name, size, update=i == len(icr) - 1)
mr = {menu_resources!r}
for f in mr:
cmd = ['xdg-desktop-menu', 'uninstall', f]
print ('Removing desktop file:', f)
ret = subprocess.call(cmd, shell=False)
if ret != 0:
print ('WARNING: Failed to remove menu item', f)
print ()
if mimetype_icons and raw_input('Remove the e-book format icons? [y/n]:').lower() in ['', 'y']:
for i, (name, size) in enumerate(mimetype_icons):
remove_icon('mimetypes', name, size, update=i == len(mimetype_icons) - 1)
'''
# }}}
# Completion {{{
class ZshCompleter(object): # {{{
    def __init__(self, opts):
        # opts: installer options namespace; ``staging_sharedir`` points at
        # the share directory calibre is being installed into.
        self.opts = opts
        self.dest = None  # path of the zsh completion file to write, if any
        base = os.path.dirname(self.opts.staging_sharedir)
        self.detect_zsh(base)
        if not self.dest and base == '/usr/share':
            # Ubuntu puts site-functions in /usr/local/share
            self.detect_zsh('/usr/local/share')
        self.commands = {}  # completion function name -> zsh snippet
def detect_zsh(self, base):
for x in ('vendor-completions', 'vendor-functions', 'site-functions'):
c = os.path.join(base, 'zsh', x)
if os.path.isdir(c) and os.access(c, os.W_OK):
self.dest = os.path.join(c, '_calibre')
break
    def get_options(self, parser, cover_opts=('--cover',), opf_opts=('--opf',),
            file_map={}):
        # Yield one zsh _arguments spec string per option of *parser* (an
        # optparse parser, or a plain list of optparse Options).
        # NOTE(review): mutable default ``file_map={}`` — never mutated here,
        # but a None default would be safer practice.
        if hasattr(parser, 'option_list'):
            options = parser.option_list
            for group in parser.option_groups:
                options += group.option_list
        else:
            options = parser
        for opt in options:
            lo, so = opt._long_opts, opt._short_opts
            if opt.takes_value():
                # Trailing = / + tell zsh the option expects an argument.
                lo = [x+'=' for x in lo]
                so = [x+'+' for x in so]
            ostrings = lo + so
            ostrings = u'{%s}'%','.join(ostrings) if len(ostrings) > 1 else ostrings[0]
            exclude = u''
            if opt.dest is None:
                # Options like --help/--version end completion: exclude rest.
                exclude = u"'(- *)'"
            # Escape characters that are special inside zsh spec strings.
            h = opt.help or ''
            h = h.replace('"', "'").replace('[', '(').replace(
                ']', ')').replace('\n', ' ').replace(':', '\\:').replace('`', "'")
            h = h.replace('%default', type(u'')(opt.default))
            arg = ''
            if opt.takes_value():
                arg = ':"%s":'%h
                if opt.dest in {'extract_to', 'debug_pipeline', 'to_dir', 'outbox', 'with_library', 'library_path'}:
                    arg += "'_path_files -/'"
                elif opt.choices:
                    arg += "(%s)"%'|'.join(opt.choices)
                elif set(file_map).intersection(set(opt._long_opts)):
                    # Complete filenames restricted to the mapped extensions.
                    k = set(file_map).intersection(set(opt._long_opts))
                    exts = file_map[tuple(k)[0]]
                    if exts:
                        arg += "'_files -g \"%s\"'"%(' '.join('*.%s'%x for x in
                            tuple(exts) + tuple(x.upper() for x in exts)))
                    else:
                        arg += "_files"
                elif (opt.dest in {'pidfile', 'attachment'}):
                    arg += "_files"
                elif set(opf_opts).intersection(set(opt._long_opts)):
                    arg += "'_files -g \"*.opf\"'"
                elif set(cover_opts).intersection(set(opt._long_opts)):
                    # NOTE(review): `pics` is a module-level name defined
                    # elsewhere in this file (not visible here) — presumably
                    # the set of image extensions; confirm.
                    arg += "'_files -g \"%s\"'"%(' '.join('*.%s'%x for x in
                        tuple(pics) + tuple(x.upper() for x in pics)))
            help_txt = u'"[%s]"'%h
            yield u'%s%s%s%s '%(exclude, ostrings, help_txt, arg)
def opts_and_exts(self, name, op, exts, cover_opts=('--cover',),
opf_opts=('--opf',), file_map={}):
if not self.dest:
return
exts = sorted({x.lower() for x in exts})
extra = ('''"*:filename:_files -g '(#i)*.(%s)'" ''' % '|'.join(exts),)
opts = '\\\n '.join(tuple(self.get_options(
op(), cover_opts=cover_opts, opf_opts=opf_opts, file_map=file_map)) + extra)
txt = '_arguments -s \\\n ' + opts
self.commands[name] = txt
def opts_and_words(self, name, op, words, takes_files=False):
if not self.dest:
return
extra = ("'*:filename:_files' ",) if takes_files else ()
opts = '\\\n '.join(tuple(self.get_options(op())) + extra)
txt = '_arguments -s \\\n ' + opts
self.commands[name] = txt
    def do_ebook_convert(self, f):
        # Emit the zsh completion machinery for ebook-convert into *f*:
        # helpers for the two positional args, the common option set,
        # per-format option sets, and the top-level dispatcher function.
        from calibre.ebooks.conversion.plumber import supported_input_formats
        from calibre.web.feeds.recipes.collection import get_builtin_recipe_titles
        from calibre.customize.ui import available_output_formats
        from calibre.ebooks.conversion.cli import create_option_parser, group_titles
        from calibre.utils.logging import DevNull
        input_fmts = set(supported_input_formats())
        output_fmts = set(available_output_formats())
        iexts = {x.upper() for x in input_fmts}.union(input_fmts)
        oexts = {x.upper() for x in output_fmts}.union(output_fmts)
        # w() writes str or bytes transparently to the script file handle.
        w = lambda x: f.write(x if isinstance(x, bytes) else x.encode('utf-8'))
        # Arg 1
        w('\n_ebc_input_args() {')
        w('\n  local extras; extras=(')
        w('\n    {-h,--help}":Show Help"')
        w('\n    "--version:Show program version"')
        w('\n    "--list-recipes:List builtin recipe names"')
        for recipe in sorted(set(get_builtin_recipe_titles())):
            # Escape zsh-special characters in recipe titles.
            recipe = recipe.replace(':', '\\:').replace('"', '\\"')
            w(u'\n    "%s.recipe"'%(recipe))
        w('\n  ); _describe -t recipes "ebook-convert builtin recipes" extras')
        w('\n  _files -g "%s"'%' '.join(('*.%s'%x for x in iexts)))
        w('\n}\n')
        # Arg 2
        w('\n_ebc_output_args() {')
        w('\n  local extras; extras=(')
        for x in output_fmts:
            w('\n    ".{0}:Convert to a .{0} file with the same name as the input file"'.format(x))
        w('\n  ); _describe -t output "ebook-convert output" extras')
        w('\n  _files -g "%s"'%' '.join(('*.%s'%x for x in oexts)))
        w('\n  _path_files -/')
        w('\n}\n')
        log = DevNull()

        def get_parser(input_fmt='epub', output_fmt=None):
            # Build a conversion option parser for a dummy file pair.
            of = ('dummy2.'+output_fmt) if output_fmt else 'dummy'
            return create_option_parser(('ec', 'dummy1.'+input_fmt, of, '-h'), log)[0]
        # Common options
        input_group, output_group = group_titles()
        p = get_parser()
        opts = p.option_list
        for group in p.option_groups:
            if group.title not in {input_group, output_group}:
                opts += group.option_list
        opts.append(p.get_option('--pretty-print'))
        opts.append(p.get_option('--input-encoding'))
        opts = '\\\n  '.join(tuple(
            self.get_options(opts, file_map={'--search-replace':()})))
        w('\n_ebc_common_opts() {')
        w('\n  _arguments -s \\\n  ' + opts)
        w('\n}\n')
        # Input/Output format options
        for fmts, group_title, func in (
            (input_fmts, input_group, '_ebc_input_opts_%s'),
            (output_fmts, output_group, '_ebc_output_opts_%s'),
        ):
            for fmt in fmts:
                is_input = group_title == input_group
                if is_input and fmt in {'rar', 'zip', 'oebzip'}:
                    continue
                p = (get_parser(input_fmt=fmt) if is_input
                        else get_parser(output_fmt=fmt))
                opts = None
                for group in p.option_groups:
                    if group.title == group_title:
                        opts = [o for o in group.option_list if
                                '--pretty-print' not in o._long_opts and
                                '--input-encoding' not in o._long_opts]
                if not opts:
                    continue
                opts = '\\\n  '.join(tuple(self.get_options(opts)))
                w('\n%s() {'%(func%fmt))
                w('\n  _arguments -s \\\n  ' + opts)
                w('\n}\n')
        # Top-level dispatcher: picks the per-format helpers based on the
        # extensions of the two positional arguments.
        w('\n_ebook_convert() {')
        w('\n  local iarg oarg context state_descr state line\n  typeset -A opt_args\n  local ret=1')
        w("\n  _arguments '1: :_ebc_input_args' '*::ebook-convert output:->args' && ret=0")
        w("\n  case $state in \n  (args)")
        w('\n    iarg=${line[1]##*.}; ')
        w("\n    _arguments '1: :_ebc_output_args' '*::ebook-convert options:->args' && ret=0")
        w("\n    case $state in \n    (args)")
        w('\n      oarg=${line[1]##*.}')
        w('\n      iarg="_ebc_input_opts_${(L)iarg}"; oarg="_ebc_output_opts_${(L)oarg}"')
        w('\n      _call_function - $iarg; _call_function - $oarg; _ebc_common_opts; ret=0')
        w('\n    ;;\n    esac')
        # NOTE(review): this emits `return ret` literally; zsh's `return`
        # expects a number or parameter expansion — confirm `$ret` intended.
        w("\n  ;;\n  esac\n  return ret")
        w('\n}\n')
    def do_ebook_edit(self, f):
        # Emit the zsh completion function for ebook-edit into the stream
        # ``f``: it completes the tool's options, then an e-book file, and for
        # EPUB files it completes the member files *inside* the book via
        # zipinfo.  (Python 2 only: uses a ``ur'''`` raw-unicode literal.)
        from calibre.ebooks.oeb.polish.main import SUPPORTED
        from calibre.ebooks.oeb.polish.import_book import IMPORTABLE
        from calibre.gui2.tweak_book.main import option_parser
        tweakable_fmts = SUPPORTED | IMPORTABLE
        parser = option_parser()
        opt_lines = []
        for opt in parser.option_list:
            lo, so = opt._long_opts, opt._short_opts
            if opt.takes_value():
                # zsh spec syntax: '=' marks a long option taking a value,
                # '+' marks a short option whose value may share the word
                lo = [x+'=' for x in lo]
                so = [x+'+' for x in so]
            ostrings = lo + so
            # Multiple spellings of one option group as {a,b}; a single
            # spelling is just double-quoted.
            ostrings = u'{%s}'%','.join(ostrings) if len(ostrings) > 1 else '"%s"'%ostrings[0]
            h = opt.help or ''
            # Escape characters that are special inside a zsh description.
            h = h.replace('"', "'").replace('[', '(').replace(
                ']', ')').replace('\n', ' ').replace(':', '\\:').replace('`', "'")
            h = h.replace('%default', type(u'')(opt.default))
            help_txt = u'"[%s]"'%h
            opt_lines.append(ostrings + help_txt + ' \\')
        opt_lines = ('\n' + (' ' * 8)).join(opt_lines)
        f.write((ur'''
_ebook_edit() {
    local curcontext="$curcontext" state line ebookfile expl
    typeset -A opt_args
    _arguments -C -s \
        %s
        "1:ebook file:_files -g '(#i)*.(%s)'" \
        '*:file in ebook:->files' && return 0
    case $state in
        files)
            ebookfile=${~${(Q)line[1]}}
            if [[ -f "$ebookfile" && "$ebookfile" =~ '\.[eE][pP][uU][bB]$' ]]; then
                _zip_cache_name="$ebookfile"
                _zip_cache_list=( ${(f)"$(zipinfo -1 $_zip_cache_name 2>/dev/null)"} )
            else
                return 1
            fi
            _wanted files expl 'file from ebook' \
            _multi_parts / _zip_cache_list && return 0
            ;;
    esac
    return 1
}
''' % (opt_lines, '|'.join(tweakable_fmts)) + '\n\n').encode('utf-8'))
    def do_calibredb(self, f):
        # Emit zsh completion for calibredb into ``f``: a dispatcher that
        # first completes the sub-command name, then delegates to that
        # sub-command's own option list.
        from calibre.db.cli.main import COMMANDS, option_parser_for
        from calibre.customize.ui import available_catalog_formats
        parsers, descs = {}, {}
        for command in COMMANDS:
            p = option_parser_for(command)()
            parsers[command] = p
            # Description = first non-"%prog" usage line, up to the first '.'
            lines = [x.strip().partition('.')[0] for x in p.usage.splitlines() if x.strip() and
                not x.strip().startswith('%prog')]
            descs[command] = lines[0]
        f.write('\n_calibredb_cmds() {\n local commands; commands=(\n')
        f.write(' {-h,--help}":Show help"\n')
        f.write(' "--version:Show version"\n')
        for command, desc in descs.iteritems():
            # ':' separates name from description in zsh _describe entries
            f.write(' "%s:%s"\n'%(
                command, desc.replace(':', '\\:').replace('"', '\'')))
        f.write(' )\n _describe -t commands "calibredb command" commands \n}\n')
        subcommands = []
        for command, parser in parsers.iteritems():
            exts = []
            if command == 'catalog':
                exts = [x.lower() for x in available_catalog_formats()]
            elif command == 'set_metadata':
                exts = ['opf']
            exts = set(exts).union(x.upper() for x in exts)
            pats = ('*.%s'%x for x in exts)
            extra = ("'*:filename:_files -g \"%s\"' "%' '.join(pats),) if exts else ()
            if command in {'add', 'add_format'}:
                # These accept files of any type
                extra = ("'*:filename:_files' ",)
            opts = '\\\n '.join(tuple(self.get_options(
                parser)) + extra)
            txt = ' _arguments -s \\\n ' + opts
            subcommands.append('(%s)'%command)
            subcommands.append(txt)
            subcommands.append(';;')
        f.write('\n_calibredb() {')
        f.write((
            r'''
    local state line state_descr context
    typeset -A opt_args
    local ret=1
    _arguments \
        '1: :_calibredb_cmds' \
        '*::calibredb subcommand options:->args' \
        && ret=0
    case $state in
    (args)
        case $line[1] in
        (-h|--help|--version)
            _message 'no more arguments' && ret=0
            ;;
        %s
        esac
        ;;
    esac
    return ret
    '''%'\n    '.join(subcommands)).encode('utf-8'))
        f.write('\n}\n\n')
    def write(self):
        # Write the accumulated zsh completion to self.dest (no-op when no
        # destination was configured).  The generated file is a single
        # #compdef script that dispatches on $service.
        if self.dest:
            for c in ('calibredb', 'ebook-convert', 'ebook-edit'):
                # These three have dedicated completion functions generated
                # by the do_* methods above.
                self.commands[c] = ' _%s "$@"' % c.replace('-', '_')
            with open(self.dest, 'wb') as f:
                f.write('#compdef ' + ' '.join(self.commands)+'\n')
                self.do_ebook_convert(f)
                self.do_calibredb(f)
                self.do_ebook_edit(f)
                f.write('case $service in\n')
                for c, txt in self.commands.iteritems():
                    # File is written as bytes; encode any unicode pieces
                    if isinstance(txt, type(u'')):
                        txt = txt.encode('utf-8')
                    if isinstance(c, type(u'')):
                        c = c.encode('utf-8')
                    f.write(b'%s)\n%s\n;;\n'%(c, txt))
                f.write('esac\n')
# }}}
def get_bash_completion_path(root, share, info):
    # Decide where the calibre bash completion file should be installed.
    # Returns None when installing to /usr but no usable system completions
    # directory exists (callers check for None).
    if root == '/usr':
        # Try to get the system bash completion dir since we are installing to
        # /usr
        try:
            # NOTE(review): pkg-config prints a single directory; partitioning
            # on os.pathsep defensively keeps only the first component.
            path = check_output('pkg-config --variable=completionsdir bash-completion'.split()).strip().partition(os.pathsep)[0]
        except Exception:
            info('Failed to find directory to install bash completions, using default.')
            path = '/usr/share/bash-completion/completions'
        if path and os.path.exists(path) and os.path.isdir(path):
            return os.path.join(path, 'calibre')
    else:
        # Use the default bash-completion dir under staging_share
        return os.path.join(share, 'bash-completion', 'completions', 'calibre')
def write_completion(bash_comp_dest, zsh):
    # Generate shell completion for every calibre command line tool, writing
    # bash completion to ``bash_comp_dest`` (os.devnull when falsy) and
    # accumulating zsh completion into the ZshCompleter ``zsh``.
    from calibre.ebooks.metadata.cli import option_parser as metaop, filetypes as meta_filetypes
    from calibre.ebooks.lrf.lrfparser import option_parser as lrf2lrsop
    from calibre.gui2.lrf_renderer.main import option_parser as lrfviewerop
    from calibre.gui2.viewer.main import option_parser as viewer_op
    from calibre.gui2.tweak_book.main import option_parser as tweak_op
    from calibre.ebooks.metadata.sources.cli import option_parser as fem_op
    from calibre.gui2.main import option_parser as guiop
    from calibre.utils.smtp import option_parser as smtp_op
    from calibre.srv.standalone import create_option_parser as serv_op
    from calibre.ebooks.oeb.polish.main import option_parser as polish_op, SUPPORTED
    from calibre.ebooks.oeb.polish.import_book import IMPORTABLE
    from calibre.debug import option_parser as debug_op
    from calibre.ebooks import BOOK_EXTENSIONS
    from calibre.customize.ui import available_input_formats
    # all_input_formats is presumably imported at the top of this module --
    # TODO confirm (not visible in this chunk).
    input_formats = sorted(all_input_formats())
    tweak_formats = sorted(x.lower() for x in SUPPORTED|IMPORTABLE)
    if bash_comp_dest and not os.path.exists(os.path.dirname(bash_comp_dest)):
        os.makedirs(os.path.dirname(bash_comp_dest))
    complete = 'calibre-complete'
    if getattr(sys, 'frozen_path', None):
        # Frozen (binary) install: use the bundled calibre-complete helper
        complete = os.path.join(getattr(sys, 'frozen_path'), complete)
    with open(bash_comp_dest or os.devnull, 'wb') as f:
        def o_and_e(*args, **kwargs):
            # Completion driven by options + file extensions, for both shells
            f.write(opts_and_exts(*args, **kwargs))
            zsh.opts_and_exts(*args, **kwargs)
        def o_and_w(*args, **kwargs):
            # Completion driven by options + a fixed word list, for both shells
            f.write(opts_and_words(*args, **kwargs))
            zsh.opts_and_words(*args, **kwargs)
        f.write('# calibre Bash Shell Completion\n')
        o_and_e('calibre', guiop, BOOK_EXTENSIONS)
        o_and_e('lrf2lrs', lrf2lrsop, ['lrf'], file_map={'--output':['lrs']})
        o_and_e('ebook-meta', metaop,
                list(meta_filetypes()), cover_opts=['--cover', '-c'],
                opf_opts=['--to-opf', '--from-opf'])
        o_and_e('ebook-polish', polish_op,
                [x.lower() for x in SUPPORTED], cover_opts=['--cover', '-c'],
                opf_opts=['--opf', '-o'])
        o_and_e('lrfviewer', lrfviewerop, ['lrf'])
        o_and_e('ebook-viewer', viewer_op, input_formats)
        o_and_e('ebook-edit', tweak_op, tweak_formats)
        o_and_w('fetch-ebook-metadata', fem_op, [])
        o_and_w('calibre-smtp', smtp_op, [])
        o_and_w('calibre-server', serv_op, [])
        o_and_e('calibre-debug', debug_op, ['py', 'recipe', 'mobi', 'azw', 'azw3', 'docx'], file_map={
            '--tweak-book':['epub', 'azw3', 'mobi'],
            '--subset-font':['ttf', 'otf'],
            '--exec-file':['py', 'recipe'],
            '--add-simple-plugin':['py'],
            '--inspect-mobi':['mobi', 'azw', 'azw3'],
            '--viewer':sorted(available_input_formats()),
        })
        # Hand-written completion for ebook-device (remote device paths) and
        # the calibre-complete driven ebook-convert completion.
        # NOTE(review): '"{$pattern}"' in the shell below looks like it was
        # meant to be '"${pattern}"'; kept byte-identical here.
        f.write(textwrap.dedent('''
        _ebook_device_ls()
        {
          local pattern search listing prefix
          pattern="$1"
          search="$1"
          if [[ -n "{$pattern}" ]]; then
            if [[ "${pattern:(-1)}" == "/" ]]; then
              pattern=""
            else
              pattern="$(basename ${pattern} 2> /dev/null)"
              search="$(dirname ${search} 2> /dev/null)"
            fi
          fi
          if [[ "x${search}" == "x" || "x${search}" == "x." ]]; then
            search="/"
          fi
          listing="$(ebook-device ls ${search} 2>/dev/null)"
          prefix="${search}"
          if [[ "x${prefix:(-1)}" != "x/" ]]; then
            prefix="${prefix}/"
          fi
          echo $(compgen -P "${prefix}" -W "${listing}" "${pattern}")
        }
        _ebook_device()
        {
          local cur prev
          cur="${COMP_WORDS[COMP_CWORD]}"
          prev="${COMP_WORDS[COMP_CWORD-1]}"
          COMPREPLY=()
          case "${prev}" in
            ls|rm|mkdir|touch|cat )
              COMPREPLY=( $(_ebook_device_ls "${cur}") )
              return 0
              ;;
            cp )
              if [[ ${cur} == dev:* ]]; then
                COMPREPLY=( $(_ebook_device_ls "${cur:7}") )
                return 0
              else
                _filedir
                return 0
              fi
              ;;
            dev )
              COMPREPLY=( $(compgen -W "cp ls rm mkdir touch cat info books df" "${cur}") )
              return 0
              ;;
            * )
              if [[ ${cur} == dev:* ]]; then
                COMPREPLY=( $(_ebook_device_ls "${cur:7}") )
                return 0
              else
                if [[ ${prev} == dev:* ]]; then
                  _filedir
                  return 0
                else
                  COMPREPLY=( $(compgen -W "dev:" "${cur}") )
                  return 0
                fi
                return 0
              fi
              ;;
          esac
        }
        complete -o nospace -F _ebook_device ebook-device
        complete -o nospace -C %s ebook-convert
        ''')%complete)
    zsh.write()
# }}}
class PostInstall:
    # Runs all Linux/BSD post-installation steps: launcher symlinks, shell
    # completion, desktop integration and un-installer creation.  Warnings
    # are collected and replayed at the end of __init__.

    def task_failed(self, msg):
        # Report a failed sub-task (with traceback) without aborting.
        self.warn(msg, 'with error:')
        import traceback
        tb = '\n\t'.join(traceback.format_exc().splitlines())
        self.info('\t'+tb)
        print

    def warning(self, *args, **kwargs):
        # Print a loud warning banner and remember it for the final summary.
        print '\n'+'_'*20, 'WARNING','_'*20
        prints(*args, **kwargs)
        print '_'*50
        print ('\n')
        self.warnings.append((args, kwargs))
        sys.stdout.flush()

    def __init__(self, opts, info=prints, warn=None, manifest=None):
        # :param opts: parsed options from option_parser()
        # :param info/warn: output callables (warn defaults to self.warning)
        # :param manifest: list that accumulates every installed path
        self.opts = opts
        self.info = info
        self.warn = warn
        self.warnings = []
        if self.warn is None:
            self.warn = self.warning
        # Derive unset staging directories from staging_root
        if not self.opts.staging_bindir:
            self.opts.staging_bindir = os.path.join(self.opts.staging_root,
                'bin')
        if not self.opts.staging_sharedir:
            self.opts.staging_sharedir = os.path.join(self.opts.staging_root,
                'share', 'calibre')
        self.opts.staging_etc = '/etc' if self.opts.staging_root == '/usr' else \
            os.path.join(self.opts.staging_root, 'etc')
        # Mapping of launcher script names shipped with calibre
        scripts = cPickle.loads(P('scripts.pickle', data=True))
        self.manifest = manifest or []
        if getattr(sys, 'frozen_path', False):
            # Binary (frozen) install: symlink the bundled executables into
            # the staging bindir so they appear on PATH.
            if os.access(self.opts.staging_bindir, os.W_OK):
                self.info('Creating symlinks...')
                for exe in scripts.keys():
                    dest = os.path.join(self.opts.staging_bindir, exe)
                    if os.path.lexists(dest):
                        os.unlink(dest)
                    tgt = os.path.join(getattr(sys, 'frozen_path'), exe)
                    self.info('\tSymlinking %s to %s'%(tgt, dest))
                    os.symlink(tgt, dest)
                    self.manifest.append(dest)
            else:
                self.warning(textwrap.fill(
                    'No permission to write to %s, not creating program launch symlinks,'
                    ' you should ensure that %s is in your PATH or create the symlinks yourself' % (
                        self.opts.staging_bindir, getattr(sys, 'frozen_path', 'the calibre installation directory'))))
        # Resources registered below are recorded so the un-installer can
        # remove them again.
        self.icon_resources = []
        self.menu_resources = []
        self.mime_resources = []
        self.appdata_resources = []
        if islinux or isbsd:
            self.setup_completion()
        if islinux or isbsd:
            self.setup_desktop_integration()
        self.create_uninstaller()
        from calibre.utils.config import config_dir
        if os.path.exists(config_dir):
            os.chdir(config_dir)
            if islinux or isbsd:
                # Remove root-owned leftovers in the per-user config dir
                # (created when calibre was accidentally run as root).
                for f in os.listdir('.'):
                    if os.stat(f).st_uid == 0:
                        import shutil
                        shutil.rmtree(f) if os.path.isdir(f) else os.unlink(f)
                if os.stat(config_dir).st_uid == 0:
                    os.rmdir(config_dir)
        if warn is None and self.warnings:
            # Replay collected warnings only when using the default reporter
            self.info('\n\nThere were %d warnings\n'%len(self.warnings))
            for args, kwargs in self.warnings:
                self.info('*', *args, **kwargs)
                print

    def create_uninstaller(self):
        # Write the calibre-uninstall script, embedding the manifest and all
        # registered desktop resources into the UNINSTALL template.
        base = self.opts.staging_bindir
        if not os.access(base, os.W_OK) and getattr(sys, 'frozen_path', False):
            base = sys.frozen_path
        dest = os.path.join(base, 'calibre-uninstall')
        self.info('Creating un-installer:', dest)
        raw = UNINSTALL.format(
            python='/usr/bin/python', euid=os.geteuid(),
            manifest=self.manifest, icon_resources=self.icon_resources,
            mime_resources=self.mime_resources, menu_resources=self.menu_resources,
            appdata_resources=self.appdata_resources, frozen_path=getattr(sys, 'frozen_path', None))
        try:
            with open(dest, 'wb') as f:
                f.write(raw)
            # World-readable, owner-executable
            os.chmod(dest, stat.S_IRWXU|stat.S_IRGRP|stat.S_IROTH)
            if os.geteuid() == 0:
                os.chown(dest, 0, 0)
        except:
            if self.opts.fatal_errors:
                raise
            self.task_failed('Creating uninstaller failed')

    def setup_completion(self):  # {{{
        # Generate and install bash and zsh command-line completion.
        try:
            self.info('Setting up command-line completion...')
            zsh = ZshCompleter(self.opts)
            if zsh.dest:
                self.info('Installing zsh completion to:', zsh.dest)
                self.manifest.append(zsh.dest)
            bash_comp_dest = get_bash_completion_path(self.opts.staging_root, os.path.dirname(self.opts.staging_sharedir), self.info)
            if bash_comp_dest is not None:
                self.info('Installing bash completion to:', bash_comp_dest)
                self.manifest.append(bash_comp_dest)
            write_completion(bash_comp_dest, zsh)
        except TypeError as err:
            # Ancient lxml raises a TypeError mentioning resolve_entities
            if 'resolve_entities' in str(err):
                print 'You need python-lxml >= 2.0.5 for calibre'
                sys.exit(1)
            raise
        except EnvironmentError as e:
            if e.errno == errno.EACCES:
                self.warning('Failed to setup completion, permission denied')
            if self.opts.fatal_errors:
                raise
            self.task_failed('Setting up completion failed')
        except:
            if self.opts.fatal_errors:
                raise
            self.task_failed('Setting up completion failed')
    # }}}

    def setup_desktop_integration(self):  # {{{
        # Install icons, .desktop files, appdata/metainfo files and MIME
        # definitions via the xdg-* utilities.
        try:
            self.info('Setting up desktop integration...')
            env = os.environ.copy()
            cc = check_call
            if getattr(sys, 'frozen_path', False) and 'LD_LIBRARY_PATH' in env:
                # Do not let calibre's bundled libs leak into xdg tools
                paths = env.get('LD_LIBRARY_PATH', '').split(os.pathsep)
                paths = [x for x in paths if x]
                npaths = [x for x in paths if x != sys.frozen_path+'/lib']
                env['LD_LIBRARY_PATH'] = os.pathsep.join(npaths)
                cc = partial(check_call, env=env)
            with TemporaryDirectory() as tdir, CurrentDir(tdir), PreserveMIMEDefaults():
                def install_single_icon(iconsrc, basename, size, context, is_last_icon=False):
                    filename = '%s-%s.png' % (basename, size)
                    render_img(iconsrc, filename, width=int(size), height=int(size))
                    cmd = ['xdg-icon-resource', 'install', '--noupdate', '--context', context, '--size', str(size), filename, basename]
                    if is_last_icon:
                        # Drop --noupdate so the icon cache refreshes once,
                        # after the final icon.
                        del cmd[2]
                    cc(cmd)
                    self.icon_resources.append((context, basename, str(size)))
                def install_icons(iconsrc, basename, context, is_last_icon=False):
                    sizes = (16, 32, 48, 64, 128, 256)
                    for size in sizes:
                        install_single_icon(iconsrc, basename, size, context, is_last_icon and size is sizes[-1])
                # "source-image  icon-name  context" triples
                icons = filter(None, [x.strip() for x in '''\
                    mimetypes/lrf.png application-lrf mimetypes
                    mimetypes/lrf.png text-lrs mimetypes
                    mimetypes/mobi.png application-x-mobipocket-ebook mimetypes
                    mimetypes/tpz.png application-x-topaz-ebook mimetypes
                    mimetypes/azw2.png application-x-kindle-application mimetypes
                    mimetypes/azw3.png application-x-mobi8-ebook mimetypes
                    lt.png calibre-gui apps
                    viewer.png calibre-viewer apps
                    tweak.png calibre-ebook-edit apps
                    '''.splitlines()])
                for line in icons:
                    iconsrc, basename, context = line.split()
                    install_icons(iconsrc, basename, context, is_last_icon=line is icons[-1])
                # MIME types for all supported input formats
                mimetypes = set()
                for x in all_input_formats():
                    mt = guess_type('dummy.'+x)[0]
                    if mt and 'chemical' not in mt and 'ctc-posml' not in mt:
                        mimetypes.add(mt)
                mimetypes.discard('application/octet-stream')
                def write_mimetypes(f):
                    f.write('MimeType=%s;\n'%';'.join(mimetypes))
                from calibre.ebooks.oeb.polish.main import SUPPORTED
                from calibre.ebooks.oeb.polish.import_book import IMPORTABLE
                f = open('calibre-lrfviewer.desktop', 'wb')
                f.write(VIEWER)
                f.close()
                f = open('calibre-ebook-viewer.desktop', 'wb')
                f.write(EVIEWER)
                write_mimetypes(f)
                # NOTE(review): this handle is never explicitly closed before
                # the next open(); kept as in the original.
                f = open('calibre-ebook-edit.desktop', 'wb')
                f.write(ETWEAK)
                mt = {guess_type('a.' + x.lower())[0] for x in (SUPPORTED|IMPORTABLE)} - {None, 'application/octet-stream'}
                f.write('MimeType=%s;\n'%';'.join(mt))
                f.close()
                f = open('calibre-gui.desktop', 'wb')
                f.write(GUI)
                write_mimetypes(f)
                f.close()
                des = ('calibre-gui.desktop', 'calibre-lrfviewer.desktop',
                       'calibre-ebook-viewer.desktop', 'calibre-ebook-edit.desktop')
                appdata = os.path.join(os.path.dirname(self.opts.staging_sharedir), 'metainfo')
                if not os.path.exists(appdata):
                    try:
                        os.mkdir(appdata)
                    except:
                        self.warning('Failed to create %s not installing appdata files' % appdata)
                if os.path.exists(appdata) and not os.access(appdata, os.W_OK):
                    self.warning('Do not have write permissions for %s not installing appdata files' % appdata)
                else:
                    from calibre.utils.localization import get_all_translators
                    translators = dict(get_all_translators())
                APPDATA = get_appdata()
                for x in des:
                    cmd = ['xdg-desktop-menu', 'install', '--noupdate', './'+x]
                    cc(' '.join(cmd), shell=True)
                    self.menu_resources.append(x)
                    ak = x.partition('.')[0]
                    # The os.access guard also keeps ``translators`` from
                    # being referenced when it was never assigned above.
                    if ak in APPDATA and os.access(appdata, os.W_OK):
                        self.appdata_resources.append(write_appdata(ak, APPDATA[ak], appdata, translators))
                cc(['xdg-desktop-menu', 'forceupdate'])
                MIME = P('calibre-mimetypes.xml')
                self.mime_resources.append(MIME)
                cc(['xdg-mime', 'install', MIME])
        except Exception:
            if self.opts.fatal_errors:
                raise
            self.task_failed('Setting up desktop integration failed')
    # }}}
def option_parser():
    """Build the command line parser for the calibre post-install tool."""
    from calibre.utils.config import OptionParser
    parser = OptionParser()
    # (flag, keyword arguments) for every supported switch
    option_specs = (
        ('--make-errors-fatal', dict(
            action='store_true', default=False,
            dest='fatal_errors', help='If set die on errors.')),
        ('--root', dict(
            dest='staging_root', default='/usr',
            help='Prefix under which to install files')),
        ('--bindir', dict(
            default=None, dest='staging_bindir',
            help='Location where calibre launcher scripts were installed. Typically /usr/bin')),
        ('--sharedir', dict(
            default=None, dest='staging_sharedir',
            help='Location where calibre resources were installed, typically /usr/share/calibre')),
    )
    for flag, kwargs in option_specs:
        parser.add_option(flag, **kwargs)
    return parser
def options(option_parser):
    """Return every option string (short spellings first, then long, per
    option) accepted by the parser built by the zero-argument callable
    ``option_parser``, including options from all option groups."""
    parser = option_parser()
    # Copy so extending with group options does not mutate the parser's own
    # option_list in place (the old ``options += ...`` did exactly that).
    all_options = list(parser.option_list)
    for group in parser.option_groups:
        all_options.extend(group.option_list)
    opts = []
    for opt in all_options:
        opts.extend(opt._short_opts)
        opts.extend(opt._long_opts)
    return opts


def opts_and_words(name, op, words, takes_files=False):
    """Return a bash completion function (UTF-8 encoded bytes) for the tool
    ``name``: options are completed for ``-`` prefixed words, otherwise the
    fixed ``words`` list is offered.

    ``takes_files`` is accepted for interface compatibility but unused here.
    """
    opts = '|'.join(options(op))
    words = '|'.join([w.replace("'", "\\'") for w in words])
    fname = name.replace('-', '_')
    return ('_'+fname+'()'+
'''
{
    local cur opts
    local IFS=$'|\\t'
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    opts="%s"
    words="%s"
    case "${cur}" in
      -* )
         COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
         COMPREPLY=( $( echo ${COMPREPLY[@]} | sed 's/ /\\\\ /g' | tr '\\n' '\\t' ) )
         return 0
         ;;
      * )
         COMPREPLY=( $(compgen -W "${words}" -- ${cur}) )
         COMPREPLY=( $( echo ${COMPREPLY[@]} | sed 's/ /\\\\ /g' | tr '\\n' '\\t' ) )
         return 0
         ;;
    esac
}
complete -F _'''%(opts, words) + fname + ' ' + name +"\n\n").encode('utf-8')


# Image extensions offered when completing cover-file options.
pics = {'jpg', 'jpeg', 'gif', 'png', 'bmp'}
pics = list(sorted(pics))  # for reproducibility


def opts_and_exts(name, op, exts, cover_opts=('--cover',), opf_opts=(),
                  file_map={}):
    """Return a bash completion function (str of shell source) for ``name``.

    :param op: zero-argument callable returning the tool's OptionParser
    :param exts: lowercase file extensions the tool accepts (not modified)
    :param cover_opts: options whose argument is an image file
    :param opf_opts: options whose argument is an OPF file
    :param file_map: unused; accepted for interface compatibility with the
        zsh completer's method of the same name
    """
    opts = ' '.join(options(op))
    # Bug fix: the old code used ``exts.extend(...)`` which mutated the
    # caller's list in place, permanently appending upper-cased duplicates to
    # shared module-level lists such as BOOK_EXTENSIONS.  Build fresh instead.
    exts = '|'.join(sorted(exts + [i.upper() for i in exts]))
    fname = name.replace('-', '_')
    spics = pics + [i.upper() for i in pics]
    spics = '|'.join(sorted(spics))
    # One ``case`` arm per option taking a special file type
    special_exts_template = '''\
      %s )
           _filedir %s
           return 0
         ;;
'''
    extras = []
    for eopts, eexts in ((cover_opts, "${pics}"), (opf_opts, "'@(opf)'")):
        for opt in eopts:
            extras.append(special_exts_template%(opt, eexts))
    extras = '\n'.join(extras)
    return '_'+fname+'()'+\
'''
{
    local cur prev opts
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    prev="${COMP_WORDS[COMP_CWORD-1]}"
    opts="%(opts)s"
    pics="@(%(pics)s)"
    case "${prev}" in
%(extras)s
    esac
    case "${cur}" in
%(extras)s
      -* )
         COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
         return 0
         ;;
      * )
        _filedir '@(%(exts)s)'
        return 0
        ;;
    esac
}
complete -o filenames -F _'''%dict(pics=spics,
        opts=opts, extras=extras, exts=exts) + fname + ' ' + name +"\n\n"
# Freedesktop .desktop entry templates written out by
# PostInstall.setup_desktop_integration.  The %f/%F codes in Exec are filled
# in by the desktop environment with the selected file(s).
VIEWER = '''\
[Desktop Entry]
Version=1.0
Type=Application
Name=LRF Viewer
GenericName=Viewer for LRF files
Comment=Viewer for LRF files (SONY ebook format files)
TryExec=lrfviewer
Exec=lrfviewer %f
Icon=calibre-viewer
MimeType=application/x-sony-bbeb;
Categories=Graphics;Viewer;
'''

EVIEWER = '''\
[Desktop Entry]
Version=1.0
Type=Application
Name=E-book Viewer
GenericName=Viewer for E-books
Comment=Viewer for E-books in all the major formats
TryExec=ebook-viewer
Exec=ebook-viewer --detach %f
Icon=calibre-viewer
Categories=Graphics;Viewer;
'''

ETWEAK = '''\
[Desktop Entry]
Version=1.0
Type=Application
Name=E-book Editor
GenericName=Editor for E-books
Comment=Edit E-books in various formats
TryExec=ebook-edit
Exec=ebook-edit --detach %f
Icon=calibre-ebook-edit
Categories=Office;
'''

GUI = '''\
[Desktop Entry]
Version=1.0
Type=Application
Name=calibre
GenericName=E-book library management
Comment=E-book library management: Convert, view, share, catalogue all your e-books
TryExec=calibre
Exec=calibre --detach %F
Icon=calibre-gui
Categories=Office;
'''
def get_appdata():
    # Return the data used to build freedesktop appdata/metainfo files for
    # each calibre desktop entry, keyed by desktop-entry basename.
    _ = lambda x: x  # Make sure the text below is not translated, but is marked for translation
    return {
        'calibre-gui': {
            'name':'calibre',
            'summary':_('The one stop solution to all your e-book needs'),
            'description':(
                _('calibre is the one stop solution to all your e-book needs.'),
                _('You can use calibre to catalog your books, fetch metadata for them automatically, convert them from and to all the various e-book formats, send them to your e-book reader devices, read the books on your computer, edit the books in a dedicated e-book editor and even make them available over the network with the built-in Content server. You can also download news and periodicals in e-book format from over a thousand different news and magazine websites.')  # noqa
            ),
            'screenshots':(
                (1408, 792, 'https://lh4.googleusercontent.com/-bNE1hc_3pIc/UvHLwKPGBPI/AAAAAAAAASA/8oavs_c6xoU/w1408-h792-no/main-default.png',),
                (1408, 792, 'https://lh4.googleusercontent.com/-Zu2httSKABE/UvHMYK30JJI/AAAAAAAAATg/dQTQUjBvV5s/w1408-h792-no/main-grid.png'),
                (1408, 792, 'https://lh3.googleusercontent.com/-_trYUjU_BaY/UvHMYSdKhlI/AAAAAAAAATc/auPA3gyXc6o/w1408-h792-no/main-flow.png'),
            ),
        },

        'calibre-ebook-edit': {
            'name':'calibre - E-book Editor',
            'summary':_('Edit the text and styles inside e-books'),
            'description':(
                _('The calibre e-book editor allows you to edit the text and styles inside the book with a live preview of your changes.'),
                _('It can edit books in both the EPUB and AZW3 (kindle) formats. It includes various useful tools for checking the book for errors, editing the Table of Contents, performing automated cleanups, etc.'),  # noqa
            ),
            'screenshots':(
                (1408, 792, 'https://lh5.googleusercontent.com/-M2MAVc3A8e4/UvHMWqGRa8I/AAAAAAAAATA/cecQeWUYBVs/w1408-h792-no/edit-default.png',),
                (1408, 792, 'https://lh4.googleusercontent.com/-WhoMxuRb34c/UvHMWqN8aGI/AAAAAAAAATI/8SDBYWXb7-8/w1408-h792-no/edit-check.png'),
                (887, 575, 'https://lh6.googleusercontent.com/-KwaOwHabnBs/UvHMWidjyXI/AAAAAAAAAS8/H6xmCeLnSpk/w887-h575-no/edit-toc.png'),
            ),
        },

        'calibre-ebook-viewer': {
            'name':'calibre - E-book Viewer',
            'summary':_('Read e-books in over a dozen different formats'),
            'description': (
                _('The calibre E-book viewer allows you to read e-books in over a dozen different formats.'),
                _('It has a full screen mode for distraction free reading and can display the text with multiple columns per screen.'),
            ),
            'screenshots':(
                (1408, 792, 'https://lh5.googleusercontent.com/-dzSO82BPpaE/UvHMYY5SpNI/AAAAAAAAATk/I_kF9fYWrZM/w1408-h792-no/viewer-default.png',),
                (1920, 1080, 'https://lh6.googleusercontent.com/-n32Ae5RytAk/UvHMY0QD94I/AAAAAAAAATs/Zw8Yz08HIKk/w1920-h1080-no/viewer-fs.png'),
            ),
        },
    }
def write_appdata(key, entry, base, translators):
    # Write the freedesktop appdata/metainfo XML for the desktop entry
    # ``key`` into directory ``base`` and return the file path.
    # ``translators`` maps language code -> gettext translator (py2
    # ``ugettext``) used to embed translated summaries and paragraphs.
    from lxml.etree import tostring
    from lxml.builder import E
    fpath = os.path.join(base, '%s.appdata.xml' % key)
    screenshots = E.screenshots()
    for w, h, url in entry['screenshots']:
        s = E.screenshot(E.image(url, width=str(w), height=str(h)))
        screenshots.append(s)
    # The first screenshot is the one shown by default in software centers
    screenshots[0].set('type', 'default')
    description = E.description()
    for para in entry['description']:
        description.append(E.p(para))
        # Each paragraph is immediately followed by its available
        # translations, tagged with xml:lang.
        for lang, t in translators.iteritems():
            tp = t.ugettext(para)
            if tp != para:
                description.append(E.p(tp))
                description[-1].set('{http://www.w3.org/XML/1998/namespace}lang', lang)

    root = E.component(
        E.id(key + '.desktop'),
        E.name(entry['name']),
        E.metadata_license('CC0-1.0'),
        E.project_license('GPL-3.0'),
        E.summary(entry['summary']),
        description,
        E.url('https://calibre-ebook.com', type='homepage'),
        screenshots,
        type='desktop'
    )
    for lang, t in translators.iteritems():
        tp = t.ugettext(entry['summary'])
        if tp != entry['summary']:
            root.append(E.summary(tp))
            root[-1].set('{http://www.w3.org/XML/1998/namespace}lang', lang)
    with open(fpath, 'wb') as f:
        f.write(tostring(root, encoding='utf-8', xml_declaration=True, pretty_print=True))
    return fpath
def render_img(image, dest, width=128, height=128):
    # Render the calibre image resource ``image`` to the file ``dest`` as a
    # width x height PNG; aspect ratio is intentionally not preserved.
    from PyQt5.Qt import QImage, Qt
    img = QImage(I(image)).scaled(width, height, Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
    img.save(dest)
def main():
    # Entry point for calibre-postinstall: parse command line options and
    # run all post-installation tasks.
    p = option_parser()
    opts, args = p.parse_args()  # positional args are ignored
    PostInstall(opts)
    return 0
def cli_index_strings():
    # Strings used when generating the command line interface documentation
    # index.  ``_`` is presumably the gettext function calibre installs into
    # builtins -- TODO confirm (not visible in this chunk).
    return _('Command Line Interface'), _(
        'On macOS, the command line tools are inside the calibre bundle, for example,'
        ' if you installed calibre in :file:`/Applications` the command line tools'
        ' are in :file:`/Applications/calibre.app/Contents/console.app/Contents/MacOS/`.'), _(
        'Documented commands'), _('Undocumented commands'), _(
        'You can see usage for undocumented commands by executing them without arguments in a terminal.'), _(
        'Change language'), _('Search')
# Allow running the post-install step directly as a script.
if __name__ == '__main__':
    sys.exit(main())
| jelly/calibre | src/calibre/linux.py | Python | gpl-3.0 | 45,645 |
#!/usr/bin/python
import os
import sys
import commentjson
import time
import json
import datetime
from parse_input import check_config_structure
from build_nova import build_infrastructure, wait_for_spawn
from url_requests import test_infrastructure
import subprocess
### MAIN
if __name__ == "__main__":
    # CLI: argv[1]=OpenStack password, argv[2]=output JSON path,
    # argv[3]=framework root dir, argv[4]=optional config file name
    # (relative to <framework_dir>/config_files/).  Python 2 script.
    if len(sys.argv) > 4:
        configFile = sys.argv[4]
    else:
        print "No config file specified. Using 'config_example.json'"
        configFile = 'config_example.json'
    try:
        with open(sys.argv[3] + "/config_files/" + configFile) as json_data_file:
            try:
                configData = commentjson.load(json_data_file)
            except ValueError:
                print "Wrong data format. Should be json."
                exit(1)
            except commentjson.JSONLibraryException:
                print "Wrong data format. Should be json."
                exit(1)
    except IOError:
        print "File not found/permission was denied."
        exit(1)
    # Inject runtime-only values that must not live in the config file
    configData['creds']['os_password'] = sys.argv[1]
    configData['framework_dir'] = sys.argv[3]
    print "Checking JSON structure..."
    if check_config_structure(configData) == -1:
        print "problem reading config file"
        exit(1)
    configData['launch_time'] = datetime.datetime.now().strftime('%Y/%m/%d-%H:%M:%S')
    print "Building the infrastructure..."
    if build_infrastructure(configData) == -1:
        print "problem building the infrastructure"
        exit(1)
    if wait_for_spawn(configData) == -1:
        print "machines didn't spawn properly"
        exit(1)
    raw_input("Press Enter once the HA installation is ready:")
    print "Sending test request to ensure the operability."
    if test_infrastructure(configData) == -1:
        print "Infrastructure not built properly"
        # erase built VMs
        # Scrub the password before persisting the config state to disk
        configData['creds']['os_password'] = ""
        with open(sys.argv[2], 'w') as outfile:
            json.dump(configData, outfile)
        exit(1)
    print " Request received."
    print "---"
    time.sleep(5)
    #configData['test_url']['full_url'] = "87.190.239.41" # TODO: comment out (debug override)
    configData['creds']['os_password'] = ""
    # TODO: perform always, even after an exception
    with open( sys.argv[2], 'w') as outfile:
        json.dump(configData, outfile)
    print "Testing availability of a service " + configData['test_url']['full_url']
    exit(10)  # OK
| stepanvanecek/failover_test_framework | source/infrastructure/main.py | Python | gpl-3.0 | 2,430 |
#!/bin/env python
"""
#######################################################################
# #
# Copyright (c) 2012, Prateek Sureka. All Rights Reserved. #
# This module provides an idempotent mechanism to remotely configure #
# ntp sync to a server on a host. #
# #
#######################################################################
"""
from fabric.api import task, run, env
from fabric.colors import red
from utils import reconfigure, is_debian_or_ubuntu
env.warn_only = True
from config import config
NTP_CONF_PATH = "/etc/ntp.conf"
@task
def timezone(timezone=config.get("ntp_client", {}).get("ntp_timezone", "Asia/Calcutta")):
    """ Set the timezone. """
    # NOTE(review): the parameter deliberately shadows the function name so
    # the fabric CLI reads ``fab ntp_client.timezone:timezone=...``.
    if not is_debian_or_ubuntu():
        print red("Cannot deploy to non-debian/ubuntu host: %s" % env.host)
        return
    import apt  # sibling fabfile helper module, not python-apt -- TODO confirm
    apt.ensure(tzdata="latest")
    # Overwrite /etc/localtime with the zoneinfo file for the chosen zone
    return run("cp -f /usr/share/zoneinfo/%s /etc/localtime" % timezone)
@task
def configure(server=config.get("ntp_client", {}).get("ntp_server", "")):
    """ Configure NTP sync to server. """
    if not is_debian_or_ubuntu():
        print red("Cannot deploy to non-debian/ubuntu host: %s" % env.host)
        return
    # Upload configuration rendered from the template to /etc/ntp.conf
    params = {'server':server}
    reconfigure("ntp_client.conf.template", params, NTP_CONF_PATH)
@task
def deploy(server=config.get("ntp_client", {}).get("ntp_server", "")):
    """ Install, configure and start ntp sync and timezone. """
    if not is_debian_or_ubuntu():
        print red("Cannot deploy to non-debian/ubuntu host: %s" % env.host)
        return
    import apt, service  # sibling fabfile helper modules -- TODO confirm
    packages = {"ntp":"latest", "ntpdate":"latest"}
    apt.ensure(**packages)
    configure()
    # Sync with server
    run("ntpdate %s" % server)
    # Sync hardware clock to correct time
    run("hwclock --systohc")
    service.ensure(ntp="restarted")
    timezone()
@task
def status():
    """ List the servers with which the host is synchronized. """
    # Query the running NTP daemon for its peer list
    print run("ntpq -p")
    print run("ntpdc -p")
# coding=utf-8
import sys, os.path
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import locale
import unittest
import test_lib as test
import sickbeard
from sickbeard.helpers import sanitizeFileName
from sickrage.helper.encoding import ek, ss, uu
class EncodingTests(test.SiCKRAGETestCase):
    """Verify that show directory paths built from non-ASCII show names come
    out as unicode under the detected system encoding."""

    def test_encoding(self):
        rootDir = 'C:\\Temp\\TV'
        # Show names containing non-ASCII characters.  The second entry was
        # mojibake (a U+FFFD replacement character) in the original source;
        # restored to the intended station name u'RT\xc9 One' ("RTE One").
        strings = [u'Les Enfants De La T\xe9l\xe9', u'RT\xc9 One']

        sickbeard.SYS_ENCODING = None

        try:
            locale.setlocale(locale.LC_ALL, "")
            sickbeard.SYS_ENCODING = locale.getpreferredencoding()
        except (locale.Error, IOError):
            pass

        # For OSes that are poorly configured I'll just randomly force UTF-8
        if not sickbeard.SYS_ENCODING or sickbeard.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
            sickbeard.SYS_ENCODING = 'UTF-8'

        for s in strings:
            show_dir = ek(os.path.join, rootDir, sanitizeFileName(s))
            self.assertIsInstance(show_dir, unicode)
if __name__ == "__main__":
    # Run the encoding tests directly (Python 2 style print statements).
    print "=================="
    print "STARTING - ENCODING TESTS"
    print "=================="
    print "######################################################################"
    suite = unittest.TestLoader().loadTestsFromTestCase(EncodingTests)
    unittest.TextTestRunner(verbosity=2).run(suite)
| srluge/SickRage | tests/encoding_tests.py | Python | gpl-3.0 | 1,487 |
"""Проверки модуля system_wrappers."""
from logging import INFO
from unittest import TestCase
from unittest.mock import Mock, call, patch
from codestyle import system_wrappers
from codestyle.system_wrappers import (
ExitCodes,
check_output,
interrupt_program_flow,
)
class Test(TestCase):
"""Проверка функций модуля."""
@patch('codestyle.system_wrappers.sys', new_callable=Mock)
@patch.object(system_wrappers, '_logger', new_callable=Mock)
def test_interrupt_program_flow(
self, mocked_logger: Mock, mocked_sys: Mock
):
"""Проверка interrupt_program_flow."""
mock_log = Mock()
mocked_logger.log = mock_log
mock_exit = Mock()
mocked_sys.exit = mock_exit
interrupt_program_flow(log_message='Проверка вызова функции.')
self.assertEqual(True, mock_log.called)
self.assertEqual(1, mock_log.call_count)
args, kwargs = mock_log.call_args
self.assertTupleEqual((INFO, 'Проверка вызова функции.'), args)
self.assertDictEqual({}, kwargs)
self.assertEqual(True, mock_exit.called)
self.assertEqual(1, mock_exit.call_count)
args, kwargs = mock_exit.call_args
self.assertTupleEqual((ExitCodes.SUCCESS,), args)
self.assertDictEqual({}, kwargs)
@patch('codestyle.system_wrappers.check_process_output', new_callable=Mock)
@patch.object(system_wrappers, '_logger', new_callable=Mock)
def test_check_output(
self, mocked_logger: Mock, mocked_process_output_checker: Mock
):
"""Проверка check_output."""
mock_debug = Mock()
mocked_logger.debug = mock_debug
mock_rstrip = Mock()
mock_decode = Mock(return_value=Mock(rstrip=mock_rstrip))
mocked_process_output_checker.return_value = Mock(decode=mock_decode)
check_output(('application', 'run'))
self.assertEqual(True, mock_debug.called)
self.assertEqual(1, mock_debug.call_count)
args, kwargs = mock_debug.call_args
self.assertTupleEqual(
('Проверка наличия application в системе...',), args
)
self.assertDictEqual({}, kwargs)
self.assertEqual(True, mocked_process_output_checker.called)
self.assertEqual(1, mocked_process_output_checker.call_count)
args, kwargs = mocked_process_output_checker.call_args
self.assertTupleEqual((('application', 'run'),), args)
self.assertDictEqual({'timeout': 10}, kwargs)
self.assertEqual(True, mock_decode.called)
self.assertEqual(1, mock_decode.call_count)
args, kwargs = mock_decode.call_args
self.assertTupleEqual((), args)
self.assertDictEqual({}, kwargs)
self.assertEqual(True, mock_rstrip.called)
self.assertEqual(1, mock_rstrip.call_count)
args, kwargs = mock_rstrip.call_args
self.assertTupleEqual((), args)
self.assertDictEqual({}, kwargs)
    @patch(
        'codestyle.system_wrappers.interrupt_program_flow', new_callable=Mock
    )
    @patch('codestyle.system_wrappers.check_process_output', new_callable=Mock)
    @patch.object(system_wrappers, '_logger', new_callable=Mock)
    def test_check_output_with_error(
        self,
        mocked_logger: Mock,
        mocked_process_output_checker: Mock,
        mocked_interrupt_program_flow: Mock,
    ):
        """check_output: a missing executable is logged (warning + debug)
        and program flow is interrupted with the UNSUCCESSFUL exit code."""
        mock_debug = Mock()
        mock_warning = Mock()
        mocked_logger.debug = mock_debug
        mocked_logger.warning = mock_warning
        # Simulate the executable being absent from the system.
        mocked_process_output_checker.side_effect = FileNotFoundError(
            'Исполняемый файл application не найден.'
        )
        check_output(('application', 'run'))
        # Two debug lines (availability check + exception text), one warning.
        self.assertEqual(True, mock_debug.called)
        self.assertEqual(2, mock_debug.call_count)
        self.assertEqual(1, mock_warning.call_count)
        self.assertIn(
            call('Проверка наличия application в системе...'),
            mock_debug.mock_calls,
        )
        self.assertIn(
            call('Инструмент application не найден.'), mock_warning.mock_calls
        )
        self.assertIn(
            call('Исполняемый файл application не найден.'),
            mock_debug.mock_calls,
        )
        # The failure aborts the run with the UNSUCCESSFUL exit code.
        self.assertEqual(True, mocked_interrupt_program_flow.called)
        self.assertEqual(1, mocked_interrupt_program_flow.call_count)
        args, kwargs = mocked_interrupt_program_flow.call_args
        self.assertTupleEqual((ExitCodes.UNSUCCESSFUL,), args)
        self.assertDictEqual({}, kwargs)
| webpp-studio/codestyle | tests/test_system_wrappers.py | Python | gpl-3.0 | 4,814 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_schema
short_description: Add or remove PostgreSQL schema
description:
- Add or remove PostgreSQL schema.
version_added: '2.3'
options:
name:
description:
- Name of the schema to add or remove.
required: true
type: str
database:
description:
- Name of the database to connect to and add or remove the schema.
type: str
default: postgres
aliases:
- db
- login_db
login_user:
description:
- The username used to authenticate with.
type: str
login_password:
description:
- The password used to authenticate with.
type: str
login_host:
description:
- Host running the database.
type: str
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
type: str
owner:
description:
- Name of the role to set as owner of the schema.
type: str
port:
description:
- Database port to connect to.
type: int
default: 5432
aliases:
- login_port
session_role:
version_added: '2.8'
description:
- Switch to session_role after connecting.
- The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role
were the one that had logged in originally.
type: str
state:
description:
- The schema state.
type: str
default: present
choices: [ absent, present ]
cascade_drop:
description:
- Drop schema with CASCADE to remove child objects.
type: bool
default: false
version_added: '2.8'
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection
will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for
more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
version_added: '2.8'
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
type: str
version_added: '2.8'
aliases: [ ssl_rootcert ]
notes:
- This module uses I(psycopg2), a Python PostgreSQL database adapter.
- You must ensure that psycopg2 is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case),
then PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
and C(python-psycopg2) packages on the remote host before using this module.
requirements: [ psycopg2 ]
author:
- Flavien Chantelot (@Dorn-) <contact@flavien.io>
- Thomas O'Donnell (@andytom)
'''
EXAMPLES = r'''
- name: Create a new schema with name acme in test database
postgresql_schema:
db: test
name: acme
- name: Create a new schema acme with a user bob who will own it
postgresql_schema:
name: acme
owner: bob
- name: Drop schema "acme" with cascade
postgresql_schema:
name: acme
ensure: absent
cascade_drop: yes
'''
RETURN = r'''
schema:
description: Name of the schema.
returned: success, changed
type: str
sample: "acme"
queries:
description: List of executed queries.
returned: always
type: list
sample: ["CREATE SCHEMA \"acme\""]
'''
import traceback
PSYCOPG2_IMP_ERR = None
try:
import psycopg2
import psycopg2.extras
HAS_PSYCOPG2 = True
except ImportError:
PSYCOPG2_IMP_ERR = traceback.format_exc()
HAS_PSYCOPG2 = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.postgres import postgres_common_argument_spec
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
executed_queries = []
class NotSupportedError(Exception):
    """Raised for operations the connected PostgreSQL server cannot support."""
    pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, schema, owner):
    """Transfer ownership of *schema* to *owner*.

    The query is recorded in ``executed_queries`` for module output.
    Always returns True, since ALTER SCHEMA is unconditionally issued.
    """
    quoted_schema = pg_quote_identifier(schema, 'schema')
    quoted_owner = pg_quote_identifier(owner, 'role')
    query = "ALTER SCHEMA %s OWNER TO %s" % (quoted_schema, quoted_owner)
    cursor.execute(query)
    executed_queries.append(query)
    return True
def get_schema_info(cursor, schema):
    """Return the row describing *schema* (column ``owner``), or None.

    The schema name is passed as a bound query parameter instead of being
    interpolated into the SQL text; the previous ``'%s' % schema``
    interpolation was vulnerable to SQL injection via the module argument.
    """
    query = ("SELECT schema_owner AS owner "
             "FROM information_schema.schemata "
             "WHERE schema_name = %s")
    cursor.execute(query, (schema,))
    return cursor.fetchone()
def schema_exists(cursor, schema):
    """Return True when exactly one schema named *schema* exists.

    Uses a bound query parameter rather than interpolating the schema name
    into the SQL text, which was open to SQL injection.
    """
    query = ("SELECT schema_name FROM information_schema.schemata "
             "WHERE schema_name = %s")
    cursor.execute(query, (schema,))
    return cursor.rowcount == 1
def schema_delete(cursor, schema, cascade):
    """Drop *schema* if it exists; return True when a DROP was issued."""
    if not schema_exists(cursor, schema):
        return False
    query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
    if cascade:
        query += " CASCADE"
    cursor.execute(query)
    executed_queries.append(query)
    return True
def schema_create(cursor, schema, owner):
    """Create *schema* (optionally AUTHORIZATION *owner*).

    When the schema already exists, the only possible change is an
    ownership transfer. Returns True when anything was changed.
    """
    if schema_exists(cursor, schema):
        schema_info = get_schema_info(cursor, schema)
        if owner and owner != schema_info['owner']:
            return set_owner(cursor, schema, owner)
        return False
    query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
    if owner:
        query_fragments.append('AUTHORIZATION %s' % pg_quote_identifier(owner, 'role'))
    query = ' '.join(query_fragments)
    cursor.execute(query)
    executed_queries.append(query)
    return True
def schema_matches(cursor, schema, owner):
    """Return True when the schema exists and, if *owner* is given,
    is owned by *owner* (used for check mode)."""
    if not schema_exists(cursor, schema):
        return False
    schema_info = get_schema_info(cursor, schema)
    return not owner or owner == schema_info['owner']
# ===========================================
# Module execution.
#
def main():
    """Module entry point: parse arguments, connect, converge schema state.

    Fix: the psycopg2-version error message read "in order to user the
    ca_cert parameter"; corrected to "use". All other behavior unchanged.
    """
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        login_user=dict(type="str", default="postgres"),
        login_password=dict(type="str", default="", no_log=True),
        login_host=dict(type="str", default=""),
        login_unix_socket=dict(type="str", default=""),
        port=dict(type="int", default=5432, aliases=["login_port"]),
        schema=dict(type="str", required=True, aliases=['name']),
        owner=dict(type="str", default=""),
        database=dict(type="str", default="postgres", aliases=["db", "login_db"]),
        cascade_drop=dict(type="bool", default=False),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        ssl_mode=dict(type="str", default='prefer', choices=[
            'disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full']),
        ca_cert=dict(type="str", default=None, aliases=['ssl_rootcert']),
        session_role=dict(type="str"),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    if not HAS_PSYCOPG2:
        module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
    schema = module.params["schema"]
    owner = module.params["owner"]
    state = module.params["state"]
    sslrootcert = module.params["ca_cert"]
    cascade_drop = module.params["cascade_drop"]
    session_role = module.params["session_role"]
    changed = False
    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the **kw
    # dictionary
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port",
        "database": "database",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert"
    }
    kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
              if k in params_map and v != "" and v is not None)
    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
    if is_localhost and module.params["login_unix_socket"] != "":
        kw["host"] = module.params["login_unix_socket"]
    # sslrootcert support arrived in psycopg2 2.4.3; fail early otherwise.
    if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
        module.fail_json(
            msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
    try:
        db_connection = psycopg2.connect(**kw)
        # Enable autocommit so we can create databases
        if psycopg2.__version__ >= '2.4.2':
            db_connection.autocommit = True
        else:
            db_connection.set_isolation_level(psycopg2
                                              .extensions
                                              .ISOLATION_LEVEL_AUTOCOMMIT)
        cursor = db_connection.cursor(
            cursor_factory=psycopg2.extras.DictCursor)
    except TypeError as e:
        if 'sslrootcert' in e.args[0]:
            module.fail_json(
                msg='Postgresql server must be at least version 8.4 to support sslrootcert')
        module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
    except Exception as e:
        module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
    if session_role:
        try:
            cursor.execute('SET ROLE %s' % pg_quote_identifier(session_role, 'role'))
        except Exception as e:
            module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
    try:
        # Check mode: report whether a change would happen, run no DDL.
        if module.check_mode:
            if state == "absent":
                changed = not schema_exists(cursor, schema)
            elif state == "present":
                changed = not schema_matches(cursor, schema, owner)
            module.exit_json(changed=changed, schema=schema)
        if state == "absent":
            try:
                changed = schema_delete(cursor, schema, cascade_drop)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
        elif state == "present":
            try:
                changed = schema_create(cursor, schema, owner)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except SystemExit:
        # Avoid catching this on Python 2.4
        raise
    except Exception as e:
        module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
    module.exit_json(changed=changed, schema=schema, queries=executed_queries)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| dagwieers/ansible | lib/ansible/modules/database/postgresql/postgresql_schema.py | Python | gpl-3.0 | 11,616 |
import random
import actor
from vector import Vector as v
class SmokePuff(actor.Actor):
    """Short-lived visual particle: drifts randomly, grows, then dies."""
    collides = False  # decorative only; excluded from collision handling
    def __init__(self, world, pos):
        super(SmokePuff, self).__init__(world, pos=pos, radius=10, image_file="images/all/star.svgz")
        # Random drift: independent gaussian impulse (mean 0, sigma 2) per axis.
        self.apply_impulse(v((random.gauss(0,2),random.gauss(0,2))))
    def tick(self):
        super(SmokePuff, self).tick()
        # Grow every frame; expire once the puff exceeds radius 20.
        self.radius += .5
        if self.radius > 20:
            self.dead = True
| italomaia/turtle-linux | games/DigbyMarshmallow/lib/effects.py | Python | gpl-3.0 | 472 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# slackbuild.py file is part of slpkg.
# Copyright 2014-2015 Dimitris Zlatanidis <d.zlatanidis@gmail.com>
# All rights reserved.
# Slpkg is a user-friendly package manager for Slackware installations
# https://github.com/dslackw/slpkg
# Slpkg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from slpkg.utils import Utils
from slpkg.messages import Msg
from slpkg.toolbar import status
from slpkg.log_deps import write_deps
from slpkg.blacklist import BlackList
from slpkg.downloader import Download
from slpkg.__metadata__ import MetaData as _meta_
from slpkg.pkg.find import find_package
from slpkg.pkg.build import BuildPackage
from slpkg.pkg.manager import PackageManager
from slpkg.pkg.installed import GetFromInstalled
from slpkg.sbo.greps import SBoGrep
from slpkg.sbo.remove import delete
from slpkg.sbo.sbo_arch import SBoArch
from slpkg.sbo.compressed import SBoLink
from slpkg.sbo.dependency import Requires
from slpkg.sbo.search import sbo_search_pkg
from slpkg.sbo.slack_find import slack_package
class SBoInstall(object):
    """Build and install SBo packages with all dependencies.

    NOTE(review): this module targets Python 2 (dict.iteritems, print
    statement semantics are relied on in view_packages) -- confirm before
    running under Python 3.
    """
    def __init__(self, slackbuilds, flag):
        # Requested SlackBuild names and the raw command-line option list.
        self.slackbuilds = slackbuilds
        self.flag = flag
        self.meta = _meta_
        self.msg = Msg()
        self.arch = SBoArch().get()
        self.build_folder = self.meta.build_path
        # "--directory-prefix=PATH" overrides the default build directory.
        for fl in self.flag:
            if fl.startswith("--directory-prefix="):
                self.build_folder = fl.split("=")[1]
                if not self.build_folder.endswith("/"):
                    self.build_folder += "/"
        self.unst = ["UNSUPPORTED", "UNTESTED"]
        self.master_packages = []
        self.deps = []
        self.dependencies = []
        self.package_not_found = []
        self.package_found = []
        self.deps_dict = {}
        self.answer = ""
        self.match = False
        # Counters feeding the "Installing summary" report.
        self.count_ins = 0
        self.count_upg = 0
        self.count_uni = 0
        self.msg.reading()
        self.data = SBoGrep(name="").names()
        self.blacklist = BlackList().packages(pkgs=self.data, repo="sbo")
    def start(self, if_upgrade):
        """Start view, build and install SBo packages
        """
        tagc = ""
        self.if_upgrade = if_upgrade
        self.case_insensitive()
        # Split requests into found packages (with their dependency trees)
        # and names that are unknown or blacklisted.
        for _sbo in self.slackbuilds:
            status(0.03)
            if _sbo in self.data and _sbo not in self.blacklist:
                sbo_deps = Requires(self.flag).sbo(_sbo)
                self.deps += sbo_deps
                self.deps_dict[_sbo] = self.one_for_all(sbo_deps)
                self.package_found.append(_sbo)
            else:
                self.package_not_found.append(_sbo)
        self.update_deps()
        # Nothing matched exactly: fall back to substring matching.
        if not self.package_found:
            self.match = True
            self.matching()
        self.master_packages, mas_src = self.sbo_version_source(
            self.package_found)
        self.msg.done()
        # NOTE(review): self.flag is a list of option strings, so the
        # comparison `self.flag != "--resolve-off"` is always True; it
        # likely should be `"--resolve-off" not in self.flag` -- confirm.
        if (self.meta.rsl_deps in ["on", "ON"] and
                self.flag != "--resolve-off" and not self.match):
            self.msg.resolving()
        self.dependencies, dep_src = self.sbo_version_source(
            self.one_for_all(self.deps))
        if (self.meta.rsl_deps in ["on", "ON"] and
                self.flag != "--resolve-off" and not self.match):
            self.msg.done()
        self.clear_masters()
        if self.package_found:
            print("\nThe following packages will be automatically "
                  "installed or upgraded \nwith new version:\n")
            self.top_view()
            self.msg.upg_inst(self.if_upgrade)
            # view master packages
            for sbo, arch in zip(self.master_packages, mas_src):
                tagc = self.tag(sbo)
                name = "-".join(sbo.split("-")[:-1])
                self.view_packages(tagc, name, sbo.split("-")[-1],
                                   self.select_arch(arch))
            self.view_installing_for_deps()
            # view dependencies
            for dep, arch in zip(self.dependencies, dep_src):
                tagc = self.tag(dep)
                name = "-".join(dep.split("-")[:-1])
                self.view_packages(tagc, name, dep.split("-")[-1],
                                   self.select_arch(arch))
            count_total = sum([self.count_ins, self.count_upg,
                               self.count_uni])
            print("\nInstalling summary")
            print("=" * 79)
            print("{0}Total {1} {2}.".format(
                self.meta.color["GREY"], count_total,
                self.msg.pkg(count_total)))
            print("{0} {1} will be installed, {2} already installed and "
                  "{3} {4}".format(self.count_uni,
                                   self.msg.pkg(self.count_uni),
                                   self.count_ins, self.count_upg,
                                   self.msg.pkg(self.count_upg)))
            print("will be upgraded.{0}\n".format(self.meta.color["ENDC"]))
            self.continue_to_install()
        else:
            self.msg.not_found(self.if_upgrade)
    def case_insensitive(self):
        """Matching packages distinguish between uppercase and
        lowercase
        """
        if "--case-ins" in self.flag:
            data_dict = Utils().case_sensitive(self.data)
            for name in self.slackbuilds:
                index = self.slackbuilds.index(name)
                for key, value in data_dict.iteritems():
                    if key == name.lower():
                        self.slackbuilds[index] = value
    def update_deps(self):
        """Update dependencies dictionary with all package
        """
        onelist, dependencies = [], []
        onelist = Utils().dimensional_list(self.deps)
        dependencies = Utils().remove_dbs(onelist)
        for dep in dependencies:
            deps = Requires(self.flag).sbo(dep)
            self.deps_dict[dep] = self.one_for_all(deps)
    def continue_to_install(self):
        """Continue to install ?
        """
        # Prompt only when there is something to do (or downloads requested).
        if (self.count_uni > 0 or self.count_upg > 0 or
                "--download-only" in self.flag):
            if self.master_packages and self.msg.answer() in ["y", "Y"]:
                installs, upgraded = self.build_install()
                if "--download-only" in self.flag:
                    raise SystemExit()
                self.msg.reference(installs, upgraded)
                write_deps(self.deps_dict)
                delete(self.build_folder)
    def view_installing_for_deps(self):
        """View installing message for dependencies
        """
        if not self.match and self.dependencies:
            print("Installing for dependencies:")
    def clear_masters(self):
        """Clear master slackbuilds if already exist in dependencies
        or if added to install two or more times
        """
        self.master_packages = Utils().remove_dbs(self.master_packages)
        for mas in self.master_packages:
            if mas in self.dependencies:
                self.master_packages.remove(mas)
    def matching(self):
        """Return found matching SBo packages
        """
        for sbo in self.package_not_found:
            for pkg in self.data:
                if sbo in pkg and pkg not in self.blacklist:
                    self.package_found.append(pkg)
    def sbo_version_source(self, slackbuilds):
        """Create sbo name with version
        """
        # Returns [names-with-version, download-source-urls], index-aligned.
        sbo_versions, sources = [], []
        for sbo in slackbuilds:
            status(0.02)
            sbo_ver = "{0}-{1}".format(sbo, SBoGrep(sbo).version())
            sbo_versions.append(sbo_ver)
            sources.append(SBoGrep(sbo).source())
        return [sbo_versions, sources]
    def one_for_all(self, deps):
        """Because there are dependencies that depend on other
        dependencies are created lists into other lists.
        Thus creating this loop create one-dimensional list and
        remove double packages from dependencies.
        """
        requires, dependencies = [], []
        deps.reverse()
        # Inverting the list brings the
        # dependencies in order to be installed.
        requires = Utils().dimensional_list(deps)
        dependencies = Utils().remove_dbs(requires)
        return dependencies
    def top_view(self):
        """View top template
        """
        self.msg.template(78)
        print("{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}{10}".format(
            "| Package", " " * 17,
            "New version", " " * 8,
            "Arch", " " * 4,
            "Build", " " * 2,
            "Repos", " " * 10,
            "Size"))
        self.msg.template(78)
    def view_packages(self, *args):
        """:View slackbuild packages with version and arch
        args[0] package color
        args[1] package
        args[2] version
        args[3] arch
        """
        ver = GetFromInstalled(args[1]).version()
        # NOTE(review): under Python 2's print *statement* the .rstrip()
        # binds to the formatted string, not to a print() return value;
        # this line only works on Python 2 -- confirm target interpreter.
        print(" {0}{1}{2}{3} {4}{5} {6}{7}{8}{9}{10}{11:>11}{12}".format(
            args[0], args[1] + ver, self.meta.color["ENDC"],
            " " * (23-len(args[1] + ver)), args[2],
            " " * (18-len(args[2])), args[3],
            " " * (15-len(args[3])), "",
            "", "SBo", "", "")).rstrip()
    def tag(self, sbo):
        """Tag with color green if package already installed,
        color yellow for packages to upgrade and color red
        if not installed.

        Side effect: increments the matching summary counter.
        """
        # split sbo name with version and get name
        sbo_name = "-".join(sbo.split("-")[:-1])
        find = GetFromInstalled(sbo_name).name()
        if find_package(sbo, self.meta.pkg_path):
            paint = self.meta.color["GREEN"]
            self.count_ins += 1
        elif sbo_name == find:
            paint = self.meta.color["YELLOW"]
            self.count_upg += 1
        else:
            paint = self.meta.color["RED"]
            self.count_uni += 1
        return paint
    def select_arch(self, src):
        """Looks if sources unsupported or untested
        from arch else select arch.
        """
        arch = self.arch
        for item in self.unst:
            if item in src:
                arch = item
        return arch
    def filenames(self, sources):
        """Return filenames from sources links
        """
        filename = []
        for src in sources:
            filename.append(src.split("/")[-1])
        return filename
    def build_install(self):
        """Build and install packages if not already installed
        """
        slackbuilds = self.dependencies + self.master_packages
        installs, upgraded, = [], []
        if not os.path.exists(self.build_folder):
            os.makedirs(self.build_folder)
        os.chdir(self.build_folder)
        for prgnam in slackbuilds:
            pkg = "-".join(prgnam.split("-")[:-1])
            installed = "".join(find_package(prgnam, self.meta.pkg_path))
            src_link = SBoGrep(pkg).source().split()
            if installed and "--download-only" not in self.flag:
                # Exact package-with-version already present: skip rebuild.
                self.msg.template(78)
                self.msg.pkg_found(prgnam)
                self.msg.template(78)
            elif self.unst[0] in src_link or self.unst[1] in src_link:
                # Sources are UNSUPPORTED/UNTESTED for this arch: report only.
                self.msg.template(78)
                print("| Package {0} {1}{2}{3}".format(
                    prgnam, self.meta.color["RED"], "".join(src_link),
                    self.meta.color["ENDC"]))
                self.msg.template(78)
            else:
                # Download the SlackBuild script plus sources, build, install.
                sbo_url = sbo_search_pkg(pkg)
                sbo_link = SBoLink(sbo_url).tar_gz()
                script = sbo_link.split("/")[-1]
                dwn_srcs = sbo_link.split() + src_link
                Download(self.build_folder, dwn_srcs, repo="sbo").start()
                if "--download-only" in self.flag:
                    continue
                sources = self.filenames(src_link)
                BuildPackage(script, sources, self.build_folder,
                             auto=False).build()
                binary = slack_package(prgnam)
                if GetFromInstalled(pkg).name() == pkg:
                    print("[ {0}Upgrading{1} ] --> {2}".format(
                        self.meta.color["YELLOW"],
                        self.meta.color["ENDC"], prgnam))
                    upgraded.append(prgnam)
                else:
                    print("[ {0}Installing{1} ] --> {2}".format(
                        self.meta.color["GREEN"],
                        self.meta.color["ENDC"], prgnam))
                    installs.append(prgnam)
                PackageManager(binary).upgrade(flag="--install-new")
        return installs, upgraded
| BrentonEarl/slpkg | slpkg/sbo/slackbuild.py | Python | gpl-3.0 | 13,275 |
# coding=utf8
from setuptools import setup
# Package metadata for the fbones CLI; installing exposes the `fbones` command.
setup(name='fbones',
      version='0.0.5',
      description='A bootstrap toolkit to kickoff a flask project',
      url='https://github.com/ipconfiger/fbones',
      author='Alexander.Li',
      author_email='superpowerlee@gmail.com',
      license='GNU GENERAL PUBLIC LICENSE',
      packages=['fbones'],
      # Runtime dependencies pulled in on install.
      install_requires=[
          'flask',
          'click',
          'alembic',
          'flask_doc',
          'gunicorn',
          'meinheld'
      ],
      # Console entry point: `fbones` -> fbones.fbones:main
      entry_points={
          'console_scripts': ['fbones=fbones.fbones:main'],
      },
      zip_safe=False)
| ipconfiger/fbones | setup.py | Python | gpl-3.0 | 623 |
import sys
from os import sep
from PyQt4.QtGui import QApplication
from src import mainwindow
if __name__ == "__main__":
    print "los gehts"  # startup marker (Python 2 print statement)
    app = QApplication(sys.argv)
    # NOTE(review): CCMainWindow presumably shows itself in its constructor;
    # confirm, since window.show() is never called here.
    window = mainwindow.CCMainWindow()
    sys.exit(app.exec_())
"""is_teklif_sistemi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
# Only the Django admin is routed; no application URLs are registered yet.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
| ufukdogan92/is-teklif-sistemi | is_teklif_sistemi/urls.py | Python | gpl-3.0 | 774 |
import kivy
kivy.require('1.9.1')
from kivy.app import Builder
from kivy.uix.gridlayout import GridLayout
from kivy.properties import ObjectProperty
from kivy.logger import Logger
from settingsview import SettingsView, SettingsSwitch, SettingsButton
from autosportlabs.widgets.separator import HLineSeparator
from autosportlabs.racecapture.views.util.alertview import editor_popup
from autosportlabs.racecapture.views.configuration.rcp.advancedbluetoothconfigview import AdvancedBluetoothConfigView
Builder.load_string('''
<BluetoothConfigView>
id: bluetooth
cols: 1
spacing: [0, dp(20)]
size_hint: [1, None]
height: self.minimum_height
HSeparator:
text: 'Bluetooth'
size_hint_y: None
SettingsView:
id: bt_enable
label_text: 'Bluetooth'
help_text: 'If the Bluetooth module is connected, enable it here'
SettingsView:
id: btconfig
label_text: 'Advanced configuration'
help_text: 'Change Bluetooth name and passkey. Firmware version 2.9.0 or greater required.'
''')
class BluetoothConfigView(GridLayout):
    """Settings pane for the RaceCapture Bluetooth module.

    Offers an enable/disable switch plus an "Advanced" popup for changing
    the device name and passkey. Fires 'on_modified' whenever the backing
    config object is changed.
    """
    def __init__(self, config, **kwargs):
        super(BluetoothConfigView, self).__init__(**kwargs)
        self.config = None
        self.register_event_type('on_modified')
        self.config_updated(config)
        self._bt_popup = None
        self._bt_config_view = None
        # Wire the "Advanced" button row defined in the kv layout.
        btConfig = self.ids.btconfig
        btConfig.bind(on_setting=self.on_bt_configure)
        btConfig.setControl(SettingsButton(text='Advanced'))
    def on_bluetooth_enabled_change(self, instance, value):
        """Persist the enable switch and mark the config as needing a write."""
        if self.config:
            self.config.connectivityConfig.bluetoothConfig.btEnabled = value
            self.config.connectivityConfig.stale = True
            self.dispatch('on_modified')
    def config_updated(self, config):
        """Rebuild the enable switch from a (re)loaded config object."""
        self.config = config
        value = self.config.connectivityConfig.bluetoothConfig.btEnabled
        bluetooth_enabled = self.ids.bt_enable
        bluetooth_enabled.setControl(SettingsSwitch(active=value))
        bluetooth_enabled.control.bind(active=self.on_bluetooth_enabled_change)
    def on_modified(self):
        # Default handler for the custom event; listeners bind externally.
        pass
    def on_bt_configure(self, instance, value):
        """Open the advanced Bluetooth popup (only one at a time)."""
        if not self._bt_popup:
            content = AdvancedBluetoothConfigView(self.config.connectivityConfig)
            popup = editor_popup(title="Configure Bluetooth", content=content,
                                 answerCallback=self.on_bluetooth_popup_answer)
            self._bt_popup = popup
            self._bt_config_view = content
    def on_bluetooth_popup_answer(self, instance, answer):
        """Handle popup confirm/cancel; save validated values into config."""
        close = True
        modified = False
        # If the user clicked the checkbox to save, validate the view. If it's valid, close and save values to config.
        # If invalid, leave it (view will show error messages)
        if answer:
            valid = self._bt_config_view.validate()
            if valid:
                # Empty fields mean "leave unchanged".
                bt_values = self._bt_config_view.values
                if len(bt_values["name"]) > 0:
                    self.config.connectivityConfig.bluetoothConfig.name = bt_values["name"]
                    modified = True
                if len(bt_values["passkey"]) > 0:
                    self.config.connectivityConfig.bluetoothConfig.passKey = bt_values["passkey"]
                    modified = True
            else:
                close = False
        if modified:
            self.config.connectivityConfig.stale = True
            self.dispatch('on_modified')
        if close:
            self._bt_popup.dismiss()
            self._bt_popup = None
            self._bt_config_view = None
| ddimensia/RaceCapture_App | autosportlabs/racecapture/views/configuration/rcp/wireless/bluetoothconfigview.py | Python | gpl-3.0 | 3,672 |
# moduleBonusAfterburner
#
# Used by:
# Modules from group: Propulsion Module (62 of 127)
# Effect metadata looked up by name on this module by the fitting engine:
# an "active" effect applied in the "late" pass. (`type` intentionally
# shadows the builtin -- the framework requires these exact attribute names.)
type = "active"
runTime = "late"
def handler(fit, module, context):
    """Apply an afterburner's bonuses to the fitted ship.

    The module's mass penalty is added first, so the velocity boost below
    is computed against the ship's *new* total mass:
    boost% = speedFactor * speedBoostFactor (thrust) / mass.
    """
    fit.ship.increaseItemAttr("mass", module.getModifiedItemAttr("massAddition"))
    velocity_bonus = module.getModifiedItemAttr("speedFactor")
    total_mass = fit.ship.getModifiedItemAttr("mass")
    thrust = module.getModifiedItemAttr("speedBoostFactor")
    fit.ship.boostItemAttr("maxVelocity", velocity_bonus * thrust / total_mass)
| Ebag333/Pyfa | eos/effects/modulebonusafterburner.py | Python | gpl-3.0 | 479 |
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
from hyperspy.io_plugins import (msa, digital_micrograph, fei, mrc, ripple,
tiff, semper_unf, blockfile, dens, emd,
protochips)
io_plugins = [msa, digital_micrograph, fei, mrc, ripple, tiff, semper_unf,
blockfile, dens, emd, protochips]
_logger = logging.getLogger(__name__)
try:
from hyperspy.io_plugins import netcdf
io_plugins.append(netcdf)
except ImportError:
pass
# NetCDF is obsolate and is only provided for users who have
# old EELSLab files. Therefore, we silenly ignore if missing.
try:
from hyperspy.io_plugins import hdf5
io_plugins.append(hdf5)
from hyperspy.io_plugins import emd
io_plugins.append(emd)
except ImportError:
_logger.warning('The HDF5 IO features are not available. '
'It is highly reccomended to install h5py')
try:
from hyperspy.io_plugins import image
io_plugins.append(image)
except ImportError:
_logger.info('The Signal2D (PIL) IO features are not available')
try:
    from hyperspy.io_plugins import bcf
    io_plugins.append(bcf)
except ImportError:
    # Single message string: the old call passed the extra sentence pieces
    # as positional args to logger.warning() with no %-placeholders, which
    # makes the logging machinery raise a formatting error instead of
    # printing the warning.
    _logger.warning('The Bruker composite file reader cant be loaded '
                    'due to lxml library missing. Please install lxml '
                    'and python bindings, to enable the bcf loader.')
# Default file extensions offered by every plugin that supports writing.
default_write_ext = {
    plugin.file_extensions[plugin.default_extension]
    for plugin in io_plugins
    if plugin.writes
}
| vidartf/hyperspy | hyperspy/io_plugins/__init__.py | Python | gpl-3.0 | 2,279 |
# -*- coding: utf-8 -*-
from py3Des.pyDes import triple_des, ECB, PAD_PKCS5
class TripleDES:
    """Static facade over pyDes triple_des (ECB mode, PKCS5 padding).

    NOTE(review): the key is hard-coded and ECB mode leaks plaintext
    patterns, so this provides obfuscation rather than real
    confidentiality -- confirm that is the intent.
    """
    __triple_des = None  # shared cipher instance, created once by init()
    @staticmethod
    def init():
        # 16-byte key selects two-key 3DES; the IV argument is unused in ECB.
        TripleDES.__triple_des = triple_des('1234567812345678',
                                            mode=ECB,
                                            IV = '\0\0\0\0\0\0\0\0',
                                            pad=None,
                                            padmode = PAD_PKCS5)
    @staticmethod
    def encrypt(data):
        """Encrypt *data* with the shared cipher; init() must be called first."""
        return TripleDES.__triple_des.encrypt(data)
    @staticmethod
    def decrypt(data):
        """Decrypt *data* with the shared cipher; init() must be called first."""
        return TripleDES.__triple_des.decrypt(data)
# task_2: read an integer and print the number of 1 bits in its binary form.


def popcount(value):
    """Return how many bits are set in the binary representation of *value*.

    Replaces sum(map(int, bin(a)[2:])) with the direct bit count; also
    works for negative numbers (counts bits of the magnitude), where the
    old slicing crashed on the '-' sign.
    """
    return bin(value).count("1")


if __name__ == "__main__":
    # Guarded so the module can be imported (e.g. by tests) without
    # blocking on stdin.
    print(popcount(int(input())))
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Taifxx
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########## LAUNCHING FROM MEMORY SERVICE:
### Import modules ...
import context_ex as context
import context as cstart
import deb
### Define ...
# Launch-trigger file name and crash-log file name, shared with the
# normal (non-service) context-menu entry point.
ITD_FILE = cstart.ITD_FILE
error_file = cstart.error_file
# Localized dialog title: template formatted with the add-on's name.
adnname = context.tl(context.TAG_TTL_NM) % (context.addon.name)
### Messages ...
msgStrat = 'Launching from memory service started ...'
msgEnd = 'Launching from memory service stopped ...'
msgStratVisual = 'LFM service started'
msgEndVisual = 'LFM service stopped'
msgProcessError = 'Process ERROR'
### Base functions ...
def log(event):
    """Write *event* to the Kodi log, prefixed with this add-on's id.

    Replaces a lambda assignment (PEP 8 E731) with a named function so
    tracebacks and introspection show a useful name.
    """
    context.xbmc.log('[%s] >> %s' % (context.addon.id, event))
def starter():
    """Run the add-on's main entry point using the saved launch data.

    On failure: the autostart marker is removed, the user is notified,
    a traceback is dumped to the error log, and the exception is
    re-raised after cleanup. The trigger file is always removed so a
    stale trigger cannot re-launch the add-on.
    """
    isRaise = False
    try:
        context.plgMain (importLI=ITD_FILE)
    except Exception as exc:
        # Drop the autostart marker, notify the user, dump a traceback.
        context.DOS.delf(context.DOS.join(context.addon.profile, context.TAG_PAR_STRARTF))
        context.GUI.msgf(adnname, msgProcessError, context.GUI.notError)
        deb.addraise(context.DOS.join(context.addon.profile, error_file))
        isRaise = True
    finally:
        ## If user try double run ...
        if context.xbmcvfs.exists(context.DOS.join(context.addon.profile, ITD_FILE)):
            context.DOS.delf(context.DOS.join(context.addon.profile, ITD_FILE))
        if isRaise : raise
### Main ...
def service(externalAbort, report):
    """Poll once per second for the trigger file and launch the add-on when
    it appears; stop on Kodi abort or when externalAbort() returns True.

    :param externalAbort: zero-argument callable; True requests shutdown
    :param report: when True, show start/stop notifications to the user
    """
    kodi_monitor = context.xbmc.Monitor()

    # announce the service start
    log(msgStrat)
    if report:
        context.GUI.msg(adnname, msgStratVisual)

    # main polling loop
    while not kodi_monitor.abortRequested():
        if context.xbmcvfs.exists(context.DOS.join(context.addon.profile, ITD_FILE)):
            starter()
        if kodi_monitor.waitForAbort(1) or externalAbort():
            break

    # announce the service stop
    del kodi_monitor
    log(msgEnd)
    context.GUI.msg(adnname, msgEndVisual)
| Taifxx/xxtrep | context.addtolib/lfm_service.py | Python | gpl-3.0 | 2,631 |
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import copy
import matplotlib
from grid_cell_model.submitting import flagparse
import noisefigs
from noisefigs.env import NoiseEnvironment
import config
# Parse the figure-generation flags; --gammaSweep selects the gamma-sweep
# plotter (flagparse also provides --all, which enables every figure).
parser = flagparse.FlagParser()
parser.add_flag('--gammaSweep')
args = parser.parse_args()
# Build the plotting environment from the local config module, register the
# requested plotter(s), and render.
env = NoiseEnvironment(user_config=config.get_config())
if args.gammaSweep or args.all:
    env.register_plotter(noisefigs.plotters.GammaSweepsPlotter)
env.plot()
| MattNolanLab/ei-attractor | grid_cell_model/simulations/007_noise/figures/cosyne2015-abstract/figure_gamma.py | Python | gpl-3.0 | 492 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
---------------------------------------------------------------------------------------------------
dlg_subida
mantém as informações sobre a dialog de subida
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
revision 0.2 2015/nov mlabru
pep8 style conventions
revision 0.1 2014/nov mlabru
initial release (Linux/Python)
---------------------------------------------------------------------------------------------------
"""
__version__ = "$revision: 0.2$"
__author__ = "mlabru, sophosoft"
__date__ = "2015/12"
# < imports >--------------------------------------------------------------------------------------
# python library
import json
import os
# PyQt library
from PyQt4 import QtCore
from PyQt4 import QtGui
# view
import view.piloto.dlg_subida_ui as dlg
# < class CDlgSubida >-----------------------------------------------------------------------------
class CDlgSubida(QtGui.QDialog, dlg.Ui_CDlgSubida):
    """
    Dialog that lets the pilot pick a climb ("subida") procedure and builds
    the corresponding "SUB <key>" command string for the selected strip.
    """
    # ---------------------------------------------------------------------------------------------
    def __init__(self, fsck_http, fdct_config, f_strip_cur, fdct_sub, f_parent=None):
        """
        @param fsck_http: HTTP socket used to reach the server
        @param fdct_config: configuration dictionary
        @param f_strip_cur: currently selected strip
        @param fdct_sub: dictionary of climb procedures (key -> label)
        @param f_parent: parent window
        """
        # init super class
        super(CDlgSubida, self).__init__(f_parent)

        # communication socket
        self.__sck_http = fsck_http
        assert self.__sck_http

        # configuration dictionary
        self.__dct_config = fdct_config
        assert self.__dct_config is not None

        # climb-procedure dictionary
        self.__dct_sub = fdct_sub
        assert self.__dct_sub is not None

        # build the dialog widgets
        self.setupUi(self)

        # dialog title
        self.setWindowTitle(u"Procedimento de Subida")

        # slot/signal connections
        self.__config_connects()

        # window titles and messages
        self.__config_texts()

        # restore saved window geometry
        self.__restore_settings()

        # climb dictionary still empty? fetch it from the server
        if not self.__dct_sub:
            self.__load_sub()

        # populate the combo box with the climb labels
        self.cbx_sub.addItems(sorted(self.__dct_sub.values()))

        # configure buttons
        self.bbx_subida.button(QtGui.QDialogButtonBox.Cancel).setText("&Cancela")
        self.bbx_subida.button(QtGui.QDialogButtonBox.Ok).setFocus()

        # build the initial command string
        self.__update_command()

    # ---------------------------------------------------------------------------------------------
    def __config_connects(self):
        """
        Configure the slot/signal connections.
        """
        # rebuild the command whenever the selection changes
        self.cbx_sub.currentIndexChanged.connect(self.__on_cbx_currentIndexChanged)

    # ---------------------------------------------------------------------------------------------
    def __config_texts(self):
        """
        Configure the settings key used to persist this window's state.
        """
        self.__txt_settings = "CDlgSubida"

    # ---------------------------------------------------------------------------------------------
    def get_data(self):
        """
        Return the assembled command line ("SUB <key>").
        """
        return self.lbl_comando.text()

    # ---------------------------------------------------------------------------------------------
    def __load_sub(self):
        """
        Load the climb-procedure dictionary from the server.
        """
        # bug fix: 'logging' was referenced below but never imported in this
        # module, so every error path raised NameError instead of logging
        import logging

        # check for requirements
        assert self.__sck_http is not None
        assert self.__dct_config is not None
        assert self.__dct_sub is not None

        # request path for the climb table
        ls_req = "data/sub.json"

        # get server address
        l_srv = self.__dct_config.get("srv.addr", None)

        if l_srv is not None:
            # fetch the climb table from the server
            l_dict = self.__sck_http.get_data(l_srv, ls_req)

            if l_dict is not None:
                # merge the climbs into the dictionary
                self.__dct_sub.update(json.loads(l_dict))

            # otherwise, table not found on the server...
            else:
                l_log = logging.getLogger("CDlgSubida::__load_sub")
                l_log.setLevel(logging.ERROR)
                l_log.error(u"<E01: tabela de subidas não existe no servidor.")

        # otherwise, no server address configured...
        else:
            l_log = logging.getLogger("CDlgSubida::__load_sub")
            l_log.setLevel(logging.WARNING)
            l_log.warning(u"<E02: srv.addr não existe na configuração.")

    # ---------------------------------------------------------------------------------------------
    def __restore_settings(self):
        """
        Restore the window geometry saved by a previous session.
        """
        l_set = QtCore.QSettings("sophosoft", "piloto")
        assert l_set

        self.restoreGeometry(l_set.value("%s/Geometry" % (self.__txt_settings)).toByteArray())

    # ---------------------------------------------------------------------------------------------
    def __update_command(self):
        """
        Rebuild the "SUB <key>" command from the current combo selection.
        """
        # robustness fix: with an empty dictionary the loop variable was
        # never bound and the format call raised NameError
        if not self.__dct_sub:
            return

        # find the key whose label matches the selected entry (falls back to
        # the last iterated key when nothing matches, as before)
        for l_key, l_sub in self.__dct_sub.iteritems():
            if self.cbx_sub.currentText() == l_sub:
                break

        # assemble the command and show it in the label
        ls_cmd = "SUB {}".format(l_key)
        self.lbl_comando.setText(ls_cmd)

    # =============================================================================================
    # field editing
    # =============================================================================================

    # ---------------------------------------------------------------------------------------------
    @QtCore.pyqtSignature("int")
    def __on_cbx_currentIndexChanged(self, f_val):
        """
        Selection changed: refresh the command string.
        """
        self.__update_command()
# < the end >--------------------------------------------------------------------------------------
| mlabru/ptracks | view/piloto/dlg_subida.py | Python | gpl-3.0 | 7,178 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-16 18:59
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: rewrites the Meta options of the 'user'
    # model (ordering by first/last name plus the menu-visibility
    # permission set).  Keep in sync with core.models.User.Meta; do not
    # edit the generated options string by hand.

    dependencies = [
        ('core', '0012_auto_20180227_0858'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='user',
            options={'ordering': ('first_name', 'last_name'), 'permissions': (('menu_dados_auxiliares', 'Mostrar Menu Dados Auxiliares'), ('menu_tabelas_auxiliares', 'Mostrar Menu de Tabelas Auxiliares'), ('menu_contatos', 'Mostrar Menu de Cadastro de Contatos'), ('menu_grupocontatos', 'Mostrar Menu de Cadastro de Grupos de Contatos'), ('menu_processos', 'Mostrar Menu de Cadastro de Processos'), ('menu_area_trabalho', 'Mostrar Menu de Áreas de Trabalho'), ('menu_impresso_enderecamento', 'Mostrar Menu de Impressos de Endereçamento'), ('menu_relatorios', 'Mostrar Menu de Relatórios'), ('menu_administracao', 'Mostrar Menu de Administração'), ('menu_agenda', 'Mostrar Menu da Agenda de Eventos'))},
        ),
    ]
| cmjatai/cmj | cmj/core/migrations/0013_auto_20180516_1559.py | Python | gpl-3.0 | 1,070 |
'''
t4_adm.py - this file is part of S3QL (http://s3ql.googlecode.com)
Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function, absolute_import
from s3ql.backends import local
from s3ql.backends.common import BetterBackend
import shutil
import sys
import tempfile
import unittest2 as unittest
import subprocess
import os.path
if __name__ == '__main__':
mypath = sys.argv[0]
else:
mypath = __file__
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(mypath), '..'))
class AdmTests(unittest.TestCase):
    """Integration tests for the s3qladm command-line tool.

    Each test gets a throw-away cache directory and local backend; the
    filesystem tools are invoked as subprocesses of the same interpreter.
    """

    def setUp(self):
        # fresh temp dirs per test so runs are isolated
        self.cache_dir = tempfile.mkdtemp()
        self.backend_dir = tempfile.mkdtemp()
        self.storage_url = 'local://' + self.backend_dir
        self.passphrase = 'oeut3d'

    def tearDown(self):
        shutil.rmtree(self.cache_dir)
        shutil.rmtree(self.backend_dir)

    def mkfs(self):
        # Create a test filesystem; the passphrase is written twice because
        # mkfs.s3ql asks for it and then for its confirmation on stdin.
        proc = subprocess.Popen([sys.executable, os.path.join(BASEDIR, 'bin', 'mkfs.s3ql'),
                                 '-L', 'test fs', '--max-obj-size', '500',
                                 '--cachedir', self.cache_dir, '--quiet',
                                 self.storage_url ], stdin=subprocess.PIPE)
        print(self.passphrase, file=proc.stdin)
        print(self.passphrase, file=proc.stdin)
        proc.stdin.close()
        self.assertEqual(proc.wait(), 0)

    def test_passphrase(self):
        # Change the passphrase via 's3qladm passphrase' (old, new, confirm
        # on stdin) and verify the backend decrypts with the new one.
        self.mkfs()
        passphrase_new = 'sd982jhd'
        proc = subprocess.Popen([sys.executable, os.path.join(BASEDIR, 'bin', 's3qladm'),
                                 '--quiet', 'passphrase',
                                 self.storage_url ], stdin=subprocess.PIPE)
        print(self.passphrase, file=proc.stdin)
        print(passphrase_new, file=proc.stdin)
        print(passphrase_new, file=proc.stdin)
        proc.stdin.close()
        self.assertEqual(proc.wait(), 0)
        plain_backend = local.Backend(self.storage_url, None, None)
        backend = BetterBackend(passphrase_new, 'bzip2', plain_backend)
        self.assertTrue(isinstance(backend['s3ql_passphrase'], str))
# Build the test suite (required by some pyunit-based runners).
def suite():
    return unittest.makeSuite(AdmTests)

# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| thefirstwind/s3qloss | tests/t4_adm.py | Python | gpl-3.0 | 2,378 |
import urllib
import urllib2
import json
import functools
def buildUrl(url, params=()):
    """Append URL-encoded key/value pairs to *url* as a query string.

    ``params`` is an iterable of ``(key, value)`` pairs; values are coerced
    with ``str`` before quoting.  If *url* already contains a ``'?'`` the
    pairs are appended with a leading ``'&'``, otherwise a ``'?'`` is
    inserted first.

    Fixes: the default argument was a mutable list (shared-default
    pitfall); it is now an immutable tuple.  The manual first-iteration
    flag is replaced by a single join.
    """
    if len(params) > 0:
        if url.find('?') < 0:
            # no '?' in the url yet: the first pair starts the query string
            url += '?'
            sep = ''
        else:
            # query string already started: separate with '&' from the start
            sep = '&'
        encoded = '&'.join(urllib.quote(key) + '=' + urllib.quote(str(value))
                           for key, value in params)
        url = url + sep + encoded
    return url
class UrlOpenFactory(object):
    """Small urllib2 wrapper that sends/accepts JSON headers and returns
    the raw response body as a string."""

    @property
    def httpParams(self):
        # we have to send something... so why not json?
        return {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }

    def createRequest(self, url, data = None):
        # POST when data is given, GET otherwise (urllib2 semantics)
        return urllib2.Request(url, data, self.httpParams)

    def urlopen(self, url, data = None):
        # return the raw response body
        return urllib2.urlopen(self.createRequest(url, data)).read()
class JsonUrlOpenFactory(UrlOpenFactory):
    """UrlOpenFactory variant that JSON-encodes the request body and
    JSON-decodes the response."""

    @property
    def httpParams(self):
        return {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }

    def urlopen(self, url, data = None):
        # serialize outgoing data (when present) and parse the reply
        return json.loads(super(JsonUrlOpenFactory, self).urlopen(url, json.dumps(data) if not data is None else None))
def dumpHttpError(f):
    """Decorator: when the wrapped call raises urllib2.HTTPError, write the
    response body to a local 'httpError' file (with literal '\\n' sequences
    expanded to real newlines) and re-raise the error."""
    @functools.wraps(f)
    def inner(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except urllib2.HTTPError as http_err:
            body = http_err.read()
            with open('httpError', 'w') as out:
                out.write('\n'.join(body.split('\\n')))
            raise http_err
    return inner
| marook/python-crucible | src/modules/crucible/rest.py | Python | gpl-3.0 | 1,700 |
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import re
from lxml.etree import XPath as X
from calibre.utils.filenames import ascii_text
DOCUMENT = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/officeDocument'
DOCPROPS = 'http://schemas.openxmlformats.org/package/2006/relationships/metadata/core-properties'
APPPROPS = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/extended-properties'
STYLES = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/styles'
NUMBERING = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/numbering'
FONTS = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/fontTable'
IMAGES = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/image'
LINKS = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/hyperlink'
FOOTNOTES = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/footnotes'
ENDNOTES = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/endnotes'
THEMES = 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/theme'
namespaces = {
'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
'o': 'urn:schemas-microsoft-com:office:office',
've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
# Text Content
'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
'w10': 'urn:schemas-microsoft-com:office:word',
'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
'xml': 'http://www.w3.org/XML/1998/namespace',
# Drawing
'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
'mv': 'urn:schemas-microsoft-com:mac:vml',
'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
'v': 'urn:schemas-microsoft-com:vml',
'wp': 'http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing',
# Properties (core and extended)
'cp': 'http://schemas.openxmlformats.org/package/2006/metadata/core-properties',
'dc': 'http://purl.org/dc/elements/1.1/',
'ep': 'http://schemas.openxmlformats.org/officeDocument/2006/extended-properties',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
# Content Types
'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
# Package Relationships
'r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships',
'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
# Dublin Core document properties
'dcmitype': 'http://purl.org/dc/dcmitype/',
'dcterms': 'http://purl.org/dc/terms/'
}
xpath_cache = {}
def XPath(expr):
    """Compile *expr* with the module's namespace map, memoizing the result
    in xpath_cache so repeated expressions are compiled only once."""
    try:
        return xpath_cache[expr]
    except KeyError:
        compiled = xpath_cache[expr] = X(expr, namespaces=namespaces)
        return compiled
def is_tag(x, q):
    # Compare an element (or a raw tag string) against a 'prefix:name'
    # qualified name, expanding the prefix via the namespaces map.
    tag = getattr(x, 'tag', x)
    ns, name = q.partition(':')[0::2]
    return '{%s}%s' % (namespaces.get(ns, None), name) == tag
def barename(x):
    """Strip any leading '{namespace}' (Clark notation) from a tag name."""
    return x.rpartition('}')[2]
def XML(x):
    # Qualify *x* into the reserved 'xml' namespace (Clark notation).
    return '{%s}%s' % (namespaces['xml'], x)

def expand(name):
    # Expand 'prefix:local' into '{uri}local'; names without a prefix pass
    # through unchanged.
    ns, tag = name.partition(':')[0::2]
    if ns:
        tag = '{%s}%s' % (namespaces[ns], tag)
    return tag

def get(x, attr, default=None):
    # Attribute lookup on element *x* using a prefixed attribute name.
    return x.attrib.get(expand(attr), default)
def ancestor(elem, name):
    # Nearest ancestor of *elem* matching *name*, or None when there is none.
    try:
        return XPath('ancestor::%s[1]' % name)(elem)[0]
    except IndexError:
        return None

def generate_anchor(name, existing):
    # Build an ASCII-safe HTML anchor id from *name*, suffixing '_<n>' until
    # it does not collide with ids already in *existing*.
    x = y = 'id_' + re.sub(r'[^0-9a-zA-Z_]', '', ascii_text(name)).lstrip('_')
    c = 1
    while y in existing:
        y = '%s_%d' % (x, c)
        c += 1
    return y

def children(elem, *args):
    # Direct children of *elem* matching any of the given qualified names.
    return XPath('|'.join('child::%s' % a for a in args))(elem)

def descendants(elem, *args):
    # All descendants of *elem* matching any of the given qualified names.
    return XPath('|'.join('descendant::%s' % a for a in args))(elem)
| pra85/calibre | src/calibre/ebooks/docx/names.py | Python | gpl-3.0 | 4,127 |
"""
This file is part of Maml.
Maml is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Maml is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Maml. If not, see <http://www.gnu.org/licenses/>.
Copyright 2010 Brian Hawthorne
"""
from unittest import TestCase
from maml.parser import *
example1 = """
-def A(z)
%ul
%li
%html
%body
%h3
"""
example2 = """
-def A(z)
%ul
-for x in range(z)
.list-item#item_id
= x
foo
%html
%body
%h3 yup
= A(6)
"""
class TestParser (TestCase):
    """Unit tests for the maml grammar elements (pyparsing based)."""

    def test_tag_attrs(self):
        # each input string maps to the expected flat token tuple
        good_results = {
            '()': ('(', '', ')'),
            '{}': ('{', '', '}'),
            '(borp="baz" dorp="daz" blarp="blaz")':
                ('(', 'borp="baz" dorp="daz" blarp="blaz"', ')'),
            '{borp:"baz", dorp:"daz", blarp:"blaz"}':
                ('{', 'borp:"baz", dorp:"daz", blarp:"blaz"', '}'),
        }
        for input, output in good_results.items():
            self.assertEqual(tuple(tag_attrs.parseString(input)), output)

    def test_tag_decl(self):
        # tag declarations: %tag, .class, #id combinations with optional
        # attribute blocks and '=' expression markers
        good_results = {
            '%html':
                ('%', 'html', ''),
            '%html foo':
                ('%', 'html', 'foo'),
            '%html= foo':
                ('%', 'html', '=', 'foo'),
            '%html()= foo':
                ('%', 'html', '(', '', ')', '=', 'foo'),
            '%html.class-name()= foo':
                ('%', 'html', '.', 'class-name', '(', '', ')', '=', 'foo'),
            '%html.class-name(borp="baz")= foo':
                ('%', 'html', '.', 'class-name', '(', 'borp="baz"', ')', '=', 'foo'),
            '#foo.boo':
                ('#', 'foo', '.', 'boo', ''),
            '.foo(){}':
                ('.', 'foo', '(', '', ')', '{', '', '}', ''),
        }
        for input, output in good_results.items():
            self.assertEqual(tuple(tag_decl.parseString(input)), output)

    def test_namespace(self):
        # a namespace directive must parse and render to a non-empty string
        namespace_example = "-namespace(/common/defs.mak, bnorp)"
        assert Parser().parse(namespace_example).render_string()
| brianhawthorne/maml | maml/test/test_parser.py | Python | gpl-3.0 | 2,468 |
# -*- coding: utf8 -*-
from django.conf import settings
from django.contrib import auth
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_POST
from bootcamp.decorators import ajax_required
from registration.users import UserModel
from django.contrib.auth.models import User
from bootcamp.feeds.models import Feed
from django.core.context_processors import csrf
from django.template.loader import render_to_string
from django.shortcuts import render, redirect, get_object_or_404
import random
import json
# feeds shown per page
FEEDS_NUM_PAGES = 20
# current server-side app version (major.mid.min), compared against the
# version reported by the client in checkupdate()
MAJOR_VERSION = 0
MID_VERSION = 1
MIN_VERSION = 3
# release note returned to the client together with the update check
NOTE = """
更新内容:
1. 删除评论、帖子,取消赞扣分以防刷经验;
2. 增加修改资料功能;
"""
# download URL of the latest APK
URL = "http://nqzx.net/media/ads/nqzx.apk"
def check_version(version):
    """Return True when the client *version* ('major.mid.min') is older than
    the server's current version and therefore needs an update.

    Bug fix: the previous implementation compared each component
    independently in a cascade, so e.g. a client at '1.0.0' against server
    0.1.3 was wrongly told to update (1 > 0 on the middle digit).  Versions
    are now compared lexicographically as integer tuples.
    """
    client = tuple(int(part) for part in version.split('.'))
    return (MAJOR_VERSION, MID_VERSION, MIN_VERSION) > client
def get_level(reputation):
    """Map a reputation score to a user level from 1 to 18.

    Falsy input (None, 0) is level 1.  The 18-branch elif chain is replaced
    by a threshold table: level N is assigned while reputation is below the
    N-th threshold; at or above the last threshold the level is 18.
    """
    if not reputation:
        return 1
    # upper bound (exclusive) of each level, in ascending order
    thresholds = (5, 15, 30, 50, 100, 200, 500, 1000, 2000, 3000,
                  6000, 10000, 18000, 30000, 60000, 100000, 300000)
    for level, limit in enumerate(thresholds, start=1):
        if reputation < limit:
            return level
    return 18
@require_POST
@ajax_required
def login(request):
    """Authenticate with POSTed 'account'/'password'.

    Responds with JSON {"status": bool, "data": ...} where data is the
    user's profile dict on success or a (Chinese) error message on failure.
    """
    username = request.POST.get('account')
    password = request.POST.get('password')
    result = {"status": False, "data":""}
    # reject missing, empty or whitespace-only credentials
    if not username or not password:
        result = {"status": False, "data":"未收到用户名或密码!"}
        return HttpResponse(json.dumps(result), content_type="application/json")
    if username=="" or username.isspace():
        result = {"status": False, "data":"用户名不能为空"}
        return HttpResponse(json.dumps(result), content_type="application/json")
    if password=="" or password.isspace():
        result = {"status": False, "data":"密码不能为空"}
        return HttpResponse(json.dumps(result), content_type="application/json")
    user = auth.authenticate(username=username, password=password)
    if user is not None:
        if user.is_active:
            # start the session and return the profile payload
            auth.login(request, user)
            result = {"status": True, "data": {"id": user.id, "email": user.email, \
            "location": user.profile.location, "mobile": user.profile.mobile, "reputation": \
            user.profile.reputation,"signdate": user.profile.signdate}}
        else:
            # account exists but is disabled
            result = {"status": False, "data":"["+username+"]已被暂时禁用"}
    else:
        # bad credentials
        result = {"status": False, "data":"用户名或密码不正确,请重试"}
    return HttpResponse(json.dumps(result), content_type="application/json")
@require_POST
@ajax_required
def reg(request):
    """Register a new account from POSTed 'account'/'password'/'email'.

    Responds with JSON {"status": bool, "data": ...}: the new user's
    profile dict on success, a (Chinese) error message otherwise.
    """
    username = request.POST.get('account')
    password = request.POST.get('password')
    email = request.POST.get('email')
    result = {"status": False, "data":""}
    # reject missing, empty or whitespace-only fields
    if not username or not password or not email:
        result = {"status": False, "data":"未收到用户名、密码或者用户名!"}
        return HttpResponse(json.dumps(result), content_type="application/json")
    if username=="" or username.isspace():
        result = {"status": False, "data":"用户名不能为空"}
        return HttpResponse(json.dumps(result), content_type="application/json")
    if password=="" or password.isspace():
        result = {"status": False, "data":"密码不能为空"}
        return HttpResponse(json.dumps(result), content_type="application/json")
    if email=="" or email.isspace():
        result = {"status": False, "data":"邮箱不能为空"}
        return HttpResponse(json.dumps(result), content_type="application/json")
    # clean data: reject duplicate username or email (case-insensitive)
    existing = UserModel().objects.filter(username__iexact=username)
    if existing.exists():
        result = {"status": False, "data":"用户名已经存在"}
        return HttpResponse(json.dumps(result), content_type="application/json")
    if UserModel().objects.filter(email__iexact=email):
        result = {"status": False, "data":"邮箱已经存在"}
        return HttpResponse(json.dumps(result), content_type="application/json")
    # create and activate the account immediately (no email confirmation)
    user = UserModel().objects.create_user(username, email, password)
    user.is_active = True
    user.save()
    result = {"status": True, "data": {"id": user.id, "email": user.email, \
    "location": user.profile.location, "mobile": user.profile.mobile, "reputation": \
    user.profile.reputation,"signdate": user.profile.signdate}}
    return HttpResponse(json.dumps(result), content_type="application/json")
@require_POST
@ajax_required
def get_state(request):
    """Return the logged-in user's profile as JSON.

    NOTE(review): assumes request.user is authenticated — an anonymous
    user has no .profile; confirm this URL is login-protected.
    """
    user = request.user
    state = {"id": user.id, "username": user.username, "email": user.email, "location": user.profile.location, \
    "mobile": user.profile.mobile, "reputation": user.profile.reputation,"first_name": user.first_name, \
    "sex": user.profile.sex,"signdate": user.profile.signdate}
    return HttpResponse(json.dumps(state), content_type="application/json")
@require_POST
@ajax_required
def set_state(request):
    """Update profile fields for the user identified by POST 'userid'.

    Only the fields actually present (and truthy) in the POST are written.
    Responds with JSON {"status": bool, "data": {...updated fields...}}.
    """
    result = {"status": False, "data": {}}
    userid = request.POST.get('userid')
    user = User.objects.get(pk=userid)
    if not user:
        # Bug fix: this branch previously returned json.dumps(state) where
        # 'state' was never defined, raising NameError instead of the
        # failure payload.  NOTE(review): User.objects.get raises
        # DoesNotExist rather than returning None, so this guard is mostly
        # defensive — confirm whether get_object_or_404 was intended.
        return HttpResponse(json.dumps(result), content_type="application/json")
    first_name = request.POST.get('first_name')
    location = request.POST.get('location')
    mobile = request.POST.get('mobile')
    reputation = request.POST.get('reputation')
    sex = request.POST.get('sex')
    signdate = request.POST.get('signdate')
    # overwrite only the posted fields
    if first_name:
        user.first_name = first_name
    if location:
        user.profile.location = location
    if mobile:
        user.profile.mobile = mobile
    if reputation:
        user.profile.reputation = reputation
    if sex:
        user.profile.sex = sex
    if signdate:
        user.profile.signdate = signdate
    user.save()
    result = {"status": True, "data": {"first_name": first_name, "sex": sex, \
    "location":location,"mobile":mobile,"reputation":reputation,"signdate":signdate}}
    return HttpResponse(json.dumps(result), content_type="application/json")
def get_feeds(request):
    """Render feed HTML: a single feed when POST 'feed_id' is given,
    otherwise the first page of all feeds."""
    page = 1
    feed_id = request.POST["feed_id"]
    csrf_token = unicode(csrf(request)['csrf_token'])
    html = u''
    if feed_id:
        # single-feed mode
        feed = Feed.objects.get(pk=feed_id)
        html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
            'feed': feed,
            'user': request.user,
            'csrf_token': csrf_token,
            'lvl': get_level(feed.user.profile.reputation),
            })
        )
    else:
        # first page of the global feed list
        feeds = Feed.get_feeds()
        paginator = Paginator(feeds, FEEDS_NUM_PAGES)
        feeds = paginator.page(page)
        for feed in feeds:
            html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
                'feed': feed,
                'user': request.user,
                'csrf_token': csrf_token,
                'lvl': get_level(feed.user.profile.reputation),
                })
            )
    return HttpResponse(html)
@ajax_required
def checkupdate(request):
    # Compare the client's reported version with the server's current one
    # and return update metadata: status flag, release note, download URL.
    version = request.POST.get('version')
    ret = {"status": check_version(version), "note": NOTE, "url": URL}
    return HttpResponse(json.dumps(ret), content_type="application/json")
def _html_feeds(last_feed, user, csrf_token, feed_source='all'):
    """Render all feeds newer than *last_feed*, optionally filtered to a
    single author id, and return the concatenated HTML partials."""
    feeds = Feed.get_feeds_after(last_feed)
    if feed_source != 'all':
        feeds = feeds.filter(user__id=feed_source)
    html = u''
    for feed in feeds:
        html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
            'feed': feed,
            'user': user,
            'csrf_token': csrf_token,
            'lvl': get_level(feed.user.profile.reputation),
            })
        )
    return html
def post(request):
    """Create a new feed post (truncated to 255 chars), award reputation,
    and return the HTML of feeds newer than POST 'last_feed'."""
    last_feed = request.POST.get('last_feed')
    user = request.user
    # NOTE(review): user id 283 gets its posts attributed to a random user
    # in pk range [318, 367] — presumably an anonymizing/seed account;
    # confirm these magic ids against the production database.
    rand_user = User.objects.get(pk=random.randint(318, 367))
    csrf_token = unicode(csrf(request)['csrf_token'])
    feed = Feed()
    if user.id == 283:
        feed.user = rand_user
    else:
        feed.user = user
    post = request.POST['post']
    post = post.strip()
    if len(post) > 0:
        # non-empty post: save it and award 3 reputation points
        feed.post = post[:255]
        user.profile.reputation += 3
        user.save()
        feed.save()
    html = _html_feeds(last_feed, user, csrf_token)
    return HttpResponse(html)
def load(request):
    """Paginated feed loading for infinite scroll.

    GET params: 'from_feed' (anchor id), 'page', 'active' (feed filter) and
    'feed_source' ('all' or a user id).  Returns concatenated feed partials;
    an out-of-range page yields an empty response.
    """
    from_feed = request.GET.get('from_feed')
    page = request.GET.get('page')
    active = request.GET.get('active')
    feed_source = request.GET.get('feed_source')
    if active and active != 'all':
        all_feeds = Feed.get_feeds(from_feed, active)
    else:
        all_feeds = Feed.get_feeds(from_feed)
    if feed_source != 'all':
        all_feeds = all_feeds.filter(user__id=feed_source)
    paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
    try:
        feeds = paginator.page(page)
    except EmptyPage:
        # past the last page: nothing more to load
        feeds = []
    html = u''
    csrf_token = unicode(csrf(request)['csrf_token'])
    for feed in feeds:
        html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
            'feed': feed,
            'user': request.user,
            'lvl': get_level(feed.user.profile.reputation),
            'csrf_token': csrf_token
            })
        )
    return HttpResponse(html)
def load_new(request):
    # Return HTML for feeds posted after GET 'last_feed' (live refresh).
    last_feed = request.GET.get('last_feed')
    user = request.user
    csrf_token = unicode(csrf(request)['csrf_token'])
    html = _html_feeds(last_feed, user, csrf_token)
    return HttpResponse(html)
def comment(request):
    """POST: add a comment (truncated to 255 chars) to a feed, fire
    notifications and award 2 reputation points.  GET: just render the
    feed's comment list."""
    if request.method == 'POST':
        feed_id = request.POST['feed']
        feed = Feed.objects.get(pk=feed_id)
        post = request.POST['post']
        post = post.strip()
        if len(post) > 0:
            post = post[:255]
            user = request.user
            feed.comment(user=user, post=post)
            # notify the feed owner and other commenters
            user.profile.notify_commented(feed)
            user.profile.notify_also_commented(feed)
            user.profile.reputation += 2
            user.save()
        return render(request, 'app/partial_feed_comments.html', {'feed': feed})
    else:
        feed_id = request.GET.get('feed')
        feed = Feed.objects.get(pk=feed_id)
        return render(request, 'app/partial_feed_comments.html', {'feed': feed})
def track_comments(request):
    """Render the comment-list partial for the feed given by GET 'feed'."""
    feed = Feed.objects.get(pk=request.GET.get('feed'))
    return render(request, 'app/partial_feed_comments.html', {'feed': feed})
| Wang-Sen/nqzx-backend | bootcamp/app/views.py | Python | gpl-3.0 | 11,441 |
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2020 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for Common
"""
from tests import common
import bleachbit
import os
class CommonTestCase(common.BleachbitTestCase):
    """Test case for Common."""

    def test_expandvars(self):
        """Unit test for expandvars."""
        var = os.path.expandvars('$HOME')
        self.assertIsString(var)

    def test_environment(self):
        """Test for important environment variables"""
        # useful for researching
        # grep -Poh "([\\$%]\w+)" cleaners/*xml | cut -b2- | sort | uniq -i
        # per-platform list of variables the cleaners rely on
        envs = {'posix': ['XDG_DATA_HOME', 'XDG_CONFIG_HOME', 'XDG_CACHE_HOME', 'HOME'],
                'nt': ['AppData', 'CommonAppData', 'Documents', 'ProgramFiles', 'UserProfile', 'WinDir']}
        for env in envs[os.name]:
            e = os.getenv(env)
            self.assertIsNotNone(e)
            # a real path should be longer than a few characters
            self.assertGreater(len(e), 4)

    def test_expanduser(self):
        """Unit test for expanduser."""
        # Return Unicode when given Unicode.
        self.assertIsString(os.path.expanduser('~'))
        # Blank input should give blank output.
        self.assertEqual(os.path.expanduser(''), '')
        # An absolute path should not be altered.
        abs_dirs = {'posix': '$HOME', 'nt': '%USERPROFILE%'}
        abs_dir = os.path.expandvars(abs_dirs[os.name])
        self.assertExists(abs_dir)
        self.assertEqual(os.path.expanduser(abs_dir), abs_dir)
        # A relative path (without a reference to the home directory)
        # should not be expanded.
        self.assertEqual(os.path.expanduser('common'), 'common')
| tstenner/bleachbit | tests/TestCommon.py | Python | gpl-3.0 | 2,298 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##Copyright (c) 2017 Benoit Valot and Panisa Treepong
##benoit.valot@univ-fcomte.fr
##UMR 6249 Chrono-Environnement, Besançon, France
##Licence GPL
from . import variables
class ClipRead():
    """Clipped read wrapper.

    Captures the alignment geometry of an AlignedSegment-like object
    (query/reference start, end, lengths, CIGAR) and exposes helpers for
    locating the clip and extracting the direct-repeat (DR) sequence.
    """

    def __init__(self, alignedsegment):
        seg = alignedsegment
        self.read_seq = seg.query_sequence
        self.read_name = seg.query_name
        self.read_start = seg.query_alignment_start   # 0-based, left
        self.read_end = seg.query_alignment_end       # exclusive
        self.read_len = seg.query_alignment_length
        self.ref_start = seg.reference_start          # 0-based, left
        self.ref_end = seg.reference_end              # exclusive
        self.ref_len = seg.reference_length
        self.cigar = seg.cigarstring
        self.cigartuples = seg.cigartuples
        self.isreverse = seg.is_reverse

    def isstartclip(self):
        """True when the clip is at the read start, False when at the end."""
        if self.cigartuples is None:
            raise Exception("ClipRead must be aligned")
        if self.cigartuples[0][0] in variables.cigarclip:
            return True
        if self.cigartuples[-1][0] in variables.cigarclip:
            return False
        raise Exception("ClipRead must contain clip part at start or end")

    def getdr(self, drstart, drend):
        """Return the DR sequence for reference span [drstart, drend), or
        None when the read does not fully cover it."""
        begin = self.read_start + (drstart - self.ref_start)
        if begin < 0:
            return None   # DR truncated on the left
        end = self.read_end - (self.ref_end - drend)
        if end > len(self.read_seq):
            return None   # DR truncated on the right
        return self.read_seq[begin:end]

    def getclippos(self):
        """Reference coordinate of the clip point."""
        return self.ref_start if self.isstartclip() else self.ref_end

    def getclipseq(self):
        """Clipped part of the read; None for hard clips (no bases kept)."""
        if len(self.read_seq) == self.read_len:
            return None
        if self.isstartclip():
            return self.read_seq[:self.read_start]
        return self.read_seq[self.read_end:]

    def __len__(self):
        return len(self.read_seq)

    def __repr__(self):
        return self.read_seq

    def __str__(self):
        return (str(self.ref_start) + ": " + str(self.read_start) + self.read_seq +
                str(self.read_end) + " :" + str(self.ref_end))
str(self.read_end) + " :" + str(self.ref_end)
if __name__=='__main__':
import doctest
doctest.testmod()
| bvalot/panISa | lib/clipread.py | Python | gpl-3.0 | 2,634 |
import contextlib
import unittest
import pytest
from cryptography import x509
import six
from ipapython.dn import DN, RDN, AVA
if six.PY3:
unicode = str
def cmp(a, b):
    """Python-2-style three-way comparison (-1/0/1) that also asserts the
    consistency of all six rich-comparison operators along the way."""
    if a == b:
        assert not (a < b) and not (a > b) and not (a != b)
        assert a <= b and a >= b
        return 0
    if a < b:
        assert not (a > b) and a != b
        assert a <= b and not (a >= b)
        return -1
    assert a > b and a != b
    assert not (a <= b) and a >= b
    return 1
pytestmark = pytest.mark.tier0
def expected_class(klass, component):
    """Map a container class and a component role to the expected type.

    For example, the 'AVA' components of an RDN must be AVA instances.
    Raises ValueError for unknown class/component combinations.
    """
    table = {
        AVA: {'self': AVA},
        RDN: {'self': RDN, 'AVA': AVA},
        DN: {'self': DN, 'AVA': AVA, 'RDN': RDN},
    }
    try:
        return table[klass][component]
    except KeyError:
        raise ValueError("class %s with component '%s' unknown" % (klass.__name__, component))
class TestAVA(unittest.TestCase):
    """Unit tests for the AVA (attribute-value assertion) class."""
    def setUp(self):
        """Build three AVA fixtures shared by every test."""
        self.attr1 = 'cn'
        self.value1 = 'Bob'
        self.str_ava1 = '%s=%s' % (self.attr1, self.value1)
        self.ava1 = AVA(self.attr1, self.value1)
        self.attr2 = 'ou'
        self.value2 = 'People'
        self.str_ava2 = '%s=%s' % (self.attr2, self.value2)
        self.ava2 = AVA(self.attr2, self.value2)
        self.attr3 = 'c'
        self.value3 = 'US'
        self.str_ava3 = '%s=%s' % (self.attr3, self.value3)
        self.ava3 = AVA(self.attr3, self.value3)
    def assertExpectedClass(self, klass, obj, component):
        """Assert obj has the exact class expected_class() predicts."""
        self.assertIs(obj.__class__, expected_class(klass, component))
    def test_create(self):
        """AVA construction from pairs, strings, tuples, lists; bad input."""
        # Create with attr,value pair
        ava1 = AVA(self.attr1, self.value1)
        self.assertExpectedClass(AVA, ava1, 'self')
        self.assertEqual(ava1, self.ava1)
        # Create with "attr=value" string
        ava1 = AVA(self.str_ava1)
        self.assertExpectedClass(AVA, ava1, 'self')
        self.assertEqual(ava1, self.ava1)
        # Create with tuple (attr, value)
        ava1 = AVA((self.attr1, self.value1))
        self.assertExpectedClass(AVA, ava1, 'self')
        self.assertEqual(ava1, self.ava1)
        # Create with list [attr, value]
        ava1 = AVA([self.attr1, self.value1])
        self.assertExpectedClass(AVA, ava1, 'self')
        self.assertEqual(ava1, self.ava1)
        # Create with no args should fail
        with self.assertRaises(TypeError):
            AVA()
        # Create with more than 3 args should fail
        with self.assertRaises(TypeError):
            AVA(self.attr1, self.value1, self.attr1, self.attr1)
        # Create with 1 arg which is not string should fail
        with self.assertRaises(TypeError):
            AVA(1)
        # Create with malformed AVA string should fail
        with self.assertRaises(ValueError):
            AVA("cn")
        # Create with non-string parameters, should convert
        ava1 = AVA(1, self.value1)
        self.assertExpectedClass(AVA, ava1, 'self')
        self.assertEqual(ava1.attr, u'1')
        ava1 = AVA((1, self.value1))
        self.assertExpectedClass(AVA, ava1, 'self')
        self.assertEqual(ava1.attr, u'1')
        ava1 = AVA(self.attr1, 1)
        self.assertExpectedClass(AVA, ava1, 'self')
        self.assertEqual(ava1.value, u'1')
        ava1 = AVA((self.attr1, 1))
        self.assertExpectedClass(AVA, ava1, 'self')
        self.assertEqual(ava1.value, u'1')
    def test_indexing(self):
        """AVA supports lookup by attr name and by position 0/1."""
        ava1 = AVA(self.ava1)
        self.assertEqual(ava1[self.attr1], self.value1)
        self.assertEqual(ava1[0], self.attr1)
        self.assertEqual(ava1[1], self.value1)
        with self.assertRaises(KeyError):
            ava1['foo']  # pylint: disable=pointless-statement
        with self.assertRaises(KeyError):
            ava1[3]  # pylint: disable=pointless-statement
    def test_properties(self):
        """attr and value are exposed as unicode properties."""
        ava1 = AVA(self.ava1)
        self.assertEqual(ava1.attr, self.attr1)
        self.assertIsInstance(ava1.attr, unicode)
        self.assertEqual(ava1.value, self.value1)
        self.assertIsInstance(ava1.value, unicode)
    def test_str(self):
        """str() renders the canonical attr=value form."""
        ava1 = AVA(self.ava1)
        self.assertEqual(str(ava1), self.str_ava1)
        self.assertIsInstance(str(ava1), str)
    def test_cmp(self):
        """Ordering/equality: case-insensitive, attr primary, value secondary."""
        # Equality
        ava1 = AVA(self.attr1, self.value1)
        self.assertTrue(ava1 == self.ava1)
        self.assertFalse(ava1 != self.ava1)
        self.assertTrue(ava1 == self.str_ava1)
        self.assertFalse(ava1 != self.str_ava1)
        result = cmp(ava1, self.ava1)
        self.assertEqual(result, 0)
        # Upper case attr should still be equal
        ava1 = AVA(self.attr1.upper(), self.value1)
        self.assertFalse(ava1.attr == self.attr1)
        self.assertTrue(ava1.value == self.value1)
        self.assertTrue(ava1 == self.ava1)
        self.assertFalse(ava1 != self.ava1)
        result = cmp(ava1, self.ava1)
        self.assertEqual(result, 0)
        # Upper case value should still be equal
        ava1 = AVA(self.attr1, self.value1.upper())
        self.assertTrue(ava1.attr == self.attr1)
        self.assertFalse(ava1.value == self.value1)
        self.assertTrue(ava1 == self.ava1)
        self.assertFalse(ava1 != self.ava1)
        result = cmp(ava1, self.ava1)
        self.assertEqual(result, 0)
        # Make ava1's attr greater
        with self.assertRaises(AttributeError):
            ava1.attr = self.attr1 + "1"
        ava1 = AVA(self.attr1 + "1", self.value1.upper())
        self.assertFalse(ava1 == self.ava1)
        self.assertTrue(ava1 != self.ava1)
        result = cmp(ava1, self.ava1)
        self.assertEqual(result, 1)
        result = cmp(self.ava1, ava1)
        self.assertEqual(result, -1)
        # Reset ava1's attr, should be equal again
        with self.assertRaises(AttributeError):
            ava1.attr = self.attr1
        ava1 = AVA(self.attr1, self.value1.upper())
        result = cmp(ava1, self.ava1)
        self.assertEqual(result, 0)
        # Make ava1's value greater
        # attr will be equal, this tests secondary comparision component
        with self.assertRaises(AttributeError):
            ava1.value = self.value1 + "1"
        ava1 = AVA(self.attr1, self.value1 + "1")
        result = cmp(ava1, self.ava1)
        self.assertEqual(result, 1)
        result = cmp(self.ava1, ava1)
        self.assertEqual(result, -1)
    def test_hashing(self):
        """Equal AVAs (case-insensitively) hash identically; dict/set dedupe."""
        # create AVA's that are equal but differ in case
        ava1 = AVA((self.attr1.lower(), self.value1.upper()))
        ava2 = AVA((self.attr1.upper(), self.value1.lower()))
        # AVAs that are equal should hash to the same value.
        self.assertEqual(ava1, ava2)
        self.assertEqual(hash(ava1), hash(ava2))
        # Different AVA objects with the same value should
        # map to 1 common key and 1 member in a set. The key and
        # member are based on the object's value.
        ava1_a = AVA(self.ava1)
        ava1_b = AVA(self.ava1)
        ava2_a = AVA(self.ava2)
        ava2_b = AVA(self.ava2)
        ava3_a = AVA(self.ava3)
        ava3_b = AVA(self.ava3)
        self.assertEqual(ava1_a, ava1_b)
        self.assertEqual(ava2_a, ava2_b)
        self.assertEqual(ava3_a, ava3_b)
        d = dict()
        s = set()
        d[ava1_a] = str(ava1_a)
        d[ava1_b] = str(ava1_b)
        d[ava2_a] = str(ava2_a)
        d[ava2_b] = str(ava2_b)
        s.add(ava1_a)
        s.add(ava1_b)
        s.add(ava2_a)
        s.add(ava2_b)
        self.assertEqual(len(d), 2)
        self.assertEqual(len(s), 2)
        self.assertEqual(sorted(d), sorted([ava1_a, ava2_a]))
        self.assertEqual(sorted(s), sorted([ava1_a, ava2_a]))
        self.assertTrue(ava1_a in d)
        self.assertTrue(ava1_b in d)
        self.assertTrue(ava2_a in d)
        self.assertTrue(ava2_b in d)
        self.assertFalse(ava3_a in d)
        self.assertFalse(ava3_b in d)
        self.assertTrue(ava1_a in s)
        self.assertTrue(ava1_b in s)
        self.assertTrue(ava2_a in s)
        self.assertTrue(ava2_b in s)
        self.assertFalse(ava3_a in s)
        self.assertFalse(ava3_b in s)
class TestRDN(unittest.TestCase):
    """Unit tests for the RDN (relative distinguished name) class."""
    def setUp(self):
        """Build single- and multi-AVA RDN fixtures."""
        # ava1 must sort before ava2
        self.attr1 = 'cn'
        self.value1 = 'Bob'
        self.str_ava1 = '%s=%s' % (self.attr1, self.value1)
        self.ava1 = AVA(self.attr1, self.value1)
        self.str_rdn1 = '%s=%s' % (self.attr1, self.value1)
        self.rdn1 = RDN((self.attr1, self.value1))
        self.attr2 = 'ou'
        self.value2 = 'people'
        self.str_ava2 = '%s=%s' % (self.attr2, self.value2)
        self.ava2 = AVA(self.attr2, self.value2)
        self.str_rdn2 = '%s=%s' % (self.attr2, self.value2)
        self.rdn2 = RDN((self.attr2, self.value2))
        self.str_ava3 = '%s=%s+%s=%s' % (self.attr1, self.value1, self.attr2, self.value2)
        self.str_rdn3 = '%s=%s+%s=%s' % (self.attr1, self.value1, self.attr2, self.value2)
        self.rdn3 = RDN(self.ava1, self.ava2)
    def assertExpectedClass(self, klass, obj, component):
        """Assert obj has the exact class expected_class() predicts."""
        self.assertIs(obj.__class__, expected_class(klass, component))
    def test_create(self):
        """RDN construction from pairs, AVAs and strings; canonical AVA order."""
        # Create with single attr,value pair
        rdn1 = RDN((self.attr1, self.value1))
        self.assertEqual(len(rdn1), 1)
        self.assertEqual(rdn1, self.rdn1)
        self.assertExpectedClass(RDN, rdn1, 'self')
        for i in range(0, len(rdn1)):
            self.assertExpectedClass(RDN, rdn1[i], 'AVA')
        self.assertEqual(rdn1[0], self.ava1)
        # Create with multiple attr,value pairs
        rdn3 = RDN((self.attr1, self.value1), (self.attr2, self.value2))
        self.assertEqual(len(rdn3), 2)
        self.assertEqual(rdn3, self.rdn3)
        self.assertExpectedClass(RDN, rdn3, 'self')
        for i in range(0, len(rdn3)):
            self.assertExpectedClass(RDN, rdn3[i], 'AVA')
        self.assertEqual(rdn3[0], self.ava1)
        self.assertEqual(rdn3[1], self.ava2)
        # Create with multiple attr,value pairs passed as lists
        rdn3 = RDN([self.attr1, self.value1], [self.attr2, self.value2])
        self.assertEqual(len(rdn3), 2)
        self.assertEqual(rdn3, self.rdn3)
        self.assertExpectedClass(RDN, rdn3, 'self')
        for i in range(0, len(rdn3)):
            self.assertExpectedClass(RDN, rdn3[i], 'AVA')
        self.assertEqual(rdn3[0], self.ava1)
        self.assertEqual(rdn3[1], self.ava2)
        # Create with multiple attr,value pairs but reverse
        # constructor parameter ordering. RDN canonical ordering
        # should remain the same
        rdn3 = RDN((self.attr2, self.value2), (self.attr1, self.value1))
        self.assertEqual(len(rdn3), 2)
        self.assertEqual(rdn3, self.rdn3)
        self.assertExpectedClass(RDN, rdn3, 'self')
        for i in range(0, len(rdn3)):
            self.assertExpectedClass(RDN, rdn3[i], 'AVA')
        self.assertEqual(rdn3[0], self.ava1)
        self.assertEqual(rdn3[1], self.ava2)
        # Create with single AVA object
        rdn1 = RDN(self.ava1)
        self.assertEqual(len(rdn1), 1)
        self.assertEqual(rdn1, self.rdn1)
        self.assertExpectedClass(RDN, rdn1, 'self')
        for i in range(0, len(rdn1)):
            self.assertExpectedClass(RDN, rdn1[i], 'AVA')
        self.assertEqual(rdn1[0], self.ava1)
        # Create with multiple AVA objects
        rdn3 = RDN(self.ava1, self.ava2)
        self.assertEqual(len(rdn3), 2)
        self.assertEqual(rdn3, self.rdn3)
        self.assertExpectedClass(RDN, rdn3, 'self')
        for i in range(0, len(rdn3)):
            self.assertExpectedClass(RDN, rdn3[i], 'AVA')
        self.assertEqual(rdn3[0], self.ava1)
        self.assertEqual(rdn3[1], self.ava2)
        # Create with multiple AVA objects but reverse constructor
        # parameter ordering. RDN canonical ordering should remain
        # the same
        rdn3 = RDN(self.ava2, self.ava1)
        self.assertEqual(len(rdn3), 2)
        self.assertEqual(rdn3, self.rdn3)
        self.assertExpectedClass(RDN, rdn3, 'self')
        for i in range(0, len(rdn3)):
            self.assertExpectedClass(RDN, rdn3[i], 'AVA')
        self.assertEqual(rdn3[0], self.ava1)
        self.assertEqual(rdn3[1], self.ava2)
        # Create with single string with 1 AVA
        rdn1 = RDN(self.str_rdn1)
        self.assertEqual(len(rdn1), 1)
        self.assertEqual(rdn1, self.rdn1)
        self.assertExpectedClass(RDN, rdn1, 'self')
        for i in range(0, len(rdn1)):
            self.assertExpectedClass(RDN, rdn1[i], 'AVA')
        self.assertEqual(rdn1[0], self.ava1)
        # Create with single string with 2 AVA's
        rdn3 = RDN(self.str_rdn3)
        self.assertEqual(len(rdn3), 2)
        self.assertEqual(rdn3, self.rdn3)
        self.assertExpectedClass(RDN, rdn3, 'self')
        for i in range(0, len(rdn3)):
            self.assertExpectedClass(RDN, rdn3[i], 'AVA')
        self.assertEqual(rdn3[0], self.ava1)
        self.assertEqual(rdn3[1], self.ava2)
    def test_properties(self):
        """attr/value expose the first AVA's attr/value as unicode."""
        rdn1 = RDN(self.rdn1)
        rdn2 = RDN(self.rdn2)
        rdn3 = RDN(self.rdn3)
        self.assertEqual(rdn1.attr, self.attr1)
        self.assertIsInstance(rdn1.attr, unicode)
        self.assertEqual(rdn1.value, self.value1)
        self.assertIsInstance(rdn1.value, unicode)
        self.assertEqual(rdn2.attr, self.attr2)
        self.assertIsInstance(rdn2.attr, unicode)
        self.assertEqual(rdn2.value, self.value2)
        self.assertIsInstance(rdn2.value, unicode)
        self.assertEqual(rdn3.attr, self.attr1)
        self.assertIsInstance(rdn3.attr, unicode)
        self.assertEqual(rdn3.value, self.value1)
        self.assertIsInstance(rdn3.value, unicode)
    def test_str(self):
        """str() renders AVAs joined with '+'."""
        rdn1 = RDN(self.rdn1)
        rdn2 = RDN(self.rdn2)
        rdn3 = RDN(self.rdn3)
        self.assertEqual(str(rdn1), self.str_rdn1)
        self.assertIsInstance(str(rdn1), str)
        self.assertEqual(str(rdn2), self.str_rdn2)
        self.assertIsInstance(str(rdn2), str)
        self.assertEqual(str(rdn3), self.str_rdn3)
        self.assertIsInstance(str(rdn3), str)
    def test_cmp(self):
        """Ordering: attr primary, value secondary, AVA count tertiary."""
        # Equality
        rdn1 = RDN((self.attr1, self.value1))
        self.assertTrue(rdn1 == self.rdn1)
        self.assertFalse(rdn1 != self.rdn1)
        self.assertTrue(rdn1 == self.str_rdn1)
        self.assertFalse(rdn1 != self.str_rdn1)
        result = cmp(rdn1, self.rdn1)
        self.assertEqual(result, 0)
        # Make rdn1's attr greater
        rdn1 = RDN((self.attr1 + "1", self.value1))
        self.assertFalse(rdn1 == self.rdn1)
        self.assertTrue(rdn1 != self.rdn1)
        result = cmp(rdn1, self.rdn1)
        self.assertEqual(result, 1)
        result = cmp(self.rdn1, rdn1)
        self.assertEqual(result, -1)
        # Reset rdn1's attr, should be equal again
        rdn1 = RDN((self.attr1, self.value1))
        result = cmp(rdn1, self.rdn1)
        self.assertEqual(result, 0)
        # Make rdn1's value greater
        # attr will be equal, this tests secondary comparision component
        rdn1 = RDN((self.attr1, self.value1 + "1"))
        result = cmp(rdn1, self.rdn1)
        self.assertEqual(result, 1)
        result = cmp(self.rdn1, rdn1)
        self.assertEqual(result, -1)
        # Make sure rdn's with more ava's are greater
        result = cmp(self.rdn1, self.rdn3)
        self.assertEqual(result, -1)
        result = cmp(self.rdn3, self.rdn1)
        self.assertEqual(result, 1)
    def test_indexing(self):
        """RDN lookup by position, attr name, and slices."""
        rdn1 = RDN(self.rdn1)
        rdn2 = RDN(self.rdn2)
        rdn3 = RDN(self.rdn3)
        self.assertEqual(rdn1[0], self.ava1)
        self.assertEqual(rdn1[self.ava1.attr], self.ava1.value)
        with self.assertRaises(KeyError):
            rdn1['foo']  # pylint: disable=pointless-statement
        self.assertEqual(rdn2[0], self.ava2)
        self.assertEqual(rdn2[self.ava2.attr], self.ava2.value)
        with self.assertRaises(KeyError):
            rdn2['foo']  # pylint: disable=pointless-statement
        self.assertEqual(rdn3[0], self.ava1)
        self.assertEqual(rdn3[self.ava1.attr], self.ava1.value)
        self.assertEqual(rdn3[1], self.ava2)
        self.assertEqual(rdn3[self.ava2.attr], self.ava2.value)
        with self.assertRaises(KeyError):
            rdn3['foo']  # pylint: disable=pointless-statement
        self.assertEqual(rdn1.attr, self.attr1)
        self.assertEqual(rdn1.value, self.value1)
        with self.assertRaises(TypeError):
            rdn3[1.0]  # pylint: disable=pointless-statement
        # Slices
        self.assertEqual(rdn3[0:1], [self.ava1])
        self.assertEqual(rdn3[:], [self.ava1, self.ava2])
    def test_assignments(self):
        """RDNs are immutable; item assignment must raise TypeError."""
        rdn = RDN((self.attr1, self.value1))
        with self.assertRaises(TypeError):
            # pylint: disable=unsupported-assignment-operation
            rdn[0] = self.ava2
    def test_iter(self):
        """Iteration yields AVAs in canonical order."""
        rdn1 = RDN(self.rdn1)
        rdn2 = RDN(self.rdn2)
        rdn3 = RDN(self.rdn3)
        self.assertEqual(len(rdn1), 1)
        self.assertEqual(rdn1[:], [self.ava1])
        for i, ava in enumerate(rdn1):
            if i == 0:
                self.assertEqual(ava, self.ava1)
            else:
                self.fail("got iteration index %d, but len=%d" % (i, len(rdn1)))
        self.assertEqual(len(rdn2), 1)
        self.assertEqual(rdn2[:], [self.ava2])
        for i, ava in enumerate(rdn2):
            if i == 0:
                self.assertEqual(ava, self.ava2)
            else:
                self.fail("got iteration index %d, but len=%d" % (i, len(rdn2)))
        self.assertEqual(len(rdn3), 2)
        self.assertEqual(rdn3[:], [self.ava1, self.ava2])
        for i, ava in enumerate(rdn3):
            if i == 0:
                self.assertEqual(ava, self.ava1)
            elif i == 1:
                self.assertEqual(ava, self.ava2)
            else:
                self.fail("got iteration index %d, but len=%d" % (i, len(rdn3)))
    def test_concat(self):
        """+ and += accept RDN, AVA and string operands."""
        rdn1 = RDN((self.attr1, self.value1))
        rdn2 = RDN((self.attr2, self.value2))
        # in-place addition
        rdn1 += rdn2
        self.assertEqual(rdn1, self.rdn3)
        self.assertExpectedClass(RDN, rdn1, 'self')
        for i in range(0, len(rdn1)):
            self.assertExpectedClass(RDN, rdn1[i], 'AVA')
        rdn1 = RDN((self.attr1, self.value1))
        rdn1 += self.ava2
        self.assertEqual(rdn1, self.rdn3)
        self.assertExpectedClass(RDN, rdn1, 'self')
        for i in range(0, len(rdn1)):
            self.assertExpectedClass(RDN, rdn1[i], 'AVA')
        rdn1 = RDN((self.attr1, self.value1))
        rdn1 += self.str_ava2
        self.assertEqual(rdn1, self.rdn3)
        self.assertExpectedClass(RDN, rdn1, 'self')
        for i in range(0, len(rdn1)):
            self.assertExpectedClass(RDN, rdn1[i], 'AVA')
        # concatenation
        rdn1 = RDN((self.attr1, self.value1))
        rdn3 = rdn1 + rdn2
        self.assertEqual(rdn3, self.rdn3)
        self.assertExpectedClass(RDN, rdn3, 'self')
        for i in range(0, len(rdn3)):
            self.assertExpectedClass(RDN, rdn3[i], 'AVA')
        rdn3 = rdn1 + self.ava2
        self.assertEqual(rdn3, self.rdn3)
        self.assertExpectedClass(RDN, rdn3, 'self')
        for i in range(0, len(rdn3)):
            self.assertExpectedClass(RDN, rdn3[i], 'AVA')
        rdn3 = rdn1 + self.str_ava2
        self.assertEqual(rdn3, self.rdn3)
        self.assertExpectedClass(RDN, rdn3, 'self')
        for i in range(0, len(rdn3)):
            self.assertExpectedClass(RDN, rdn3[i], 'AVA')
    def test_hashing(self):
        """Case-insensitively equal RDNs hash identically."""
        # create RDN's that are equal but differ in case
        rdn1 = RDN((self.attr1.lower(), self.value1.upper()))
        rdn2 = RDN((self.attr1.upper(), self.value1.lower()))
        # RDNs that are equal should hash to the same value.
        self.assertEqual(rdn1, rdn2)
        self.assertEqual(hash(rdn1), hash(rdn2))
class TestDN(unittest.TestCase):
def setUp(self):
# ava1 must sort before ava2
self.attr1 = 'cn'
self.value1 = u'Bob'
self.str_ava1 = '%s=%s' % (self.attr1, self.value1)
self.ava1 = AVA(self.attr1, self.value1)
self.str_rdn1 = '%s=%s' % (self.attr1, self.value1)
self.rdn1 = RDN((self.attr1, self.value1))
self.attr2 = 'ou'
self.value2 = u'people'
self.str_ava2 = '%s=%s' % (self.attr2, self.value2)
self.ava2 = AVA(self.attr2, self.value2)
self.str_rdn2 = '%s=%s' % (self.attr2, self.value2)
self.rdn2 = RDN((self.attr2, self.value2))
self.str_dn1 = self.str_rdn1
self.dn1 = DN(self.rdn1)
self.str_dn2 = self.str_rdn2
self.dn2 = DN(self.rdn2)
self.str_dn3 = '%s,%s' % (self.str_rdn1, self.str_rdn2)
self.dn3 = DN(self.rdn1, self.rdn2)
self.base_rdn1 = RDN(('dc', 'redhat'))
self.base_rdn2 = RDN(('dc', 'com'))
self.base_dn = DN(self.base_rdn1, self.base_rdn2)
self.container_rdn1 = RDN(('cn', 'sudorules'))
self.container_rdn2 = RDN(('cn', 'sudo'))
self.container_dn = DN(self.container_rdn1, self.container_rdn2)
self.base_container_dn = DN((self.attr1, self.value1),
self.container_dn, self.base_dn)
self.x500name = x509.Name([
x509.NameAttribute(
x509.NameOID.ORGANIZATIONAL_UNIT_NAME, self.value2),
x509.NameAttribute(x509.NameOID.COMMON_NAME, self.value1),
])
def assertExpectedClass(self, klass, obj, component):
self.assertIs(obj.__class__, expected_class(klass, component))
def test_create(self):
# Create with single attr,value pair
dn1 = DN((self.attr1, self.value1))
self.assertEqual(len(dn1), 1)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[0].attr, unicode)
self.assertIsInstance(dn1[0].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
# Create with single attr,value pair passed as a tuple
dn1 = DN((self.attr1, self.value1))
self.assertEqual(len(dn1), 1)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
# Creation with multiple attr,value string pairs should fail
with self.assertRaises(ValueError):
dn1 = DN(self.attr1, self.value1, self.attr2, self.value2)
# Create with multiple attr,value pairs passed as tuples & lists
dn1 = DN((self.attr1, self.value1), [self.attr2, self.value2])
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with multiple attr,value pairs passed as tuple and RDN
dn1 = DN((self.attr1, self.value1), RDN((self.attr2, self.value2)))
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with multiple attr,value pairs but reverse
# constructor parameter ordering. RDN ordering should also be
# reversed because DN's are a ordered sequence of RDN's
dn1 = DN((self.attr2, self.value2), (self.attr1, self.value1))
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn2)
self.assertEqual(dn1[1], self.rdn1)
# Create with single RDN object
dn1 = DN(self.rdn1)
self.assertEqual(len(dn1), 1)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
# Create with multiple RDN objects, assure ordering is preserved.
dn1 = DN(self.rdn1, self.rdn2)
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with multiple RDN objects in different order, assure
# ordering is preserved.
dn1 = DN(self.rdn2, self.rdn1)
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn2)
self.assertEqual(dn1[1], self.rdn1)
# Create with single string with 1 RDN
dn1 = DN(self.str_rdn1)
self.assertEqual(len(dn1), 1)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
# Create with single string with 2 RDN's
dn1 = DN(self.str_dn3)
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with a python-cryptography 'Name'
dn1 = DN(self.x500name)
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with RDN, and 2 DN's (e.g. attr + container + base)
dn1 = DN((self.attr1, self.value1), self.container_dn, self.base_dn)
self.assertEqual(len(dn1), 5)
dn_str = ','.join([str(self.rdn1),
str(self.container_rdn1), str(self.container_rdn2),
str(self.base_rdn1), str(self.base_rdn2)])
self.assertEqual(str(dn1), dn_str)
def test_str(self):
dn1 = DN(self.dn1)
dn2 = DN(self.dn2)
dn3 = DN(self.dn3)
self.assertEqual(str(dn1), self.str_dn1)
self.assertIsInstance(str(dn1), str)
self.assertEqual(str(dn2), self.str_dn2)
self.assertIsInstance(str(dn2), str)
self.assertEqual(str(dn3), self.str_dn3)
self.assertIsInstance(str(dn3), str)
def test_cmp(self):
# Equality
dn1 = DN((self.attr1, self.value1))
self.assertTrue(dn1 == self.dn1)
self.assertFalse(dn1 != self.dn1)
self.assertTrue(dn1 == self.str_dn1)
self.assertFalse(dn1 != self.str_dn1)
result = cmp(dn1, self.dn1)
self.assertEqual(result, 0)
# Make dn1's attr greater
with self.assertRaises(AttributeError):
dn1[0].attr = self.attr1 + "1"
dn1 = DN((self.attr1 + "1", self.value1))
self.assertFalse(dn1 == self.dn1)
self.assertTrue(dn1 != self.dn1)
result = cmp(dn1, self.dn1)
self.assertEqual(result, 1)
result = cmp(self.dn1, dn1)
self.assertEqual(result, -1)
# Reset dn1's attr, should be equal again
with self.assertRaises(AttributeError):
dn1[0].attr = self.attr1
dn1 = DN((self.attr1, self.value1))
result = cmp(dn1, self.dn1)
self.assertEqual(result, 0)
# Make dn1's value greater
# attr will be equal, this tests secondary comparision component
with self.assertRaises(AttributeError):
dn1[0].value = self.value1 + "1"
dn1 = DN((self.attr1, self.value1 + "1"))
result = cmp(dn1, self.dn1)
self.assertEqual(result, 1)
result = cmp(self.dn1, dn1)
self.assertEqual(result, -1)
# Make sure dn's with more rdn's are greater
result = cmp(self.dn1, self.dn3)
self.assertEqual(result, -1)
result = cmp(self.dn3, self.dn1)
self.assertEqual(result, 1)
# Test startswith, endswith
container_dn = DN(self.container_dn)
base_container_dn = DN(self.base_container_dn)
self.assertTrue(base_container_dn.startswith(self.rdn1))
self.assertTrue(base_container_dn.startswith(self.dn1))
self.assertTrue(base_container_dn.startswith(self.dn1 + container_dn))
self.assertFalse(base_container_dn.startswith(self.dn2))
self.assertFalse(base_container_dn.startswith(self.rdn2))
self.assertTrue(base_container_dn.startswith((self.dn1)))
self.assertTrue(base_container_dn.startswith((self.rdn1)))
self.assertFalse(base_container_dn.startswith((self.rdn2)))
self.assertTrue(base_container_dn.startswith((self.rdn2, self.rdn1)))
self.assertTrue(base_container_dn.startswith((self.dn1, self.dn2)))
self.assertTrue(base_container_dn.endswith(self.base_dn))
self.assertTrue(base_container_dn.endswith(container_dn + self.base_dn))
self.assertFalse(base_container_dn.endswith(DN(self.base_rdn1)))
self.assertTrue(base_container_dn.endswith(DN(self.base_rdn2)))
self.assertTrue(base_container_dn.endswith((DN(self.base_rdn1), DN(self.base_rdn2))))
# Test "in" membership
self.assertTrue(self.container_rdn1 in container_dn)
self.assertTrue(container_dn in container_dn)
self.assertFalse(self.base_rdn1 in container_dn)
self.assertTrue(self.container_rdn1 in base_container_dn)
self.assertTrue(container_dn in base_container_dn)
self.assertTrue(container_dn + self.base_dn in
base_container_dn)
self.assertTrue(self.dn1 + container_dn + self.base_dn in
base_container_dn)
self.assertTrue(self.dn1 + container_dn + self.base_dn ==
base_container_dn)
self.assertFalse(self.container_rdn1 in self.base_dn)
def test_indexing(self):
dn1 = DN(self.dn1)
dn2 = DN(self.dn2)
dn3 = DN(self.dn3)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[self.rdn1.attr], self.rdn1.value)
with self.assertRaises(KeyError):
dn1['foo'] # pylint: disable=pointless-statement
self.assertEqual(dn2[0], self.rdn2)
self.assertEqual(dn2[self.rdn2.attr], self.rdn2.value)
with self.assertRaises(KeyError):
dn2['foo'] # pylint: disable=pointless-statement
self.assertEqual(dn3[0], self.rdn1)
self.assertEqual(dn3[self.rdn1.attr], self.rdn1.value)
self.assertEqual(dn3[1], self.rdn2)
self.assertEqual(dn3[self.rdn2.attr], self.rdn2.value)
with self.assertRaises(KeyError):
dn3['foo'] # pylint: disable=pointless-statement
with self.assertRaises(TypeError):
dn3[1.0] # pylint: disable=pointless-statement
def test_assignments(self):
dn = DN('t=0,t=1,t=2,t=3,t=4,t=5,t=6,t=7,t=8,t=9')
with self.assertRaises(TypeError):
# pylint: disable=unsupported-assignment-operation
dn[0] = RDN('t=a')
with self.assertRaises(TypeError):
# pylint: disable=unsupported-assignment-operation
dn[0:1] = [RDN('t=a'), RDN('t=b')]
def test_iter(self):
dn1 = DN(self.dn1)
dn2 = DN(self.dn2)
dn3 = DN(self.dn3)
self.assertEqual(len(dn1), 1)
self.assertEqual(dn1[:], self.rdn1)
for i, ava in enumerate(dn1):
if i == 0:
self.assertEqual(ava, self.rdn1)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(self.rdn1)))
self.assertEqual(len(dn2), 1)
self.assertEqual(dn2[:], self.rdn2)
for i, ava in enumerate(dn2):
if i == 0:
self.assertEqual(ava, self.rdn2)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(self.rdn2)))
self.assertEqual(len(dn3), 2)
self.assertEqual(dn3[:], DN(self.rdn1, self.rdn2))
for i, ava in enumerate(dn3):
if i == 0:
self.assertEqual(ava, self.rdn1)
elif i == 1:
self.assertEqual(ava, self.rdn2)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(dn3)))
def test_concat(self):
dn1 = DN((self.attr1, self.value1))
dn2 = DN([self.attr2, self.value2])
# in-place addtion
dn1 += dn2
self.assertEqual(dn1, self.dn3)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
dn1 = DN((self.attr1, self.value1))
dn1 += self.rdn2
self.assertEqual(dn1, self.dn3)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
dn1 = DN((self.attr1, self.value1))
dn1 += self.dn2
self.assertEqual(dn1, self.dn3)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
dn1 = DN((self.attr1, self.value1))
dn1 += self.str_dn2
self.assertEqual(dn1, self.dn3)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
# concatenation
dn1 = DN((self.attr1, self.value1))
dn3 = dn1 + dn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
for j in range(0, len(dn3[i])):
self.assertExpectedClass(DN, dn3[i][j], 'AVA')
dn1 = DN((self.attr1, self.value1))
dn3 = dn1 + self.rdn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
for j in range(0, len(dn3[i])):
self.assertExpectedClass(DN, dn3[i][j], 'AVA')
dn3 = dn1 + self.str_rdn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
self.assertExpectedClass(DN, dn3[i][0], 'AVA')
dn3 = dn1 + self.str_dn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
for j in range(0, len(dn3[i])):
self.assertExpectedClass(DN, dn3[i][j], 'AVA')
dn3 = dn1 + self.dn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
for j in range(0, len(dn3[i])):
self.assertExpectedClass(DN, dn3[i][j], 'AVA')
def test_find(self):
# -10 -9 -8 -7 -6 -5 -4 -3 -2 -1
dn = DN('t=0,t=1,cn=bob,t=3,t=4,t=5,cn=bob,t=7,t=8,t=9')
pat = DN('cn=bob')
# forward
self.assertEqual(dn.find(pat), 2)
self.assertEqual(dn.find(pat, 1), 2)
self.assertEqual(dn.find(pat, 1, 3), 2)
self.assertEqual(dn.find(pat, 2, 3), 2)
self.assertEqual(dn.find(pat, 6), 6)
self.assertEqual(dn.find(pat, 7), -1)
self.assertEqual(dn.find(pat, 1, 2), -1)
with self.assertRaises(ValueError):
self.assertEqual(dn.index(pat, 7), -1)
with self.assertRaises(ValueError):
self.assertEqual(dn.index(pat, 1, 2), -1)
# reverse
self.assertEqual(dn.rfind(pat), 6)
self.assertEqual(dn.rfind(pat, -4), 6)
self.assertEqual(dn.rfind(pat, 6), 6)
self.assertEqual(dn.rfind(pat, 6, 8), 6)
self.assertEqual(dn.rfind(pat, 6, 8), 6)
self.assertEqual(dn.rfind(pat, -8), 6)
self.assertEqual(dn.rfind(pat, -8, -4), 6)
self.assertEqual(dn.rfind(pat, -8, -5), 2)
self.assertEqual(dn.rfind(pat, 7), -1)
self.assertEqual(dn.rfind(pat, -3), -1)
with self.assertRaises(ValueError):
self.assertEqual(dn.rindex(pat, 7), -1)
with self.assertRaises(ValueError):
self.assertEqual(dn.rindex(pat, -3), -1)
def test_replace(self):
# pylint: disable=no-member
dn = DN('t=0,t=1,t=2,t=3,t=4,t=5,t=6,t=7,t=8,t=9')
with self.assertRaises(AttributeError):
dn.replace # pylint: disable=pointless-statement
    def test_hashing(self):
        """DNs that compare equal (case-insensitively) must hash equally and
        act as a single key in dicts and a single member in sets."""
        # create DN's that are equal but differ in case
        dn1 = DN((self.attr1.lower(), self.value1.upper()))
        dn2 = DN((self.attr1.upper(), self.value1.lower()))
        # DNs that are equal should hash to the same value.
        self.assertEqual(dn1, dn2)
        # Good, everyone's equal, now verify their hash values
        self.assertEqual(hash(dn1), hash(dn2))
        # Different DN objects with the same value should
        # map to 1 common key and 1 member in a set. The key and
        # member are based on the object's value.
        dn1_a = DN(self.dn1)
        dn1_b = DN(self.dn1)
        dn2_a = DN(self.dn2)
        dn2_b = DN(self.dn2)
        dn3_a = DN(self.dn3)
        dn3_b = DN(self.dn3)
        self.assertEqual(dn1_a, dn1_b)
        self.assertEqual(dn2_a, dn2_b)
        self.assertEqual(dn3_a, dn3_b)
        d = dict()
        s = set()
        d[dn1_a] = str(dn1_a)
        d[dn1_b] = str(dn1_b)
        d[dn2_a] = str(dn2_a)
        d[dn2_b] = str(dn2_b)
        s.add(dn1_a)
        s.add(dn1_b)
        s.add(dn2_a)
        s.add(dn2_b)
        # Four objects but only two distinct values -> two entries.
        self.assertEqual(len(d), 2)
        self.assertEqual(len(s), 2)
        self.assertEqual(sorted(d), sorted([dn1_a, dn2_a]))
        self.assertEqual(sorted(s), sorted([dn1_a, dn2_a]))
        # Membership works for every object whose value is present ...
        self.assertTrue(dn1_a in d)
        self.assertTrue(dn1_b in d)
        self.assertTrue(dn2_a in d)
        self.assertTrue(dn2_b in d)
        # ... and fails for a value that was never inserted.
        self.assertFalse(dn3_a in d)
        self.assertFalse(dn3_b in d)
        self.assertTrue(dn1_a in s)
        self.assertTrue(dn1_b in s)
        self.assertTrue(dn2_a in s)
        self.assertTrue(dn2_b in s)
        self.assertFalse(dn3_a in s)
        self.assertFalse(dn3_b in s)
def test_x500_text(self):
# null DN x500 ordering and LDAP ordering are the same
nulldn = DN()
self.assertEqual(nulldn.ldap_text(), nulldn.x500_text())
# reverse a DN with a single RDN
self.assertEqual(self.dn1.ldap_text(), self.dn1.x500_text())
# reverse a DN with 2 RDNs
dn3_x500 = self.dn3.x500_text()
dn3_rev = DN(self.rdn2, self.rdn1)
self.assertEqual(dn3_rev.ldap_text(), dn3_x500)
# reverse a longer DN
longdn_x500 = self.base_container_dn.x500_text()
longdn_rev = DN(longdn_x500)
l = len(self.base_container_dn)
for i in range(l):
self.assertEqual(longdn_rev[i], self.base_container_dn[l-1-i])
class TestEscapes(unittest.TestCase):
    """DN parsing must unescape special characters in attribute values."""
    def setUp(self):
        self.privilege = 'R,W privilege'
        # comma escaped as the hex pair \2c
        self.dn_str_hex_escape = 'cn=R\\2cW privilege,cn=privileges,cn=pbac,dc=idm,dc=lab,dc=bos,dc=redhat,dc=com'
        # comma escaped with a literal backslash
        self.dn_str_backslash_escape = 'cn=R\\,W privilege,cn=privileges,cn=pbac,dc=idm,dc=lab,dc=bos,dc=redhat,dc=com'
    def test_escape(self):
        """Both escape styles decode to the same literal value."""
        dn = DN(self.dn_str_hex_escape)
        self.assertEqual(dn['cn'], self.privilege)
        self.assertEqual(dn[0].value, self.privilege)
        dn = DN(self.dn_str_backslash_escape)
        self.assertEqual(dn['cn'], self.privilege)
        self.assertEqual(dn[0].value, self.privilege)
class TestInternationalization(unittest.TestCase):
    """AVA/RDN/DN must accept unicode text for attrs and values; on
    Python 3 passing raw UTF-8 bytes raises TypeError, on Python 2 the
    bytes are decoded transparently."""
    def setUp(self):
        # Hello in Arabic
        self.arabic_hello_utf8 = (b'\xd9\x85\xd9\x83\xd9\x8a\xd9\x84' +
                                  b'\xd8\xb9\x20\xd9\x85\xd8\xa7\xd9' +
                                  b'\x84\xd9\x91\xd8\xb3\xd9\x84\xd8\xa7')
        self.arabic_hello_unicode = self.arabic_hello_utf8.decode('utf-8')
    def assert_equal_utf8(self, obj, b):
        """Compare str(obj) against the UTF-8 bytes *b*, decoding on py3."""
        if six.PY2:
            self.assertEqual(str(obj), b)
        else:
            self.assertEqual(str(obj), b.decode('utf-8'))
    @contextlib.contextmanager
    def fail_py3(self, exception_type):
        """Expect *exception_type* on Python 3 only; on Python 2 the same
        input is accepted, so the exception is re-raised as a failure."""
        try:
            yield
        except exception_type:
            if six.PY2:
                raise
    def test_i18n(self):
        # sanity: the fixture round-trips through UTF-8
        self.assertEqual(self.arabic_hello_utf8,
                         self.arabic_hello_unicode.encode('utf-8'))
        # AVA's
        # test attr i18n
        ava1 = AVA(self.arabic_hello_unicode, 'foo')
        self.assertIsInstance(ava1.attr, unicode)
        self.assertIsInstance(ava1.value, unicode)
        self.assertEqual(ava1.attr, self.arabic_hello_unicode)
        self.assert_equal_utf8(ava1, self.arabic_hello_utf8 + b'=foo')
        with self.fail_py3(TypeError):
            ava1 = AVA(self.arabic_hello_utf8, 'foo')
        if six.PY2:
            self.assertIsInstance(ava1.attr, unicode)
            self.assertIsInstance(ava1.value, unicode)
            self.assertEqual(ava1.attr, self.arabic_hello_unicode)
            self.assert_equal_utf8(ava1, self.arabic_hello_utf8 + b'=foo')
        # test value i18n
        ava1 = AVA('cn', self.arabic_hello_unicode)
        self.assertIsInstance(ava1.attr, unicode)
        self.assertIsInstance(ava1.value, unicode)
        self.assertEqual(ava1.value, self.arabic_hello_unicode)
        self.assert_equal_utf8(ava1, b'cn=' + self.arabic_hello_utf8)
        with self.fail_py3(TypeError):
            ava1 = AVA('cn', self.arabic_hello_utf8)
        if six.PY2:
            self.assertIsInstance(ava1.attr, unicode)
            self.assertIsInstance(ava1.value, unicode)
            self.assertEqual(ava1.value, self.arabic_hello_unicode)
            self.assert_equal_utf8(ava1, b'cn=' + self.arabic_hello_utf8)
        # RDN's
        # test attr i18n
        rdn1 = RDN((self.arabic_hello_unicode, 'foo'))
        self.assertIsInstance(rdn1.attr, unicode)
        self.assertIsInstance(rdn1.value, unicode)
        self.assertEqual(rdn1.attr, self.arabic_hello_unicode)
        self.assert_equal_utf8(rdn1, self.arabic_hello_utf8 + b'=foo')
        with self.fail_py3(TypeError):
            rdn1 = RDN((self.arabic_hello_utf8, 'foo'))
        if six.PY2:
            self.assertIsInstance(rdn1.attr, unicode)
            self.assertIsInstance(rdn1.value, unicode)
            self.assertEqual(rdn1.attr, self.arabic_hello_unicode)
            self.assertEqual(str(rdn1), self.arabic_hello_utf8 + b'=foo')
        # test value i18n
        rdn1 = RDN(('cn', self.arabic_hello_unicode))
        self.assertIsInstance(rdn1.attr, unicode)
        self.assertIsInstance(rdn1.value, unicode)
        self.assertEqual(rdn1.value, self.arabic_hello_unicode)
        self.assert_equal_utf8(rdn1, b'cn=' + self.arabic_hello_utf8)
        with self.fail_py3(TypeError):
            rdn1 = RDN(('cn', self.arabic_hello_utf8))
        if six.PY2:
            self.assertIsInstance(rdn1.attr, unicode)
            self.assertIsInstance(rdn1.value, unicode)
            self.assertEqual(rdn1.value, self.arabic_hello_unicode)
            self.assertEqual(str(rdn1), b'cn=' + self.arabic_hello_utf8)
        # DN's
        # test attr i18n
        dn1 = DN((self.arabic_hello_unicode, 'foo'))
        self.assertIsInstance(dn1[0].attr, unicode)
        self.assertIsInstance(dn1[0].value, unicode)
        self.assertEqual(dn1[0].attr, self.arabic_hello_unicode)
        self.assert_equal_utf8(dn1, self.arabic_hello_utf8 + b'=foo')
        with self.fail_py3(TypeError):
            dn1 = DN((self.arabic_hello_utf8, 'foo'))
        if six.PY2:
            self.assertIsInstance(dn1[0].attr, unicode)
            self.assertIsInstance(dn1[0].value, unicode)
            self.assertEqual(dn1[0].attr, self.arabic_hello_unicode)
            self.assertEqual(str(dn1), self.arabic_hello_utf8 + b'=foo')
        # test value i18n
        dn1 = DN(('cn', self.arabic_hello_unicode))
        self.assertIsInstance(dn1[0].attr, unicode)
        self.assertIsInstance(dn1[0].value, unicode)
        self.assertEqual(dn1[0].value, self.arabic_hello_unicode)
        self.assert_equal_utf8(dn1, b'cn=' + self.arabic_hello_utf8)
        with self.fail_py3(TypeError):
            dn1 = DN(('cn', self.arabic_hello_utf8))
        if six.PY2:
            self.assertIsInstance(dn1[0].attr, unicode)
            self.assertIsInstance(dn1[0].value, unicode)
            self.assertEqual(dn1[0].value, self.arabic_hello_unicode)
            self.assertEqual(str(dn1), b'cn=' + self.arabic_hello_utf8)
# Allow running this module's tests directly.
if __name__ == '__main__':
    unittest.main()
| apophys/freeipa | ipatests/test_ipapython/test_dn.py | Python | gpl-3.0 | 48,955 |
import requests
import os
def post_var(token, ubi_source, ubi_var, value, timestamp=None, context=None):
    '''
    This function posts data to ubidots
    Input parameters:
        token: The Ubidots token
        ubi_source: The name of the user's Ubidots datasource
        ubi_var: The name of the user's Ubidots variable
        value: Value to be sent
        timestamp: Optional, for custom timestamp
        context: Optional, for custom context
    Returns None on success.
    NOTE(review): on failure the exception object is *returned* rather than
    raised, so callers must inspect the return value to detect errors --
    confirm callers rely on this before changing it.
    '''
    try:
        # UBIDOTS_URL env var overrides the default public endpoint.
        url = os.getenv("UBIDOTS_URL") if os.getenv("UBIDOTS_URL") is not None else "http://things.ubidots.com/"
        url = url + "api/v1.6/devices/" + ubi_source + "/"
        headers = {"X-Auth-Token": token,
                   "Content-Type":"application/json"}
        data = {ubi_var:{"value": value}}
        if (timestamp!=None):
            data[ubi_var]["timestamp"]=timestamp
        if (context!=None):
            data[ubi_var]["context"]=context
        # Fire-and-forget: the HTTP response is intentionally ignored.
        r = requests.post(url=url, headers=headers, json= data)
    except Exception, e:
        return e
def get_var(token, ubi_source, ubi_var, value, timestamp=None, context=None):
'''
This function gets data from ubidots
Input parameters:
token: The Ubidots token
ubi_source: The name of the user's Ubidots datasource
ubi_var: The name of the user's Ubidots variable
Return:
Returns in this order the next parameters: value, timestamp, context
'''
try:
url = os.getenv("UBIDOTS_URL") if os.getenv("UBIDOTS_URL") is not None else "http://things.ubidots.com/"
url = url + "api/v1.6/devices/" + ubi_source + "/" + ubi_var + "/values?page_size=1"
headers = {"X-Auth-Token": token,
"Content-Type":"application/json"}
r = requests.get(url=url, headers=headers)
return r.json()[0]['value'], r.json()[0]['timestamp'], r.json()[0]['context']
except Exception as e:
return e
| jotathebest/ubi-tools | src/connect.py | Python | gpl-3.0 | 1,710 |
#!/usr/bin/env python2
from __future__ import division
import os
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import GObject as gobject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Notify
from gi.repository import AppIndicator3 as appindicator
from time import time
from math import floor
Gdk.threads_init()
class TomateConfig(object):
    """Tunable timing constants for the pomodoro state machine."""
    #Parameters
    MIN_WORK_TIME = 60 * 10 # min work time in seconds
    DONE_WORK_TIME = 60 * 25 # finished work time in seconds
    POLL_INTERVAL = 5 # polling interval in seconds
class States(object):
    """Enumeration of the pomodoro timer states."""
    IDLE = 1
    WORKING = 2
    OK = 3
    DONE = 4
# Human-readable label per state, used in notification titles.
STATE_MESSAGES = {
    States.IDLE : 'Idle',
    States.WORKING : 'Working',
    States.OK : 'Ok',
    States.DONE : 'Done'
}
# Icon basename per state; resolved to img/<name>.png next to this file.
STATE_ICONS = {
    States.IDLE : 'idle',
    States.WORKING : 'working',
    States.OK : 'ok',
    States.DONE : 'done'
}
class Pomodoro:
    """AppIndicator-based pomodoro timer.

    State machine: IDLE -> WORKING (user clicks Start) -> OK (after
    MIN_WORK_TIME of work) -> DONE (after DONE_WORK_TIME) -> IDLE.
    """

    def __init__(self):
        # we start with an idle state
        self.state = States.IDLE
        self.tick_interval = TomateConfig.POLL_INTERVAL  # seconds
        self.start_working_time = 0

    def init_ui(self):
        """Initialise notifications, the indicator icon and its menu."""
        Notify.init("Tomate")
        self.ind = self.build_indicator()
        menu = self.build_menu()
        self.ind.set_menu(menu)

    def build_indicator(self):
        """Return an active app indicator showing the current state's icon."""
        ind = appindicator.Indicator.new(
            "Tomate",
            self.get_icon(self.state),
            appindicator.IndicatorCategory.APPLICATION_STATUS)
        ind.set_status(appindicator.IndicatorStatus.ACTIVE)
        return ind

    def build_menu(self):
        """Build the indicator menu: a Start/Stop toggle plus a Quit entry."""
        menu = Gtk.Menu()

        self.st_menu = Gtk.MenuItem("Start")
        self.st_menu.connect('activate', self.icon_click)
        menu.append(self.st_menu)

        quit_item = Gtk.ImageMenuItem("Quit")
        quit_img = Gtk.Image.new_from_stock(Gtk.STOCK_QUIT, Gtk.IconSize.MENU)
        quit_item.set_image(quit_img)
        quit_item.connect('activate', Gtk.main_quit)
        menu.append(quit_item)

        menu.show_all()
        return menu

    def get_icon(self, state):
        """Return the icon file path for *state*."""
        # os.path.join avoids the doubled separator the old
        # icon_directory() + "/img/" concatenation produced.
        return os.path.join(self.icon_directory(), "img",
                            STATE_ICONS[state] + ".png")

    def format_time(self, seconds):
        """Render *seconds* as a human readable duration string."""
        if seconds < 60:
            return "%d seconds" % seconds
        minutes = floor(seconds / 60)
        hours = floor(minutes / 60)
        days = floor(hours / 24)
        d_string = ''
        h_string = ''
        m_string = ''
        if minutes < 60:
            if minutes > 1:
                return "%d minutes" % minutes
            else:
                return "1 minute"
        if days > 0:
            hours = hours - (days * 24)
            minutes = minutes - (days * 24 * 60)
            if days == 1:
                d_string = "1 day "
            else:
                d_string = "%d day%s " % (days, 's')
        if hours > 0:
            minutes = minutes - (hours * 60)
            if hours == 1:
                h_string = '1 hour '
            else:
                h_string = "%d hours " % hours
        if minutes > 0:
            if minutes == 1:
                m_string = 'and 1 minute'
            else:
                m_string = "and %d minutes" % minutes
        return d_string + h_string + m_string

    def set_state(self, state, time):
        """Transition to *state* at wall-clock *time*, updating the icon,
        menu label, tooltip and desktop notification."""
        old_state = self.state
        if self.state == state:
            return
        if state == States.IDLE:
            delta = time - self.start_working_time
            self.st_menu.set_label("Start")
            if old_state == States.OK:
                self.tooltip = "Good, you worked for " + self.format_time(delta) + "!"
            elif old_state == States.WORKING:
                self.tooltip = "Not good: worked for only " + self.format_time(delta)
            elif old_state == States.DONE:
                # BUG FIX: the old backslash line-continuation leaked source
                # indentation into the middle of this message.
                self.tooltip = ("Good, you worked for " + self.format_time(delta)
                                + "! Time for a break!")
        elif state == States.WORKING:
            self.start_working_time = time
            delta = time - self.start_working_time
            self.tooltip = "Working for " + self.format_time(delta) + "..."
            self.st_menu.set_label("Working for %s... stop" % self.format_time(delta))
        elif state == States.OK:
            delta = time - self.start_working_time
            self.tooltip = "Good, you worked for " + self.format_time(delta) + "!"
        elif state == States.DONE:
            self.tooltip = "Worked enough, take a break!"

        self.state = state
        self.ind.set_icon(self.get_icon(state))
        self.show_notification(self.state, self.tooltip)

    def show_notification(self, state, notification):
        """Best-effort desktop notification; failures never crash the timer."""
        try:
            nw = Notify.Notification.new("Tomate state changed to " +
                                         STATE_MESSAGES[state],
                                         notification, self.get_icon(state))
            nw.show()
        except Exception:
            # Notifications are non-essential.
            pass

    def icon_directory(self):
        """Directory containing this script (with a trailing separator)."""
        return os.path.dirname(os.path.realpath(__file__)) + os.path.sep

    def icon_click(self, dummy):
        """Menu callback: toggle between IDLE and WORKING."""
        if self.state == States.IDLE:
            self.set_state(States.WORKING, time())
        else:
            self.set_state(States.IDLE, time())

    def update(self, time):
        """This method is called everytime a tick interval occurs."""
        delta = time - self.start_working_time
        if self.state != States.IDLE:
            self.st_menu.set_label("Working for %s... stop" % self.format_time(delta))
        if self.state == States.WORKING:
            if delta > TomateConfig.MIN_WORK_TIME:
                self.set_state(States.OK, time)
        elif self.state == States.OK:
            if delta > TomateConfig.DONE_WORK_TIME:
                self.set_state(States.DONE, time)

    def tick(self):
        """Periodic callback: refresh state, then re-arm the timer."""
        self.update(time())
        # timeout_add takes milliseconds.
        gobject.timeout_add(self.tick_interval * 1000, self.tick)

    def main(self):
        """Arm the first tick, build the UI and enter the GTK main loop."""
        # BUG FIX: timeout_add expects milliseconds; passing the raw seconds
        # value made the very first tick fire after ~5 ms instead of ~5 s.
        gobject.timeout_add(self.tick_interval * 1000, self.tick)
        self.init_ui()
        Gtk.main()
# If the program is run directly or passed as an argument to the python
# interpreter then create a Pomodoro instance and run its GTK main loop.
if __name__ == "__main__":
    app = Pomodoro()
    app.main()
| bcostea/tomate2 | tomate2.py | Python | mpl-2.0 | 5,644 |
#!/usr/bin/env python
import os.path
# Mozharness config: B2G "Spark" dogfood nightly builds run on Taskcluster.
config = {
    "default_vcs": "tc-vcs",
    # Build pipeline stages executed by the b2g build script.
    "default_actions": [
        'checkout-sources',
        'build',
        'build-symbols',
        'make-updates',
        'prep-upload',
        'submit-to-balrog'
    ],
    "balrog_credentials_file": "balrog_credentials",
    "nightly_build": True,
    # Environment exported to the build; updates go to the dogfood channel.
    "env": {
        "GAIA_OPTIMIZE": "1",
        "B2G_UPDATER": "1",
        "LIGHTSABER": "1",
        "DOGFOOD": "1",
        "B2G_UPDATE_CHANNEL": "dogfood",
        "BOWER_FLAGS": "--allow-root",
        "B2G_PATH": "%(work_dir)s",
        "GAIA_DISTRIBUTION_DIR": "%(work_dir)s/gaia/distros/spark",
        "WGET_OPTS": "-c -q"
    },
    "is_automation": True,
    # Map upstream git remotes to their git.mozilla.org mirrors.
    "repo_remote_mappings": {
        'https://android.googlesource.com/': 'https://git.mozilla.org/external/aosp',
        'git://codeaurora.org/': 'https://git.mozilla.org/external/caf',
        'https://git.mozilla.org/b2g': 'https://git.mozilla.org/b2g',
        'git://github.com/mozilla-b2g/': 'https://git.mozilla.org/b2g',
        'git://github.com/mozilla/': 'https://git.mozilla.org/b2g',
        'https://git.mozilla.org/releases': 'https://git.mozilla.org/releases',
        'http://android.git.linaro.org/git-ro/': 'https://git.mozilla.org/external/linaro',
        'git://github.com/apitrace/': 'https://git.mozilla.org/external/apitrace',
    },
}
| lissyx/build-mozharness | configs/b2g/taskcluster-spark-dogfood.py | Python | mpl-2.0 | 1,365 |
import os
import unittest
import mozharness.base.python as python
here = os.path.dirname(os.path.abspath(__file__))
class TestVirtualenvMixin(unittest.TestCase):
    def test_package_versions(self):
        """package_versions() parses `pip freeze` output into name->version."""
        example = os.path.join(here, 'pip-freeze.example.txt')
        # BUG FIX: use open() in a context manager instead of the deprecated
        # file() builtin, which also leaked the file handle.
        with open(example) as f:
            output = f.read()

        mixin = python.VirtualenvMixin()
        packages = mixin.package_versions(output)

        # from the file
        expected = {'MakeItSo': '0.2.6',
                    'PyYAML': '3.10',
                    'Tempita': '0.5.1',
                    'WebOb': '1.2b3',
                    'coverage': '3.5.1',
                    'logilab-astng': '0.23.1',
                    'logilab-common': '0.57.1',
                    'mozdevice': '0.2',
                    'mozhttpd': '0.3',
                    'mozinfo': '0.3.3',
                    'nose': '1.1.2',
                    'pyflakes': '0.5.0',
                    'pylint': '0.25.1',
                    'virtualenv': '1.7.1.2',
                    'wsgiref': '0.1.2'}

        self.assertEqual(packages, expected)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| ctalbert/mozharness | test/test_base_python.py | Python | mpl-2.0 | 1,129 |
import smtplib
import email.utils
from email.mime.text import MIMEText
import functools
import qiniu
# Fall back to a hard-coded host when the local config module is missing.
try:
    from config import AIRBB_HOST
except Exception, e:
    print '==============no AIRBB_HOST set, ues airbb.xx as default==============', e
    AIRBB_HOST = 'airbb.ml'
AIRBB_SUPPORT = 'support@%s' % AIRBB_HOST
def makes(s):
    """Coerce a unicode object to a UTF-8 byte string; anything else is
    returned untouched (Python 2 helper)."""
    if type(s) != unicode:
        return s
    return s.encode('utf8', 'ignore')
def makeu(s):
    """Coerce a byte string to unicode; anything else is returned
    untouched (Python 2 helper)."""
    return s.decode('utf8', 'ignore') if type(s) == str else s
def ex(func):
    """Decorator: swallow and print any exception raised by *func*.

    NOTE(review): on failure the wrapper returns None to the caller;
    presumably email sending is best-effort -- confirm callers tolerate
    a None result before changing this.
    """
    @functools.wraps(func)
    def foo(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception, e:
            print '===================send email ex================', e
    return foo
@ex
def send_email(title, content, addr_to, addr_fr=None):
# Create the message
if not addr_fr:
addr_fr = ALANCER_SUPPORT
msg = MIMEText(makes(content), 'html')
msg['To'] = email.utils.formataddr(('Recipient', addr_to))
msg['From'] = email.utils.formataddr(('Airbb', addr_fr))
msg['Subject'] = title
try:
server = smtplib.SMTP('localhost')
except Exception, e:
print 'no SMTP service available', e
return
#server.set_debuglevel(True) # show communication with the server
try:
server.sendmail(addr_fr, [addr_to], msg.as_string())
finally:
server.quit()
| txdywy/airbb | misc/util.py | Python | mpl-2.0 | 1,425 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
from atmo.stats.models import Metric
def test_metrics_record(now, one_hour_ago):
    """Metric.record() stores key, value, creation time and data with
    sensible defaults (value=1, created_at=now, data=None)."""
    Metric.record("metric-key-1")
    Metric.record("metric-key-2", 500)
    Metric.record("metric-key-3", data={"other-value": "test"})
    Metric.record("metric-key-4", created_at=one_hour_ago, data={"other-value-2": 100})

    expectations = [
        ("metric-key-1", 1, now, None),
        ("metric-key-2", 500, now, None),
        ("metric-key-3", 1, now, {"other-value": "test"}),
        ("metric-key-4", 1, one_hour_ago, {"other-value-2": 100}),
    ]
    for key, value, created, data in expectations:
        m = Metric.objects.get(key=key)
        assert m.value == value
        # sub-second precision is not part of the contract
        assert m.created_at.replace(microsecond=0) == created
        assert m.data == data
| mozilla/telemetry-analysis-service | tests/test_stats.py | Python | mpl-2.0 | 1,163 |
from django.conf.urls import include, patterns, url
from django.views.generic.base import RedirectView
from mdn.urls import mdn_urlpatterns
from webplatformcompat.routers import router
from .views import RequestView, ViewFeature
# URL routes for the webplatformcompat app: static Jinja2 pages, the
# versioned REST API, the MDN importer, and the view_feature JS page.
webplatformcompat_urlpatterns = patterns(
    '',
    # Static, request-rendered pages.
    url(r'^$', RequestView.as_view(
        template_name='webplatformcompat/home.jinja2'),
        name='home'),
    url(r'^about/', RequestView.as_view(
        template_name='webplatformcompat/about.jinja2'),
        name='about'),
    url(r'^browse/', RequestView.as_view(
        template_name='webplatformcompat/browse.jinja2'),
        name='browse'),
    # DRF login/logout for the browsable API.
    url(r'^api-auth/', include('rest_framework.urls',
                               namespace='rest_framework')),
    # Bare /api redirects to the current API version.
    url(r'^api/$', RedirectView.as_view(url='/api/v1/', permanent=False),
        name='api_root'),
    url(r'^api/v1/', include(router.urls)),
    # Normalise /importer to /importer/ before handing off to the importer.
    url(r'^importer$', RedirectView.as_view(
        url='/importer/', permanent=False)),
    url(r'^importer/', include(mdn_urlpatterns)),
    url(r'^view_feature/(?P<feature_id>\d+)(.html)?$', ViewFeature.as_view(
        template_name='webplatformcompat/feature.js.jinja2'),
        name='view_feature'),
)
| renoirb/browsercompat | webplatformcompat/urls.py | Python | mpl-2.0 | 1,187 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mock
import os
from configman import ConfigurationManager
from socorro.external.elasticsearch import crashstorage
from socorro.middleware.middleware_app import MiddlewareApp
from socorro.unittest.testbase import TestCase
class ElasticSearchTestCase(TestCase):
    """Base class for Elastic Search related unit tests."""

    def get_config_context(self, es_index=None):
        """Return a configman config for the ES integration tests.

        *es_index* optionally overrides the integration-test index name.
        """
        fake_logger = mock.Mock()

        storage_required = \
            crashstorage.ElasticSearchCrashStorage.get_required_config()
        middleware_required = MiddlewareApp.get_required_config()
        middleware_required.add_option('logger', default=fake_logger)

        overrides = {
            'logger': fake_logger,
            'resource.elasticsearch.elasticsearch_default_index':
                'socorro_integration_test',
            'resource.elasticsearch.elasticsearch_index':
                'socorro_integration_test',
            'resource.elasticsearch.backoff_delays': [1],
            'resource.elasticsearch.elasticsearch_timeout': 5,
            'resource.postgresql.database_name': 'socorro_integration_test',
        }
        if es_index:
            overrides['resource.elasticsearch.elasticsearch_index'] = es_index

        manager = ConfigurationManager(
            [storage_required, middleware_required],
            app_name='testapp',
            app_version='1.0',
            app_description='app description',
            values_source_list=[os.environ, overrides],
            argv_source=[],
        )
        return manager.get_config()
| bsmedberg/socorro | socorro/unittest/external/elasticsearch/unittestbase.py | Python | mpl-2.0 | 1,754 |
def get (request, query):
    """Return every badge in the chicago system, hiding archived badges
    unless the query asks for archived=any."""
    import badge
    import archived
    from ..issuers.library.badges import library
    from ..issuers.library.programs.mit.badges import scratch

    sources = (badge, archived, library, scratch)
    badges = [source.get(request, query)['badge'] for source in sources]

    if query.get('archived') != 'any':
        badges = [b for b in badges if not b['archived']]

    return {'badges': badges}
def post (request, query):
    """Create a badge from the request payload.

    Returns a (body, 400) tuple when required fields are missing,
    otherwise a {'status': 'created', 'badge': ...} response.
    """
    badge = request['data']

    errors = validateBadge(badge)
    if errors:
        return ({
            'message': 'Could not validate required fields',
            'code': 'ValidationError',
            'details': [{'field': field, 'value': ''} for field in errors]
        }, 400)

    # Fixture values the (stub) server fills in on creation.
    system = {
        'id': 1,
        'slug': 'chicago',
        'url': 'http://cityofchicago.org',
        'name': 'Chicago',
        'email': 'mayor-emanuel@cityofchicago.org',
        'imageUrl': None,
        'issuers': []
    }
    created = {
        'id': 99,
        'created': '2014-06-17T13:14:33.000Z',
        'archived': False,
        'system': system,
        'criteria': [],
        'categories': [],
        'tags': [],
        'milestones': []
    }

    # Everything else is echoed back from the submitted payload.
    echoed = ('slug', 'name', 'strapline', 'earnerDescription',
              'consumerDescription', 'issuerUrl', 'rubricUrl', 'timeValue',
              'timeUnits', 'limit', 'unique', 'imageUrl', 'type',
              'criteriaUrl')
    for field in echoed:
        created[field] = badge.get(field)

    return {'status': 'created', 'badge': created}
def validateBadge (badge):
    """Return the required field names absent from *badge*, in declaration
    order; an empty list means the badge is valid."""
    return [field for field in required_fields if field not in badge]

# Fields a badge payload must carry to be accepted by post().
required_fields = ('name', 'slug', 'earnerDescription', 'consumerDescription',
                   'criteriaUrl', 'unique', 'image', 'type',)
| mozilla/badgekit-api-python-client | test/fixtures/systems/chicago/badges/__init__.py | Python | mpl-2.0 | 2,435 |
# Generated by Django 3.2.5 on 2021-08-17 19:45
from django.db import migrations, models
# Auto-generated Django migration: make Tariff.access_zones optional
# (blank=True on the many-to-many to FareZone).
class Migration(migrations.Migration):

    dependencies = [
        ('fares', '0003_auto_20210602_1534'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tariff',
            name='access_zones',
            field=models.ManyToManyField(blank=True, to='fares.FareZone'),
        ),
    ]
| jclgoodwin/bustimes.org.uk | fares/migrations/0004_alter_tariff_access_zones.py | Python | mpl-2.0 | 414 |
from unittest import TestCase
import neuropsydia as n
n.start(open_window=False)
class TestColor(TestCase):
    """Smoke test for neuropsydia's color() helper."""
    def test_is_string(self):
        # the "w" shorthand should resolve to an RGB(A) tuple, not a string
        c = n.color("w")
        self.assertTrue(isinstance(c, tuple))
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys
from time import time
from mo_future import text_type
from mo_logs import Log
from mo_logs.log_usingNothing import StructuredLogger
from mo_logs.strings import expand_template
from mo_threads import Thread, THREAD_STOP, Till
# When True, failures inside the logger itself are re-raised for debugging
# instead of being swallowed.
DEBUG_LOGGING = False


class StructuredLogger_usingThreadedStream(StructuredLogger):
    """Logger that batches records and writes them to a stream from a
    worker thread (stream writes can be *really* slow)."""

    # stream CAN BE AN OBJECT WITH write() METHOD, OR A STRING
    # WHICH WILL eval() TO ONE
    def __init__(self, stream):
        assert stream

        use_UTF8 = False

        if isinstance(stream, text_type):
            if stream.startswith("sys."):
                use_UTF8 = True  # sys.* ARE OLD AND CAN NOT HANDLE unicode
            # NOTE(review): eval() of a configured name -- only safe for
            # trusted configuration values such as "sys.stdout".
            self.stream = eval(stream)
            name = stream
        else:
            self.stream = stream
            name = "stream"

        # WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
        from mo_threads import Queue

        if use_UTF8:
            def utf8_appender(value):
                # old sys.* streams want encoded bytes, not unicode
                if isinstance(value, text_type):
                    value = value.encode('utf8')
                self.stream.write(value)

            appender = utf8_appender
        else:
            appender = self.stream.write

        self.queue = Queue("queue for " + self.__class__.__name__ + "(" + name + ")", max=10000, silent=True)
        self.thread = Thread("log to " + self.__class__.__name__ + "(" + name + ")", time_delta_pusher, appender=appender, queue=self.queue, interval=0.3)
        self.thread.parent.remove_child(self.thread)  # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
        self.thread.start()

    def write(self, template, params):
        """Queue one record; actual writing happens on the worker thread."""
        try:
            self.queue.add({"template": template, "params": params})
            return self
        except Exception as e:
            raise e  # OH NO!

    def stop(self):
        """Flush remaining records, stop the worker and close the queue."""
        try:
            self.queue.add(THREAD_STOP)  # BE PATIENT, LET REST OF MESSAGE BE SENT
            self.thread.join()
        except Exception as e:
            if DEBUG_LOGGING:
                raise e

        try:
            self.queue.close()
        except Exception as f:
            if DEBUG_LOGGING:
                raise f
def time_delta_pusher(please_stop, appender, queue, interval):
    """
    appender - THE FUNCTION THAT ACCEPTS A STRING
    queue - FILLED WITH LOG ENTRIES {"template":template, "params":params} TO WRITE
    interval - SECONDS BETWEEN BATCHED WRITES

    USE IN A THREAD TO BATCH LOGS BY TIME INTERVAL
    """
    next_run = time() + interval

    while not please_stop:
        (Till(till=next_run) | please_stop).wait()
        next_run = time() + interval
        logs = queue.pop_all()
        if not logs:
            continue

        lines = []
        for log in logs:
            try:
                if log is THREAD_STOP:
                    please_stop.go()
                    next_run = time()  # flush immediately on shutdown
                else:
                    expanded = expand_template(log.get("template"), log.get("params"))
                    lines.append(expanded)
            except Exception as e:
                location = log.get('params', {}).get('location', {})
                Log.warning("Trouble formatting log from {{location}}", location=location, cause=e)
                # SWALLOW ERROR, GOT TO KEEP RUNNING
        try:
            appender(u"\n".join(lines) + u"\n")
        except Exception as e:
            # BUG FIX: the old b"..." + str(...) concatenation raised
            # TypeError on Python 3, masking the appender's real failure.
            sys.stderr.write(u"Trouble with appender: " + text_type(e.__class__.__name__) + u"\n")
            # SWALLOW ERROR, MUST KEEP RUNNING
| klahnakoski/MySQL-to-S3 | vendor/mo_logs/log_usingThreadedStream.py | Python | mpl-2.0 | 3,886 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-25 21:58
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration: drop Evidence.student and make
# Evidence.done default to True.
class Migration(migrations.Migration):

    dependencies = [
        ('core', '0004_auto_20170716_0901'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='evidence',
            name='student',
        ),
        migrations.AlterField(
            model_name='evidence',
            name='done',
            field=models.BooleanField(default=True),
        ),
    ]
| shmish/core-assess | core/migrations/0005_auto_20170725_1458.py | Python | mpl-2.0 | 556 |
#!/usr/bin/python
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# Builds HTML documentation from the files in the boards directory
# ----------------------------------------------------------------------------------------
import subprocess;
import re;
import json;
import sys;
import os;
import importlib;
scriptdir = os.path.dirname(os.path.realpath(__file__))
basedir = scriptdir+"/../"
# Make the in-tree helper modules and board definitions importable.
sys.path.append(basedir+"scripts");
sys.path.append(basedir+"boards");
import pinutils;
# -----------------------------------------------------------------------------------------
# Now scan AF file
print "Script location "+scriptdir
#if len(sys.argv)!=3:
#  print "ERROR, USAGE: build_board_docs.py BOARD_NAME HTML_FILENAME"
#  exit(1)
if len(sys.argv)!=2:
  print "ERROR, USAGE: build_board_docs.py BOARD_NAME"
  exit(1)
boardname = sys.argv[1]
#htmlFilename = sys.argv[2]
# Output path is derived from the board name.
htmlFilename = "boards/"+boardname+".html"
print "HTML_FILENAME "+htmlFilename
print "BOARD "+boardname
# import the board def
board = importlib.import_module(boardname)
# Call the included board_specific file - it sets up 'pins' and 'fill_gaps'
pins = board.get_pins()
pins = pinutils.append_devices_to_pin_list(pins, board)
# -----------------------------------------------------------------------------------------
# Strip the leading 'P' from port-style pin names (e.g. PA0 -> A0).
for pin in pins:
 if pin["name"][0] == 'P':
  pin["name"] = pin["name"][1:];
# Optional board-level alias map from user-facing names to real pins.
pinmap = {};
if '_pinmap' in board.board:
 pinmap = board.board['_pinmap'];
# -----------------------------------------------------------------------------------------
# Open the output HTML file; writeHTML appends one line (plus newline) to it.
htmlFile = open(htmlFilename, 'w')
def writeHTML(s): htmlFile.write(s+"\n");
def dump_pin(pin, pinstrip):
  """Emit the HTML <DIV> for one pin on strip *pinstrip*: the pin name plus
  one coloured <SPAN> per function (SPI/ADC/USART/...) with tooltips."""
  # Resolve board-level pin aliases first.
  if pin in pinmap:
    pin = pinmap[pin];
  pininfo = pinutils.findpin(pins, pin, False)
  not_five_volt = False
#  print(json.dumps(pininfo))
  # I/O pins whose CSV 'IO' column is not 'FT' (five-volt tolerant) get
  # a 3.3v warning badge.
  if ("csv" in pininfo) and ("IO" in pininfo["csv"]) and ("Type" in pininfo["csv"]) and (pininfo["csv"]["Type"]=="I/O") and (pininfo["csv"]["IO"]!="FT") :
    not_five_volt = True
  writeHTML('    <DIV class="'+pinstrip+'pin pin">');
  pinHTML = '     <SPAN class="pinname">'+pin+"</SPAN>";
  pinHTML2 = '';
  if not_five_volt:
    pinHTML2 += '<SPAN class="pinfunction NOT_5V" title="Not 5v Tolerant">3.3v</SPAN>';
  # Left-hand strips render name last so labels read towards the board.
  reverse = pinstrip=="left" or pinstrip=="right2";
  if not reverse: writeHTML(pinHTML+"\n"+pinHTML2)
  pinfuncs = {}
  for func in sorted(pininfo["functions"]):
#    writeHTML('     '+func)
    if func in pinutils.CLASSES:
      funcdata = str(pininfo["functions"][func])
      cls = pinutils.CLASSES[func]
      name = cls
      title = func
      if cls=="I2C" or cls=="SPI" or cls=="USART": name=func.replace("_"," ")
      if cls=="DEVICE" and funcdata[:4]=="pin_":
        title = title + " ("+funcdata[4:]+")";
#        print title
      if func in pinutils.NAMES: name = pinutils.NAMES[func]
      writeHTML('<!-- '+func+' -->')
      # Merge functions that share a display name into one badge.
      if name in pinfuncs:
        pinfuncs[name]["title"] = pinfuncs[name]["title"] + " " + title
      else:
        pinfuncs[name] = { 'cls': cls, 'title': "["+pin+"] "+title, 'name': name, 'id': pin+"_"+func, 'func' : func };
  # Emit badges grouped by class; link to docs when a URL is known.
  for func in sorted(pinfuncs.items(),key=lambda x: x[1]['cls']):
    pf = func[1]
    url = False
    if pf["cls"] in pinutils.URLS: url = pinutils.URLS[pf["cls"]]
    if pf["func"] in pinutils.URLS: url = pinutils.URLS[pf["func"]]
    if url != False: writeHTML('     <A href="'+url+'" class="pinfunctionlink">');
    writeHTML('      <SPAN class="pinfunction '+pf["cls"]+'" title="'+pf["title"]+'" onMouseOver="showTT(\''+pf["id"]+'\')" onMouseOut="hideTT(\''+pf["id"]+'\')">'+pf["name"]+'</SPAN>')
    if url != False: writeHTML('     </A>');
    writeHTML('     <SPAN class="pintooltip" id="'+pf["id"]+'" style="display:none;">'+pf["title"]+'</SPAN>')
  if reverse: writeHTML(pinHTML2+"\n"+pinHTML)
  writeHTML('    </DIV>')
# Emit the static page head and the stylesheet shared by all boards.
writeHTML("""
<HTML>
 <HEAD>
  <STYLE>
   #boardcontainer { position: relative; }
   #board {
     position: absolute;
     background-size: 100% auto; # width and height, can be %, px or whatever.
   }
   .pin { padding: 1px; height: 20px; }
   .pinname {
     background-color: #FFF;
     border:1px solid black;
     padding-left: 2px;
     padding-right: 2px;
     font-weight: bold;
   }
   .pinfunction {
     border:1px solid black;
     border-radius:3px;
     padding-left: 2px;
     padding-right: 2px;
   }
   .pinfunctionlink {
     color : black;
     text-decoration: none;
   }
   .pintooltip {
     background-color: #FFD;
     border:1px solid black;
     padding-left: 2px;
     padding-right: 2px;
     font-weight: bold;
     position: absolute;
     z-index: 100;
   }
   .SPI { background-color: #8F8; }
   .ADC { background-color: #88F; }
   .DAC { background-color: #0CC; }
   .PWM { background-color: #8FF; }
   .USART { background-color: #FF8; }
   .CAN { background-color: #8CC; }
   .I2C { background-color: #F88; }
   .DEVICE { background-color: #F8F; }
   .NOT_5V { background-color: #FDD; }
   #top { white-space: nowrap; }
   #top2 { white-space: nowrap; }
   #bottom { white-space: nowrap; }
   #bottom2 { white-space: nowrap; }
   #left { text-align:right; }
   #right2 { text-align:right; }
   .toppin {
     -webkit-transform: rotate(-90deg);
     -moz-transform: rotate(-90deg);
     -ms-transform: rotate(-90deg);
     -o-transform: rotate(-90deg);
     transform: rotate(-90deg);
     display: inline-block;
     width: 20px;
   }
   .top2pin {
     -webkit-transform: rotate(90deg);
     -moz-transform: rotate(90deg);
     -ms-transform: rotate(90deg);
     -o-transform: rotate(90deg);
     transform: rotate(90deg);
     display: inline-block;
     width: 20px;
   }
   .bottompin {
     -webkit-transform: rotate(90deg);
     -moz-transform: rotate(90deg);
     -ms-transform: rotate(90deg);
     -o-transform: rotate(90deg);
     transform: rotate(90deg);
     display: inline-block;
     width: 20px;
   }
   .bottom2pin {
     -webkit-transform: rotate(-90deg);
     -moz-transform: rotate(-90deg);
     -ms-transform: rotate(-90deg);
     -o-transform: rotate(-90deg);
     transform: rotate(-90deg);
     display: inline-block;
     width: 20px;
   }
   .line {
     height:2px;background-color:red;position:absolute;
   }
   .line:hover {
     background-color:#FF00FF;
   }
""");
# Per-strip positioning rules, then any board-specific CSS overrides.
for pinstrip in board.board:
  writeHTML("   #"+pinstrip+" { position: absolute; }")
  writeHTML("   ."+pinstrip+"pin { white-space: nowrap; }")
writeHTML(board.board_css)
writeHTML("  </STYLE>"+'<script src="http://code.jquery.com/jquery-1.11.0.min.js"></script>')
writeHTML("""
<SCRIPT type="text/javascript">
function showTT(ttid) {
var e = document.getElementById(ttid);
e.style.display = "block";
}
function hideTT(ttid) {
var e = document.getElementById(ttid);
e.style.display = "none";
}
function drawLine(x1, y1, x2, y2, hover) {
if (x2 < x1) {
var temp = x1;
x1 = x2;
x2 = temp;
temp = y1;
y1 = y2;
y2 = temp;
}
var line = $('<div class="line" alt="'+hover+'"></div>').appendTo($("body"));
var length = Math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2));
line.css('width', length + "px");
var angle = Math.atan((y2 - y1) / (x2 - x1));
line.css('top', y1 + 0.5 * length * Math.sin(angle) + "px");
line.css('left', x1 - 0.5 * length * (1 - Math.cos(angle)) + "px");
line.css('-moz-transform', "rotate(" + angle + "rad)");
line.css('-webkit-transform', "rotate(" + angle + "rad)");
line.css('-o-transform', "rotate(" + angle + "rad)");
}
</SCRIPT>
""")
writeHTML(" </HEAD>")
writeHTML(" <BODY>")
writeHTML(' <H1>'+board.info["name"]+'</H1>')
writeHTML(' <!-- '+boardname+' -->')
if "link" in board.info:
for link in board.info["link"]:
writeHTML(' <P><a href=\"'+link+'\"" target="_blank">'+link+'</a></P>')
writeHTML(' <H2>Specifications</H2>')
writeHTML(' <TABLE style="margin-left:100px;">')
writeHTML(' <TR><TH width="256">Chip</TH><TD>'+board.chip['part']+'</TD></TR>')
writeHTML(' <TR><TH>Package</TH><TD>'+board.chip['package']+'</TD></TR>')
writeHTML(' <TR><TH>RAM</TH><TD>'+str(board.chip['ram'])+' kBytes</TD></TR>')
writeHTML(' <TR><TH>Flash</TH><TD>'+str(board.chip['flash'])+' kBytes</TD></TR>')
writeHTML(' <TR><TH>Speed</TH><TD>'+str(board.chip['speed'])+' Mhz</TD></TR>')
writeHTML(' <TR><TH>USARTs</TH><TD>'+str(board.chip['usart'])+'</TD></TR>')
writeHTML(' <TR><TH>SPIs</TH><TD>'+str(board.chip['spi'])+'</TD></TR>')
writeHTML(' <TR><TH>I2Cs</TH><TD>'+str(board.chip['i2c'])+'</TD></TR>')
writeHTML(' <TR><TH>USB</TH><TD>'+("Yes" if "USB" in board.devices else "No")+'</TD></TR>')
writeHTML(' <TR><TH>DACs</TH><TD>'+(str(board.chip['dac']) if board.chip['dac']>0 else "No")+'</TD></TR>')
writeHTML(' <TR><TH>SD Card</TH><TD>'+("Yes" if "SD" in board.devices else "No")+'</TD></TR>')
writeHTML(' </TABLE>')
writeHTML(' <P>Like this? Please tell your friends, blog, or <a href="http://www.espruino.com/Order">support us by buying our board</a>!</P>')
writeHTML(' <H2>Pinout</H2>')
writeHTML("""
<P>Hover the mouse over a pin function for more information. Clicking in a function will tell you how to use it in Espruino.</P>
<ul>
<li><span class="pinfunction DEVICE">Purple</span> boxes show pins that are used for other functionality on the board. You should avoid using these unless you know that the marked device is not used.</li>
<li><span class="pinfunction NOT_5V">3.3v</span> boxes mark pins that are not 5v tolerant (they only take inputs from 0 - 3.3v, not 0 - 5v).</li>
<li><span class="pinfunction">3.3</span> is a 3.3v output from the on-board Voltage regulator.</li>
<li><span class="pinfunction">GND</span> is ground (0v).</li>
<li><span class="pinfunction">VBAT</span> is the battery voltage output (see <a href="/EspruinoBoard">the Espruino Board Reference</a>).</li>
<li><span class="pinfunction ADC">ADC</span> is an <a href="/ADC">Analog to Digital Converter</a> (for reading analog voltages)</li>
<li><span class="pinfunction DAC">DAC</span> is a <a href="/DAC">Digital to Analog Converter</a> (for creating analog voltages). This is not available on all boards.</li>
<li><span class="pinfunction PWM">PWM</span> is for <a href="/PWM">Pulse Width Modulation</a>. This creates analog voltages from a digital output by sending a series of pulses.</li>
<li><span class="pinfunction SPI">SPI</span> is the 3 wire <a href="/SPI">Serial Peripheral Interface</a>.</li>
<li><span class="pinfunction USART">USART</span> is a 2 wire peripheral for <a href="/USART">Serial Data</a>.</li>
<li><span class="pinfunction I2C">I2C</span> is the 2 wire <a href="/I2C">Inter-Integrated Circuit</a> bus.</li>
<li><span class="pinfunction CAN">CAN</span> is for the <a href="http://en.wikipedia.org/wiki/CAN_bus">Controller Area Network</a>. It is only available on some devices and is not supported by Espruino.</li>
</ul>
""");
writeHTML(' <DIV id="boardcontainer">')
writeHTML(' <DIV id="board">')
usedpins = []
for pinstrip in board.board:
if pinstrip[0]!='_':
writeHTML(' <DIV id="'+pinstrip+'">')
for pin in board.board[pinstrip]:
usedpins.append(pin)
dump_pin(pin, pinstrip)
writeHTML(' </DIV>')
otherpins=0
for pinstruct in pins:
pin = pinstruct["name"]
if not pin in usedpins:
otherpins = otherpins + 1
writeHTML(' </DIV id="board">')
writeHTML(' </DIV id="boardcontainer">')
if otherpins>0:
writeHTML(' <DIV id="otherpins">')
writeHTML(' <H2>Pins not on connectors</H2>')
for pinstruct in pins:
pin = pinstruct["name"]
if not pin in usedpins:
dump_pin(pin, "otherpins")
writeHTML(' </DIV>')
writeHTML(' <P></P>')
#writeHTML('<SCRIPT type="text/javascript"> $(function() {');
#writeHTML('var x = $("#board").offset().left+500;');
#writeHTML('var y = $("#board").offset().top+200;');
#d = 12
#writeHTML('drawLine(x+'+str(-5*d)+',y+'+str(-5*d)+',x+'+str(5*d)+',y+'+str(-5*d)+');');
#writeHTML('drawLine(x+'+str(5*d)+',y+'+str(-5*d)+',x+'+str(5*d)+',y+'+str(5*d)+');');
#writeHTML('drawLine(x+'+str(5*d)+',y+'+str(5*d)+',x+'+str(-5*d)+',y+'+str(5*d)+');');
#writeHTML('drawLine(x+'+str(-5*d)+',y+'+str(5*d)+',x+'+str(-5*d)+',y+'+str(-5*d)+');');
#writeHTML('var p;');
#for pinstrip in board.board:
# if pinstrip[0]!='_':
# for pin in board.board[pinstrip]:
# if pin in pinmap:
# pin = pinmap[pin];
# pininfo = pinutils.findpin(pins, pin, False)
# if "UQFN48" in pininfo["csv"]:
# n = int(pininfo["csv"]["UQFN48"])-1
# n = (n+12) % 48
# if n<12:
# px = (n-6)*d
# py = -6*d
# elif n<24:
# px = 6*d
# py = ((n-12)-6)*d
# elif n<36:
# px = (6-(n-24))*d
# py = 6*d
# else:
# px = -6*d
# py = (6-(n-36))*d
#
# writeHTML("p=$('.pinname:contains(\""+pin+".\")');");
# pinx = "p.offset().left+p.width()/2";
# piny = "p.offset().top+p.height()/2";
# writeHTML('drawLine(x+'+str(px)+',y+'+str(py)+','+pinx+','+piny+', "'+pin+'");');
#writeHTML('});</SCRIPT>');
writeHTML(" </BODY>")
writeHTML("</HTML>")
| 0xPIT/Espruino | scripts/build_board_docs.py | Python | mpl-2.0 | 13,737 |
# Console messages used by the Therapist CLI when installing, running and
# uninstalling the git pre-commit hook.
#
# Tokens such as "#{red}", "#{yellow}", "#{green}", "#{bright}" and
# "#{reset_all}" appear to be color/style placeholders expanded by the CLI's
# output layer before printing -- note that MISCONFIGURED_MSG doubles its
# braces ("#{{red}}") so the token survives str.format(), which fills the
# trailing "{}" detail field.  TODO(review): confirm against the formatter.

# --- installation flow ----------------------------------------------------
NOT_GIT_REPO_MSG = "#{red}Not a git repository (or any of the parent directories)"
HOOK_ALREADY_INSTALLED_MSG = "The pre-commit hook has already been installed."
EXISTING_HOOK_MSG = (
    "#{yellow}There is an existing pre-commit hook.\n"
    "#{reset_all}Therapist can preserve this legacy hook and run it before the Therapist "
    "pre-commit hook."
)
CONFIRM_PRESERVE_LEGACY_HOOK_MSG = "Would you like to preserve this legacy hook?"
# Trailing "\t" aligns the DONE marker printed on the same line afterwards.
COPYING_HOOK_MSG = "Copying `pre-commit` to `pre-commit.legacy`...\t"
DONE_COPYING_HOOK_MSG = "#{green}#{bright}DONE"
CONFIRM_REPLACE_HOOK_MSG = "Do you want to replace this hook?"
INSTALL_ABORTED_MSG = "Installation aborted."
INSTALLING_HOOK_MSG = "Installing pre-commit hook...\t"
DONE_INSTALLING_HOOK_MSG = "#{green}#{bright}DONE"

# --- uninstallation flow --------------------------------------------------
NO_HOOK_INSTALLED_MSG = "There is no pre-commit hook currently installed."
UNINSTALL_ABORTED_MSG = "Uninstallation aborted."
CONFIRM_UNINSTALL_HOOK_MSG = "Are you sure you want to uninstall the current pre-commit hook?"
CURRENT_HOOK_NOT_THERAPIST_MSG = (
    "#{yellow}The current pre-commit hook is not the Therapist pre-commit hook.\n"
    "#{reset_all}Uninstallation aborted."
)
LEGACY_HOOK_EXISTS_MSG = "#{yellow}There is a legacy pre-commit hook present."
CONFIRM_RESTORE_LEGACY_HOOK_MSG = "Would you like to restore the legacy hook?"
COPYING_LEGACY_HOOK_MSG = "Copying `pre-commit.legacy` to `pre-commit`...\t"
DONE_COPYING_LEGACY_HOOK_MSG = "#{green}#{bright}DONE"
REMOVING_LEGACY_HOOK_MSG = "Removing `pre-commit.legacy`...\t"
DONE_REMOVING_LEGACY_HOOK_MSG = "#{green}#{bright}DONE"
UNINSTALLING_HOOK_MSG = "Uninstalling pre-commit hook...\t"
DONE_UNINSTALLING_HOOK_MSG = "#{green}#{bright}DONE"

# --- runtime diagnostics --------------------------------------------------
# "{}" is a str.format field for the misconfiguration detail.
MISCONFIGURED_MSG = "#{{red}}Misconfigured: {}"
UNSTAGED_CHANGES_MSG = "#{yellow}You have unstaged changes."
NO_THERAPIST_CONFIG_FILE_MSG = "#{red}No Therapist configuration file was found."
UPGRADE_HOOK_MSG = (
    "#{red}The installed pre-commit hook is incompatible with the current version of Therapist.\n"
    "#{reset_all}Install the latest pre-commit hook by running `therapist install`."
)
| rehandalal/therapist | therapist/messages.py | Python | mpl-2.0 | 2,098 |
from astrodata.ReductionObjects import PrimitiveSet
class OBSERVEDPrimitives(PrimitiveSet):
    """Sample primitive set bound to the AstroData type "OBSERVED".

    Primitives such as mark/unmark are generators driven by the recipe
    system: they receive a reduction context (rc), operate on its input
    datasets, and yield rc back to the framework at control points.
    (Python 2 source -- uses print statements.)
    """
    # Type string the recipe system uses to select this primitive set.
    astrotype = "OBSERVED"

    def init(self, rc):
        # Called by the recipe system when the primitive set is prepared.
        print "OBSERVEDPrimitives.init(rc)"
        return

    def typeSpecificPrimitive(self, rc):
        # Demonstration primitive specific to the OBSERVED type.
        print "OBSERVEDPrimitives::typeSpecificPrimitive()"

    def mark(self, rc):
        # Set the S_MARKED PHU keyword on each input dataset; datasets that
        # are already MARKED are left untouched (idempotent).
        for ad in rc.get_inputs_as_astrodata():
            if ad.is_type("MARKED"):
                print "OBSERVEDPrimitives::mark(%s) already marked" % ad.filename
            else:
                ad.phu_set_key_value("S_MARKED", "TRUE")
            # Every dataset flows through to the output list.
            rc.report_output(ad)
        yield rc

    def unmark(self, rc):
        # Clear the S_MARKED PHU keyword on each input dataset; datasets
        # that are already UNMARKED are left untouched (idempotent).
        for ad in rc.get_inputs_as_astrodata():
            if ad.is_type("UNMARKED"):
                print "OBSERVEDPrimitives::unmark(%s) not marked" % ad.filename
            else:
                ad.phu_set_key_value("S_MARKED", None)
            rc.report_output(ad)
        yield rc
| pyrrho314/recipesystem | trunk/astrodata/samples/astrodata_Sample/RECIPES_Sample/primitives/primitives_OBSERVED.py | Python | mpl-2.0 | 968 |
import openerp.tests
# JavaScript files injected into the pages under test so that the website
# tour framework and the event-sale tour definition are available to the
# phantom_js test runner.
inject = [
    "./../../../website/static/src/js/website.tour.test.js",
    "./../../../website_event_sale/static/src/js/website.tour.event_sale.js",
]
class TestUi(openerp.tests.HttpCase):
    """Browser-driven UI tests running website tours under PhantomJS."""

    def test_admin(self):
        # Run the 'banner' tour as the default (admin) user.
        self.phantom_js("/", "openerp.website.Tour.run_test('banner')", "openerp.website.Tour")

    def test_demo(self):
        # Run the 'login_edit' tour as the demo user, injecting the tour JS.
        self.phantom_js("/", "openerp.website.Tour.run_test('login_edit')", "openerp.website.Tour", login="demo", password="demo", inject=inject);

    def test_public(self):
        # Run the 'login_edit' tour anonymously (no login).
        self.phantom_js("/", "openerp.website.Tour.run_test('login_edit')", "openerp.website.Tour", login=None, inject=inject);
| ttfseiko/openerp-trunk | openerp/addons/website_event_sale/tests/test_ui.py | Python | agpl-3.0 | 665 |
import gc
import os
import hashlib
import inspect
import shutil
import tempfile
import yaml
import zipfile
from juju.errors import CharmError, FileNotFound
from juju.charm.errors import InvalidCharmFile
from juju.charm.metadata import MetaData
from juju.charm.directory import CharmDirectory
from juju.charm.bundle import CharmBundle
from juju.lib.filehash import compute_file_hash
from juju.charm import tests
from juju.charm.tests.test_repository import RepositoryTestBase
# Absolute path to the "dummy" sample charm shipped with the test package;
# resolved relative to the juju.charm.tests module so it works regardless of
# where the tree is checked out.
sample_directory = os.path.join(
    os.path.dirname(
        inspect.getabsfile(tests)), "repository", "series", "dummy")
class DirectoryTest(RepositoryTestBase):
    """Tests for CharmDirectory: revision handling, bundling and validation."""

    def setUp(self):
        super(DirectoryTest, self).setUp()
        # Ensure the empty/ directory exists under the dummy sample
        # charm. Depending on how the source code is exported,
        # empty directories may be ignored.
        empty_dir = os.path.join(sample_directory, "empty")
        if not os.path.isdir(empty_dir):
            os.mkdir(empty_dir)

    def copy_charm(self):
        """Copy the dummy sample charm into a fresh temp dir and return it."""
        dir_ = os.path.join(self.makeDir(), "sample")
        shutil.copytree(sample_directory, dir_)
        return dir_

    def delete_revision(self, dir_):
        """Remove the charm's `revision` file."""
        os.remove(os.path.join(dir_, "revision"))

    def set_metadata_revision(self, dir_, revision):
        """Write *revision* into the charm's metadata.yaml (obsolete field)."""
        metadata_path = os.path.join(dir_, "metadata.yaml")
        with open(metadata_path) as f:
            # safe_load: plain metadata needs no arbitrary-object support.
            data = yaml.safe_load(f.read())
        # Use the caller-supplied revision (previously hard-coded to 999).
        data["revision"] = revision
        with open(metadata_path, "w") as f:
            f.write(yaml.dump(data))

    def test_metadata_is_required(self):
        directory = self.makeDir()
        self.assertRaises(FileNotFound, CharmDirectory, directory)

    def test_no_revision(self):
        # Without a revision file, the charm defaults to 0 and writes it out.
        dir_ = self.copy_charm()
        self.delete_revision(dir_)
        charm = CharmDirectory(dir_)
        self.assertEquals(charm.get_revision(), 0)
        with open(os.path.join(dir_, "revision")) as f:
            self.assertEquals(f.read(), "0\n")

    def test_nonsense_revision(self):
        # A non-integer revision file must raise a descriptive CharmError.
        dir_ = self.copy_charm()
        with open(os.path.join(dir_, "revision"), "w") as f:
            f.write("shifty look")
        err = self.assertRaises(CharmError, CharmDirectory, dir_)
        self.assertEquals(
            str(err),
            "Error processing %r: invalid charm revision 'shifty look'" % dir_)

    def test_revision_in_metadata(self):
        # With no revision file, the obsolete metadata field is honored
        # (with a warning logged).
        dir_ = self.copy_charm()
        self.delete_revision(dir_)
        self.set_metadata_revision(dir_, 999)
        log = self.capture_logging("juju.charm")
        charm = CharmDirectory(dir_)
        self.assertEquals(charm.get_revision(), 999)
        self.assertIn(
            "revision field is obsolete. Move it to the 'revision' file.",
            log.getvalue())

    def test_competing_revisions(self):
        # When both exist, the revision file wins over the metadata field.
        dir_ = self.copy_charm()
        self.set_metadata_revision(dir_, 999)
        log = self.capture_logging("juju.charm")
        charm = CharmDirectory(dir_)
        self.assertEquals(charm.get_revision(), 1)
        self.assertIn(
            "revision field is obsolete. Move it to the 'revision' file.",
            log.getvalue())

    def test_set_revision(self):
        dir_ = self.copy_charm()
        charm = CharmDirectory(dir_)
        charm.set_revision(123)
        self.assertEquals(charm.get_revision(), 123)
        with open(os.path.join(dir_, "revision")) as f:
            self.assertEquals(f.read(), "123\n")

    def test_info(self):
        directory = CharmDirectory(sample_directory)
        self.assertTrue(directory.metadata is not None)
        self.assertTrue(isinstance(directory.metadata, MetaData))
        self.assertEquals(directory.metadata.name, "dummy")
        self.assertEquals(directory.type, "dir")

    def test_make_archive(self):
        # make archive from sample directory
        directory = CharmDirectory(sample_directory)
        f = tempfile.NamedTemporaryFile(suffix=".charm")
        directory.make_archive(f.name)

        # open archive in .zip-format and assert integrity
        # (zipfile is imported at module level; no need for a local import)
        zf = zipfile.ZipFile(f.name)
        self.assertEqual(zf.testzip(), None)

        # assert included
        included = [info.filename for info in zf.infolist()]
        self.assertEqual(
            set(included),
            set(("metadata.yaml", "empty/", "src/", "src/hello.c",
                 "config.yaml", "hooks/", "hooks/install", "revision")))

    def test_as_bundle(self):
        directory = CharmDirectory(self.sample_dir1)
        charm_bundle = directory.as_bundle()
        self.assertEquals(type(charm_bundle), CharmBundle)
        self.assertEquals(charm_bundle.metadata.name, "sample")
        self.assertIn("sample-1.charm", charm_bundle.path)

        # The bundle must actually be compressed, not stored.
        total_compressed = 0
        total_uncompressed = 0
        zip_file = zipfile.ZipFile(charm_bundle.path)
        for n in zip_file.namelist():
            info = zip_file.getinfo(n)
            total_compressed += info.compress_size
            total_uncompressed += info.file_size
        self.assertTrue(total_compressed < total_uncompressed)

    def test_as_bundle_file_lifetime(self):
        """
        The temporary bundle file created should have a life time
        equivalent to that of the directory object itself.
        """
        directory = CharmDirectory(self.sample_dir1)
        charm_bundle = directory.as_bundle()
        gc.collect()
        self.assertTrue(os.path.isfile(charm_bundle.path))
        del directory
        gc.collect()
        self.assertFalse(os.path.isfile(charm_bundle.path))

    def test_compute_sha256(self):
        """
        Computing the sha256 of a directory will use the bundled
        charm, since the hash of the file itself is needed.
        """
        directory = CharmDirectory(self.sample_dir1)
        sha256 = directory.compute_sha256()
        charm_bundle = directory.as_bundle()
        self.assertEquals(type(charm_bundle), CharmBundle)
        self.assertEquals(compute_file_hash(hashlib.sha256,
                                            charm_bundle.path),
                          sha256)

    def test_as_bundle_with_relative_path(self):
        """
        Ensure that as_bundle works correctly with relative paths.
        """
        current_dir = os.getcwd()
        os.chdir(self.sample_dir2)
        self.addCleanup(os.chdir, current_dir)
        charm_dir = "../%s" % os.path.basename(self.sample_dir1)
        directory = CharmDirectory(charm_dir)
        charm_bundle = directory.as_bundle()
        self.assertEquals(type(charm_bundle), CharmBundle)
        self.assertEquals(charm_bundle.metadata.name, "sample")

    def test_charm_base_inheritance(self):
        """
        get_sha256() should be implemented in the base class,
        and should use compute_sha256 to calculate the digest.
        """
        directory = CharmDirectory(self.sample_dir1)
        bundle = directory.as_bundle()
        digest = compute_file_hash(hashlib.sha256, bundle.path)
        self.assertEquals(digest, directory.get_sha256())

    def test_as_directory(self):
        directory = CharmDirectory(self.sample_dir1)
        self.assertIs(directory.as_directory(), directory)

    def test_config(self):
        """Validate that ConfigOptions are available on the charm"""
        from juju.charm.tests.test_config import sample_yaml_data
        directory = CharmDirectory(sample_directory)
        self.assertEquals(directory.config.get_serialization_data(),
                          sample_yaml_data)

    def test_file_type(self):
        # Special files (e.g. FIFOs) are rejected when bundling.
        charm_dir = self.copy_charm()
        os.mkfifo(os.path.join(charm_dir, "foobar"))
        directory = CharmDirectory(charm_dir)
        e = self.assertRaises(InvalidCharmFile, directory.as_bundle)
        self.assertIn("foobar' Invalid file type for a charm", str(e))

    def test_internal_symlink(self):
        # Absolute symlinks inside a charm are rejected when bundling.
        charm_path = self.copy_charm()
        os.symlink("/etc/lsb-release", os.path.join(charm_path, "foobar"))
        directory = CharmDirectory(charm_path)
        e = self.assertRaises(InvalidCharmFile, directory.as_bundle)
        self.assertIn("foobar' Absolute links are invalid", str(e))

    def test_extract_symlink(self):
        # NOTE(review): this body duplicates test_internal_symlink verbatim;
        # it presumably was meant to exercise extraction of a bundle that
        # contains a symlink -- confirm the intended scenario.
        charm_path = self.copy_charm()
        os.symlink("/etc/lsb-release", os.path.join(charm_path, "foobar"))
        directory = CharmDirectory(charm_path)
        e = self.assertRaises(InvalidCharmFile, directory.as_bundle)
        self.assertIn("foobar' Absolute links are invalid", str(e))
| anbangr/trusted-juju | juju/charm/tests/test_directory.py | Python | agpl-3.0 | 8,532 |
#!/usr/bin/env python3
"""
echo_cmdline_client.py
Copyright (c) 2018-2019 Alan Yorinks All right reserved.
Python Banyan is free software; you can redistribute it and/or
modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
Version 3 as published by the Free Software Foundation; either
or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE
along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import argparse
import signal
import sys
import time
from python_banyan.banyan_base import BanyanBase
class EchoCmdClient(BanyanBase):
    """
    This is an echo client that will allow the user
    to specify command line arguments to change the default behavior
    of the client.

    It sends out a series of 'echo' messages and expects an
    echo 'reply' from the server for each. When it completes, press enter,
    and it will send a message to the server so that it also quits.

    To use: 1. Start the backplane.
            2. Start the server.
            3. Start this client.

    usage: echo_cmdline_client.py [-h] [-b BACK_PLANE_IP_ADDRESS]
                                  [-m NUMBER_OF_MESSAGES] [-n PROCESS_NAME]
                                  [-p PUBLISHER_PORT] [-s SUBSCRIBER_PORT]
                                  [-t LOOP_TIME]

    optional arguments:
      -h, --help            show this help message and exit
      -b BACK_PLANE_IP_ADDRESS
                            None or IP address used by Back Plane
      -m NUMBER_OF_MESSAGES
                            Number of messages to publish
      -n PROCESS_NAME       Set process name in banner
      -p PUBLISHER_PORT     Publisher IP port
      -s SUBSCRIBER_PORT    Subscriber IP port
      -t LOOP_TIME          Event Loop Timer in seconds
    """

    def __init__(self, **kwargs):
        """
        kwargs is a dictionary that will contain the following keys:

        :param back_plane_ip_address: banyan_base back_plane IP Address -
               if not specified, it will be set to the local computer.
        :param subscriber_port: banyan_base back plane subscriber port.
               This must match that of the banyan_base backplane.
        :param publisher_port: banyan_base back plane publisher port.
               This must match that of the banyan_base backplane.
        :param number_of_messages: number of messages to transmit
        :param process_name: Component identifier shown in the banner
        :param loop_time: receive loop sleep time in seconds
        """

        # initialize the parent
        super(EchoCmdClient, self).__init__(back_plane_ip_address=kwargs['back_plane_ip_address'],
                                            subscriber_port=kwargs['subscriber_port'],
                                            publisher_port=kwargs['publisher_port'],
                                            process_name=kwargs['process_name'],
                                            loop_time=kwargs['loop_time'])

        # allow zmq connections to establish
        time.sleep(.3)

        # accept banyan messages with the topic of reply
        self.set_subscriber_topic('reply')

        # sequence number of messages (counts down to zero)
        self.message_number = kwargs['number_of_messages']

        # total number of messages to send (kept for the final report)
        self.number_of_messages = kwargs['number_of_messages']

        # send the first message - make sure that the server is already started
        self.publish_payload({'message_number': self.message_number}, 'echo')
        self.message_number -= 1

        # process the reply messages until interrupted
        try:
            self.receive_loop()
        except KeyboardInterrupt:
            self.clean_up()
            sys.exit(0)

    def incoming_message_processing(self, topic, payload):
        """
        Messages are sent here from the receive_loop

        :param topic: Message Topic string
        :param payload: Message Data
        :return:
        """

        # When a message is received and its number is zero, finish up.
        if payload['message_number'] == 0:
            print(str(self.number_of_messages) + ' messages sent and received. ')
            input('Press enter to exit.')
            self.clean_up()
            sys.exit(0)
        # bump the message number down and send the next message out
        else:
            self.message_number -= 1
            if self.message_number >= 0:
                self.publish_payload({'message_number': self.message_number}, 'echo')
def echo_cmdline_client():
    """Parse command-line options and launch an EchoCmdClient instance."""
    # (flag, dest, default, help) for every supported option.  The "-b"
    # option lets the user bypass IP auto-discovery, which is necessary when
    # this component runs on a different computer than the backplane.
    option_specs = (
        ("-b", "back_plane_ip_address", "None",
         "None or IP address used by Back Plane"),
        ("-m", "number_of_messages", "10",
         "Number of messages to publish"),
        # "-n" sets the process name shown on the console banner.
        ("-n", "process_name", "EchoCmdClient",
         "Set process name in banner"),
        ("-p", "publisher_port", "43124",
         "Publisher IP port"),
        ("-s", "subscriber_port", "43125",
         "Subscriber IP port"),
        ("-t", "loop_time", ".1",
         "Event Loop Timer in seconds"),
    )

    parser = argparse.ArgumentParser()
    for flag, dest, default, help_text in option_specs:
        parser.add_argument(flag, dest=dest, default=default, help=help_text)
    parsed = parser.parse_args()

    # The literal string "None" means "auto-discover the local address".
    ip_address = None if parsed.back_plane_ip_address == 'None' \
        else parsed.back_plane_ip_address

    EchoCmdClient(back_plane_ip_address=ip_address,
                  number_of_messages=int(parsed.number_of_messages),
                  publisher_port=parsed.publisher_port,
                  subscriber_port=parsed.subscriber_port,
                  process_name=parsed.process_name,
                  loop_time=float(parsed.loop_time))
# signal handler function called when Control-C occurs
# noinspection PyShadowingNames,PyUnusedLocal,PyUnusedLocal
def signal_handler(sig, frame):
print('Exiting Through Signal Handler')
raise KeyboardInterrupt
# Hook both SIGINT (Control-C) and SIGTERM so either triggers the handler.
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)

if __name__ == '__main__':
    echo_cmdline_client()
| MrYsLab/python_banyan | examples/echo_cmdline_client.py | Python | agpl-3.0 | 7,036 |
#!/usr/bin/env python2.6
# This file is part of VoltDB.
# Copyright (C) 2008-2018 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import datetime
import httplib2
import json
import random
import threading
import time
# --- Tunables (set these for your deployment) -----------------------------
server="volt4b"  # host running the VoltDB HTTP/JSON API
port=8080        # HTTP port of that API
sleep_time=5 #Time to sleep between every VMC iteration (VMC delay is 5)
max_err=1 #Max number of errors on any thread before it quits
thr=20 #Concurrent threads of VMC emulation

# URL template: host, port, then the pre-encoded query string.
request_format = "http://%s:%d/api/1.0/?%s"

# These are the (URL-encoded) requests being made every 5 seconds by VMC in
# V4.9.  This may change in future VMC versions.
arguments = [
    "Procedure=%40SystemInformation&Parameters=%5B%22OVERVIEW%22%5D&admin=true",
    "Procedure=%40Statistics&Parameters=%5B%22MEMORY%22%2C0%5D&admin=true",
    "Procedure=%40Statistics&Parameters=%5B%22LATENCY_HISTOGRAM%22%2C0%5D&admin=true",
    "Procedure=%40Statistics&Parameters=%5B%22PROCEDUREPROFILE%22%2C0%5D&admin=true",
    "Procedure=%40Statistics&Parameters=%5B%22CPU%22%2C0%5D&admin=true",
    "Procedure=%40SystemInformation&Parameters=%5B%22DEPLOYMENT%22%5D&admin=true",
    "Procedure=%40Statistics&Parameters=%5B%22TABLE%22%2C0%5D&admin=true",
    "Procedure=%40Statistics&Parameters=%5B%22MEMORY%22%2C0%5D&admin=true",
    "Procedure=%40Statistics&Parameters=%5B%22TABLE%22%2C0%5D&admin=true",
    "Procedure=%40Statistics&Parameters=%5B%22PROCEDUREPROFILE%22%2C0%5D&admin=true",
    "Procedure=%40SystemCatalog&Parameters=%5B%22TABLES%22%5D&admin=true",
]

HTTP_SUCCESS=200

# Symbolic name -> VoltDB client status code (as returned in the JSON body).
STATUS_STRINGS = {
    "VOLTDB_CONNECTION_LOST": -4,
    "VOLTDB_CONNECTION_TIMEOUT": -6,
    "VOLTDB_GRACEFUL_FAILURE": -2,
    "VOLTDB_OPERATIONAL_FAILURE": -9,
    "VOLTDB_RESPONSE_UNKNOWN": -7,
    "VOLTDB_SERVER_UNAVAILABLE": -5,
    "VOLTDB_SUCCESS": 1,
    "VOLTDB_TXN_RESTART": -8,
    "VOLTDB_UNEXPECTED_FAILURE": -3,
    "VOLTDB_UNINITIALIZED_APP_STATUS_CODE": -128,
    "VOLTDB_USER_ABORT": -1,
}
# Reverse map: numeric status code -> symbolic name (Python 2 iteritems).
STATUS_CODES = dict((v,k) for k, v in STATUS_STRINGS.iteritems())

# When True, a thread pauses for operator input instead of quitting on error.
pause_on_error=False
def ts():
    """Return the current local time as a log-style timestamp string.

    Format: 'YYYY-MM-DD HH:MM:SS,microseconds' (26 characters).
    """
    now = datetime.datetime.today()
    return now.strftime('%Y-%m-%d %H:%M:%S,%f')
def push_it(max_errors=1, sleep_time=0):
    """Emulate one VMC browser session: issue the full set of VMC requests
    forever, sleeping sleep_time seconds between iterations, and give up
    (return -1) after max_errors failed requests.  (Python 2 source.)
    """
    err_ct = 0
    tid = str(threading.current_thread().name)
    print ts() + " Starting thread " + tid
    i = 1
    # One HTTP connection object per thread, reused across requests.
    http = httplib2.Http(cache=None, timeout=15.0)
    #while forever
    while ( i > 0 ) :
        # Progress marker every 100 iterations.
        if not i % 100:
            print "%s %s Loop Count: %4d" % (ts(), tid, i)
        for a in arguments:
            url = request_format % (server, port, a)
            try:
                #http = httplib2.Http(cache=None, timeout=15.0)
                response,content = http.request(url, 'GET')
                # A request fails if either the HTTP layer or the VoltDB
                # status embedded in the JSON body reports non-success.
                if response['status'] != str(HTTP_SUCCESS) or json.loads(content)['status'] != STATUS_STRINGS['VOLTDB_SUCCESS']:
                    statusstring = STATUS_CODES.get(json.loads(content)['status'],"Unknown status")
                    print "%s %s Request# %d - Error getting %s\n\thttp_status=%s\tresponse=%s" % (ts(), tid, i, a, response['status'], statusstring)
                    err_ct += 1
            except AttributeError:
                # httplib2 can raise AttributeError internally on a broken
                # connection; count it as a request failure.
                err_ct += 1
            if err_ct >= max_errors:
                if pause_on_error:
                    raw_input("Press any key to continue...")
                else:
                    print "%s: Too many errors - I'm out of here" % tid
                    return (-1)
        time.sleep(sleep_time)
        i += 1
# Launch `thr` daemon threads, each emulating one VMC browser session.
threads = []
for i in range(thr):
    # Pass the configured per-thread error budget (the original hard-coded 1
    # here, silently ignoring the max_err setting above) and the 5-second
    # VMC refresh cadence.
    t = threading.Thread(target=push_it, args=(max_err,sleep_time))
    t.daemon=True
    threads.append(t)

for t in threads:
    # Stagger the starts so the threads don't all fire at the same instant.
    time.sleep (random.randint(50,200)/100.0)
    t.start()

for t in threads:
    t.join()

#TODO: This threading is dodgy and doesn't end gracefully
#and doesn't handle ctrl-c gracefully
| simonzhangsm/voltdb | tools/vmc_stats_emulator.py | Python | agpl-3.0 | 4,874 |
#!/usr/bin/python2
#
# Copyright 2016 ScyllaDB
#
#
# This file is part of Scylla.
#
# Scylla is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scylla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
import sys
import yaml
import argparse
def get(config, key):
    """Print the value of *key* from the YAML file *config* to stdout.

    Lists are printed one element per line; dicts one "key:value" per line;
    scalars are printed verbatim.  Exits with status 1 if the key is absent.
    """
    # Context manager guarantees the file handle is closed; safe_load avoids
    # the deprecated/unsafe default-loader yaml.load API.
    with open(config) as f:
        cfg = yaml.safe_load(f.read())
    try:
        val = cfg[key]
    except KeyError:
        # Diagnostic goes to stderr so shell callers capturing stdout do not
        # mistake it for a configuration value (works on Python 2 and 3).
        sys.stderr.write("key '%s' not found\n" % key)
        sys.exit(1)
    if isinstance(val, list):
        for v in val:
            print("%s" % v)
    elif isinstance(val, dict):
        for k, v in list(val.items()):
            print("%s:%s" % (k, v))
    else:
        print(val)
def main():
    """Entry point: parse the command line and look up the requested key."""
    parser = argparse.ArgumentParser(
        description='scylla.yaml config reader/writer from shellscript.')
    # Path to the YAML configuration file (defaults to the installed one).
    parser.add_argument('-c', '--config', dest='config', action='store',
                        default='/etc/scylla/scylla.yaml',
                        help='path to scylla.yaml')
    # The configuration key to read; mandatory.
    parser.add_argument('-g', '--get', dest='get', action='store',
                        required=True, help='get parameter')
    options = parser.parse_args()
    get(options.config, options.get)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| duarten/scylla | dist/common/scripts/scylla_config_get.py | Python | agpl-3.0 | 1,682 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#.apidoc title: Amount to Text
#-------------------------------------------------------------
# French
#-------------------------------------------------------------
# French word tables used by the number-spelling helpers below.
# Numbers 0-19, indexed directly by value.
to_19_fr = ( 'zéro', 'un', 'deux', 'trois', 'quatre', 'cinq', 'six',
          'sept', 'huit', 'neuf', 'dix', 'onze', 'douze', 'treize',
          'quatorze', 'quinze', 'seize', 'dix-sept', 'dix-huit', 'dix-neuf' )
# Tens words for 20, 30, ..., 90 (capitalization is inconsistent in the
# original data and is preserved as-is, since it appears in the output).
tens_fr  = ( 'vingt', 'trente', 'quarante', 'Cinquante', 'Soixante', 'Soixante-dix', 'Quatre-vingts', 'Quatre-vingt Dix')
# Names for the powers of 1000: denom_fr[i] is the word for 1000**(i+1).
denom_fr = ( '',
          'Mille', 'Millions', 'Milliards', 'Billions', 'Quadrillions',
          'Quintillion', 'Sextillion', 'Septillion', 'Octillion', 'Nonillion',
          'Décillion', 'Undecillion', 'Duodecillion', 'Tredecillion', 'Quattuordecillion',
          'Sexdecillion', 'Septendecillion', 'Octodecillion', 'Icosillion', 'Vigintillion' )
def _convert_nn_fr(val):
""" convert a value < 100 to French
"""
if val < 20:
return to_19_fr[val]
for (dcap, dval) in ((k, 20 + (10 * v)) for (v, k) in enumerate(tens_fr)):
if dval + 10 > val:
if val % 10:
return dcap + '-' + to_19_fr[val % 10]
return dcap
def _convert_nnn_fr(val):
""" convert a value < 1000 to french
special cased because it is the level that kicks
off the < 100 special case. The rest are more general. This also allows you to
get strings in the form of 'forty-five hundred' if called directly.
"""
word = ''
(mod, rem) = (val % 100, val // 100)
if rem > 0:
word = to_19_fr[rem] + ' Cent'
if mod > 0:
word = word + ' '
if mod > 0:
word = word + _convert_nn_fr(mod)
return word
def french_number(val):
if val < 100:
return _convert_nn_fr(val)
if val < 1000:
return _convert_nnn_fr(val)
for (didx, dval) in ((v - 1, 1000 ** v) for v in range(len(denom_fr))):
if dval > val:
mod = 1000 ** didx
l = val // mod
r = val - (l * mod)
ret = _convert_nnn_fr(l) + ' ' + denom_fr[didx]
if r > 0:
ret = ret + ', ' + french_number(r)
return ret
def amount_to_text_fr(number, currency):
number = '%.2f' % number
units_name = currency
list = str(number).split('.')
start_word = french_number(abs(int(list[0])))
end_word = french_number(int(list[1]))
cents_number = int(list[1])
cents_name = (cents_number > 1) and ' Cents' or ' Cent'
final_result = start_word +' '+units_name+' '+ end_word +' '+cents_name
return final_result
#-------------------------------------------------------------
# Dutch
#-------------------------------------------------------------
to_19_nl = ( 'Nul', 'Een', 'Twee', 'Drie', 'Vier', 'Vijf', 'Zes',
'Zeven', 'Acht', 'Negen', 'Tien', 'Elf', 'Twaalf', 'Dertien',
'Veertien', 'Vijftien', 'Zestien', 'Zeventien', 'Achttien', 'Negentien' )
tens_nl = ( 'Twintig', 'Dertig', 'Veertig', 'Vijftig', 'Zestig', 'Zeventig', 'Tachtig', 'Negentig')
denom_nl = ( '',
'Duizend', 'Miljoen', 'Miljard', 'Triljoen', 'Quadriljoen',
'Quintillion', 'Sextiljoen', 'Septillion', 'Octillion', 'Nonillion',
'Decillion', 'Undecillion', 'Duodecillion', 'Tredecillion', 'Quattuordecillion',
'Sexdecillion', 'Septendecillion', 'Octodecillion', 'Novemdecillion', 'Vigintillion' )
def _convert_nn_nl(val):
""" convert a value < 100 to Dutch
"""
if val < 20:
return to_19_nl[val]
for (dcap, dval) in ((k, 20 + (10 * v)) for (v, k) in enumerate(tens_nl)):
if dval + 10 > val:
if val % 10:
return dcap + '-' + to_19_nl[val % 10]
return dcap
def _convert_nnn_nl(val):
""" convert a value < 1000 to Dutch
special cased because it is the level that kicks
off the < 100 special case. The rest are more general. This also allows you to
get strings in the form of 'forty-five hundred' if called directly.
"""
word = ''
(mod, rem) = (val % 100, val // 100)
if rem > 0:
word = to_19_nl[rem] + ' Honderd'
if mod > 0:
word = word + ' '
if mod > 0:
word = word + _convert_nn_nl(mod)
return word
def dutch_number(val):
if val < 100:
return _convert_nn_nl(val)
if val < 1000:
return _convert_nnn_nl(val)
for (didx, dval) in ((v - 1, 1000 ** v) for v in range(len(denom_nl))):
if dval > val:
mod = 1000 ** didx
l = val // mod
r = val - (l * mod)
ret = _convert_nnn_nl(l) + ' ' + denom_nl[didx]
if r > 0:
ret = ret + ', ' + dutch_number(r)
return ret
def amount_to_text_nl(number, currency):
number = '%.2f' % number
units_name = currency
list = str(number).split('.')
start_word = dutch_number(int(list[0]))
end_word = dutch_number(int(list[1]))
cents_number = int(list[1])
cents_name = (cents_number > 1) and 'cent' or 'cent'
final_result = start_word +' '+units_name+' '+ end_word +' '+cents_name
return final_result
#-------------------------------------------------------------
# Generic functions
#-------------------------------------------------------------
_translate_funcs = {'fr' : amount_to_text_fr, 'nl' : amount_to_text_nl}
def add_amount_to_text_function(lang, func):
    """Register *func* as the amount-to-words renderer for language *lang*."""
    _translate_funcs.update({lang: func})
#TODO: we should use the country AND language (ex: septante VS soixante dix)
#TODO: we should use en by default, but the translation func is yet to be implemented
def amount_to_text(nbr, lang='fr', currency='euro'):
    """ Converts an integer to its textual representation, using the language set in the context if any.

        Example::

            1654: mille six cent cinquante-quatre.

        Falls back to French when no renderer is registered for *lang*.
    """
    # dict.has_key() was removed in Python 3; the `in` operator works on
    # both Python 2 and 3. Likewise a single parenthesized argument to
    # print prints identically under both versions.
    if lang not in _translate_funcs:
        #TODO: use logger
        print("WARNING: no translation function found for lang: '%s'" % (lang,))
        #TODO: (default should be en) same as above
        lang = 'fr'
    return _translate_funcs[lang](abs(nbr), currency)
# Manual smoke test: with no CLI argument, spell out a sweep of sample
# values in Dutch; otherwise spell out the given integer.
# (Python 2 print statements -- this module targets Python 2.)
if __name__=='__main__':
    from sys import argv
    lang = 'nl'
    if len(argv) < 2:
        for i in range(1,200):
            print i, ">>", amount_to_text(i, lang)
        for i in range(200,999999,139):
            print i, ">>", amount_to_text(i, lang)
    else:
        print amount_to_text(int(argv[1]), lang)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| xrg/openerp-server | bin/tools/amount_to_text.py | Python | agpl-3.0 | 7,774 |
from django.core.validators import validate_email
from django import forms
from captcha.fields import ReCaptchaField
from .models import ContactUs
class CreateContact(forms.ModelForm):
    """Contact form backed by the ContactUs model, protected by reCAPTCHA,
    with per-field validation (Romanian error messages)."""

    captcha = ReCaptchaField()

    class Meta:
        model = ContactUs
        fields = '__all__'
        widgets = {
            'email': forms.EmailInput({'required': 'required',
                                       'placeholder': 'Email'}),
            'message': forms.Textarea(attrs={'required': 'required',
                                             'placeholder': 'Message'})
        }

    def clean_first_name(self):
        """Reject first names containing non-alphabetic characters."""
        first_name = self.cleaned_data['first_name']
        if not first_name.isalpha():
            raise forms.ValidationError("Introdu un prenume valid")
        return first_name

    def clean_email(self):
        """Reject syntactically invalid e-mail addresses.

        Bug fix: validate_email() returns None and *raises* ValidationError
        on invalid input, so the old ``if validate_email(email):`` branch
        could never run and the custom message was never shown.
        """
        email = self.cleaned_data['email']
        try:
            validate_email(email)
        except forms.ValidationError:
            raise forms.ValidationError("Adresa de email nu e valida")
        return email

    def clean_last_name(self):
        """Reject last names containing non-alphabetic characters."""
        last_name = self.cleaned_data['last_name']
        if not last_name.isalpha():
            raise forms.ValidationError("Introdu un nume corect")
        return last_name

    def clean_message(self):
        """Require a message of at least 50 characters."""
        message = self.cleaned_data['message']
        if len(message) < 50:
            raise forms.ValidationError(
                "Mesajul tau e prea scurt!"
                "Trebuie sa contina minim 50 de caractere")
        return message
# -*- coding: utf-8 -*-
import controllers
#import models
| optima-ict/odoo | addons/professional_templates/controllers/__init__.py | Python | agpl-3.0 | 58 |
#! /usr/bin/env python
"""
Author: Gary Foreman
Created: August 6, 2016
This script scrapes thumbnail images from thread links in the For Sale: Bass
Guitars forum at talkbass.com
"""
from __future__ import print_function
from glob import glob
import os
import sys
import urllib
from PIL import Image, ImageOps
import pymongo
sys.path.append('..')
from utilities.utilities import pause_scrape, report_progress
# Scrape pacing and progress-report settings.
MIN_PAUSE_SECONDS = 0.15
MAX_PAUSE_SECONDS = 0.5
REPORT_MESSAGE = 'Scraped image'
REPORT_FREQUENCY = 300
# Local directory where scraped thumbnails are stored.
DATA_PATH = os.path.join('..', 'data', 'images')

def make_data_dir():
    """
    Create DATA_PATH (including missing parents) if it does not exist yet.
    """
    if not os.path.isdir(DATA_PATH):
        os.makedirs(DATA_PATH)

def filename_from_url(thumbnail_url):
    """
    thumbnail_url : a string with a url to a bass image

    Return the local path under DATA_PATH for the image named at the end of
    thumbnail_url, with the extension normalised to .jpg.
    """
    last_segment = thumbnail_url.strip('/').split('/')[-1]
    stem = os.path.splitext(last_segment)[0]
    return os.path.join(DATA_PATH, stem + '.jpg')
def download_thumb(thumbnail_url):
    """
    thumbnail_url : a string with a url to a bass image

    Fetch the image at thumbnail_url and store it under DATA_PATH.
    Failures are silently skipped: URLs that are not image files (IOError)
    and URLs containing non-ASCII characters (UnicodeError).
    """
    target = filename_from_url(thumbnail_url)
    try:
        urllib.urlretrieve(thumbnail_url, target)
    except (IOError, UnicodeError):
        pass
def crop_image(filename):
    """
    filename: a string with the name to a locally stored image file

    Crops image at filename to 128 x 128 pixels and overwrites original
    """
    try:
        img = Image.open(filename)
        img = ImageOps.fit(img, (128, 128), Image.ANTIALIAS)
        img.save(filename)
    except NameError:
        # File does not exist
        # NOTE(review): PIL's Image.open raises IOError for a missing file,
        # not NameError -- this branch looks unreachable; confirm intent.
        pass
    except IOError:
        # Image is corrupted
        # Corrupted downloads are deleted so they are retried next run.
        try:
            os.remove(filename)
        except OSError:
            # Filename is too long
            pass
def main():
    """Download every not-yet-scraped thumbnail referenced in MongoDB,
    crop each one to 128x128, pausing between requests to be polite."""
    make_data_dir()
    # Establish connection to MongoDB open on port 27017
    client = pymongo.MongoClient()
    # Access threads database
    db = client.for_sale_bass_guitars
    # Get database documents
    cursor = db.threads.find()
    # Get list of images that have already been scraped
    scraped_image_list = glob(os.path.join(DATA_PATH, '*.jpg'))
    thumbnail_url_list = []
    for document in cursor:
        thumbnail_url = document[u'image_url']
        try:
            filename = filename_from_url(thumbnail_url)
            # Skip thumbnails already present on disk.
            if filename not in scraped_image_list:
                thumbnail_url_list.append(thumbnail_url)
        except AttributeError:
            # thread has no associated thumbnail
            pass
    client.close()
    thumbnail_count = 1
    for thumbnail_url in thumbnail_url_list:
        download_thumb(thumbnail_url)
        filename = filename_from_url(thumbnail_url)
        crop_image(filename)
        # Randomised delay between requests to avoid hammering the server.
        pause_scrape(MIN_PAUSE_SECONDS, MAX_PAUSE_SECONDS)
        report_progress(thumbnail_count, REPORT_MESSAGE, REPORT_FREQUENCY)
        thumbnail_count += 1
if __name__ == "__main__":
    main()
| garyForeman/LHBassClassifier | image_scrape/get_thumbnails.py | Python | agpl-3.0 | 3,278 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the website.Comment model
    (content plus an external service_comment_id reference) linked to
    website.Issue. Do not edit by hand."""
    dependencies = [
        ('website', '0016_auto_20151128_2006'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('content', models.TextField()),
                ('service_comment_id', models.IntegerField()),
                ('username', models.CharField(max_length=255)),
                ('created', models.DateTimeField()),
                ('updated', models.DateTimeField()),
                ('issue', models.ForeignKey(to='website.Issue')),
            ],
        ),
    ]
| atuljain/coderbounty | website/migrations/0017_comment.py | Python | agpl-3.0 | 819 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo Addon, Open Source Management Solution
# Copyright (C) 2014-now Equitania Software GmbH(<http://www.equitania.de>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, fields, api, _
class report_stock_picking(models.Model):
    """Report-side helpers on stock.picking.

    Cleanup: removed a large block of dead commented-out code (get_tax /
    get_price / get_qty leftovers from the sale report model).
    """
    _inherit = 'stock.picking'

    @api.multi
    def html_text_is_set(self, value):
        """Return True if *value* contains real text once HTML markup is
        stripped, False otherwise.

        Workaround for HTML widgets: saving an empty HTML field leaves
        ``<p><br></p>`` behind, which must not count as content.
        """
        if not value:
            return False
        # Strip markup tokens in the same order as the original chained
        # str.replace() calls ('<br>'/'<p>'/'</p>' first so their letters
        # are removed too, then stray brackets and slashes).
        for token in ('<br>', '<p>', '</p>', '<', '>', '/'):
            value = value.replace(token, '')
        value = value.strip()
        return value != ''
# -*- coding: utf-8 -*-
# © 2012-2016 Therp BV <http://therp.nl>
# © 2013 Agile Business Group sagl <http://www.agilebg.com>
# <lorenzo.battistini@agilebg.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# Odoo module manifest, parsed by the module loader.
{
    "name": "Accounting Financial Report Horizontal",
    "version": "10.0.1.0.0",
    "author": "Therp BV,Agile Business Group,Odoo Community Association (OCA)",
    "category": 'Accounting & Finance',
    'website': 'https://github.com/OCA/account-financial-reporting',
    'license': 'AGPL-3',
    # Only the core accounting module is required.
    "depends": ["account"],
    'data': [
        "data/report_paperformat.xml",
        "data/ir_actions_report_xml.xml",
        "report/report_financial.xml",
    ],
}
| luc-demeyer/account-financial-reporting | account_financial_report_horizontal/__manifest__.py | Python | agpl-3.0 | 701 |
# -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api
from openerp import tools
class LapKbMutasiCommon(models.AbstractModel):
    """Abstract base for the bonded-zone (Kawasan Berikat) mutation reports.

    Rows come from a SQL view (one per product x warehouse, see init());
    the quantity columns -- opening balance, receipts, issues, adjustment,
    closing balance, stock-opname and difference -- are computed on the fly
    from stock moves and inventory adjustments via raw SQL.
    """
    _name = "l10n_id.djbc_kb_lap_mutasi_common"
    _description = "Common Model for Laporan Mutasi Kawasan Berikat"
    # Backed by the SQL view created in init(), not an ORM-managed table.
    _auto = False
    @api.multi
    def _qty_from(self):
        # FROM/JOIN fragment of the movement-quantity query (stock moves
        # joined with their picking type and source/destination locations).
        self.ensure_one()
        str_select = """
        FROM stock_move AS a
        JOIN stock_picking_type AS b ON a.picking_type_id = b.id
        JOIN stock_location AS c ON a.location_id = c.id
        JOIN stock_location AS d ON a.location_dest_id =d.id
        """
        return str_select
    @api.multi
    def _qty_where(
        self, date_start, date_end, movement_type="in",
        scrap=False, adjustment=False
    ):
        # WHERE fragment filtering done moves for this row's product and
        # warehouse, by movement direction ('in'/'out'), scrap flag and date
        # window. date_start=False means "everything before date_end"
        # (used for the opening balance). `adjustment` is an inventory id
        # whose moves must be excluded (they are counted separately).
        # Values are %-interpolated: ids are integers from trusted records
        # (see the pylint sql-injection disable in _get_qty).
        self.ensure_one()
        str_where = """
        WHERE
            a.product_id = %s AND
            b.warehouse_id = %s AND
            b.djbc_kb_movement_type = '%s' AND
            b.djbc_kb_scrap %s AND
            a.state = 'done' AND
        """ % (
            self.product_id.id,
            self.warehouse_id.id,
            movement_type,
            scrap and 'IS TRUE' or 'IS FALSE',
        )
        if date_start:
            str_where += """
            a.date >= '%s' AND
            a.date <= '%s'
            """ % (date_start, date_end)
        else:
            str_where += """
            a.date < '%s'
            """ % (date_end)
        if adjustment:
            str_where += """
            AND (a.inventory_id != %s OR
                a.inventory_id IS NULL)
            """ % (adjustment)
        return str_where
    @api.multi
    def _qty_join(self):
        # Placeholder hook; returns None and is never used by _get_qty.
        self.ensure_one()
        pass
    @api.multi
    def _qty_select(self):
        # SELECT fragment: only the move quantity is needed.
        self.ensure_one()
        str_select = """
        SELECT
            a.product_qty AS qty
        """
        return str_select
    @api.multi
    def _compute_qty(self):
        """Compute all report figures for each row, using the reporting
        period passed through the context as date_start/date_end."""
        date_start = self._context.get("date_start", False)
        date_end = self._context.get("date_end", False)
        obj_inv_line = self.env["stock.inventory.line"]
        obj_inv = self.env["stock.inventory"]
        for lap in self:
            # saldo_awal=opening balance, pemasukan=receipts,
            # pengeluaran=issues, penyesuaian=adjustment,
            # saldo_akhir=book balance, selisih=difference.
            saldo_awal = pemasukan = pengeluaran = \
                penyesuaian = saldo_akhir = stock_opname = \
                selisih = 0.0
            # Adjustment
            # Latest validated DJBC inventory adjustment in the period for
            # this warehouse (locations under the warehouse view root).
            inv = False
            view_root_id = lap.warehouse_id.view_location_id.id
            criteria1 = [
                ("date", ">=", date_start),
                ("date", "<=", date_end),
                ("state", "=", "done"),
                ("djbc", "=", True),
                ("location_id.id", "child_of", view_root_id),
            ]
            invs = obj_inv.search(criteria1, order="date desc", limit=1)
            if invs:
                inv = invs[0]
            # Opening balance = all ins minus all outs before date_start.
            saldo_awal_pemasukan = lap._get_qty(
                False, date_start, "in", False, False)
            saldo_awal_pengeluaran = lap._get_qty(
                False, date_start, "out", False, False)
            saldo_awal = saldo_awal_pemasukan - saldo_awal_pengeluaran
            # In-period movements, excluding moves generated by the
            # inventory adjustment (counted via penyesuaian instead).
            pemasukan = lap._get_qty(
                date_start, date_end, "in", False, inv and inv.id or False)
            pengeluaran = lap._get_qty(
                date_start, date_end, "out", False, inv and inv.id or False)
            if inv:
                criteria = [
                    ("inventory_id", "=", inv.id),
                    ("product_id", "=", lap.product_id.id),
                ]
                for inv_line in obj_inv_line.search(criteria):
                    # stock_opname accumulates counted quantities; the
                    # adjustment is counted minus theoretical.
                    stock_opname += inv_line.product_qty
                    penyesuaian += (inv_line.product_qty -
                                    inv_line.theoretical_qty)
            saldo_akhir = saldo_awal + pemasukan - pengeluaran + penyesuaian
            selisih = saldo_akhir - stock_opname
            # keterangan: sesuai=matches, lebih=book balance higher than
            # the physical count, kurang=book balance lower.
            if stock_opname == saldo_akhir:
                keterangan = "sesuai"
            elif saldo_akhir > stock_opname:
                keterangan = "lebih"
            else:
                keterangan = "kurang"
            lap.saldo_awal = saldo_awal
            lap.stock_opname = stock_opname
            lap.pemasukan = pemasukan
            lap.pengeluaran = pengeluaran
            lap.saldo_akhir = saldo_akhir
            lap.penyesuaian = penyesuaian
            lap.selisih = selisih
            lap.keterangan = keterangan
    @api.multi
    def _get_qty(self, date_start, date_end, movement_type, scrap, adjustment):
        """Sum done stock-move quantities matching the given filters by
        running the assembled raw SQL query. Returns a float total."""
        self.ensure_one()
        result = 0.0
        # pylint: disable=locally-disabled, sql-injection
        str_sql = """
        %s
        %s
        %s
        """ % (
            self._qty_select(),
            self._qty_from(),
            self._qty_where(
                date_start,
                date_end,
                movement_type,
                scrap,
                adjustment
            )
        )
        self.env.cr.execute(str_sql)
        a = self.env.cr.dictfetchall()
        for row in a:
            result += row["qty"]
        return result
    # --- columns populated by the SQL view (see init()) ---
    kode_barang = fields.Char(
        string="Kode Barang",
        readonly=True,
    )
    product_id = fields.Many2one(
        string="Nama Barang",
        comodel_name="product.product",
        readonly=True,
    )
    uom_id = fields.Many2one(
        string="Satuan",
        comodel_name="product.uom",
        readonly=True,
    )
    # --- figures computed per request, never stored ---
    saldo_awal = fields.Float(
        string="Saldo Awal",
        readonly=True,
        compute="_compute_qty",
        store=False,
    )
    pemasukan = fields.Float(
        string="Pemasukan",
        readonly=True,
        compute="_compute_qty",
        store=False,
    )
    pengeluaran = fields.Float(
        string="Pengeluaran",
        readonly=True,
        compute="_compute_qty",
        store=False,
    )
    penyesuaian = fields.Float(
        string="Penyesuaian",
        readonly=True,
        compute="_compute_qty",
        store=False,
    )
    stock_opname = fields.Float(
        string="Stock Opname",
        readonly=True,
        compute="_compute_qty",
        store=False,
    )
    saldo_akhir = fields.Float(
        string="Saldo Buku",
        readonly=True,
        compute="_compute_qty",
        store=False,
    )
    selisih = fields.Float(
        string="Selisih",
        readonly=True,
        compute="_compute_qty",
        store=False,
    )
    keterangan = fields.Selection(
        string="Ket",
        compute="_compute_qty",
        store=False,
        selection=[
            ("sesuai", "Sesuai"),
            ("kurang", "Selisih Kurang"),
            ("lebih", "Selisih Lebih"),
        ],
    )
    warehouse_id = fields.Many2one(
        string="Warehouse",
        comodel_name="stock.warehouse",
    )
    # --- fragments of the CREATE VIEW statement (see init()) ---
    def _select(self):
        select_str = """
        SELECT row_number() OVER() as id,
            a.default_code AS kode_barang,
            a.id AS product_id,
            b.uom_id AS uom_id,
            stock_warehouse.id AS warehouse_id
        """
        return select_str
    def _from(self):
        from_str = """
        product_product AS a
        """
        return from_str
    def _where(self):
        # Only products flagged for bonded-zone reporting.
        where_str = """
        WHERE b.djbc_kb_ok = TRUE
        """
        return where_str
    def _join(self):
        # CROSS JOIN yields one row per product per warehouse.
        join_str = """
        CROSS JOIN stock_warehouse
        JOIN product_template AS b
            ON a.product_tmpl_id = b.id
        """
        return join_str
    def init(self, cr):
        # (Re)create the backing SQL view each time the module is loaded.
        tools.drop_view_if_exists(cr, self._table)
        # pylint: disable=locally-disabled, sql-injection
        cr.execute("""CREATE or REPLACE VIEW %s as (
            %s
            FROM %s
            %s
            %s
        )""" % (
            self._table,
            self._select(),
            self._from(),
            self._join(),
            self._where()
        ))
from . import manager
def warm_up_redis_func(app=None, db=None, user_model=None, redis_store=None):
    """Rebuild the REDIS_NOT_AVAILABLE sorted set from the SQL taxi table.

    Members are '<taxi_id>:<operator email>' strings: taxis whose vehicle
    status is not 'free' are (re)added with score 0, and stale members whose
    taxi is now free are removed.
    """
    not_available = set()
    available = set()
    # Raw DB-API cursor: join each taxi with its vehicle description(s).
    cur = db.session.connection().connection.cursor()
    cur.execute("""
    SELECT taxi.id AS taxi_id, vd.status, vd.added_by FROM taxi
    LEFT OUTER JOIN vehicle ON vehicle.id = taxi.vehicle_id
    LEFT OUTER JOIN vehicle_description AS vd ON vehicle.id = vd.vehicle_id
    """)
    # Map user id -> email to build the '<taxi>:<email>' member keys.
    users = {u.id: u.email for u in user_model.query.all()}
    for taxi_id, status, added_by in cur.fetchall():
        user = users.get(added_by)
        taxi_id_operator = "{}:{}".format(taxi_id, user)
        if status == 'free':
            available.add(taxi_id_operator)
        else:
            not_available.add(taxi_id_operator)
    to_remove = list()
    # If the key exists with the wrong Redis type, drop it and start fresh;
    # otherwise page through it with ZSCAN and reconcile.
    if redis_store.type(app.config['REDIS_NOT_AVAILABLE']) != 'zset':
        redis_store.delete(app.config['REDIS_NOT_AVAILABLE'])
    else:
        cursor, keys = redis_store.zscan(app.config['REDIS_NOT_AVAILABLE'], 0)
        keys = set([k[0] for k in keys])
        # NOTE(review): when ZSCAN returns cursor 0 on the first call, the
        # while body never runs and that first page is not reconciled --
        # confirm this is intended for small sets.
        while cursor != 0:
            # Existing members that are now free must be removed; members
            # already present need not be re-added.
            to_remove.extend(keys.intersection(available))
            not_available.difference_update(keys)
            cursor, keys = redis_store.zscan(app.config['REDIS_NOT_AVAILABLE'],
                                             cursor)
            keys = set([k[0] for k in keys])
    if len(to_remove) > 0:
        redis_store.zrem(app.config['REDIS_NOT_AVAILABLE'], to_remove)
    if len(not_available) > 0:
        redis_store.zadd(app.config['REDIS_NOT_AVAILABLE'], **{k:0 for k in not_available})
@manager.command
def warm_up_redis():
    """CLI command wrapper around warm_up_redis_func().

    Imports are kept local -- presumably to avoid import cycles at module
    load time; confirm before moving them to the top of the file.
    """
    from flask import current_app
    import APITaxi_models as models
    from APITaxi.extensions import redis_store
    warm_up_redis_func(current_app, models.db, models.User, redis_store)
| l-vincent-l/APITaxi | APITaxi/commands/warm_up_redis.py | Python | agpl-3.0 | 1,795 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015 Serpent Consulting Services Pvt. Ltd. (<http://www.serpentcs.com>)
# Copyright (C) 2016 FairCoop (<http://fair.coop>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
# Odoo module manifest, parsed by the module loader.
{
    'name': 'Product Brand and Country filtering in Website',
    'category': 'Website',
    'author': 'FairCoop',
    'website':'http://fair.coop',
    'summary': '',
    'version': '1.0',
    'description': """
Allows to use product brands and countries as filtering for products in website.\n
This Module depends on product_brand module -https://github.com/OCA/product-attribute/tree/8.0/product_brand
""",
    'depends': ['product_brand_custom','website_sale','web','product_custom'],
    'data': [
        "data/demands.xml",
        "security/ir.model.access.csv",
        "views/product_brand.xml",
        "views/brand_page.xml",
    ],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
| Punto0/addons-fm | website_product_brand/__openerp__.py | Python | agpl-3.0 | 1,807 |
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2016 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from subprocess import *
def run(procs, threadsPerProc, workload):
    """Launch the local ./throughput benchmark and summarise its output.

    Scans stdout for 'RESULT: a,b,throughput' lines, collects the third
    comma-separated field of each, then prints per-thread and per-process
    averages plus the total. (Python 2 print statements.)
    """
    results = []
    totalThroughput = 0.0
    output = Popen(['./throughput', str(procs), str(threadsPerProc), workload], stdout=PIPE).communicate()[0]
    lines = output.split('\n')
    for line in lines:
        if line.startswith('RESULT: '):
            print line
            # Keep only the CSV payload after 'RESULT: '.
            line = line.split(' ')[1]
            parts = line.split(',')
            results += [float(parts[2])]
    for r in results:
        totalThroughput += r
    print "--"
    print "PER THREAD AVG: " + str(totalThroughput / (procs * threadsPerProc))
    print "PER PROC AVG: " + str(totalThroughput / procs)
    print "TOTAL THROUGHPUT: " + str(totalThroughput)
    print "--"
# Read workload ('r') at one process, sweeping 1-4 threads.
run(1, 1, 'r')
run(1, 2, 'r')
run(1, 3, 'r')
run(1, 4, 'r')
| paulmartel/voltdb | tests/bench/throughput/run.py | Python | agpl-3.0 | 1,927 |
# Generated by Django 2.2.14 on 2020-09-03 02:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds 'default_contract_discount' to
    EnterpriseCustomer and its history table. Do not edit by hand."""
    dependencies = [
        ('enterprise', '0109_remove_use_enterprise_catalog_sample'),
    ]
    operations = [
        migrations.AddField(
            model_name='enterprisecustomer',
            name='default_contract_discount',
            field=models.DecimalField(blank=True, decimal_places=5, help_text='Specifies the discount percent used for enrollments from the enrollment API where capturing the discount per order is not possible. This is passed to ecommerce when creating orders for financial data reporting.', max_digits=8, null=True),
        ),
        migrations.AddField(
            model_name='historicalenterprisecustomer',
            name='default_contract_discount',
            field=models.DecimalField(blank=True, decimal_places=5, help_text='Specifies the discount percent used for enrollments from the enrollment API where capturing the discount per order is not possible. This is passed to ecommerce when creating orders for financial data reporting.', max_digits=8, null=True),
        ),
    ]
| edx/edx-enterprise | enterprise/migrations/0110_add_default_contract_discount.py | Python | agpl-3.0 | 1,163 |
from . import geodis
from . import geodis_common_ws
from . import geodis_api_ws
from . import geodis_api_find_localite_ws
from . import geodis_api_edi
from . import geodis_api_rest_ws
from . import geodis_encoder_ws
from . import geodis_encoder_edi
from . import geodis_encoder_rest_ws
from . import geodis_decoder_ws
from . import geodis_decoder_rest_ws
from . import geodis_transport_ws
from . import geodis_transport_edi
from . import geodis_transport_rest_ws
| akretion/roulier | roulier/carriers/geodis/__init__.py | Python | agpl-3.0 | 463 |
# -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
import logging
from typing import Type
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.apps import apps
from django.db import models
from django.db.models.signals import pre_save, pre_delete
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
import akvo.cache as akvo_cache
from ..fields import ValidXMLCharField
logger = logging.getLogger(__name__)
class Partnership(models.Model):
# the old way
FIELD_PARTNER = 'field'
FUNDING_PARTNER = 'funding'
SPONSOR_PARTNER = 'sponsor'
SUPPORT_PARTNER = 'support'
EXTENDING_PARTNER = 'extending'
PARTNER_TYPE_LIST = [
FIELD_PARTNER, FUNDING_PARTNER, SPONSOR_PARTNER, SUPPORT_PARTNER, EXTENDING_PARTNER
]
PARTNER_LABELS = [
_('Implementing partner'),
_('Funding partner'),
_('Sponsor partner'),
_('Accountable partner'),
_('Extending partner'),
]
PARTNER_TYPES = list(zip(PARTNER_TYPE_LIST, PARTNER_LABELS))
# the new way
IATI_FUNDING_PARTNER = 1
IATI_ACCOUNTABLE_PARTNER = 2
IATI_EXTENDING_PARTNER = 3
IATI_IMPLEMENTING_PARTNER = 4
AKVO_SPONSOR_PARTNER = 100 # not part of the IATI OrganisationRole codelist!
IATI_REPORTING_ORGANISATION = 101
# make sure the AKVO_SPONSOR_PARTNER is last in the list
IATI_ROLE_LIST = [
IATI_FUNDING_PARTNER, IATI_ACCOUNTABLE_PARTNER, IATI_EXTENDING_PARTNER,
IATI_IMPLEMENTING_PARTNER, AKVO_SPONSOR_PARTNER, IATI_REPORTING_ORGANISATION
]
IATI_ROLE_LABELS = [
_('Funding partner'),
_('Accountable partner'),
_('Extending partner'),
_('Implementing partner'),
_('Sponsor partner'),
_('Reporting organisation'),
]
IATI_ROLES = list(zip(IATI_ROLE_LIST, IATI_ROLE_LABELS))
# used when migrating
PARTNER_TYPES_TO_ROLES_MAP = {
FUNDING_PARTNER: IATI_FUNDING_PARTNER,
SUPPORT_PARTNER: IATI_ACCOUNTABLE_PARTNER,
FIELD_PARTNER: IATI_IMPLEMENTING_PARTNER,
SPONSOR_PARTNER: AKVO_SPONSOR_PARTNER,
}
# backwards compatibility
ROLES_TO_PARTNER_TYPES_MAP = {
IATI_FUNDING_PARTNER: FUNDING_PARTNER,
IATI_ACCOUNTABLE_PARTNER: SUPPORT_PARTNER,
IATI_EXTENDING_PARTNER: EXTENDING_PARTNER,
IATI_IMPLEMENTING_PARTNER: FIELD_PARTNER,
AKVO_SPONSOR_PARTNER: SPONSOR_PARTNER,
# TODO: not backwards compatible
IATI_REPORTING_ORGANISATION: ''
}
ALLIANCE_PARTNER = 'alliance'
KNOWLEDGE_PARTNER = 'knowledge'
NETWORK_PARTNER = 'network'
PARTNER_TYPE_EXTRAS_LIST = (ALLIANCE_PARTNER, KNOWLEDGE_PARTNER, NETWORK_PARTNER)
PARTNER_TYPE_EXTRA_LABELS = (
_('Alliance'),
_('Knowledge'),
_('Network')
)
PARTNER_TYPE_EXTRAS = list(zip(PARTNER_TYPE_EXTRAS_LIST, PARTNER_TYPE_EXTRA_LABELS))
organisation = models.ForeignKey(
'Organisation', on_delete=models.CASCADE, verbose_name=_('organisation'), related_name='partnerships', null=True,
blank=True,
help_text=_('Select an organisation that is taking an active role in the project.')
)
project = models.ForeignKey('Project', on_delete=models.CASCADE, verbose_name=_('project'), related_name='partnerships')
iati_organisation_role = models.PositiveSmallIntegerField(
_('organisation role'), choices=IATI_ROLES, db_index=True, null=True, blank=True,
help_text=_('Select the role of the organisation within the project:<br/>'
'- Funding organisation: a government or organisation that provides funds to '
'the project<br/>'
'- Implementing organisation: an organisation involved in carrying out the '
'activity or intervention<br/>'
'- Accountable organisation: an organisation responsible for oversight of '
'the project and its outcomes<br/>'
'- Extending organisation: an organisation that manages the budget and '
'direction of a project on behalf of the funding organisation<br/>'
'- Reporting organisation: an organisation that will report this project in '
'an IATI file')
)
# is_secondary_reporter is only used when the iati_organisation_role is set to
# IATI_REPORTING_ORGANISATION, thus the use of NullBooleanField
is_secondary_reporter = models.BooleanField(
_('secondary reporter'),
null=True,
help_text=_(
'This indicates whether the reporting organisation is a secondary publisher: '
'publishing data for which it is not directly responsible.'
)
)
funding_amount = models.DecimalField(
_('funding amount'), max_digits=14, decimal_places=2, blank=True, null=True, db_index=True,
help_text=_('It’s only possible to indicate a funding amount for funding partners. Use a '
'period to denote decimals.')
)
partner_type_extra = ValidXMLCharField(
_('partner type extra'), max_length=30, blank=True, null=True, choices=PARTNER_TYPE_EXTRAS,
help_text=_('RSR specific partner type.')
)
iati_activity_id = ValidXMLCharField(
_('IATI activity ID'), max_length=100, blank=True, null=True, db_index=True,
help_text=_('A valid activity identifier published by the participating organisation '
'which points to the activity that it has published to IATI that describes '
'its role in this activity.')
)
internal_id = ValidXMLCharField(
_('Internal ID'), max_length=75, blank=True, null=True, db_index=True,
help_text=_('This field can be used to indicate an internal identifier that is used by '
'the organisation for this project. (75 characters)')
)
iati_url = models.URLField(
blank=True,
help_text=_(
'Please enter the URL for where the IATI Activity Id Funding details are published. '
'For projects directly or indirectly funded by the Dutch Government, this should '
'be the OpenAid.nl page. For other projects, an alternative URL can be used.'
)
)
related_activity_id = ValidXMLCharField(
_('related IATI activity ID'), max_length=100, blank=True
)
def iati_organisation_role_label(self):
return dict(self.IATI_ROLES).get(self.iati_organisation_role, '')
def iati_organisation_role_label_unicode(self):
return "{}".format(self.iati_organisation_role_label())
def iati_role_to_partner_type(self):
return dict(self.ROLES_TO_PARTNER_TYPES_MAP).get(self.iati_organisation_role, '')
def iati_role_to_partner_type_unicode(self):
return "{}".format(self.iati_role_to_partner_type())
def organisation_show_link(self):
if self.organisation:
return '<a href="{0}">{1}</a>'.format(self.organisation.get_absolute_url(),
self.organisation.long_name
or self.organisation.name)
return ''
def funding_amount_with_currency(self):
"""Returns the funding amount, prepended by the project's currency."""
if self.funding_amount and self.project and self.project.currency:
return '{0} {1}'.format(self.project.currency, self.funding_amount)
return self.funding_amount
    class Meta:
        # Standard RSR model metadata: translatable display names and a
        # stable default ordering by the numeric IATI role code.
        app_label = 'rsr'
        verbose_name = _('project partner')
        verbose_name_plural = _('project partners')
        ordering = ['iati_organisation_role']
def __str__(self):
if self.organisation:
if self.organisation.name:
organisation_unicode = self.organisation.name
elif self.organisation.long_name:
organisation_unicode = self.organisation.long_name
else:
organisation_unicode = '%s' % _('Organisation name not specified')
else:
organisation_unicode = '%s' % _('Organisation not specified')
if self.iati_organisation_role:
organisation_unicode += ' ({})'.format(
str(dict(self.IATI_ROLES)[self.iati_organisation_role])
)
return organisation_unicode
def clean(self):
# Don't allow multiple reporting organisations
Project = apps.get_model('rsr', 'project')
try:
project = Project.objects.get(id=self.project_id)
except Project.DoesNotExist:
return
if self.iati_organisation_role == self.IATI_REPORTING_ORGANISATION:
reporting_orgs = project.partnerships.filter(
iati_organisation_role=self.IATI_REPORTING_ORGANISATION
)
if reporting_orgs.count() > 1:
raise ValidationError(
{'iati_organisation_role': '%s' % _('Project can only have one reporting '
'organisation')}
)
def save(self, *args, **kwargs):
super(Partnership, self).save(*args, **kwargs)
self.set_primary_organisation()
self.project.update_use_project_roles()
def delete(self, *args, **kwargs):
super(Partnership, self).delete(*args, **kwargs)
self.set_primary_organisation()
def set_primary_organisation(self):
# Check which organisation should be set to the primary organisation of the project
# This is done to get better performance on the project list page
Project = apps.get_model('rsr', 'project')
project = Project.objects.get(id=self.project_id)
project.primary_organisation = project.find_primary_organisation()
project.save(update_fields=['primary_organisation'])
@receiver([pre_delete, pre_save], sender=Partnership)
def invalidate_caches(sender: Type[Partnership], instance: Partnership = None, **kwargs):
    """Ensure related cache keys are removed to prevent access to old data.

    Invalidates the per-user project-filter cache entries
    (akvo.rest.viewsets.PublicProjectViewSet.projects_filter_for_non_privileged_users)
    for every user employed by the partnership's organisation.
    """
    if instance is None:
        return
    from akvo.rest.viewsets import make_projects_filter_cache_prefix
    # Unsaved partnerships cannot have affected any cached query yet.
    if instance.id is None:
        return
    organisation = instance.organisation
    # We might be deleting or replacing an org from the partnership
    if organisation is None:
        # Fall back to the organisation currently stored in the DB.
        partnership = Partnership.objects.filter(id=instance.id).first()
        organisation = partnership.organisation
        # There really is no org, let's bail
        if organisation is None:
            return
    try:
        # Delete the keys of all users employed by the org.
        # BUG FIX: use the resolved `organisation` here. The original read
        # `instance.organisation.users`, which raised AttributeError whenever
        # the org had just been removed/replaced (instance.organisation is
        # None); the broad except below then swallowed it, silently skipping
        # the invalidation this function exists to perform.
        users = organisation.users.all()
        user_keys = [make_projects_filter_cache_prefix(user) for user in users]
        keys = [
            key for key in akvo_cache.list_cache_keys()
            if any(key.startswith(user_key) for user_key in user_keys)
        ]
        if keys:
            logger.info("Deleting project_filter keys: %s", len(keys))
            cache.delete_many(keys)
    except Exception as exc:
        # Best-effort: cache invalidation failures must not block the save/delete.
        logger.warning("Cannot invalidate cache: %s", exc)
| akvo/akvo-rsr | akvo/rsr/models/partnership.py | Python | agpl-3.0 | 11,614 |
# Generated by Django 2.0.8 on 2018-10-05 19:45
from django.db import migrations
from django.core.exceptions import ObjectDoesNotExist
def cria_sistema_cultura(apps, schema_editor):
    """Data migration: build a SistemaCultura record for every legacy Municipio.

    Copies the gestor (prefeito), sede (prefeitura), ente federado, plano de
    trabalho components, diligencias and funcionarios from the old models into
    the new SistemaCultura structure. IBGE codes of municipios whose ente
    federado cannot be resolved unambiguously are collected in `erros`.
    """
    erros = []
    # Historical models, as they existed at this migration's point in time.
    SistemaCultura = apps.get_model('adesao', 'SistemaCultura')
    Municipio = apps.get_model('adesao', 'Municipio')
    Cidade = apps.get_model('adesao', 'Cidade')
    EnteFederado = apps.get_model('adesao', 'EnteFederado')
    Funcionario = apps.get_model('adesao', 'Funcionario')
    Gestor = apps.get_model('adesao', 'Gestor')
    Sede = apps.get_model('adesao', 'Sede')
    Diligencia = apps.get_model('gestao', 'Diligencia')
    DiligenciaSimples = apps.get_model('gestao', 'DiligenciaSimples')
    Componente = apps.get_model('planotrabalho', 'Componente')
    for municipio in Municipio.objects.all():
        sistema_cultura = SistemaCultura()
        # The prefeito becomes the Gestor (tipo_funcionario=3).
        sistema_cultura.gestor = Gestor.objects.create(
            cpf=municipio.cpf_prefeito,
            rg=municipio.rg_prefeito,
            orgao_expeditor_rg=municipio.orgao_expeditor_rg,
            estado_expeditor=municipio.estado_expeditor,
            nome=municipio.nome_prefeito,
            telefone_um=municipio.telefone_um,
            telefone_dois=municipio.telefone_dois,
            telefone_tres=municipio.telefone_tres,
            email_institucional=municipio.email_institucional_prefeito,
            tipo_funcionario=3,
            termo_posse=municipio.termo_posse_prefeito,
            rg_copia=municipio.rg_copia_prefeito,
            cpf_copia=municipio.cpf_copia_prefeito
        )
        # The prefeitura's address/contact data becomes the Sede.
        sistema_cultura.sede = Sede.objects.create(
            localizacao=municipio.localizacao,
            cnpj=municipio.cnpj_prefeitura,
            endereco=municipio.endereco,
            complemento=municipio.complemento,
            cep=municipio.cep,
            bairro=municipio.bairro,
            telefone_um=municipio.telefone_um,
            telefone_dois=municipio.telefone_dois,
            telefone_tres=municipio.telefone_tres,
            endereco_eletronico=municipio.endereco_eletronico
        )
        if municipio.cidade is None:
            # State-level (UF) record: resolve the ente by the state IBGE code,
            # falling back to a name search.
            try:
                sistema_cultura.ente_federado = EnteFederado.objects.get(cod_ibge=municipio.estado.codigo_ibge)
            except EnteFederado.DoesNotExist:
                ente = EnteFederado.objects.filter(nome__icontains=municipio.estado.nome_uf)
                if not ente:
                    # BUG FIX: was `pass`, which fell through to `ente[0]` and
                    # raised IndexError on an empty queryset, aborting the
                    # whole migration. Log, record and skip this municipio.
                    print(f"Erro ao procurar UF {municipio.estado.nome_uf} - {municipio.estado.codigo_ibge}\n")
                    erros.append(municipio.estado.codigo_ibge)
                    continue
                if len(ente) > 1:
                    # Ambiguous match: log it but keep the first hit, as the
                    # original fall-through behaviour did.
                    print(f"Erro ao procurar UF {municipio.estado.nome_uf} - {municipio.estado.codigo_ibge}\n")
                    erros.append(municipio.estado.codigo_ibge)
                sistema_cultura.ente_federado = ente[0]
        else:
            # Municipality record: look the city up by name+UF, then resolve
            # the ente by the city's IBGE code (with a prefix fallback).
            try:
                cidade = Cidade.objects.get(nome_municipio=municipio.cidade.nome_municipio, uf=municipio.estado)
                sistema_cultura.ente_federado = EnteFederado.objects.get(cod_ibge=cidade.codigo_ibge)
            except EnteFederado.DoesNotExist:
                ente = EnteFederado.objects.filter(cod_ibge__startswith=cidade.codigo_ibge)
                if not ente:
                    # BUG FIX: same `pass` -> IndexError crash as above.
                    print(f"Erro ao procurar Municipio {municipio.cidade.nome_municipio} - {municipio.cidade.codigo_ibge}\n")
                    erros.append(municipio.estado.codigo_ibge)
                    continue
                if len(ente) > 1:
                    print(f"Erro ao procurar Municipio {municipio.cidade.nome_municipio} - {municipio.cidade.codigo_ibge}\n")
                    erros.append(municipio.estado.codigo_ibge)
                sistema_cultura.ente_federado = ente[0]
        # Parallel tuples mapping old plano_trabalho component attributes to
        # new Componente records; componente_type holds the old content-type
        # ids used by the legacy generic Diligencia relation.
        componentes_antigos = ('criacao_sistema', 'orgao_gestor', 'conselho_cultural', 'plano_cultura')
        componente_type = ('36', '37', '38', '40')
        componentes_novos = ('legislacao', 'orgao_gestor', 'conselho', 'plano')
        sistema_cultura.numero_processo = municipio.numero_processo
        try:
            # Municipio.usuario may not exist; handled by ObjectDoesNotExist below.
            sistema_cultura.cadastrador = municipio.usuario
            sistema_cultura.estado_processo = municipio.usuario.estado_processo
            sistema_cultura.data_publicacao_acordo = municipio.usuario.data_publicacao_acordo
            sistema_cultura.link_publicacao_acordo = municipio.usuario.link_publicacao_acordo
            sistema_cultura.processo_sei = municipio.usuario.processo_sei
            if municipio.usuario.plano_trabalho:
                # Carry over the most recent diligencia attached to the plano
                # de trabalho itself (legacy content-type id 35).
                diligencia = Diligencia.objects.filter(
                    componente_id=municipio.usuario.plano_trabalho.id,
                    componente_type_id=35).order_by('-data_criacao').first()
                if diligencia:
                    sistema_cultura.diligencia = DiligenciaSimples.objects.create(
                        texto_diligencia=diligencia.texto_diligencia,
                        classificacao_arquivo=diligencia.classificacao_arquivo,
                        usuario=diligencia.usuario)
                    sistema_cultura.diligencia.save()
            for nome_componente_antigo, nome_componente_novo, tipo_componente in zip(componentes_antigos, componentes_novos, componente_type):
                if municipio.usuario.plano_trabalho:
                    componente_antigo = getattr(municipio.usuario.plano_trabalho, nome_componente_antigo)
                    if componente_antigo:
                        setattr(sistema_cultura, nome_componente_novo, Componente.objects.create())
                        componente_novo = getattr(sistema_cultura, nome_componente_novo)
                        # New `tipo` is the positional index in componentes_novos.
                        componente_novo.tipo = componentes_novos.index(nome_componente_novo)
                        componente_novo.arquivo = componente_antigo.arquivo
                        componente_novo.situacao = componente_antigo.situacao.id
                        componente_novo.data_envio = componente_antigo.data_envio
                        componente_novo.data_publicacao = componente_antigo.data_publicacao
                        # Most recent diligencia of the legacy component, if any.
                        diligencia = Diligencia.objects.filter(
                            componente_id=componente_antigo.id,
                            componente_type_id=tipo_componente).order_by('-data_criacao').first()
                        if diligencia:
                            componente_novo.diligencia = DiligenciaSimples.objects.create(
                                texto_diligencia=diligencia.texto_diligencia,
                                classificacao_arquivo=diligencia.classificacao_arquivo,
                                usuario=diligencia.usuario)
                        componente_novo.save()
            secretario = municipio.usuario.secretario
            if secretario:
                # tipo_funcionario=0: secretario.
                sistema_cultura.secretario = Funcionario.objects.create(cpf=secretario.cpf_secretario,
                    rg=secretario.rg_secretario, orgao_expeditor_rg=secretario.orgao_expeditor_rg,
                    estado_expeditor=secretario.estado_expeditor, nome=secretario.nome_secretario,
                    cargo=secretario.cargo_secretario, instituicao=secretario.instituicao_secretario,
                    telefone_um=secretario.telefone_um, telefone_dois=secretario.telefone_dois,
                    telefone_tres=secretario.telefone_tres,
                    email_institucional=secretario.email_institucional_secretario,
                    tipo_funcionario=0)
            responsavel = municipio.usuario.responsavel
            if responsavel:
                # tipo_funcionario=1: responsavel.
                sistema_cultura.responsavel = Funcionario.objects.create(cpf=responsavel.cpf_responsavel,
                    rg=responsavel.rg_responsavel, orgao_expeditor_rg=responsavel.orgao_expeditor_rg,
                    estado_expeditor=responsavel.estado_expeditor, nome=responsavel.nome_responsavel,
                    cargo=responsavel.cargo_responsavel, instituicao=responsavel.instituicao_responsavel,
                    telefone_um=responsavel.telefone_um, telefone_dois=responsavel.telefone_dois,
                    telefone_tres=responsavel.telefone_tres,
                    email_institucional=responsavel.email_institucional_responsavel,
                    tipo_funcionario=1)
        except ObjectDoesNotExist:
            # Municipio without an associated usuario: publish with state 6.
            # NOTE(review): 6 is presumably 'publicado'/final state — confirm
            # against the adesao estado_processo choices.
            sistema_cultura.estado_processo = 6
        sistema_cultura.save()
class Migration(migrations.Migration):
    # Data migration: copies the legacy Municipio/PlanoTrabalho structures
    # into the new SistemaCultura model via cria_sistema_cultura.
    # No reverse function is provided, so this migration is irreversible.
    dependencies = [
        ('planotrabalho', '0008_componente_data_publicacao'),
        ('gestao', '0006_remove_diligenciasimples_tipo_diligencia'),
        ('adesao', '0020_auto_20181008_1610'),
    ]
    operations = [
        migrations.RunPython(cria_sistema_cultura),
    ]
| culturagovbr/sistema-nacional-cultura | adesao/migrations/0019_auto_20181005_1645.py | Python | agpl-3.0 | 8,604 |
# -*- coding: utf-8 -*-
from ast import literal_eval
from odoo import models, fields, api
class SaleOrderLine(models.Model):
    _inherit = 'sale.order.line'

    # Mirrors the configurable flag from the line's product.
    config_ok = fields.Boolean(
        related='product_id.config_ok',
        string="Configurable",
        readonly=True
    )

    @api.multi
    def reconfigure_product(self):
        """Launch the product configurator wizard pre-filled with this line's
        variant so an existing configuration can be adjusted.

        Returns an action dict that opens the wizard in a modal dialog,
        starting at the template's first configuration step when steps are
        defined, otherwise at the generic 'configure' state.
        """
        steps = self.product_id.product_tmpl_id.config_step_line_ids
        first_step = str(steps[0].id) if steps else 'configure'

        modifiable_param = self.env['ir.config_parameter'].sudo().get_param(
            'product_configurator.product_modifiable', default='False')
        allow_modify = literal_eval(modifiable_param)

        wizard = self.env['product.configurator'].create({
            'product_modifiable': allow_modify,
            'product_id': self.product_id.id,
            'state': first_step,
            'order_line_id': self.id,
        })

        action_context = dict(self.env.context, wizard_id=wizard.id)
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'product.configurator',
            'name': "Configure Product",
            'view_mode': 'form',
            'context': action_context,
            'target': 'new',
            'res_id': wizard.id,
        }
| microcom/odoo-product-configurator | product_configurator_wizard/models/sale.py | Python | agpl-3.0 | 1,538 |
from . import test_account_invoice_validate_tax
| oihane/odoo-addons | account_invoice_validate_tax/tests/__init__.py | Python | agpl-3.0 | 48 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Post-installation configuration helpers
# Copyright (C) 2015 OpusVL (<http://opusvl.com/>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""Common code for scripting installation of a chart of accounts
into a company.
The function you probably want to use is setup_company_accounts()
"""
from datetime import date
import logging
from . import confutil
_logger = logging.getLogger(__name__)
def setup_company_accounts(cr, registry, uid, company, chart_template, code_digits=None, context=None):
    """Set up accounts, fiscal year and periods for the given company.

    company: A res.company object
    chart_template: An account.chart.template object
    code_digits: The number of digits (the default is usually 6)
    context: e.g. {'lang': 'en_GB', 'tz': False, 'uid': openerp.SUPERUSER_ID}

    The chart of accounts is only installed when the company does not have
    one yet. A financial year is set up covering 1st Jan to 31st Dec of the
    current year, and the company's accounting settings are updated to match.
    """
    if company.id in unconfigured_company_ids(cr, registry, uid, context=context):
        setup_chart_of_accounts(cr, registry, uid,
                                company_id=company.id,
                                chart_template_id=chart_template.id,
                                code_digits=code_digits,
                                context=context,
                                )
    today = date.today()
    year = today.strftime('%Y')
    account_start = today.strftime('%Y-01-01')
    account_end = today.strftime('%Y-12-31')
    create_fiscal_year(cr, registry, uid,
                       company_id=company.id,
                       name=year,
                       code='FY' + year,
                       start_date=account_start,
                       end_date=account_end,
                       context=context,
                       )
    confutil.set_account_settings(cr, registry, uid,
                                  company=company,
                                  changes={
                                      'date_start': account_start,
                                      'date_stop': account_end,
                                      'period': 'month',
                                  },
                                  context=context,
                                  )
def unconfigured_company_ids(cr, registry, uid, context=None):
    """Return list of ids of companies without a chart of accounts."""
    return registry['account.installer'].get_unconfigured_cmp(cr, uid, context=context)
def setup_chart_of_accounts(cr, registry, uid, company_id, chart_template_id, code_digits=None, context=None):
    """Install the given chart template for the company via the
    wizard.multi.charts.accounts wizard.

    Bank account defaults are converted into one2many create commands, the
    wizard's onchange values for the template are applied, and code_digits
    (when given) overrides the template default.
    """
    wizard_model = registry['wizard.multi.charts.accounts']
    defaults = wizard_model.default_get(cr, uid, ['bank_accounts_id', 'currency_id'], context=context)
    data = dict(defaults)
    # Turn raw bank-account dicts into (0, False, vals) create commands.
    bank_specs = data.pop('bank_accounts_id')
    data['bank_accounts_id'] = [(0, False, spec) for spec in bank_specs]
    data['chart_template_id'] = chart_template_id
    data['company_id'] = company_id
    # Apply the wizard's template-dependent defaults (taxes, digits, ...).
    onchange = wizard_model.onchange_chart_template_id(cr, uid, [], chart_template_id, context=context)
    data.update(onchange['value'])
    if code_digits:
        data['code_digits'] = code_digits
    wizard_id = wizard_model.create(cr, uid, data, context=context)
    wizard_model.execute(cr, uid, [wizard_id], context=context)
def create_fiscal_year(cr, registry, uid, company_id, name, code, start_date, end_date, context=None):
    """Create an account.fiscalyear for the company and generate its periods.

    start_date/end_date are 'YYYY-MM-DD' strings; the caller-supplied
    company_id, name and code override any model defaults.
    """
    fy_model = registry['account.fiscalyear']
    values = dict(fy_model.default_get(cr, uid, ['state', 'company_id'], context=context))
    values.update(
        company_id=company_id,
        name=name,
        code=code,
        date_start=start_date,
        date_stop=end_date,
    )
    fy_id = fy_model.create(cr, uid, values, context=context)
    fy_model.create_period(cr, uid, [fy_id], context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| OpusVL/odoo-confutil | confutil/account_setup.py | Python | agpl-3.0 | 4,594 |