| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class: Python) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
"""Family module for Meta Wiki."""
#
# (C) Pywikibot team, 2005-2020
#
# Distributed under the terms of the MIT license.
#
from pywikibot import family
# The Wikimedia Meta-Wiki family
class Family(family.WikimediaOrgFamily):
"""Family class for Meta Wiki."""
name = 'meta'
interwiki_forward = 'wikipedia'
cross_allowed = ['meta', ]
category_redirect_templates = {
'meta': (
'Category redirect',
),
}
# Subpages for documentation.
doc_subpages = {
'_default': (('/doc',), ['meta']),
}
| wikimedia/pywikibot-core | pywikibot/families/meta_family.py | Python | mit | 564 |
from office365.runtime.client_value import ClientValue
class ServicePlanInfo(ClientValue):
"""Contains information about a service plan associated with a subscribed SKU. The servicePlans property of
the subscribedSku entity is a collection of servicePlanInfo."""
def __init__(self, _id=None, name=None, provisioning_status=None, applies_to=None):
"""
:param str applies_to: The object the service plan can be assigned to. Possible values:
"User" - service plan can be assigned to individual users.
"Company" - service plan can be assigned to the entire tenant.
:param str provisioning_status: The provisioning status of the service plan. Possible values:
"Success" - Service is fully provisioned.
"Disabled" - Service has been disabled.
"PendingInput" - Service is not yet provisioned; awaiting service confirmation.
"PendingActivation" - Service is provisioned but requires explicit activation by administrator
(for example, Intune_O365 service plan)
"PendingProvisioning" - Microsoft has added a new service to the product SKU and it has not been
activated in the tenant, yet.
:param str name: The name of the service plan.
:param str _id: The unique identifier of the service plan.
"""
super(ServicePlanInfo, self).__init__()
self.servicePlanId = _id
self.servicePlanName = name
self.provisioningStatus = provisioning_status
self.appliesTo = applies_to
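# Usage sketch (hypothetical values, not part of the library): constructing the value
# as the Graph API would populate it, e.g.
#   plan = ServicePlanInfo(_id="00000000-0000-0000-0000-000000000000", name="SHAREPOINTWAC",
#                          provisioning_status="Success", applies_to="User")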
| vgrem/Office365-REST-Python-Client | office365/directory/licenses/service_plan_info.py | Python | mit | 1,595 |
from django.views.generic import TemplateView
#from apiclient.discovery import build
from googleapiclient.discovery import build
from .utils import SearchResults
from . import *
class SearchView(TemplateView):
template_name = "googlesearch/search_results.html"
def get_context_data(self, **kwargs):
context = super(SearchView, self).get_context_data(**kwargs)
service = build("customsearch", GOOGLE_SEARCH_API_VERSION,
developerKey=GOOGLE_SEARCH_API_KEY)
#add a "try" block to see if googleapiclient throws a 400 error
try:
results = service.cse().list(
q=self.request.GET.get('q', ''),
start=self.page_to_index(),
num=GOOGLE_SEARCH_RESULTS_PER_PAGE,
cx=GOOGLE_SEARCH_ENGINE_ID,
).execute()
results = SearchResults(results)
pages = self.calculate_pages()
# if googleapiclient raises an error, we catch it here
except Exception:
# rerun the search starting from a defined page 1 instead of the user-supplied page
results = service.cse().list(
q=self.request.GET.get('q', ''),
start=1,
num=GOOGLE_SEARCH_RESULTS_PER_PAGE,
cx=GOOGLE_SEARCH_ENGINE_ID,
).execute()
#set some default values used for the context below
page = 1
# previous, current, next pages
pages = [0, 1, 2]
results = SearchResults(results)
""" Set some defaults """
context.update({
'items': [],
'total_results': 0,
'current_page': 0,
'prev_page': 0,
'next_page': 0,
'search_terms': self.request.GET.get('q', ''),
'error': results
})
""" Now parse the results and send back some
useful data """
context.update({
'items': results.items,
'total_results': results.total_results,
'current_page': pages[1],
'prev_page': pages[0],
'next_page': pages[2],
'search_terms': results.search_terms,
})
return context
def calculate_pages(self):
""" Returns a tuple consisting of
the previous page, the current page,
and the next page """
current_page = int(self.request.GET.get('p', 1))
return (current_page - 1, current_page, current_page + 1)
def page_to_index(self, page=None):
""" Converts a page to the start index """
if page is None:
page = self.request.GET.get('p', 1)
return int(page) * int(GOOGLE_SEARCH_RESULTS_PER_PAGE) + 1 - int(GOOGLE_SEARCH_RESULTS_PER_PAGE)
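# Worked example (assuming GOOGLE_SEARCH_RESULTS_PER_PAGE = 10): the formula
# reduces to (page - 1) * 10 + 1, so page 1 -> start index 1, page 2 -> 11,
# page 3 -> 21, matching the 1-based 'start' parameter of the CSE API.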
| hzdg/django-google-search | googlesearch/views.py | Python | mit | 2,809 |
'''
RP_extract: Rhythm Patterns Audio Feature Extractor
@author: 2014-2015 Alexander Schindler, Thomas Lidy
Re-implementation by Alexander Schindler of RP_extract for Matlab
Matlab version originally by Thomas Lidy, based on Musik Analysis Toolbox by Elias Pampalk
( see http://ifs.tuwien.ac.at/mir/downloads.html )
Main function is rp_extract. See function definition and description for more information,
or example usage in main function.
Note: All required functions are provided by the two main scientific libraries numpy and scipy.
Note: In case you alter the code to use transform2mel, librosa needs to be installed: pip install librosa
'''
import numpy as np
from scipy import stats
from scipy.fftpack import fft
#from scipy.fftpack import rfft # Discrete Fourier transform of a real sequence.
from scipy import interpolate
# suppress scientific notation in numpy array output (printing only; this does not silence warnings)
np.set_printoptions(suppress=True)
# required for debugging
np.set_printoptions(precision=8,
threshold=10,
suppress=True,
linewidth=200,
edgeitems=10)
# INITIALIZATION: Constants & Mappings
# Bark Scale
bark = [100, 200, 300, 400, 510, 630, 770, 920, 1080, 1270, 1480, 1720, 2000, 2320, 2700, 3150, 3700, 4400, 5300, 6400, 7700, 9500, 12000, 15500]
n_bark_bands = len(bark)
# copy the bark vector (using [:]) and add a 0 in front (to make calculations below easier)
barks = bark[:]
barks.insert(0,0)
# Phon Scale
phon = [3, 20, 40, 60, 80, 100, 101]
# copy the phon vector (using [:]) and add a 0 in front (to make calculations below easier)
phons = phon[:]
phons.insert(0,0)
phons = np.asarray(phons)
# Loudness Curves
eq_loudness = np.array([[55, 40, 32, 24, 19, 14, 10, 6, 4, 3, 2, 2, 0,-2,-5,-4, 0, 5, 10, 14, 25, 35],
[66, 52, 43, 37, 32, 27, 23, 21, 20, 20, 20, 20,19,16,13,13,18, 22, 25, 30, 40, 50],
[76, 64, 57, 51, 47, 43, 41, 41, 40, 40, 40,39.5,38,35,33,33,35, 41, 46, 50, 60, 70],
[89, 79, 74, 70, 66, 63, 61, 60, 60, 60, 60, 59,56,53,52,53,56, 61, 65, 70, 80, 90],
[103, 96, 92, 88, 85, 83, 81, 80, 80, 80, 80, 79,76,72,70,70,75, 79, 83, 87, 95,105],
[118,110,107,105,103,102,101,100,100,100,100, 99,97,94,90,90,95,100,103,105,108,115]])
loudn_freq = np.array([31.62, 50, 70.7, 100, 141.4, 200, 316.2, 500, 707.1, 1000, 1414, 1682, 2000, 2515, 3162, 3976, 5000, 7071, 10000, 11890, 14140, 15500])
# We have the loudness values for the frequencies in loudn_freq
# now we calculate in loudn_bark a matrix of loudness sensation values for the Bark band margins
i = 0
j = 0
loudn_bark = np.zeros((eq_loudness.shape[0], len(bark)))
for bsi in bark:
while j < len(loudn_freq) and bsi > loudn_freq[j]:
j += 1
j -= 1
if np.where(loudn_freq == bsi)[0].size != 0: # loudness value for this frequency already exists
loudn_bark[:,i] = eq_loudness[:,np.where(loudn_freq == bsi)][:,0,0]
else:
w1 = 1 / np.abs(loudn_freq[j] - bsi)
w2 = 1 / np.abs(loudn_freq[j + 1] - bsi)
loudn_bark[:,i] = (eq_loudness[:,j]*w1 + eq_loudness[:,j+1]*w2) / (w1 + w2)
i += 1
# SPECTRAL MASKING Spreading Function
# CONST_spread contains matrix of spectral frequency masking factors
CONST_spread = np.zeros((n_bark_bands,n_bark_bands))
for i in range(n_bark_bands):
CONST_spread[i,:] = 10**((15.81+7.5*((i-np.arange(n_bark_bands))+0.474)-17.5*(1+((i-np.arange(n_bark_bands))+0.474)**2)**0.5)/10)
# UTILITY FUNCTIONS
def nextpow2(num):
'''NextPow2
find the exponent of the smallest power of 2 that is greater than
or equal to a given number (analogously to Matlab's nextpow2() function)
'''
n = 2
i = 1
while n < num:
n *= 2
i += 1
return i
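# Examples (the return value is the exponent, not the power itself):
#   nextpow2(1000) -> 10   (2**10 = 1024 is the next power of 2 >= 1000)
#   nextpow2(1024) -> 10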
# FFT FUNCTIONS
def periodogram(x,win,Fs=None,nfft=1024):
''' Periodogram
Periodogram power spectral density estimate
Note: this function was written with 1:1 Matlab compatibility in mind.
The number of points, nfft, in the discrete Fourier transform (DFT) is the maximum of 256 or the next power of two greater than the signal length.
:param x: time series data (e.g. audio signal), ideally length matches nfft
:param win: window function to be applied (e.g. Hanning window). in this case win expects already data points of the window to be provided.
:param Fs: sampling frequency (unused)
:param nfft: number of bins for FFT (ideally matches length of x)
:return: Periodogram power spectrum (np.array)
'''
#if Fs == None:
# Fs = 2 * np.pi # commented out because unused
U = np.dot(win.conj().transpose(), win) # compensates for the power of the window.
Xx = fft((x * win),nfft) # verified
P = Xx*np.conjugate(Xx)/U
# Compute the 1-sided or 2-sided PSD [Power/freq] or mean-square [Power].
# Also, compute the corresponding freq vector & freq units.
# Generate the one-sided spectrum [Power] if so wanted
if nfft % 2 != 0:
select = np.arange((nfft+1)/2) # ODD
P = P[select,:] # Take only [0,pi] or [0,pi)
P[1:-1] = P[1:-1] * 2 # Only DC is a unique point and doesn't get doubled
else:
#select = np.arange(nfft/2+1); # EVEN
#P = P[select,:] # Take only [0,pi] or [0,pi) # TODO: why commented out?
P[1:-2] = P[1:-2] * 2
P = P / (2 * np.pi)
return P
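# Usage sketch (hypothetical signal): PSD of a 1 kHz sine at 11025 Hz sampling rate
#   x = np.sin(2 * np.pi * 1000 * np.arange(256) / 11025.0)
#   P = periodogram(x, win=np.hanning(256), nfft=256)   # P.shape == (256,)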
def calc_spectrogram(wavsegment,fft_window_size,fft_overlap = 0.5,real_values=True):
''' Calc_Spectrogram
calculate spectrogram using periodogram function (which performs FFT) to convert wave signal data
from time to frequency domain (applying a Hanning window and (by default) 50 % window overlap)
:param wavsegment: audio wave file data for a segment to be analyzed (mono, i.e. a 1-dimensional vector, only)
:param fft_window_size: windows size to apply FFT to
:param fft_overlap: overlap to apply during FFT analysis in % fraction (e.g. default = 0.5, means 50% overlap)
:param real_values: if True, return real values by taking abs(spectrogram), if False return complex values
:return: spectrogram matrix as numpy array (fft_window_size, n_frames)
'''
# hop_size (increment step in samples, determined by fft_window_size and fft_overlap)
hop_size = int(fft_window_size*(1-fft_overlap))
# this would compute the segment length, but it's pre-defined above ...
# segment_size = fft_window_size + (frames-1) * hop_size
# ... therefore we convert the formula to give the number of frames needed to iterate over the segment:
n_frames = (wavsegment.shape[0] - fft_window_size) / hop_size + 1
# n_frames_old = wavsegment.shape[0] / fft_window_size * 2 - 1 # number of iterations with 50% overlap
# TODO: provide this as parameter for better caching?
han_window = np.hanning(fft_window_size) # verified
# initialize result matrix for spectrogram
spectrogram = np.zeros((fft_window_size, n_frames), dtype=np.complex128)
# start index for frame-wise iteration
ix = 0
for i in range(n_frames): # stepping through the wave segment, building spectrum for each window
spectrogram[:,i] = periodogram(wavsegment[ix:ix+fft_window_size], win=han_window,nfft=fft_window_size)
ix = ix + hop_size
# NOTE: tested scipy periodogram BUT it delivers totally different values AND takes 2x the time of our periodogram function (0.13 sec vs. 0.06 sec)
# from scipy.signal import periodogram # move on top
#f, spec = periodogram(x=wavsegment[idx],fs=samplerate,window='hann',nfft=fft_window_size,scaling='spectrum',return_onesided=True)
if real_values: spectrogram = np.abs(spectrogram)
return (spectrogram)
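# Frame-count example: for 44.1 kHz audio (segment_size = 2**18 samples,
# fft_window_size = 1024, 50% overlap -> hop_size = 512):
#   n_frames = (262144 - 1024) / 512 + 1 = 511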
# FEATURE FUNCTIONS
def calc_statistical_features(matrix):
result = np.zeros((matrix.shape[0],7))
result[:,0] = np.mean(matrix, axis=1)
result[:,1] = np.var(matrix, axis=1, dtype=np.float64) # the values for variance differ between MATLAB and Numpy!
result[:,2] = stats.skew(matrix, axis=1)
result[:,3] = stats.kurtosis(matrix, axis=1, fisher=False) # Matlab calculates Pearson's Kurtosis
result[:,4] = np.median(matrix, axis=1)
result[:,5] = np.min(matrix, axis=1)
result[:,6] = np.max(matrix, axis=1)
result[np.where(np.isnan(result))] = 0
return result
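# Shape note: for a (n_bands, n_frames) input this returns (n_bands, 7), one row of
# [mean, var, skew, kurtosis, median, min, max] per band; with 24 Bark bands the
# flattened result has 24 * 7 = 168 dimensions (the SSD feature size reported below).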
# PSYCHO-ACOUSTIC TRANSFORMS as individual functions
# Transform 2 Mel Scale: NOT USED by rp_extract, but included for testing purposes or for import into other programs
def transform2mel(spectrogram,samplerate,fft_window_size,n_mel_bands = 80,freq_min = 0,freq_max = None):
'''Transform to Mel
convert a spectrogram to a Mel scale spectrogram by grouping original frequency bins
to Mel frequency bands (using Mel filter from Librosa)
Parameters
spectrogram: input spectrogram
samplerate: samplerate of audio signal
fft_window_size: number of time window / frequency bins in the FFT analysis
n_mel_bands: number of desired Mel bands, typically 20, 40, 80 (max. 128 which is default when 'None' is provided)
freq_min: minimum frequency (Mel filters will be applied >= this frequency, but still return n_mel_bands number of bands)
freq_max: cut-off frequency (Mel filters will be applied <= this frequency, but still return n_mel_bands number of bands)
Returns:
mel_spectrogram: Mel spectrogram: np.array of shape(n_mel_bands,frames) maintaining the number of frames in the original spectrogram
'''
import librosa.filters
# Syntax: librosa.filters.mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False)
mel_basis = librosa.filters.mel(samplerate,fft_window_size, n_mels=n_mel_bands,fmin=freq_min,fmax=freq_max)
freq_bin_max = mel_basis.shape[1] # will be fft_window_size / 2 + 1
# IMPLEMENTATION WITH FOR LOOP
# initialize Mel Spectrogram matrix
#n_mel_bands = mel_basis.shape[0] # get the number of bands from result in case 'None' was specified as parameter
#mel_spectrogram = np.empty((n_mel_bands, frames))
#for i in range(frames): # stepping through the wave segment, building spectrum for each window
# mel_spectrogram[:,i] = np.dot(mel_basis,spectrogram[0:freq_bin_max,i])
# IMPLEMENTATION WITH DOT PRODUCT (15% faster)
# multiply the mel filter of each band with the spectogram frame (dot product executes it on all frames)
mel_spectrogram = np.dot(mel_basis,spectrogram[0:freq_bin_max,:])
return (mel_spectrogram)
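# Usage sketch (requires librosa; the positional (sr, n_fft) call above follows the
# older librosa API, newer versions require keyword arguments sr= and n_fft=):
#   mel_spec = transform2mel(spectrogram, 44100, 1024, n_mel_bands=40)  # shape (40, n_frames)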
# Bark Transform: Convert Spectrogram to Bark Scale
# matrix: Spectrogram values as returned from periodogram function
# freq_axis: array of frequency values along the frequency axis
# max_bands: limit number of Bark bands (1...24) (counting from lowest band)
def transform2bark(matrix, freq_axis, max_bands=None):
# barks and n_bark_bands have been initialized globally above
if max_bands == None:
max_band = n_bark_bands
else:
max_band = min(n_bark_bands,max_bands)
matrix_out = np.zeros((max_band,matrix.shape[1]),dtype=matrix.dtype)
for b in range(max_band-1):
matrix_out[b] = np.sum(matrix[((freq_axis >= barks[b]) & (freq_axis < barks[b+1]))], axis=0)
return(matrix_out)
# Spectral Masking (assumes values are arranged in <=24 Bark bands)
def do_spectral_masking(matrix):
n_bands = matrix.shape[0]
# CONST_spread has been initialized globally above
spread = CONST_spread[0:n_bands,0:n_bands] # not sure if column limitation is right here; was originally written for n_bark_bands = 24 only
matrix = np.dot(spread, matrix)
return(matrix)
# Map to Decibel Scale
def transform2db(matrix):
'''Map to Decibel Scale'''
matrix[np.where(matrix < 1)] = 1
matrix = 10 * np.log10(matrix)
return(matrix)
# Transform to Phon (assumes matrix is in dB scale)
def transform2phon(matrix):
old_npsetting = np.seterr(invalid='ignore') # avoid 'RuntimeWarning: invalid value encountered in divide' at ifac division below
# number of bark bands, matrix length in time dim
n_bands = matrix.shape[0]
t = matrix.shape[1]
# DB-TO-PHON BARK-SCALE-LIMIT TABLE
# introducing 1 level more with level(1) being infinite
# to avoid (levels - 1) producing errors like division by 0
#%%table_dim = size(CONST_loudn_bark,2);
table_dim = n_bands # OK
cbv = np.concatenate((np.tile(np.inf,(table_dim,1)), loudn_bark[:,0:n_bands].transpose()),1) # OK
# init lowest level = 2
levels = np.tile(2,(n_bands,t)) # OK
for lev in range(1,6): # OK
db_thislev = np.tile(np.asarray([cbv[:,lev]]).transpose(),(1,t))
levels[np.where(matrix > db_thislev)] = lev + 2
# the matrix 'levels' stores the correct Phon level for each data point
cbv_ind_hi = np.ravel_multi_index(dims=(table_dim,7), multi_index=np.array([np.tile(np.array([range(0,table_dim)]).transpose(),(1,t)), levels-1]), order='F')
cbv_ind_lo = np.ravel_multi_index(dims=(table_dim,7), multi_index=np.array([np.tile(np.array([range(0,table_dim)]).transpose(),(1,t)), levels-2]), order='F')
# interpolation factor % OPT: pre-calc diff
ifac = (matrix[:,0:t] - cbv.transpose().ravel()[cbv_ind_lo]) / (cbv.transpose().ravel()[cbv_ind_hi] - cbv.transpose().ravel()[cbv_ind_lo])
ifac[np.where(levels==2)] = 1 # keeps the upper phon value;
ifac[np.where(levels==8)] = 1 # keeps the upper phon value;
# phons has been initialized globally above
matrix[:,0:t] = phons.transpose().ravel()[levels - 2] + (ifac * (phons.transpose().ravel()[levels - 1] - phons.transpose().ravel()[levels - 2])) # OPT: pre-calc diff
np.seterr(invalid=old_npsetting['invalid']) # restore RuntimeWarning setting for np division error
return(matrix)
# Transform to Sone scale (assumes matrix is in Phon scale)
def transform2sone(matrix):
idx = np.where(matrix >= 40)
not_idx = np.where(matrix < 40)
matrix[idx] = 2**((matrix[idx]-40)/10) #
matrix[not_idx] = (matrix[not_idx]/40)**2.642 # max => 438.53
return(matrix)
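# Worked values: 40 phon -> 2**0 = 1 sone, 50 phon -> 2 sone, 60 phon -> 4 sone
# (loudness doubles per 10 phon above 40); below 40 phon the power law (p/40)**2.642 applies.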
# MAIN Rhythm Pattern Extraction Function
def rp_extract( wavedata, # pcm (wav) signal data normalized to (-1,1)
samplerate, # signal sampling rate
# which features to extract
extract_rp = False, # extract Rhythm Patterns features
extract_ssd = False, # extract Statistical Spectrum Descriptor
extract_tssd = False, # extract temporal Statistical Spectrum Descriptor
extract_rh = False, # extract Rhythm Histogram features
extract_rh2 = False, # extract Rhythm Histogram features including Fluctuation Strength Weighting
extract_trh = False, # extract temporal Rhythm Histogram features
extract_mvd = False, # extract modulation variance descriptor
# processing options
skip_leadin_fadeout = 1, # >=0 how many sample windows to skip at the beginning and the end
step_width = 1, # >=1 each step_width'th sample window is analyzed
n_bark_bands = 24, # 2..24 number of desired Bark bands (from low frequencies to high) (e.g. 15 or 20 or 24 for 11, 22 and 44 kHz audio respectively) (1 delivers undefined output)
mod_ampl_limit = 60, # 2..257 number of modulation frequencies on x-axis
# enable/disable parts of feature extraction
transform_bark = True, # [S2] transform to Bark scale
spectral_masking = True, # [S3] compute Spectral Masking
transform_db = True, # [S4] transform to dB: advisable only to turn off when [S5] and [S6] are turned off too
transform_phon = True, # [S5] transform to Phon: if disabled, Sone_transform will be disabled too
transform_sone = True, # [S6] transform to Sone scale (only applies if transform_phon = True)
fluctuation_strength_weighting = True, # [R2] apply Fluctuation Strength weighting curve
#blurring = True # [R3] Gradient+Gauss filter # TODO: not yet implemented
return_segment_features = False, # this will return features per each analyzed segment instead of aggregated ones
verbose = False # print messages whats going on
):
'''Rhythm Pattern Feature Extraction
performs segment-wise audio feature extraction from provided audio wave (PCM) data
and extracts the following features:
Rhythm Pattern
Statistical Spectrum Descriptor
Statistical Histogram
temporal Statistical Spectrum Descriptor
Rhythm Histogram
temporal Rhythm Histogram features
Modulation Variance Descriptor
Examples:
>>> from audiofile_read import *
>>> samplerate, samplewidth, wavedata = audiofile_read("music/BoxCat_Games_-_10_-_Epic_Song.mp3") #doctest: +ELLIPSIS
Decoded .mp3 with: mpg123 -q -w /....wav music/BoxCat_Games_-_10_-_Epic_Song.mp3
>>> feat = rp_extract(wavedata, samplerate, extract_rp=True, extract_ssd=True, extract_rh=True)
Analyzing 7 segments
>>> for k in feat.keys():
... print k.upper() + ":", feat[k].shape[0], "dimensions"
SSD: 168 dimensions
RH: 60 dimensions
RP: 1440 dimensions
>>> print feat["rp"]
[ 0.01599218 0.01979605 0.01564305 0.01674175 0.00959912 0.00931604 0.00937831 0.00709122 0.00929631 0.00754473 ..., 0.02998088 0.03602739 0.03633861 0.03664331 0.02589753 0.02110256
0.01457744 0.01221825 0.0073788 0.00164668]
>>> print feat["rh"]
[ 7.11614842 12.58303013 6.96717295 5.24244146 6.49677561 4.21249659 12.43844045 4.19672357 5.30714983 6.1674115 ..., 1.55870044 2.69988854 2.75075831 3.67269877 13.0351257
11.7871738 3.76106713 2.45225195 2.20457928 2.06494926]
>>> print feat["ssd"]
[ 3.7783279 5.84444695 5.58439197 4.87849697 4.14983056 4.09638223 4.04971225 3.96152261 3.65551062 3.2857232 ..., 14.45953191 14.6088727 14.03351539 12.84783095 10.81735946
9.04121124 7.13804008 5.6633501 3.09678286 0.52076428]
'''
# PARAMETER INITIALIZATION
# non-exhibited parameters
include_DC = False
FLATTEN_ORDER = 'F' # order how matrices are flattened to vector: 'F' for Matlab/Fortran, 'C' for C order (IMPORTANT TO USE THE SAME WHEN reading+reshaping the features)
# segment_size should always be ~6 sec, fft_window_size should always be ~ 23ms
if (samplerate == 11025):
segment_size = 2**16
fft_window_size = 256
elif (samplerate == 22050):
segment_size = 2**17
fft_window_size = 512
elif (samplerate == 44100):
segment_size = 2**18
fft_window_size = 1024
else:
# throw error not supported
raise ValueError('A sample rate of ' + str(samplerate) + " is not supported (only 11, 22 and 44 kHz).")
# calculate frequency values on y-axis (for Bark scale calculation):
# freq_axis = float(samplerate)/fft_window_size * np.arange(0,(fft_window_size/2) + 1)
# linear space from 0 to samplerate/2 in (fft_window_size/2+1) steps
freq_axis = np.linspace(0, float(samplerate)/2, int(fft_window_size//2) + 1, endpoint=True)
# CONVERT STEREO TO MONO: Average the channels
if wavedata.ndim > 1: # if we have more than 1 dimension
if wavedata.shape[1] == 1: # check if 2nd dimension is just 1
wavedata = wavedata[:,0] # then we take first and only channel
else:
wavedata = np.mean(wavedata, 1) # otherwise we average the signals over the channels
# SEGMENT INITIALIZATION
# find positions of wave segments
skip_seg = skip_leadin_fadeout
seg_pos = np.array([1, segment_size]) # array with 2 entries: start and end position of selected segment
seg_pos_list = [] # list to store all the individual segment positions (only when return_segment_features == True)
# if file is too small, don't skip leadin/fadeout and set step_width to 1
"""
if ((skip_leadin_fadeout > 0) or (step_width > 1)):
duration = wavedata.shape[0]/samplerate
if (duration < 45):
step_width = 1
skip_seg = 0
# TODO: do this as a warning?
if verbose: print "Duration < 45 seconds: setting step_width to 1 and skip_leadin_fadeout to 0."
else:
# advance by number of skip_seg segments (i.e. skip lead_in)
seg_pos = seg_pos + segment_size * skip_seg
"""
# calculate number of segments
n_segments = 1 #int(np.floor( (np.floor( (wavedata.shape[0] - (skip_seg*2*segment_size)) / segment_size ) - 1 ) / step_width ) + 1)
if verbose: print "Analyzing", n_segments, "segments"
#if n_segments == 0:
# raise ValueError("Not enough data to analyze! Minimum sample length needs to be " +
# str(segment_size) + " (5.94 seconds) but it is " + str(wavedata.shape[0]) +
# " (" + str(round(wavedata.shape[0] * 1.0 / samplerate,2)) + " seconds)")
# initialize output
features = {}
ssd_list = []
sh_list = []
rh_list = []
rh2_list = []
rp_list = []
mvd_list = []
hearing_threshold_factor = 0.0875 * (2**15)
# SEGMENT ITERATION
for seg_id in range(n_segments):
# keep track of segment position
if return_segment_features:
seg_pos_list.append(seg_pos)
# EXTRACT WAVE SEGMENT that will be processed
# data is assumed to be mono waveform
wavsegment = wavedata #[seg_pos[0]-1:seg_pos[1]] # verified
# v210715
# Python : [-0.0269165 -0.02128601 -0.01864624 -0.01893616 -0.02166748 -0.02694702 -0.03457642 -0.04333496 -0.05166626 -0.05891418]
# Matlab : [-0,0269165 -0,02125549 -0,01861572 -0,01893616 -0,02165222 -0,02694702 -0,03456115 -0,04331970 -0,05166626 -0,05891418]
# adjust hearing threshold # TODO: move after stereo-mono conversion above?
wavsegment = wavsegment * hearing_threshold_factor
# v210715
# Python : [ -77.175 -61.03125 -53.4625 -54.29375 -62.125 -77.2625 -99.1375 -124.25 -148.1375 -168.91875]
# Matlab : [ -77,175 -60,94375 -53,3750 -54,29375 -62,081 -77,2625 -99,0938 -124,21 -148,1375 -168,91875]
matrix = calc_spectrogram(wavsegment,fft_window_size)
# v210715
#Python: 0.01372537 0.51454915 72.96077581 84.86663379 2.09940049 3.29631279 97373.2756834 23228.2065494 2678.44451741 30467.235416
# : 84.50635406 58.32826049 1263.82538188 234.11858349 85.48176796 97.26094525 214067.91208223 3570917.53366476 2303291.96676741 1681002.94519665
# : 171.47168402 1498.04129116 3746.45491915 153.01444364 37.20801758 177.74229702 238810.1975412 3064388.50572536 5501187.79635479 4172009.81345923
#Matlab: 0,01528259 0,49653179 73,32978523 85,38774541 2,00416767 3,36618763 97416,24267209 23239,84650814 2677,01521862 30460,9231041364
# : 84,73805309 57,84524803 1263,40594029 235,62185973 85,13826606 97,61122652 214078,02415144 3571346,74831746 2303286,74666381 1680967,41922679
# : 170,15377915 1500,98052242 3744,98456435 154,14108817 36,69362260 177,48982263 238812,02171250 3064642,99278220 5501230,26588318 4172058,72803277
#
# PSYCHO-ACOUSTIC TRANSFORMS
# Map to Bark Scale
if transform_bark:
matrix = transform2bark(matrix,freq_axis,n_bark_bands)
# v210715
# Python: 255.991763 1556.884100 5083.2410768 471.9996609 124.789186 278.299555 550251.385306 6658534.245939 7807158.207639 5883479.99407189
# : 77128.354925 10446.109041 22613.8525735 13266.2502432 2593.395039 1367.697057 675114.554043 23401741.536499 6300109.471193 8039710.71759598
# : 127165.795400 91270.354107 15240.3501050 16291.2234730 1413.851495 2166.723800 868138.817452 20682384.237884 8971171.605009 5919089.97818692
# Matlab: 254,907114 1559,322302 5081,720289 475,1506933 123,836056 278,46723 550306,288536 6659229,587607 7807194,027765 5883487,07036370
# : 77118,196343 10447,961479 22605,559124 13266,4432995 2591,064037 1368,48462 675116,996782 23400723,570438 6300124,132022 8039688,83884099
# : 127172,560642 91251,040768 15246,639683 16286,4542687 1414,053166 2166,42874 868063,055613 20681863,052695 8971108,607811 5919136,16752791
# Spectral Masking
if spectral_masking:
matrix = do_spectral_masking(matrix)
# v210715
# Python: 12978.051641 3416.109125 8769.913963 2648.888265 547.12360 503.50224 660888.17361 10480839.33617 8840234.405272 7193404.23970964
# : 100713.471006 27602.656332 27169.741240 16288.350176 2887.60281 1842.05959 1021358.42618 29229962.41626 10653981.441005 11182818.62910279
# : 426733.607945 262537.326945 43522.106075 41091.381283 4254.39289 4617.45877 1315036.85377 31353824.35688 12417010.121754 9673923.23590653
# Matlab: 12975,335615 3418,81282 8767,062187 2652,061105 545,79379 503,79683 660943,32199 10481368,76411 8840272,477464 7193407,85259461
# : 100704,175421 27602,34142 27161,901160 16288,924458 2884,94883 1842,86020 1021368,99046 29229118,99738 10653999,341989 11182806,7524195
# : 426751,992198 262523,89306 43524,970883 41085,415594 4253,42029 4617,35691 1314966,73269 31353021,99155 12416968,806879 9673951,88376021
# Map to Decibel Scale
if transform_db:
matrix = transform2db(matrix)
# v210715
# Python: 41.13209498 35.33531736 39.42995333 34.23063639 27.38085455 27.02001413 58.2012798 70.20396064 69.46463781 68.56934467
# : 50.03087564 44.40950878 44.34085502 42.11877097 34.60537456 32.65303677 60.09178176 74.65828257 70.27511936 70.48551281
# : 56.30156848 54.19191059 46.38709903 46.1375074 36.28837595 36.64403027 61.18937924 74.96290521 70.94017035 69.85602637
# Matlab: 41,13118599 35,33875324 39,42854087 34,23583526 27,37028596 27,02255437 58,20164218 70,20418000 69,46465651 68,56934684
# : 50,03047477 44,40945923 44,33960164 42,11892409 34,60138115 32,65492392 60,09182668 74,65815725 70,27512665 70,48550820
# : 56,30175557 54,19168835 46,38738489 46,13687684 36,28738298 36,64393446 61,18914765 74,96279407 70,94015590 69,85603922
# Transform Phon
if transform_phon:
matrix = transform2phon(matrix)
# v210715
# Python: 25.90299283 17.82310731 23.4713619 16.37852452 7.42111749 6.94924924 47.58029453 60.22662293 59.43646085 58.49404702
# : 47.03087564 41.40950878 41.34085502 38.89846372 29.5067182 27.06629597 57.09178176 71.65828257 67.27511936 67.48551281
# : 55.02273887 52.91308099 45.10826943 44.8586778 34.3678058 34.769195 59.91054964 73.68407561 69.66134075 68.57719676
# Matlab: 25,90169428 17,82760039 23,46934410 16,38532303 7,40729702 6,95257110 47,58067598 60,22686667 59,43648053 58,49404931
# : 47,03047477 41,40945923 41,33960164 38,89865511 29,50172644 27,06865491 57,09182668 71,65815725 67,27512665 67,48550820
# : 55,02292596 52,91285875 45,10855528 44,85804723 34,36668514 34,76908687 59,91031805 73,68396446 69,66132629 68,57720962
# Transform Sone
if transform_sone:
matrix = transform2sone(matrix)
# v210715
# Python: 0.31726931 0.11815598 0.24452297 0.09450863 0.01167179 0.009812 1.6911791 4.06332931 3.84676603 3.60351463
# : 1.62798518 1.10263162 1.09739697 0.92887876 0.44759842 0.35631529 3.26974511 8.97447943 6.62312431 6.72041945
# : 2.83288863 2.44749871 1.42486669 1.40042797 0.669685 0.69054778 3.97527582 10.327417 7.81439442 7.24868691
# Matlab: 0,31722728 0,11823469 0,24446743 0,09461230 0,01161444 0,00982439 1,69122381 4,06339796 3,84677128 3,60351520
# : 1,62793994 1,10262783 1,09730163 0,92889083 0,44739839 0,35639734 3,26975529 8,97440147 6,62312765 6,72041730
# : 2,83292537 2,44746100 1,42489491 1,40036676 0,66962731 0,69054210 3,97521200 10,32733744 7,81438659 7,24869337
# FEATURES: now we got a Sonogram and extract statistical features
# SSD: Statistical Spectrum Descriptors
if (extract_ssd or extract_tssd):
ssd = calc_statistical_features(matrix)
ssd_list.append(ssd.flatten(FLATTEN_ORDER))
# v210715
# Python: 2.97307486 5.10356599 0.65305978 2.35489911 2.439558 0.009812 8.1447095
# : 4.72262845 7.30899976 0.17862996 2.10446264 4.58595337 0.25538117 12.83339251
# : 4.77858109 5.52646859 0.23911764 2.9056742 4.96338019 0.589568 13.6683906
# : 4.43503421 3.69422906 0.41473155 3.06743402 4.33220988 0.88354694 10.89393754
# : 3.77216546 2.3993334 0.84001713 4.35548197 3.65140589 1.01199696 11.07806891
# : 3.60563073 2.09907968 1.49906811 7.07183968 3.35596471 1.00619842 11.2872743
# : 3.56816128 2.20237398 1.69790808 7.57870223 3.33806767 1.10826324 10.84965392
# : 3.43734647 2.38648202 1.59655791 6.86704341 3.23361995 1.10198021 11.89470587
# : 3.18466303 2.39479532 1.99223131 8.83987184 2.8819031 0.93982524 11.28737448
# : 2.90996406 1.85412568 1.97247446 8.36738395 2.68063918 0.81760102 9.64247378
# Matlab: 2,97309758 5,11366933 0,65306558 2,35489605 2,43956735 0,00982439 8,14473582
# : 4,72264163 7,32338449 0,17863061 2,10444843 4,58593777 0,25568703 12,83335168
# : 4,77859306 5,53731457 0,23911126 2,90567055 4,96338616 0,58959588 13,66839858
# : 4,43505068 3,70148292 0,41473410 3,06742263 4,33222037 0,88357883 10,89397920
# : 3,77217541 2,40405654 0,84000183 4,35540491 3,65136495 1,01191651 11,07802201
# : 3,60563459 2,10319516 1,49905911 7,07181623 3,35609824 1,00628652 11,28728291
# : 3,56820841 2,20675908 1,69792784 7,57880557 3,33819690 1,10830805 10,84975850
# : 3,43736757 2,39117736 1,59656951 6,86710630 3,23366165 1,10199096 11,89486723
# : 3,18467212 2,39951286 1,99223621 8,83991021 2,88200015 0,93978494 11,28733449
# : 2,90997546 1,85776617 1,97246361 8,36742039 2,68074853 0,81790606 9,64262886
# values verified
# RP: RHYTHM PATTERNS
feature_part_xaxis1 = range(0,mod_ampl_limit) # take first (opts.mod_ampl_limit) values of fft result including DC component
feature_part_xaxis2 = range(1,mod_ampl_limit+1) # leave DC component and take next (opts.mod_ampl_limit) values of fft result
if (include_DC):
feature_part_xaxis_rp = feature_part_xaxis1
else:
feature_part_xaxis_rp = feature_part_xaxis2
# 2nd FFT
fft_size = 2**(nextpow2(matrix.shape[1]))
if (mod_ampl_limit >= fft_size):
return {"rh":[]}
#raise(ValueError("mod_ampl_limit option must be smaller than FFT window size (" + str(fft_size) + ")."))
# NOTE: in fact only half of it (256) makes sense due to the symmetry of the FFT result
rhythm_patterns = np.zeros((matrix.shape[0], fft_size), dtype=np.complex128)
#rhythm_patterns = np.zeros((matrix.shape[0], fft_size), dtype=np.float64)
# real_matrix = abs(matrix)
for b in range(0,matrix.shape[0]):
rhythm_patterns[b,:] = fft(matrix[b,:], fft_size)
# tried this instead, but ...
#rhythm_patterns[b,:] = fft(real_matrix[b,:], fft_size) # ... no performance improvement
#rhythm_patterns[b,:] = rfft(real_matrix[b,:], fft_size) # ... different output values
rhythm_patterns = rhythm_patterns / 256 # why 256?
# convert from complex128 to float64 (real)
rp = np.abs(rhythm_patterns[:,feature_part_xaxis_rp]) # verified
# MVD: Modulation Variance Descriptors
if extract_mvd:
mvd = calc_statistical_features(rp.transpose()) # verified
mvd_list.append(mvd.flatten(FLATTEN_ORDER))
# RH: Rhythm Histograms - OPTION 1: before fluctuation_strength_weighting (as in Matlab)
if extract_rh:
rh = np.sum(np.abs(rhythm_patterns[:,feature_part_xaxis2]),axis=0) #without DC component # verified
rh_list.append(rh.flatten(FLATTEN_ORDER))
# final steps for RP:
# Fluctuation Strength weighting curve
if fluctuation_strength_weighting:
# modulation frequency x-axis (after 2nd FFT)
# mod_freq_res = resolution of modulation frequency axis (0.17 Hz)
mod_freq_res = 1 / (float(segment_size) / samplerate)
# modulation frequencies along x-axis (from index 0 to 256)
mod_freq_axis = mod_freq_res * np.array(feature_part_xaxis_rp)
# fluctuation strength curve
fluct_curve = 1 / (mod_freq_axis/4 + 4/mod_freq_axis)
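# the curve peaks at a modulation frequency of 4 Hz (value 0.5), where the human
# sensation of rhythmic fluctuation is strongest, and decays towards both ends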
for b in range(rp.shape[0]):
rp[b,:] = rp[b,:] * fluct_curve #[feature_part_xaxis_rp]
#values verified
# RH: Rhythm Histograms - OPTION 2 (after Fluctuation weighting)
if extract_rh2:
rh2 = np.sum(rp,axis=0) #TODO: adapt to do always without DC component
rh2_list.append(rh2.flatten(FLATTEN_ORDER))
# Gradient+Gauss filter
#if extract_rp:
# TODO Gradient+Gauss filter
#for i in range(1,rp.shape[1]):
# rp[:,i-1] = np.abs(rp[:,i] - rp[:,i-1]);
#
#rp = blur1 * rp * blur2;
rp_list.append(rp.flatten(FLATTEN_ORDER))
seg_pos = seg_pos + segment_size * step_width
if extract_rp:
if return_segment_features:
features["rp"] = np.array(rp_list)
else:
features["rp"] = np.median(np.asarray(rp_list), axis=0)
if extract_ssd:
if return_segment_features:
features["ssd"] = np.array(ssd_list)
else:
features["ssd"] = np.mean(np.asarray(ssd_list), axis=0)
if extract_rh:
if return_segment_features:
features["rh"] = np.array(rh_list)
else:
features["rh"] = np.median(np.asarray(rh_list), axis=0)
if extract_mvd:
if return_segment_features:
features["mvd"] = np.array(mvd_list)
else:
features["mvd"] = np.mean(np.asarray(mvd_list), axis=0)
# NOTE: no return_segment_features for temporal features as they measure variation of features over time
if extract_tssd:
features["tssd"] = calc_statistical_features(np.asarray(ssd_list).transpose()).flatten(FLATTEN_ORDER)
if extract_trh:
features["trh"] = calc_statistical_features(np.asarray(rh_list).transpose()).flatten(FLATTEN_ORDER)
if return_segment_features:
# also include the segment positions in the result
features["segpos"] = np.array(seg_pos_list)
features["timepos"] = features["segpos"] / (samplerate * 1.0)
return features
# function to self test rp_extract if working properly
def self_test():
import doctest
#doctest.testmod()
doctest.run_docstring_examples(rp_extract, globals(), verbose=True)
if __name__ == '__main__':
import sys
from audiofile_read import * # import our library for reading wav and mp3 files
# process file given on command line or default song (included)
if len(sys.argv) > 1:
if sys.argv[1] == '-test': # RUN DOCSTRING SELF TEST
print "Doing self test. If nothing is printed, it is ok."
import doctest
doctest.run_docstring_examples(rp_extract, globals()) #, verbose=True)
exit() # Note: no output means that everything went fine
else:
audiofile = sys.argv[1]
else:
audiofile = "music/BoxCat_Games_-_10_-_Epic_Song.mp3"
# Read audio file and extract features
try:
samplerate, samplewidth, wavedata = audiofile_read(audiofile)
np.set_printoptions(suppress=True)
bark_bands = 24 # choose the number of Bark bands (2..24)
mod_ampl_limit = 60 # number modulation frequencies on x-axis
feat = rp_extract(wavedata,
samplerate,
extract_rp=True,
extract_ssd=True,
extract_tssd=False,
extract_rh=True,
n_bark_bands=bark_bands,
spectral_masking=True,
transform_db=True,
transform_phon=True,
transform_sone=True,
fluctuation_strength_weighting=True,
skip_leadin_fadeout=1,
step_width=1,
mod_ampl_limit=mod_ampl_limit)
# feat is a dict containing arrays for different feature sets
print "Successfully extracted features:" , feat.keys()
except ValueError, e:
print e
exit()
print "Rhythm Histogram feature vector:"
print feat["rh"]
# EXAMPLE on how to plot the features
do_plots = False
if do_plots:
from rp_plot import *
plotrp(feat["rp"],rows=bark_bands,cols=mod_ampl_limit)
plotrh(feat["rh"])
plotssd(feat["ssd"],rows=bark_bands)
# EXAMPLE on how to store RP features in CSV file
# import pandas as pd
# filename = "features.rp.csv"
# rp = pd.DataFrame(feat["rp"].reshape([1,feat["rp"].shape[0]]))
# rp.to_csv(filename)
| bastustrump/genimpro | rp_extract.py | Python | mit | 39,383 |
"""
Script used to convert data into sparse matrix format that
can easily be imported into MATLAB.
Use like this
python convertToSparseMatrix.py ../../../../../data/train_triplets.txt 1000 ../../../../../data/eval/year1_test_triplets_visible.txt ../../../../../data/eval/year1_test_triplets_hidden.txt 100
"""
import sys
import time
# Analysing command line arguments
if len(sys.argv) < 6:
print 'Usage:'
print ' python %s <triplets training file> <number of triplets> <triplets visible history file> <triplets hidden history file> <number of triplets>' % sys.argv[0]
exit()
inputTrainingFile = sys.argv[1]
numTriplets = int(sys.argv[2])
inputTestFile = sys.argv[3]
inputHiddenTestFile = sys.argv[4]
numTripletsTest = int(sys.argv[5])
start = time.time()
userIdToIndex = {} # Key: userid, Value: Row in matrix
songIdToIndex = {} # Key: songid, Value: Column in matrix
userIndex = 0
songIndex = 0
rows = []
columns = []
entries = []
linesRead = 0
maxLines = numTriplets
for inputFile in [inputTrainingFile, inputTestFile, inputHiddenTestFile]:
linesRead = 0
f = open(inputFile)
for line in f:
userid, song, songCount = line.strip().split('\t')
# Fill in indices
if song not in songIdToIndex:
songIdToIndex[song] = songIndex
songIndex += 1
if userid not in userIdToIndex:
userIdToIndex[userid] = userIndex
userIndex += 1
# Fill in rows, columns and entries
rows.append(userIdToIndex[userid])
columns.append(songIdToIndex[song])
entries.append(int(songCount))
linesRead += 1
if linesRead >= maxLines:
break
if inputFile == inputTrainingFile:
numUsersInTraining = userIndex
maxLines = numTripletsTest
if inputFile == inputTestFile:
numSongs = songIndex
numUsers = userIndex
numNonZeros = len(entries)
# Write to a sparse matrix file that can be read with MATLAB
matrix_file = open('UserSongSparseMatrix' + str(numTriplets) + '_' + str(numTripletsTest) + '.txt', 'w')
for i in range(len(entries)):
matrix_file.write(str(rows[i]+1) + "\t" + str(columns[i]+1) + "\t" + str(entries[i]) + "\n")
#matrix_file.write(str(numUsers-1) + "\t" + str(numSongs-1) + "\t" + str(0.000000) + "\n")
matrix_file.close()
# reset everything to zero to read in the hidden matrix
rows = []
columns = []
entries = []
if inputFile == inputHiddenTestFile:
# Write to a sparse matrix file that can be read with MATLAB
matrix_file_test = open('UserSongSparseMatrixTest' + str(numTriplets) + '_' + str(numTripletsTest) + '.txt', 'w')
for i in range(len(entries)):
matrix_file_test.write(str(rows[i]+1) + "\t" + str(columns[i]+1) + "\t" + str(entries[i]) + "\n")
#matrix_file_test.write(str(userIndex-1) + "\t" + str(songIndex-1) + "\t" + str(0.000000) + "\n")
matrix_file_test.close()
f.close()
print "Done loading %d triplets!" % (numTriplets + numTripletsTest)
end = time.time()
print "Took %s seconds" % (end - start)
print "Number of users", numUsers
print "Number of songs", numSongs
print "You need to predict for the last %s users" % (numUsers - numUsersInTraining)
| EmilienDupont/cs229project | convertToSparseMatrix.py | Python | mit | 3,343 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
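# listen for a raw video stream on UDP port 5001 and pipe it into mplayer;
# the commented command below is the matching sender side (raspivid piped to socat)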
running = os.system("nc -u -l -p 5001 | mplayer -cache 1024 -")
#subprocess.check_call('/opt/vc/bin/raspivid -n -w 800 -h 600 -fps 24 -t 0 -o - | socat - udp-sendto:' + '129.16.194.248' + ':5001')
| twistedretard/LaserSimulatedSecurityTurret | src/streaming/server.py | Python | mit | 272 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.IdentifiedObject import IdentifiedObject
class LimitSet(IdentifiedObject):
"""Specifies a set of Limits that are associated with a Measurement. A Measurement may have several LimitSets corresponding to seasonal or other changing conditions. The condition is captured in the name and description attributes. The same LimitSet may be used for several Measurements. In particular percentage limits are used this way.
"""
def __init__(self, isPercentageLimits=False, *args, **kw_args):
"""Initialises a new 'LimitSet' instance.
@param isPercentageLimits: Tells if the limit values are in percentage of normalValue or the specified Unit for Measurements and Controls.
"""
#: Tells if the limit values are in percentage of normalValue or the specified Unit for Measurements and Controls.
self.isPercentageLimits = isPercentageLimits
super(LimitSet, self).__init__(*args, **kw_args)
_attrs = ["isPercentageLimits"]
_attr_types = {"isPercentageLimits": bool}
_defaults = {"isPercentageLimits": False}
_enums = {}
_refs = []
_many_refs = []
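# Usage sketch (assumes IdentifiedObject accepts name/description keyword args,
# as elsewhere in PyCIM):
#   summer_limits = LimitSet(isPercentageLimits=True, name="Summer")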
| rwl/PyCIM | CIM14/IEC61970/Meas/LimitSet.py | Python | mit | 2,248 |
#-*- coding: utf-8 -*-
""" EOSS catalog system
external catalog management package
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "twehrmann@eoss.cloud"
__status__ = "Production"
from abc import ABCMeta, abstractmethod
from utilities import with_metaclass
@with_metaclass(ABCMeta)
class ICatalog(object):
"""
Simple catalog interface class
"""
def __init__(self):
pass
@abstractmethod
def find(self):
pass
@abstractmethod
def register(self, ds):
pass
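# Sketch of a concrete implementation (hypothetical): the ABC rejects instantiation
# until both abstract methods are overridden, e.g.
#   class InMemoryCatalog(ICatalog):
#       def find(self): return []
#       def register(self, ds): pass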
| eoss-cloud/madxxx_catalog_api | catalog/manage/__init__.py | Python | mit | 705 |
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="sizesrc",
parent_name="scattermapbox.hoverlabel.font",
**kwargs
):
super(SizesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/scattermapbox/hoverlabel/font/_sizesrc.py | Python | mit | 498 |
class Solution:
def crackSafe(self, n: int, k: int) -> str:
result = ['0'] * n
visited = set([''.join(result)])
for i in range(k ** n):
prev = result[len(result) - n + 1:]
for j in range(k - 1, -1, -1):
curr = ''.join(prev) + str(j)
if curr not in visited:
visited.add(curr)
result.append(str(j))
break
return ''.join(result)
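# The greedy loop builds a De Bruijn sequence B(k, n): starting from '0' * n it always
# appends the largest digit that creates an unseen length-n combination, visiting all
# k**n combinations in a string of minimal length k**n + n - 1.
# Example: Solution().crackSafe(2, 2) -> '00110' (contains '00', '01', '11', '10')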
| jiadaizhao/LeetCode | 0701-0800/0753-Cracking the Safe/0753-Cracking the Safe.py | Python | mit | 478 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
class DCGAN(object):
"""
Tensorflow implementation of DCGAN, with four CNN layers.
We assume the input images are of size 32x32.
"""
def __init__(self):
# self.image_size = 64
self.image_size = 32
self.noise_size = 100
self.lrelu_alpha = 0.2
self.num_channels = 3
self.lr = 0.0002
self.beta_1 = 0.5
self._create_placeholders()
self.generator_output = self._create_generator()
self.real_predictions, self.real_logits = self._create_discriminator(
inputs=self.input_images)
self.fake_predictions, self.fake_logits = self._create_discriminator(
inputs=self.generator_output, reuse=True)
self._compute_loss()
self.summary_op = tf.summary.merge_all()
def _create_placeholders(self):
self.input_images = tf.placeholder(
shape=[None, self.image_size, self.image_size, self.num_channels],
dtype=tf.float32,
name="input_images")
self.input_noise = tf.placeholder(
shape=[None, self.noise_size],
dtype=tf.float32,
name="input_noise")
def _create_generator(self):
xav_init = tf.contrib.layers.xavier_initializer
bnorm = tf.layers.batch_normalization
with tf.variable_scope("generator"):
"""
fc_1 = tf.layers.dense(
inputs=self.input_noise, units=4 * 4 * 512, name="fc_1")
"""
fc_1 = tf.layers.dense(
inputs=self.input_noise,
units=4 * 4 * 256,
kernel_initializer=xav_init(),
name="fc_1")
reshaped_fc_1 = tf.reshape(
fc_1,
shape=[tf.shape(fc_1)[0], 4, 4, 256],
name="reshapsed_noise")
def _create_deconv_bnorm_block(inputs,
name,
filters,
activation=tf.nn.relu):
with tf.variable_scope(name):
deconv = tf.layers.conv2d_transpose(
inputs=inputs,
filters=filters,
kernel_size=[5, 5],
strides=2,
padding="same",
kernel_initializer=xav_init(),
name="deconv")
deconv = activation(deconv)
bnorm_op = bnorm(deconv, name="bnorm")
return bnorm_op
"""
bnorm_1 = _create_deconv_bnorm_block(
inputs=reshaped_fc_1, filters=256, name="block_1")
bnorm_2 = _create_deconv_bnorm_block(
inputs=bnorm_1, filters=128, name="block_2")
"""
bnorm_2 = _create_deconv_bnorm_block(
inputs=reshaped_fc_1, filters=128, name="block_2")
bnorm_3 = _create_deconv_bnorm_block(
inputs=bnorm_2, filters=64, name="block_3")
bnorm_4 = _create_deconv_bnorm_block(
inputs=bnorm_3,
filters=3,
activation=tf.nn.tanh,
name="block_4")
return bnorm_4
def _create_discriminator(self, inputs, reuse=False):
xav_init = tf.contrib.layers.xavier_initializer
bnorm = tf.layers.batch_normalization
with tf.variable_scope("discriminator", reuse=reuse):
def _create_conv_bnorm_block(inputs, filters, name):
with tf.variable_scope(name, reuse=reuse):
conv = tf.layers.conv2d(
inputs=inputs,
filters=filters,
kernel_size=[5, 5],
strides=2,
padding="same",
kernel_initializer=xav_init(),
name="conv")
conv = tf.maximum(conv, self.lrelu_alpha * conv)
bnorm_op = bnorm(conv, name="bnorm")
return bnorm_op
conv_1 = tf.layers.conv2d(
inputs=inputs,
filters=64,
kernel_size=[5, 5],
strides=2,
kernel_initializer=xav_init(),
padding="same",
name="conv_1")
conv_1 = tf.maximum(conv_1, self.lrelu_alpha * conv_1)
bnorm_1 = _create_conv_bnorm_block(
inputs=conv_1, filters=128, name="block_1")
bnorm_2 = _create_conv_bnorm_block(
inputs=bnorm_1, filters=256, name="block_2")
"""
bnorm_3 = _create_conv_bnorm_block(
inputs=bnorm_2, filters=512, name="block_3")
reshaped_bnorm_3 = tf.reshape(
bnorm_3,
shape=[tf.shape(bnorm_3)[0], 4 * 4 * 512],
name="reshaped_bnorm_3")
logits = tf.layers.dense(
inputs=reshaped_bnorm_3, units=1, name="fc_1")
"""
reshaped_bnorm_2 = tf.reshape(
bnorm_2,
shape=[tf.shape(bnorm_2)[0], 4 * 4 * 256],
name="reshaped_bnorm_2")
logits = tf.layers.dense(
inputs=reshaped_bnorm_2,
units=1,
kernel_initializer=xav_init(),
name="fc_1")
fc_1 = tf.sigmoid(logits)
return fc_1, logits
def _compute_loss(self):
self.d_loss_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.real_logits, labels=tf.ones_like(self.real_logits))
self.d_loss_real = tf.reduce_mean(self.d_loss_real)
self.d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.fake_logits, labels=tf.zeros_like(self.fake_logits))
self.d_loss_fake = tf.reduce_mean(self.d_loss_fake)
self.d_loss = self.d_loss_real + self.d_loss_fake
self.g_loss = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.fake_logits, labels=tf.ones_like(self.fake_logits))
self.g_loss = tf.reduce_mean(self.g_loss)
tf.summary.scalar("disc_loss_real", self.d_loss_real)
tf.summary.scalar("disc_loss_fake", self.d_loss_fake)
tf.summary.scalar("disc_loss", self.d_loss)
tf.summary.scalar("gen_loss", self.g_loss)
d_opt = tf.train.AdamOptimizer(
learning_rate=self.lr, beta1=self.beta_1)
g_opt = tf.train.AdamOptimizer(
learning_rate=self.lr, beta1=self.beta_1)
d_vars = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope="discriminator")
g_vars = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope="generator")
self.d_train = d_opt.minimize(self.d_loss, var_list=d_vars)
self.g_train = g_opt.minimize(self.g_loss, var_list=g_vars)
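# Training-step sketch (assumes TF 1.x graph mode, as used in this file, and numpy as np;
# 'images' is a hypothetical real batch in [-1, 1] of shape (batch, 32, 32, 3)):
#   model = DCGAN()
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       noise = np.random.uniform(-1.0, 1.0, size=(64, model.noise_size))
#       sess.run(model.d_train, {model.input_images: images, model.input_noise: noise})
#       sess.run(model.g_train, {model.input_noise: noise})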
| gokul-uf/TF-DCGAN | model.py | Python | mit | 7,113 |
# -*- coding: utf-8 -*-
from datetime import datetime, date
import six
def fix_number(target_type):
return lambda value: None if isinstance(value, (str, six.text_type)) and len(value) == 0 else target_type(value)
fixed_datetime = lambda time_str: datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S')
fixed_date = lambda time_str: date.fromtimestamp(time_str)
fixed_int = fix_number(int)
fixed_float = fix_number(float)
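# Behaviour sketch: the wrappers turn empty strings into None instead of raising,
# e.g. fixed_int('') -> None, fixed_int('42') -> 42, fixed_float('') -> None.
# Note that fixed_date takes a numeric timestamp despite the 'time_str' name:
# fixed_date(0) -> date(1970, 1, 1) (local timezone dependent).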
| anjianshi/flask-restful-extend | flask_restful_extend/reqparse_fixed_type.py | Python | mit | 425 |
import pymake.data, pymake.functions, pymake.util
import unittest
import re
def multitest(cls):
for name in cls.testdata.keys():
def m(self, name=name):
return self.runSingle(*self.testdata[name])
setattr(cls, 'test_%s' % name, m)
return cls
class SplitWordsTest(unittest.TestCase):
testdata = (
(' test test.c test.o ', ['test', 'test.c', 'test.o']),
('\ttest\t test.c \ntest.o', ['test', 'test.c', 'test.o']),
)
def runTest(self):
for s, e in self.testdata:
w = s.split()
self.assertEqual(w, e, 'splitwords(%r)' % (s,))
class GetPatSubstTest(unittest.TestCase):
testdata = (
('%.c', '%.o', ' test test.c test.o ', 'test test.o test.o'),
('%', '%.o', ' test.c test.o ', 'test.c.o test.o.o'),
('foo', 'bar', 'test foo bar', 'test bar bar'),
('foo', '%bar', 'test foo bar', 'test %bar bar'),
('%', 'perc_%', 'path', 'perc_path'),
('\\%', 'sub%', 'p %', 'p sub%'),
('%.c', '\\%%.o', 'foo.c bar.o baz.cpp', '%foo.o bar.o baz.cpp'),
)
def runTest(self):
for s, r, d, e in self.testdata:
words = d.split()
p = pymake.data.Pattern(s)
a = ' '.join((p.subst(r, word, False)
for word in words))
self.assertEqual(a, e, 'Pattern(%r).subst(%r, %r)' % (s, r, d))
class LRUTest(unittest.TestCase):
# getkey, expected, funccount, debugitems
expected = (
(0, '', 1, (0,)),
(0, '', 2, (0,)),
(1, ' ', 3, (1, 0)),
(1, ' ', 3, (1, 0)),
(0, '', 4, (0, 1)),
(2, ' ', 5, (2, 0, 1)),
(1, ' ', 5, (1, 2, 0)),
(3, ' ', 6, (3, 1, 2)),
)
def spaceFunc(self, l):
self.funccount += 1
return ''.ljust(l)
def runTest(self):
self.funccount = 0
c = pymake.util.LRUCache(3, self.spaceFunc, lambda k, v: k % 2)
self.assertEqual(tuple(c.debugitems()), ())
for i in range(0, len(self.expected)):
k, e, fc, di = self.expected[i]
v = c.get(k)
self.assertEqual(v, e)
self.assertEqual(self.funccount, fc,
"funccount, iteration %i, got %i expected %i" % (i, self.funccount, fc))
goti = tuple(c.debugitems())
self.assertEqual(goti, di,
"debugitems, iteration %i, got %r expected %r" % (i, goti, di))
class EqualityTest(unittest.TestCase):
def test_string_expansion(self):
s1 = pymake.data.StringExpansion('foo bar', None)
s2 = pymake.data.StringExpansion('foo bar', None)
self.assertEqual(s1, s2)
def test_expansion_simple(self):
s1 = pymake.data.Expansion(None)
s2 = pymake.data.Expansion(None)
self.assertEqual(s1, s2)
s1.appendstr('foo')
s2.appendstr('foo')
self.assertEqual(s1, s2)
def test_expansion_string_finish(self):
"""Adjacent strings should normalize to same value."""
s1 = pymake.data.Expansion(None)
s2 = pymake.data.Expansion(None)
s1.appendstr('foo')
s2.appendstr('foo')
s1.appendstr(' bar')
s1.appendstr(' baz')
s2.appendstr(' bar baz')
self.assertEqual(s1, s2)
def test_function(self):
s1 = pymake.data.Expansion(None)
s2 = pymake.data.Expansion(None)
n1 = pymake.data.StringExpansion('FOO', None)
n2 = pymake.data.StringExpansion('FOO', None)
v1 = pymake.functions.VariableRef(None, n1)
v2 = pymake.functions.VariableRef(None, n2)
s1.appendfunc(v1)
s2.appendfunc(v2)
self.assertEqual(s1, s2)
class StringExpansionTest(unittest.TestCase):
def test_base_expansion_interface(self):
s1 = pymake.data.StringExpansion('FOO', None)
self.assertTrue(s1.is_static_string)
funcs = list(s1.functions())
self.assertEqual(len(funcs), 0)
funcs = list(s1.functions(True))
self.assertEqual(len(funcs), 0)
refs = list(s1.variable_references())
self.assertEqual(len(refs), 0)
class ExpansionTest(unittest.TestCase):
def test_is_static_string(self):
e1 = pymake.data.Expansion()
e1.appendstr('foo')
self.assertTrue(e1.is_static_string)
e1.appendstr('bar')
self.assertTrue(e1.is_static_string)
vname = pymake.data.StringExpansion('FOO', None)
func = pymake.functions.VariableRef(None, vname)
e1.appendfunc(func)
self.assertFalse(e1.is_static_string)
def test_get_functions(self):
e1 = pymake.data.Expansion()
e1.appendstr('foo')
vname1 = pymake.data.StringExpansion('FOO', None)
vname2 = pymake.data.StringExpansion('BAR', None)
func1 = pymake.functions.VariableRef(None, vname1)
func2 = pymake.functions.VariableRef(None, vname2)
e1.appendfunc(func1)
e1.appendfunc(func2)
funcs = list(e1.functions())
self.assertEqual(len(funcs), 2)
func3 = pymake.functions.SortFunction(None)
func3.append(vname1)
e1.appendfunc(func3)
funcs = list(e1.functions())
self.assertEqual(len(funcs), 3)
refs = list(e1.variable_references())
self.assertEqual(len(refs), 2)
def test_get_functions_descend(self):
e1 = pymake.data.Expansion()
vname1 = pymake.data.StringExpansion('FOO', None)
func1 = pymake.functions.VariableRef(None, vname1)
e2 = pymake.data.Expansion()
e2.appendfunc(func1)
func2 = pymake.functions.SortFunction(None)
func2.append(e2)
e1.appendfunc(func2)
funcs = list(e1.functions())
self.assertEqual(len(funcs), 1)
funcs = list(e1.functions(True))
self.assertEqual(len(funcs), 2)
self.assertTrue(isinstance(funcs[0], pymake.functions.SortFunction))
def test_is_filesystem_dependent(self):
e = pymake.data.Expansion()
vname1 = pymake.data.StringExpansion('FOO', None)
func1 = pymake.functions.VariableRef(None, vname1)
e.appendfunc(func1)
self.assertFalse(e.is_filesystem_dependent)
func2 = pymake.functions.WildcardFunction(None)
func2.append(vname1)
e.appendfunc(func2)
self.assertTrue(e.is_filesystem_dependent)
def test_is_filesystem_dependent_descend(self):
sort = pymake.functions.SortFunction(None)
wildcard = pymake.functions.WildcardFunction(None)
e = pymake.data.StringExpansion('foo/*', None)
wildcard.append(e)
e = pymake.data.Expansion(None)
e.appendfunc(wildcard)
sort.append(e)
e = pymake.data.Expansion(None)
e.appendfunc(sort)
self.assertTrue(e.is_filesystem_dependent)
if __name__ == '__main__':
unittest.main()
| mozilla/pymake | tests/datatests.py | Python | mit | 6,946 |
from botapi.settings import *
DEBUG = True
ALLOWED_HOSTS = ['*']
| naelstrof/PugBot-Discord-Django | botapi/apache/override.py | Python | mit | 66 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestLengthsTopKOps(serial.SerializedTestCase):
@serial.given(N=st.integers(min_value=0, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_lengths_top_k_op(self, N, K, gc, dc):
lens = np.random.randint(low=1, high=2 * K + 1, size=N).astype(np.int32)
X = []
for i in lens:
X.extend(map(lambda x: x / 100.0, range(0, 6 * i, 6)))
X = np.array(X, dtype=np.float32)
op = core.CreateOperator("LengthsTopK", ["X", "Y"], ["values", "indices"], k=K)
def lengths_top_k(X, lens):
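            # Reference implementation: per segment, take the K largest values
            # via a descending argsort; segments shorter than K are padded with
            # value 0 and index -1 to match the operator's convention.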
N, si = lens.shape[0], 0
values, indices = [], []
for i in range(N):
cur_indices = X[si:si + lens[i]].argsort()[-K:][::-1]
cur_values = X[si:si + lens[i]][cur_indices]
values.extend(cur_values)
indices.extend(cur_indices)
si += lens[i]
if lens[i] < K:
values.extend([0] * (K - lens[i]))
indices.extend([-1] * (K - lens[i]))
return (np.array(values, dtype=np.float32).reshape(-1, K),
np.array(indices, dtype=np.int32).reshape(-1, K))
self.assertDeviceChecks(dc, op, [X, lens], [0, 1])
self.assertReferenceChecks(gc, op, [X, lens], lengths_top_k)
self.assertGradientChecks(gc, op, [X, lens], 0, [0])
@given(N=st.integers(min_value=0, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs_cpu_only)
def test_lengths_top_k_empty_op(self, N, K, gc, dc):
lens = np.zeros((N, ), dtype=np.int32)
X = np.array([], dtype=np.float32)
op = core.CreateOperator("LengthsTopK", ["X", "Y"], ["values", "indices"], k=K)
def lengths_top_k(X, lens):
return (np.zeros((N, K), dtype=np.float32),
-1 * np.ones((N, K), dtype=np.int32))
self.assertDeviceChecks(dc, op, [X, lens], [0, 1])
self.assertReferenceChecks(gc, op, [X, lens], lengths_top_k)
self.assertGradientChecks(gc, op, [X, lens], 0, [0])
|
ryfeus/lambda-packs
|
pytorch/source/caffe2/python/operator_test/lengths_top_k_ops_test.py
|
Python
|
mit
| 2,523
|
import unittest
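# __import__ is required here: the module name contains hyphens, so it cannot
# appear in a regular import statement.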
converter = __import__("obj-to-sm-conversion")
model = """
# Blender v2.71 (sub 0) OBJ File:
# www.blender.org
mtllib object.mtl
o Cube
v 1.000000 -1.000000 -1.000000
v 1.000000 -1.000000 1.000000
v -1.000000 -1.000000 1.000000
v -1.000000 -1.000000 -1.000000
v 1.000000 1.000000 -0.999999
v 0.999999 1.000000 1.000001
v -1.000000 1.000000 1.000000
v -1.000000 1.000000 -1.000000
v 0.493105 -0.493106 2.246419
v -0.493106 -0.493106 2.246419
v 0.493105 0.493105 2.246419
v -0.493106 0.493105 2.246419
v 0.493105 -0.493106 3.738037
v -0.493106 -0.493106 3.738037
v 0.493104 0.493105 3.738037
v -0.493107 0.493105 3.738037
v 0.493105 -0.493106 4.284467
v -0.493107 -0.493106 4.284467
v 0.493104 0.493105 4.284468
v -0.493107 0.493105 4.284467
v 0.493104 1.012896 3.738037
v -0.493107 1.012896 3.738037
v 0.493104 1.343554 4.284468
v -0.493107 1.343554 4.284467
v 0.493105 1.845343 3.234304
v -0.493106 1.845343 3.234304
v 0.493105 2.176001 3.780735
v -0.493106 2.176001 3.780734
v 0.570207 -1.571936 -0.570207
v 0.570207 -1.571936 0.570207
v -0.570207 -1.571936 0.570207
v -0.570207 -1.571936 -0.570208
v 0.570207 -3.115134 -0.570207
v 0.570207 -3.115134 0.570207
v -0.570207 -3.115134 0.570207
v -0.570207 -3.115134 -0.570208
vn -0.799400 -0.600800 -0.000000
vn 0.000000 1.000000 0.000000
vn 1.000000 -0.000000 0.000000
vn -0.000000 0.926300 0.376700
vn -1.000000 -0.000000 -0.000000
vn 0.000000 0.000000 -1.000000
vn -0.926300 -0.000000 0.376700
vn 0.926300 0.000000 0.376700
vn 0.000000 -0.926300 0.376700
vn 0.000000 -1.000000 0.000000
vn -0.000000 -0.000000 1.000000
vn 0.000000 0.855600 -0.517700
vn -0.000000 0.517700 0.855600
vn 0.000000 -0.517700 -0.855600
vn -0.000000 -0.600800 0.799400
vn 0.000000 -0.600800 -0.799400
vn 0.799400 -0.600800 0.000000
usemtl Material
s off
f 4//1 32//1 31//1
f 8//2 7//2 6//2
f 1//3 5//3 6//3
f 7//4 12//4 11//4
f 7//5 8//5 4//5
f 1//6 4//6 8//6
f 12//2 16//2 15//2
f 7//7 3//7 10//7
f 2//8 6//8 11//8
f 2//9 9//9 10//9
f 16//5 20//5 24//5
f 12//5 10//5 14//5
f 9//3 11//3 15//3
f 9//10 13//10 14//10
f 17//11 19//11 20//11
f 16//5 14//5 18//5
f 15//3 19//3 17//3
f 13//10 17//10 18//10
f 22//5 24//5 28//5
f 15//3 21//3 23//3
f 19//11 23//11 24//11
f 16//6 22//6 21//6
f 26//12 28//12 27//12
f 23//3 21//3 25//3
f 23//13 27//13 28//13
f 22//14 26//14 25//14
f 32//5 36//5 35//5
f 3//15 31//15 30//15
f 1//16 29//16 32//16
f 2//17 30//17 29//17
f 34//10 35//10 36//10
f 31//11 35//11 34//11
f 29//6 33//6 36//6
f 29//3 30//3 34//3
f 3//1 4//1 31//1
f 5//2 8//2 6//2
f 2//3 1//3 6//3
f 6//4 7//4 11//4
f 3//5 7//5 4//5
f 5//6 1//6 8//6
f 11//2 12//2 15//2
f 12//7 7//7 10//7
f 9//8 2//8 11//8
f 3//9 2//9 10//9
f 22//5 16//5 24//5
f 16//5 12//5 14//5
f 13//3 9//3 15//3
f 10//10 9//10 14//10
f 18//11 17//11 20//11
f 20//5 16//5 18//5
f 13//3 15//3 17//3
f 14//10 13//10 18//10
f 26//5 22//5 28//5
f 19//3 15//3 23//3
f 20//11 19//11 24//11
f 15//6 16//6 21//6
f 25//12 26//12 27//12
f 27//3 23//3 25//3
f 24//13 23//13 28//13
f 21//14 22//14 25//14
f 31//5 32//5 35//5
f 2//15 3//15 30//15
f 4//16 1//16 32//16
f 1//17 2//17 29//17
f 33//10 34//10 36//10
f 30//11 31//11 34//11
f 32//6 29//6 36//6
f 33//3 29//3 34//3
"""
class TestConvertFunctions(unittest.TestCase):
def test_conversion(self):
global model
(format, faces, vertexes, normals, texture) = converter.convert_to_objects(model)
self.assertEqual(len(faces), 68)
self.assertEqual(len(vertexes), 36)
self.assertEqual(len(normals), 17)
self.assertEqual(len(texture), 0)
self.assertEqual(format, 'vn')
return 0
|
stbd/stoolbox
|
tests/obj-to-sm-test/conversion-test.py
|
Python
|
mit
| 3,598
|
#!/usr/bin/env python
import argparse
import binascii
import datetime
import gzip
import json
import magic
import os
import pymongo
import sys
def read_gzip(filename):
with gzip.open(filename) as file:
content = file.read()
return content
def read_plain(filename):
with open(filename) as file:
content = file.read()
return content
readers = {
b'application/x-gzip': read_gzip,
b'text/plain': read_plain,
}
def read(filename):
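    """Read a file, dispatching on its libmagic-detected MIME type."""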
    mime_type = magic.from_file(filename, mime=True)
    return readers[mime_type](filename).decode()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', help='path to moita configuration file',
dest='moita', metavar='MOITA', required=True)
parser.add_argument('filename', nargs='+')
args = parser.parse_args()
sys.path.append(os.path.dirname(args.moita))
import config
connection = pymongo.MongoClient()
collection = connection[config.DATABASE].timetables
for file in args.filename:
content = json.loads(read(file))
identifier = binascii.unhexlify(
os.path.basename(file).split('.', 1)[0]).decode()
content['_id'] = identifier
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(file))
content['updated_at'] = mtime
collection.save(content)
|
ranisalt/moita-migrant
|
migrant.py
|
Python
|
mit
| 1,377
|
"""Translate cli commands to non-cli code."""
import logging
from urllib.error import HTTPError, URLError
import requests
from kytos.utils.config import KytosConfig
LOG = logging.getLogger(__name__)
class WebAPI: # pylint: disable=too-few-public-methods
"""An API for the command-line interface."""
@classmethod
def update(cls, args):
"""Call the method to update the Web UI."""
kytos_api = KytosConfig().config.get('kytos', 'api')
url = f"{kytos_api}api/kytos/core/web/update"
version = args["<version>"]
if version:
url += f"/{version}"
try:
result = requests.post(url)
        except (HTTPError, URLError, requests.exceptions.ConnectionError):
LOG.error("Can't connect to server: %s", kytos_api)
return
if result.status_code != 200:
LOG.info("Error while updating web ui: %s", result.content)
else:
LOG.info("Web UI updated.")
|
kytos/kytos-utils
|
kytos/cli/commands/web/api.py
|
Python
|
mit
| 986
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-29 05:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0025_auto_20170626_0008'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'ordering': ['-id'], 'verbose_name': '分类', 'verbose_name_plural': '分类'},
),
]
|
r26zhao/django_blog
|
blog/migrations/0026_auto_20170629_1342.py
|
Python
|
mit
| 467
|
import string
import unittest
import datetime
import collections
from unittest import mock
from flumine.order.order import (
BaseOrder,
BetfairOrder,
ExchangeType,
OrderTypes,
OrderStatus,
VALID_BETFAIR_CUSTOMER_ORDER_REF_CHARACTERS,
LIVE_STATUS,
COMPLETE_STATUS,
)
from flumine.exceptions import OrderUpdateError
class BaseOrderTest(unittest.TestCase):
def setUp(self) -> None:
mock_client = mock.Mock(paper_trade=False)
self.mock_trade = mock.Mock(
client=mock_client, market_id="1.1", selection_id=123, info={}
)
self.mock_order_type = mock.Mock(info={})
self.order = BaseOrder(
self.mock_trade, "BACK", self.mock_order_type, 1, context={1: 2}
)
def test_init(self):
self.assertIsNotNone(self.order.id)
self.assertEqual(self.order.trade, self.mock_trade)
self.assertEqual(self.order.side, "BACK")
self.assertEqual(self.order.order_type, self.mock_order_type)
self.assertEqual(self.order.selection_id, self.mock_trade.selection_id)
self.assertEqual(self.order.handicap, 1)
self.assertEqual(
self.order.lookup,
(self.order.market_id, self.order.selection_id, self.order.handicap),
)
self.assertIsNone(self.order.runner_status)
self.assertIsNone(self.order.market_type)
self.assertEqual(self.order.each_way_divisor, 1)
self.assertIsNone(self.order.status)
self.assertFalse(self.order.complete)
self.assertEqual(self.order.status_log, [])
self.assertIsNone(self.order.violation_msg)
self.assertEqual(self.order.context, {1: 2})
self.assertEqual(self.order.notes, {})
self.assertIsNone(self.order.market_notes)
self.assertIsNone(self.order.bet_id)
self.assertIsNone(self.order.EXCHANGE)
self.assertEqual(self.order.update_data, {})
self.assertIsNone(self.order.publish_time)
self.assertIsNone(self.order.market_version)
self.assertIsNone(self.order.async_)
self.assertIsNotNone(self.order.date_time_created)
self.assertIsNone(self.order.date_time_execution_complete)
self.assertFalse(self.order.simulated)
self.assertFalse(self.order._simulated)
self.assertEqual(
LIVE_STATUS,
[
OrderStatus.PENDING,
OrderStatus.CANCELLING,
OrderStatus.UPDATING,
OrderStatus.REPLACING,
OrderStatus.EXECUTABLE,
],
)
self.assertEqual(
COMPLETE_STATUS,
[
OrderStatus.EXECUTION_COMPLETE,
OrderStatus.EXPIRED,
OrderStatus.VIOLATION,
],
)
@mock.patch("flumine.order.order.BaseOrder._is_complete")
@mock.patch("flumine.order.order.BaseOrder.info")
def test__update_status(self, mock_info, mock__is_complete):
self.mock_trade.complete = True
self.order._update_status(OrderStatus.EXECUTION_COMPLETE)
self.assertEqual(self.order.status_log, [OrderStatus.EXECUTION_COMPLETE])
self.assertEqual(self.order.status, OrderStatus.EXECUTION_COMPLETE)
self.mock_trade.complete_trade.assert_called()
mock__is_complete.assert_called()
@mock.patch("flumine.order.order.BaseOrder._update_status")
def test_placing(self, mock__update_status):
self.order.placing()
mock__update_status.assert_called_with(OrderStatus.PENDING)
@mock.patch("flumine.order.order.BaseOrder._update_status")
def test_executable(self, mock__update_status):
self.order.update_data = {123: 456}
self.order.executable()
mock__update_status.assert_called_with(OrderStatus.EXECUTABLE)
self.assertEqual(self.order.update_data, {})
@mock.patch("flumine.order.order.BaseOrder._update_status")
def test_execution_complete(self, mock__update_status):
self.order.update_data = {123: 456}
self.order.execution_complete()
mock__update_status.assert_called_with(OrderStatus.EXECUTION_COMPLETE)
self.assertIsNotNone(self.order.date_time_execution_complete)
self.assertEqual(self.order.update_data, {})
@mock.patch("flumine.order.order.BaseOrder._update_status")
def test_cancelling(self, mock__update_status):
self.order.cancelling()
mock__update_status.assert_called_with(OrderStatus.CANCELLING)
@mock.patch("flumine.order.order.BaseOrder._update_status")
def test_updating(self, mock__update_status):
self.order.updating()
mock__update_status.assert_called_with(OrderStatus.UPDATING)
@mock.patch("flumine.order.order.BaseOrder._update_status")
def test_replacing(self, mock__update_status):
self.order.replacing()
mock__update_status.assert_called_with(OrderStatus.REPLACING)
@mock.patch("flumine.order.order.BaseOrder._update_status")
def test_violation(self, mock__update_status):
self.order.update_data = {123: 456}
self.order.violation("the murder capital")
mock__update_status.assert_called_with(OrderStatus.VIOLATION)
self.assertEqual(self.order.update_data, {})
self.assertEqual(self.order.violation_msg, "the murder capital")
def test_place(self):
with self.assertRaises(NotImplementedError):
self.order.place(123, 456, False)
def test_cancel(self):
with self.assertRaises(NotImplementedError):
self.order.cancel()
def test_update(self):
with self.assertRaises(NotImplementedError):
self.order.update("PERSIST")
def test_replace(self):
with self.assertRaises(NotImplementedError):
self.order.replace(20.0)
def test_create_place_instruction(self):
with self.assertRaises(NotImplementedError):
self.order.create_place_instruction()
def test_create_cancel_instruction(self):
with self.assertRaises(NotImplementedError):
self.order.create_cancel_instruction()
def test_create_update_instruction(self):
with self.assertRaises(NotImplementedError):
self.order.create_update_instruction()
def test_create_replace_instruction(self):
with self.assertRaises(NotImplementedError):
self.order.create_replace_instruction()
def test_update_current_order(self):
mock_current_order = mock.Mock()
self.order.update_current_order(mock_current_order)
self.assertEqual(self.order.responses.current_order, mock_current_order)
def test_current_order(self):
self.assertIsNone(self.order.current_order)
mock_responses = mock.Mock()
mock_responses.current_order = None
self.order.responses = mock_responses
self.assertEqual(self.order.current_order, mock_responses.place_response)
mock_responses.current_order = 1
self.assertEqual(self.order.current_order, 1)
@mock.patch("flumine.backtest.simulated.config")
def test_current_order_simulated(self, mock_config):
mock_config.simulated = True
order = BaseOrder(mock.Mock(), "", mock.Mock())
self.assertTrue(order.simulated)
self.assertTrue(order._simulated)
def test__is_complete(self):
self.order.status = None
self.assertFalse(self.order._is_complete())
for s in [
OrderStatus.PENDING,
OrderStatus.CANCELLING,
OrderStatus.UPDATING,
OrderStatus.REPLACING,
OrderStatus.EXECUTABLE,
]:
self.order.status = s
self.assertFalse(self.order._is_complete())
for s in [
OrderStatus.EXECUTION_COMPLETE,
OrderStatus.EXPIRED,
OrderStatus.VIOLATION,
]:
self.order.status = s
self.assertTrue(self.order._is_complete())
def test_average_price_matched(self):
with self.assertRaises(NotImplementedError):
assert self.order.average_price_matched
def test_size_matched(self):
with self.assertRaises(NotImplementedError):
assert self.order.size_matched
def test_size_remaining(self):
with self.assertRaises(NotImplementedError):
assert self.order.size_remaining
def test_size_cancelled(self):
with self.assertRaises(NotImplementedError):
assert self.order.size_cancelled
def test_size_lapsed(self):
with self.assertRaises(NotImplementedError):
assert self.order.size_lapsed
def test_size_voided(self):
with self.assertRaises(NotImplementedError):
assert self.order.size_voided
def test_elapsed_seconds(self):
self.assertIsNone(self.order.elapsed_seconds)
mock_responses = mock.Mock()
mock_responses.date_time_placed = datetime.datetime.utcnow()
self.order.responses = mock_responses
self.assertGreaterEqual(self.order.elapsed_seconds, 0)
    def test_elapsed_seconds_created(self):
self.assertGreaterEqual(self.order.elapsed_seconds_created, 0)
def test_elapsed_seconds_executable(self):
self.assertIsNone(self.order.elapsed_seconds_executable)
mock_responses = mock.Mock()
mock_responses.date_time_placed = datetime.datetime.utcnow()
self.order.responses = mock_responses
self.order.date_time_execution_complete = datetime.datetime.utcnow()
self.assertGreaterEqual(self.order.elapsed_seconds_executable, 0)
def test_market_id(self):
self.assertEqual(self.order.market_id, self.mock_trade.market_id)
def test_lookup(self):
self.assertEqual(
self.order.lookup,
(self.mock_trade.market_id, self.mock_trade.selection_id, 1),
)
def test_repr(self):
self.assertEqual(repr(self.order), "Order None: None")
def test_set_and_get_sep(self):
self.order.sep = "a"
self.assertEqual("a", self.order.sep)
def test_customer_order_ref(self):
self.order.trade.strategy.name_hash = "my_name_hash"
self.order.id = 1234
self.assertEqual("my_name_hash-1234", self.order.customer_order_ref)
self.order.sep = "I"
self.assertEqual("my_name_hashI1234", self.order.customer_order_ref)
self.order.sep = "O"
self.assertEqual("my_name_hashO1234", self.order.customer_order_ref)
def test_notes_str(self):
self.order.notes = collections.OrderedDict({"1": 1, 2: "2", 3: 3, 4: "four"})
self.assertEqual(self.order.notes_str, "1,2,3,four")
self.order.notes = collections.OrderedDict()
self.assertEqual(self.order.notes_str, "")
class BetfairOrderTest(unittest.TestCase):
def setUp(self) -> None:
mock_client = mock.Mock(paper_trade=False)
self.mock_trade = mock.Mock(
client=mock_client, market_id="1.1", selection_id=123, info={}
)
self.mock_status = mock.Mock()
self.mock_order_type = mock.Mock(info={}, size=2.0, liability=2.0)
self.order = BetfairOrder(self.mock_trade, "BACK", self.mock_order_type)
def test_init(self):
self.assertEqual(self.order.EXCHANGE, ExchangeType.BETFAIR)
@mock.patch("flumine.order.order.BetfairOrder.placing")
def test_place(self, mock_placing):
self.order.place(123, 456, False)
mock_placing.assert_called_with()
self.assertEqual(self.order.publish_time, 123)
self.assertEqual(self.order.market_version, 456)
self.assertFalse(self.order.async_)
@mock.patch(
"flumine.order.order.BetfairOrder.size_remaining",
new_callable=mock.PropertyMock,
)
@mock.patch("flumine.order.order.BetfairOrder.cancelling")
def test_cancel(self, mock_cancelling, mock_size_remaining):
mock_size_remaining.return_value = 20
self.order.bet_id = 123
self.order.status = OrderStatus.EXECUTABLE
with self.assertRaises(OrderUpdateError):
self.order.cancel(12)
self.mock_order_type.ORDER_TYPE = OrderTypes.LIMIT
self.order.cancel(0.01)
self.assertEqual(self.order.update_data, {"size_reduction": 0.01})
mock_cancelling.assert_called_with()
self.order.cancel()
self.assertEqual(self.order.update_data, {"size_reduction": None})
def test_cancel_bet_id(self):
self.order.status = OrderStatus.EXECUTABLE
with self.assertRaises(OrderUpdateError):
self.order.cancel(12)
@mock.patch(
"flumine.order.order.BetfairOrder.size_remaining",
new_callable=mock.PropertyMock,
)
@mock.patch("flumine.order.order.BetfairOrder.cancelling")
def test_cancel_error_size(self, mock_cancelling, mock_size_remaining):
mock_size_remaining.return_value = 20
self.order.status = OrderStatus.EXECUTABLE
with self.assertRaises(OrderUpdateError):
self.order.cancel(12)
self.mock_order_type.ORDER_TYPE = OrderTypes.LIMIT
with self.assertRaises(OrderUpdateError):
self.order.cancel(21)
@mock.patch(
"flumine.order.order.BetfairOrder.size_remaining",
new_callable=mock.PropertyMock,
)
def test_cancel_error(self, mock_size_remaining):
mock_size_remaining.return_value = 20
self.mock_order_type.ORDER_TYPE = OrderTypes.LIMIT
self.order.status = OrderStatus.PENDING
with self.assertRaises(OrderUpdateError):
self.order.cancel(12)
@mock.patch("flumine.order.order.BetfairOrder.updating")
def test_update(self, mock_updating):
self.order.bet_id = 123
self.order.status = OrderStatus.EXECUTABLE
with self.assertRaises(OrderUpdateError):
self.order.update("PERSIST")
self.mock_order_type.ORDER_TYPE = OrderTypes.LIMIT
self.mock_order_type.persistence_type = "LAPSE"
self.order.update("PERSIST")
self.assertEqual(self.mock_order_type.persistence_type, "PERSIST")
mock_updating.assert_called_with()
with self.assertRaises(OrderUpdateError):
self.order.update("PERSIST")
def test_update_bet_id(self):
self.order.status = OrderStatus.EXECUTABLE
with self.assertRaises(OrderUpdateError):
self.order.update("PERSIST")
def test_update_error(self):
self.mock_order_type.ORDER_TYPE = OrderTypes.LIMIT
self.mock_order_type.persistence_type = "LAPSE"
self.order.status = OrderStatus.PENDING
with self.assertRaises(OrderUpdateError):
self.order.update("PERSIST")
@mock.patch("flumine.order.order.BetfairOrder.replacing")
def test_replace(self, mock_replacing):
self.order.bet_id = 123
self.order.status = OrderStatus.EXECUTABLE
with self.assertRaises(OrderUpdateError):
self.order.replace(1.01)
self.mock_order_type.ORDER_TYPE = OrderTypes.LIMIT
self.mock_order_type.price = 2.02
self.order.replace(1.01)
self.assertEqual(self.order.update_data, {"new_price": 1.01})
mock_replacing.assert_called_with()
with self.assertRaises(OrderUpdateError):
self.order.replace(2.02)
def test_replace_bet_id(self):
self.order.status = OrderStatus.EXECUTABLE
with self.assertRaises(OrderUpdateError):
self.order.replace(1.01)
def test_replace_error(self):
self.mock_order_type.ORDER_TYPE = OrderTypes.LIMIT
self.order.status = OrderStatus.PENDING
with self.assertRaises(OrderUpdateError):
self.order.replace(1.52)
def test_create_place_instruction(self):
self.mock_order_type.ORDER_TYPE = OrderTypes.LIMIT
self.assertEqual(
self.order.create_place_instruction(),
{
"customerOrderRef": self.order.customer_order_ref,
"handicap": 0,
"limitOrder": self.mock_order_type.place_instruction(),
"orderType": "LIMIT",
"selectionId": self.mock_trade.selection_id,
"side": "BACK",
},
)
self.mock_order_type.ORDER_TYPE = OrderTypes.LIMIT_ON_CLOSE
self.assertEqual(
self.order.create_place_instruction(),
{
"customerOrderRef": self.order.customer_order_ref,
"handicap": 0,
"limitOnCloseOrder": self.mock_order_type.place_instruction(),
"orderType": "LIMIT_ON_CLOSE",
"selectionId": self.mock_trade.selection_id,
"side": "BACK",
},
)
self.mock_order_type.ORDER_TYPE = OrderTypes.MARKET_ON_CLOSE
self.assertEqual(
self.order.create_place_instruction(),
{
"customerOrderRef": self.order.customer_order_ref,
"handicap": 0,
"marketOnCloseOrder": self.mock_order_type.place_instruction(),
"orderType": "MARKET_ON_CLOSE",
"selectionId": self.mock_trade.selection_id,
"side": "BACK",
},
)
def test_create_cancel_instruction(self):
self.order.update_data = {"size_reduction": 0.02}
self.assertEqual(
self.order.create_cancel_instruction(), {"sizeReduction": 0.02}
)
def test_create_update_instruction(self):
self.mock_order_type.persistence_type = "PERSIST"
self.assertEqual(
self.order.create_update_instruction(), {"newPersistenceType": "PERSIST"}
)
def test_create_replace_instruction(self):
self.order.update_data = {"new_price": 2.02}
self.assertEqual(self.order.create_replace_instruction(), {"newPrice": 2.02})
def test_average_price_matched(self):
self.assertEqual(self.order.average_price_matched, 0)
mock_current_order = mock.Mock(average_price_matched=12.3)
self.order.responses.current_order = mock_current_order
self.assertEqual(
self.order.average_price_matched, mock_current_order.average_price_matched
)
def test_size_matched(self):
self.assertEqual(self.order.size_matched, 0)
mock_current_order = mock.Mock(size_matched=10)
self.order.responses.current_order = mock_current_order
self.assertEqual(self.order.size_matched, mock_current_order.size_matched)
def test_size_remaining(self):
self.mock_order_type.ORDER_TYPE = OrderTypes.LIMIT
self.mock_order_type.size = 0
self.mock_order_type.bet_target_size = 0
self.assertEqual(self.order.size_remaining, 0)
self.mock_order_type.size = 10
mock_current_order = mock.Mock(size_remaining=10)
self.order.responses.current_order = mock_current_order
self.assertEqual(self.order.size_remaining, mock_current_order.size_remaining)
def test_size_remaining_missing(self):
self.mock_order_type.ORDER_TYPE = OrderTypes.LIMIT
self.mock_order_type.size = 2.51
self.assertEqual(self.order.size_remaining, 2.51)
@mock.patch(
"flumine.order.order.BetfairOrder.size_matched",
new_callable=mock.PropertyMock,
)
def test_size_remaining_missing_partial_match(self, mock_size_matched):
self.mock_order_type.ORDER_TYPE = OrderTypes.LIMIT
mock_size_matched.return_value = 2
self.mock_order_type.size = 10
self.assertEqual(self.order.size_remaining, 8)
def test_size_remaining_market_on_close(self):
self.mock_order_type.ORDER_TYPE = OrderTypes.MARKET_ON_CLOSE
self.mock_order_type.size = ValueError
self.order.responses.current_order = None
self.assertEqual(self.order.size_remaining, self.mock_order_type.liability)
def test_size_cancelled(self):
self.assertEqual(self.order.size_cancelled, 0)
mock_current_order = mock.Mock(size_cancelled=10)
self.order.responses.current_order = mock_current_order
self.assertEqual(self.order.size_cancelled, mock_current_order.size_cancelled)
def test_size_lapsed(self):
self.assertEqual(self.order.size_lapsed, 0)
mock_current_order = mock.Mock(size_lapsed=10)
self.order.responses.current_order = mock_current_order
self.assertEqual(self.order.size_lapsed, mock_current_order.size_lapsed)
def test_size_voided(self):
self.assertEqual(self.order.size_voided, 0)
mock_current_order = mock.Mock(size_voided=10)
self.order.responses.current_order = mock_current_order
self.assertEqual(self.order.size_voided, mock_current_order.size_voided)
def test_info(self):
self.order.status_log = [OrderStatus.PENDING, OrderStatus.EXECUTION_COMPLETE]
self.assertEqual(
self.order.info,
{
"bet_id": None,
"handicap": self.order.handicap,
"id": self.order.id,
"date_time_created": str(self.order.date_time_created),
"market_id": self.mock_trade.market_id,
"selection_id": self.mock_trade.selection_id,
"publish_time": None,
"market_version": None,
"async": None,
"status": None,
"status_log": "Pending, Execution complete",
"trade": self.mock_trade.info,
"order_type": self.mock_order_type.info,
"info": {
"side": self.order.side,
"size_matched": self.order.size_matched,
"size_remaining": self.order.size_remaining,
"size_cancelled": self.order.size_cancelled,
"size_lapsed": self.order.size_lapsed,
"size_voided": self.order.size_voided,
"average_price_matched": self.order.average_price_matched,
},
"customer_order_ref": self.order.customer_order_ref,
"simulated": {
"profit": 0.0,
"piq": 0.0,
"matched": [],
},
"violation_msg": self.order.violation_msg,
"responses": {
"date_time_placed": None,
"elapsed_seconds_executable": None,
},
"runner_status": self.order.runner_status,
"market_notes": None,
"notes": "",
},
)
def test_json(self):
self.assertTrue(isinstance(self.order.json(), str))
def test_set_invalid_sep(self):
with self.assertRaises(ValueError):
self.order.sep = "@"
class IsValidCustomerOrderRefTestCase(unittest.TestCase):
def test_letters_True(self):
# ascii_letters contains a-z and A-Z
for c in string.ascii_letters:
self.assertTrue(BetfairOrder.is_valid_customer_order_ref_character(c))
def test_2letters_False(self):
self.assertFalse(BetfairOrder.is_valid_customer_order_ref_character("aB"))
self.assertFalse(BetfairOrder.is_valid_customer_order_ref_character("CD"))
def test_digits_True(self):
# string.digits contains digits 0-9
for c in string.digits:
self.assertTrue(BetfairOrder.is_valid_customer_order_ref_character(c))
def test_special_characters_True(self):
for c in VALID_BETFAIR_CUSTOMER_ORDER_REF_CHARACTERS:
            self.assertTrue(BetfairOrder.is_valid_customer_order_ref_character(c))
def test_special_characters_False(self):
for c in list('!"£$%'):
            self.assertFalse(BetfairOrder.is_valid_customer_order_ref_character(c))
|
liampauling/flumine
|
tests/test_order.py
|
Python
|
mit
| 23,869
|
# for the numeric types we have the following operators:
# + - * / % **
print "Integers:"
x = 10
y = 3
print x, "+", y, "=", x + y
print x, "-", y, "=", x - y
print x, "*", y, "=", x*y
print x, "/", y, "=", x/y # note how the result is an integer (Python 2 floor division)
print x, "%", y, "=", x % y # this is the remainder of the division
print x, "**", y, "=", x**y # this is the power operator, x raised to the power of y
print x, "(",bin(x),") & ",y,"(",bin(y),") =", x&y # bitwise AND operator
print x, "(",bin(x),") | ",y,"(",bin(y),") =", x|y # bitwise OR operator
print x, "(",bin(x),") ^ ",y,"(",bin(y),") =", x^y # bitwise XOR operator
print x," equal to ",y,"? ", x==y
print x," different from ",y,"? ", x!=y
print x," greater than ",y,"? ", x>y
print x," less than ",y,"? ", x<y
print x," greater than or equal to ",y,"? ", x>=y
print x," less than or equal to ",y,"? ", x<=y
print "\nFloating-point numbers: "
x = 10.0
y = 3.0
print x, "+", y, "=", x + y
print x, "-", y, "=", x - y
print x, "*", y, "=", x*y
print x, "/", y, "=", x/y # now the result is a real number
print x, "%", y, "=", x % y # this is the remainder of the division
print x, "**", y, "=", x**y # this is the power operator, x raised to the power of y
print "\nComplex numbers:"
x = 1 + 1j
y = 2 + 1j
print x, "+", y, "=", x + y
print x, "-", y, "=", x - y
print x, "*", y, "=", x*y
print x, "/", y, "=", x/y # now the result is a complex number
print x, "%", y, "=", x % y # remainder of the division (deprecated for complex numbers)
print x, "**", y, "=", x**y # this is the power operator, x raised to the power of y
print "\nBoolean variables:"
# now x is a boolean (logical) variable
x = True
y = False
print "Not ", x, "=", not x
print x," or ",y,"=",x or y
print x," and ",y,"=",x and y
x = 10
y = 3
print x, " greater than ", y, " OR ", x, " less than ", y, "? ", x>y or x<y
print x, " greater than ", y, " AND ", x, " less than ", y, "? ", x>y and x<y
print "\nString operations:"
x = "Ola "
y = "Mundo"
print x," + ",y," = ",x+y
print x," *2 = ",x*2
print x,"*2 + ",y," = ",x*2 + y
print "Letter at position 0 of x = ",x[0]
print "Concatenating the first 3 letters of x with y = ",x[0:3] + y
# Relational operators
print "Is there an 'a' in Ola? ", "a" in x
print "Is there no 'b' in Ola? ", "b" not in x
|
folivetti/PI-UFABC
|
AULA_01/Python/operadores.py
|
Python
|
mit
| 2,187
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import yaml
import logging
import threading
from ybk.lighttrade.sysframe import Client as SysframeClient
log = logging.getLogger('trader')
configfile = open(os.path.join(os.path.dirname(__file__), 'trading.yaml'), encoding='utf-8')
config = yaml.load(configfile)
try:
accountfile = open(
os.path.join(os.path.dirname(__file__), 'accounts.yaml'))
account = yaml.load(accountfile)
except:
account = {}
lock = threading.RLock()
class Trader(object):
""" 交易调度 """
traders = {}
def __init__(self, exchange, username=None, password=None):
""" 登陆并缓存Trader Object """
with lock:
d = config[exchange]
if d['system'] == 'sysframe':
Client = SysframeClient
elif d['system'] == 'winner':
raise NotImplementedError
if username is None:
u = account[exchange][0]
username = u['username']
password = u['password']
if d.get('disabled'):
                raise ValueError('This exchange is disabled')
signature = (exchange, username, password)
if signature not in self.traders:
if not isinstance(d['tradeweb_url'], list):
d['tradeweb_url'] = [d['tradeweb_url']]
self.client = Client(front_url=d['front_url'],
tradeweb_url=d['tradeweb_url'])
setattr(self.client, 'exchange', exchange)
self.client.login(username, password)
self.traders[signature] = self
else:
old = self.traders[signature]
self.client = old.client
self.client.keep_alive()
def __getattr__(self, key):
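        # Fall back to the wrapped exchange client for anything not defined on
        # the Trader itself.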
if key in self.__dict__:
return self.__dict__[key]
else:
return getattr(self.client, key)
@property
def server_time(self):
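        # Estimate the exchange server clock: local time corrected by the
        # measured clock offset, padded with a 3x latency margin.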
t0 = time.time()
return t0 + self.client.time_offset + self.client.latency * 3
if __name__ == '__main__':
pass
|
yxdong/ybk
|
ybk/lighttrade/trader.py
|
Python
|
mit
| 2,160
|
import command_line
import ast
import os
import traceback
from operator import attrgetter
import settings
from execution_tree_builder import build_execution_tree
# TODO move classes to separate files
class DataCollectorCall(object):
def __init__(self, var_name="", indentation=0, line_position=0, need_stacktrace=False, is_return_operator=False):
self.collected_variable = var_name
self.indentation = indentation
self.line_position = line_position
self.need_stacktrace = need_stacktrace
self.is_return_operator = is_return_operator
class SourceCodeInfo(object):
def __init__(self, function_calls=[], function_declarations=[], statements=[], source_code_string="", return_calls=[]):
self.function_calls = function_calls
self.function_declarations = function_declarations
self.statements = statements
self.source_code_string = source_code_string
self.return_calls = return_calls
class FunctionCall(object):
def __init__(self, func_name="", arguments=[], line_position=0, indentation=0, parent_function=""):
self.func_name = func_name
self.arguments = arguments
self.line_position = line_position
self.indentation = indentation
self.parent_function = parent_function
def __str__(self):
return self.func_name + " " + str(self.line_position)
class FunctionDeclaration(object):
def __init__(self, func_name="", arguments=[], start_position=0, end_position=0):
self.name = func_name
self.arguments = arguments
self.start_position = start_position
self.end_position = end_position
def __str__(self):
return self.name + " " + str(self.arguments) + " " + str(self.start_position) + " " + str(self.end_position)
class ReturnCall(object):
def __init__(self, line_position = 0, indentation = 0):
self.line_position = line_position
self.indentation = indentation
class Statement(object):
def __init__(self, destination_var_name="", subscript_key="", line_position=0, indentation=0):
self.destination_var_name = destination_var_name
self.subscript_key = subscript_key
self.line_position = line_position
self.indentation = indentation
def __str__(self):
return self.destination_var_name + " " + str(self.line_position)
class VariableAsFunction(object):
def __init__(self):
self.var_name = ""
self.type = "" # ???
self.dependencies = ()
### Sample of the modification of existing source code
### http://stackoverflow.com/questions/768634/parse-a-py-file-read-the-ast-modify-it-then-write-back-the-modified-source-c
# TODO move process functions to separate file
def process_assign_node(node):
statement = Statement()
if isinstance(node, ast.AugAssign):
statement.destination_var_name = node.target.id
statement.line_position = node.target.lineno
statement.indentation = node.target.col_offset
else:
for target in node.targets:
if isinstance(target, ast.Name):
statement.destination_var_name = target.id
statement.line_position = target.lineno
statement.indentation = target.col_offset
elif isinstance(target, ast.Subscript):
statement.destination_var_name = target.value.id
statement.subscript_key = target.slice.value.id
statement.line_position = target.lineno
statement.indentation = target.col_offset
if isinstance(node.value, ast.List):
for list_item in node.value.elts:
if is_value_type(list_item):
print "Value type"
else:
print "Other type"
elif isinstance(node.value, ast.BinOp):
print "Binary operation"
process_operand(node.value.left)
process_operand(node.value.right)
elif isinstance(node.value, ast.Subscript):
print "Subscript assign "
elif is_value_type(node.value):
print ""
else:
print "Unhandled assign type"
return statement
def process_operand(operand):
if isinstance(operand, ast.Num):
print "Operand is a number."
elif isinstance(operand, ast.Call):
print "Operand is function call."
else:
print "Unhandled operand's processing."
def is_value_type(item):
# TODO: extend with
return isinstance(item, ast.Num)
def process_return_call_node(node):
return_call = ReturnCall(line_position=node.lineno, indentation=node.col_offset)
return return_call
def process_func_call_node(node):
function_call = FunctionCall()
items = []
for arg in node.args:
# ast.Name
if isinstance(arg, ast.Name):
items.append(arg.id)
function_call.func_name = node.func.id
function_call.arguments = items
function_call.line_position = node.lineno
function_call.indentation = node.col_offset
return function_call
def process_func_declaration_node(node):
declaration = FunctionDeclaration()
function_args = []
for arg in node.args.args:
# ast.Name
function_args.append(arg.id)
declaration.name = node.name
    declaration.arguments = function_args
declaration.start_position = node.lineno
for element in node.body:
if element.lineno > declaration.end_position:
declaration.end_position = element.lineno
return declaration
def put_data_collector(variable, line_position):
print variable + " " + str(line_position)
def generate_indentation(size):
return " " * size
def build_data_collectors(source_code_info):
"""
Build structures with arguments for generating file write capturing calls
"""
file_write_calls = []
for statement in source_code_info.statements:
line_position = statement.line_position + 1
data_collector = DataCollectorCall(statement.destination_var_name, statement.indentation, line_position)
file_write_calls.append(data_collector)
for function_declaration in source_code_info.function_declarations:
        for argument in function_declaration.arguments:
line_position = function_declaration.start_position + 1
data_collector = DataCollectorCall(argument, settings.DEFAULT_INDENT_SIZE, line_position, True)
file_write_calls.append(data_collector)
for return_call in source_code_info.return_calls:
data_collector = DataCollectorCall(indentation=return_call.indentation,
line_position=return_call.line_position - 1,
is_return_operator=True,
need_stacktrace=True)
file_write_calls.append(data_collector)
file_write_calls.sort(key=attrgetter('line_position'))
return file_write_calls
def generate_data_collector_call(data_collector_call, descriptor_name):
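    """Render the file.write() call(s) for one data collector: an optional
    stacktrace snapshot and, for non-return collectors, a variable-change
    record."""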
result_write_call = ""
indentation = generate_indentation(data_collector_call.indentation)
if data_collector_call.need_stacktrace:
stacktrace_type = settings.META_MARK_FUNC_CALL_STACKTRACE
if data_collector_call.is_return_operator == True:
stacktrace_type = settings.META_MARK_RETURN_STACKTRACE
file_write_call_string = "{}.write(\"{}\" + str(traceback.extract_stack()) + \"\\n\")\n".format(descriptor_name,
stacktrace_type)
stacktrace_snapshot_call = indentation + file_write_call_string
result_write_call += stacktrace_snapshot_call
if data_collector_call.is_return_operator == False:
var_name = data_collector_call.collected_variable
file_write_call_string = "{}.write(\"{} \" + \"{} = \" + str({}) + \"\\n\")\n".format(descriptor_name,
settings.META_MARK_VARCHANGE,
var_name, var_name)
var_change_write_call = indentation + file_write_call_string
result_write_call += var_change_write_call
return result_write_call
def apply_data_collectors(source_code_info):
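    """Weave the generated data-collector write calls into the original source
    code, opening the collected-data file up front and closing it at the end."""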
data_collectors_info = build_data_collectors(source_code_info)
result_code = settings.FILE_DESCRIPTOR_NAME + " = open(\"" + settings.COLLECTED_DATA_FILE + "\", \"w\")\n"
line_counter = 1
code_lines = source_code_info.source_code_string.split("\n")
if len(data_collectors_info) > 0:
current_data_collector = data_collectors_info[0]
data_collectors_info.remove(current_data_collector)
for code_line in code_lines:
while current_data_collector is not None and current_data_collector.line_position == line_counter:
result_code += "\n" + generate_data_collector_call(current_data_collector, settings.FILE_DESCRIPTOR_NAME)
current_data_collector = None
if len(data_collectors_info) > 0:
current_data_collector = data_collectors_info[0]
data_collectors_info.remove(current_data_collector)
result_code = result_code + "\n" + code_line
line_counter += 1
result_code = "{}\n{}{}".format(result_code, settings.FILE_DESCRIPTOR_NAME, ".close()")
return result_code
|
skyylex/Luminous-proof-of-concept
|
core/source_transformer.py
|
Python
|
mit
| 9,513
|
#!env/bin/python
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app, db
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
iniweb/deployCD
|
manage.py
|
Python
|
mit
| 266
|
def add_native_methods(clazz):
def getVMTemporaryDirectory____():
raise NotImplementedError()
clazz.getVMTemporaryDirectory____ = staticmethod(getVMTemporaryDirectory____)
|
laffra/pava
|
pava/implementation/natives/sun/misc/VMSupport.py
|
Python
|
mit
| 190
|
"""An example of using a middleware to require HTTPS connections.
Requires falcon-require-https (https://github.com/falconry/falcon-require-https),
installed via `pip install falcon-require-https`.
"""
import hug
from falcon_require_https import RequireHTTPS
hug.API(__name__).http.add_middleware(RequireHTTPS())
@hug.get()
def my_endpoint():
return "Success!"
|
timothycrosley/hug
|
examples/force_https.py
|
Python
|
mit
| 355
|
import sys
from .space_delimited import SpaceDelimited
try:
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer("french")
except ValueError:
raise ImportError("Could not load stemmer for {0}. ".format(__name__))
try:
from nltk.corpus import stopwords as nltk_stopwords
stopwords = set(nltk_stopwords.words('french') + ["a"])
except LookupError:
raise ImportError("Could not load stopwords for {0}. ".format(__name__) +
"You may need to install the nltk 'stopwords' " +
"corpora. See http://www.nltk.org/data.html")
try:
import enchant
dictionary = enchant.Dict("fr")
except enchant.errors.DictNotFoundError:
raise ImportError("No enchant-compatible dictionary found for 'fr'. " +
"Consider installing 'myspell-fr'.")
badwords = [
r"con",
r"fesse", r"foutre",
r"merde+", r"merdique",
r"prostituee?", r"putain", r"putes",
r"salop", r"stupide",
]
sys.modules[__name__] = SpaceDelimited(
__name__,
doc="""
french
======
revision
--------
.. autoattribute:: revision.words
.. autoattribute:: revision.content_words
.. autoattribute:: revision.badwords
.. autoattribute:: revision.misspellings
.. autoattribute:: revision.infonoise
parent_revision
---------------
.. autoattribute:: parent_revision.words
.. autoattribute:: parent_revision.content_words
.. autoattribute:: parent_revision.badwords
.. autoattribute:: parent_revision.misspellings
.. autoattribute:: parent_revision.infonoise
diff
----
.. autoattribute:: diff.words_added
.. autoattribute:: diff.words_removed
.. autoattribute:: diff.badwords_added
.. autoattribute:: diff.badwords_removed
.. autoattribute:: diff.misspellings_added
.. autoattribute:: diff.misspellings_removed
""",
badwords=badwords,
dictionary=dictionary,
stemmer=stemmer,
stopwords=stopwords
)
|
ToAruShiroiNeko/revscoring
|
revscoring/languages/french.py
|
Python
|
mit
| 1,905
|
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2012, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
import netaddr
name = 'netaddr'
version = netaddr.__version__
description = 'Pythonic manipulation of IPv4, IPv6, CIDR, EUI and MAC network addresses'
keywords = [
'Networking', 'Systems Administration', 'IANA', 'IEEE', 'CIDR', 'IP',
'IPv4', 'IPv6', 'CIDR', 'EUI', 'MAC', 'MAC-48', 'EUI-48', 'EUI-64'
]
download_url = 'http://github.com/drkjam/netaddr/downloads'
author = 'David P. D. Moss'
author_email = 'drkjam@gmail.com'
url = 'http://github.com/drkjam/netaddr/'
# Required by distutils only.
packages = [
'netaddr',
'netaddr.ip',
'netaddr.eui',
'netaddr.strategy',
'netaddr.tests',
]
# Required by distutils only.
package_data = {
'netaddr.ip': [
'ipv4-address-space.xml',
'ipv6-address-space.xml',
'multicast-addresses.xml'
],
'netaddr.eui': [
'*.txt',
'*.idx'
],
'netaddr.tests': [
'core/*.txt',
'eui/*.txt',
'ip/*.txt',
'strategy/*.txt',
],
}
scripts = ['netaddr/tools/netaddr']
license = 'BSD License'
#------------------------------------------------------------------------
# NB - keep this text around 74 characters wide so it is viewable
# in various fixed window sizes.
long_description = """
A pure Python network address representation and manipulation library.
netaddr provides a Pythonic way of working with :-
- IPv4 and IPv6 addresses and subnets
- MAC addresses, OUI and IAB identifiers, IEEE EUI-64 identifiers
- arbitrary (non-aligned) IP address ranges and IP address sets
- various non-CIDR IP range formats such as nmap and glob-style formats
Included are routines for :-
- generating, sorting and summarizing IP addresses and networks
- performing easy conversions between address notations and formats
- detecting, parsing and formatting network address representations
- performing set-based operations on groups of IP addresses and subnets
- working with arbitrary IP address ranges and formats
- accessing OUI and IAB organisational information published by IEEE
- accessing IP address and block information published by IANA
For details on the latest updates and changes, see :-
http://github.com/drkjam/netaddr/blob/rel-0.7.x/CHANGELOG
API documentation for the latest release is available here :-
http://packages.python.org/netaddr/
"""
platforms = 'OS Independent'
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: BSD License',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Topic :: Communications',
'Topic :: Documentation',
'Topic :: Education',
'Topic :: Education :: Testing',
'Topic :: Home Automation',
'Topic :: Internet',
'Topic :: Internet :: Log Analysis',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: Internet :: Proxy Servers',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Topic :: Security',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Testing :: Traffic Generation',
'Topic :: System :: Benchmark',
'Topic :: System :: Clustering',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Logging',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Firewalls',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Networking :: Time Synchronization',
'Topic :: System :: Recovery Tools',
'Topic :: System :: Shells',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
'Topic :: System :: System Shells',
'Topic :: Text Processing',
'Topic :: Text Processing :: Filters',
'Topic :: Utilities',
]
install_requires = [
]
setup_requires = [
]
|
ashmastaflash/gwdetect
|
dependencies/netaddr-0.7.10/release.py
|
Python
|
mit
| 5,263
|
import model as Model
NODES_PER_ROBOT = 6
ROBOT_CPU_CAPACITY = 100
SERVER_CAPACITY = 400 # for greedy_2 the value must be 0
ALGORITHM = 'greedy_1' # greedy_2
###################################################################################################
def generate(num_computers, num_robots, num_cameras):
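    """Build a placement problem instance: per-robot and server computers,
    wireless/wired links between them, the node set for the experiment, each
    camera and each robot, and the message flows, wrapped in a Model.Problem."""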
msgs_robot = 0
for x in range(1, num_robots + 1):
msgs_robot += 8 + num_robots - x
# Computers for each robot
computers = {}
for x in range(1, num_robots+1):
computer = Model.Computer('C' + str(x), ROBOT_CPU_CAPACITY)
computer.type = 'robot'
computers[computer.id] = computer
# Computers for servers
for x in range(num_robots+1, num_computers+1):
computer = Model.Computer('C' + str(x), SERVER_CAPACITY)
computer.type = 'server'
computers[computer.id] = computer
#-----------------------------------------------------------------------------------------------------------
# Links
num_wireless_links = 0
for x in range(1, num_robots+1):
num_wireless_links += num_computers - x
num_link=1
links = {}
# wireless links
for x in range(1, num_robots+1):
for y in range(x+1, num_computers+1):
bandwidth = 54000/num_wireless_links
link = Model.Link('L' + str(num_link), computers['C'+str(x)], computers['C' + str(y)], bandwidth)
links[link.id] = link
computers['C'+str(x)].add_link(link)
computers['C'+str(y)].add_link(link)
num_link+=1
# wired links
for x in range(num_robots+1, num_computers+1):
for y in range(x+1, num_computers+1):
bandwidth = 100000
link = Model.Link('L' + str(num_link), computers['C'+str(x)], computers['C'+str(y)], bandwidth)
links[link.id] = link
computers['C'+str(x)].add_link(link)
computers['C'+str(y)].add_link(link)
num_link+=1
#-----------------------------------------------------------------------------------------------------------
# Nodes
#
# Experiment, N1
# Tracker (one per camera), N2..N(1+camera_no)
# Then for each robot:
# Environment (1+cameras) + (robot-1)*6
# Model,
# Planner,
# AMCL,
# Navigation,
# Youbot_core
if num_computers - num_robots > 1:
servers_residence = []
for n in range(num_robots+1, num_computers+1):
servers_residence.append(computers['C' + str(n)])
else:
servers_residence = [computers['C' + str(num_computers)]]
num_node = 1
nodes = {}
# Experiment node
id = 'N' + str(num_node)
node = Model.Node(id, [], None)
setting = Model.Setting(node, 1, 1, servers_residence, 'S1')
node.settings = [setting]
nodes[node.id] = node
node.formula = 'x'
node.ratio = 0.01
num_node += 1
# Nodes for cameras
for x in range(1, num_cameras+1):
# Tracker
id = 'N' + str(num_node)
node = Model.Node(id, [], None)
if ALGORITHM == 'greedy_1':
setting_min = Model.Setting(node, 200, 100, servers_residence, 'S1')
setting_max = Model.Setting(node, 80, 40, servers_residence, 'S2')
node.settings = [setting_min, setting_max]
elif ALGORITHM == 'greedy_2':
setting = Model.Setting(node, 120, 70, servers_residence, 'S1')
node.settings = [setting]
nodes[node.id] = node
node.formula = '66.62*math.log(x)+56.308'
node.ratio = 0.83
num_node += 1
# Nodes for robots
for x in range(1, num_robots+1):
robot_residence = []
robot_residence.append(computers['C' + str(x)])
# Environment
id = 'N' + str(num_node)
node = Model.Node(id, [], None)
setting = Model.Setting(node, 1, 1, [], 'S1')
node.settings = [setting]
nodes[node.id] = node
node.formula = 'x'
node.ratio = 0.01
num_node += 1
# Model
id = 'N' + str(num_node)
node = Model.Node(id, [], None)
if ALGORITHM == 'greedy_1':
setting_min = Model.Setting(node, 59, 100, [], 'S1')
setting_max = Model.Setting(node, 17, 20, [], 'S2')
node.settings = [setting_min, setting_max]
elif ALGORITHM == 'greedy_2':
setting = Model.Setting(node, 39, 70, [], 'S1')
node.settings = [setting]
nodes[node.id] = node
node.formula = '63.707*math.log(x)+132.16'
node.ratio = 3.64
num_node += 1
# Planner
id = 'N' + str(num_node)
planner_node = Model.Node(id, [], None)
setting = Model.Setting(planner_node, 1, 1, [], 'S1')
planner_node.settings = [setting]
nodes[planner_node.id] = planner_node
planner_node.formula = 'x'
planner_node.ratio = 0.01
num_node += 1
# AMCL
id = 'N' + str(num_node)
node = Model.Node(id, [], None)
if ALGORITHM == 'greedy_1':
setting_min = Model.Setting(node, 66, 100, [], 'S1')
setting_max = Model.Setting(node, 19, 20, [], 'S2')
node.settings = [setting_min, setting_max]
elif ALGORITHM == 'greedy_2':
setting = Model.Setting(node, 41, 50, [], 'S1')
node.settings = [setting]
nodes[node.id] = node
node.formula = '135.4*(x**2) + 55.126*(x)+4.6383'
node.ratio = 1.33
num_node += 1
# Navigation
id = 'N' + str(num_node)
navigation_node = Model.Node(id, [], None)
if ALGORITHM == 'greedy_1':
setting_min = Model.Setting(navigation_node, 50, 100, [], 'S1')
setting_max = Model.Setting(navigation_node, 25, 10, [], 'S2')
navigation_node.settings = [setting_min, setting_max]
elif ALGORITHM == 'greedy_2':
setting = Model.Setting(navigation_node, 39, 65, [], 'S1')
navigation_node.settings = [setting]
nodes[navigation_node.id] = navigation_node
navigation_node.formula = '129.12*math.log(x)+188.36'
navigation_node.ratio = 5.06
num_node += 1
# Youbot_core
id = 'N' + str(num_node)
youbot_node = Model.Node(id, [], None)
setting = Model.Setting(youbot_node, 16, 1, robot_residence, 'S1')
youbot_node.settings = [setting]
nodes[youbot_node.id] = youbot_node
youbot_node.formula = 'x'
youbot_node.ratio = 0.01
num_node += 1
# two coresidence constraints
# Planner with Navigation
planner_coresidence = nodes['N' + str(1+num_cameras+((x-1)*NODES_PER_ROBOT)+5)]
planner_node.coresidence = [planner_coresidence]
# Navigation with planner
navigation_coresidence = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+3)]
navigation_node.coresidence = [navigation_coresidence]
#-----------------------------------------------------------------------------------------------------------
# Messages
num_mess=1
messages = {}
# Messages from Experiment (Experiment - Environment)
for x in range(1, num_robots+1):
msg_id = 'M' + str(num_mess)
source = nodes['N1']
target = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+1)]
size = 1
message = Model.Message(msg_id, source, target, size)
source.add_msg_source(message)
target.add_msg_sink(message)
num_mess += 1
messages[message.id] = message
# Messages from cameras (Tracker - Environment)
for x in range(1, num_cameras+1):
msg_id = 'M' + str(num_mess)
source = nodes['N' + str(1+x)]
target = nodes['N' + str(2+num_cameras)]
size = 3
message = Model.Message(msg_id, source, target, size)
source.add_msg_source(message)
target.add_msg_sink(message)
num_mess += 1
messages[message.id] = message
# Messages from robots
for x in range(1, num_robots+1):
# (Environment - Model)
msg_id = 'M' + str(num_mess)
source = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+1)]
target = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+2)]
size = 1
message = Model.Message(msg_id, source, target, size)
source.add_msg_source(message)
target.add_msg_sink(message)
num_mess += 1
messages[message.id] = message
# (Environment - Planner)
msg_id = 'M' + str(num_mess)
source = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+1)]
target = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+3)]
size = 1
message = Model.Message(msg_id, source, target, size)
source.add_msg_source(message)
target.add_msg_sink(message)
num_mess += 1
messages[message.id] = message
# (Environment - Youbot_core)
msg_id = 'M' + str(num_mess)
source = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+1)]
target = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+6)]
size = 1
message = Model.Message(msg_id, source, target, size)
source.add_msg_source(message)
target.add_msg_sink(message)
num_mess += 1
messages[message.id] = message
# Between robots (Environment - Environment)
for y in range(x+1, num_robots+1):
msg_id = 'M' + str(num_mess)
source = nodes['N' + str(1+(num_cameras)+(x-1)*NODES_PER_ROBOT+1)]
target = nodes['N' + str(1+(num_cameras)+(y-1)*NODES_PER_ROBOT+1)]
size = 1
message = Model.Message(msg_id, source, target, size)
source.add_msg_source(message)
target.add_msg_sink(message)
num_mess += 1
messages[message.id] = message
##
# (Planner - Navigation)
msg_id = 'M' + str(num_mess)
source = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+3)]
target = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+5)]
size = 1
message = Model.Message(msg_id, source, target, size)
source.add_msg_source(message)
target.add_msg_sink(message)
num_mess += 1
messages[message.id] = message
# (Navigation - Environment)
msg_id = 'M' + str(num_mess)
source = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+5)]
target = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+1)]
size = 1
message = Model.Message(msg_id, source, target, size)
source.add_msg_source(message)
target.add_msg_sink(message)
num_mess += 1
messages[message.id] = message
# (Youbot_core - Navigation)
msg_id = 'M' + str(num_mess)
source = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+6)]
target = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+5)]
size = 1
message = Model.Message(msg_id, source, target, size)
source.add_msg_source(message)
target.add_msg_sink(message)
num_mess += 1
messages[message.id] = message
# (Youbot_core - AMCL)
msg_id = 'M' + str(num_mess)
source = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+6)]
target = nodes['N' + str(1+num_cameras+(x-1)*NODES_PER_ROBOT+4)]
size = 1
message = Model.Message(msg_id, source, target, size)
source.add_msg_source(message)
target.add_msg_sink(message)
num_mess += 1
messages[message.id] = message
# (AMCL - Environment)
msg_id = 'M' + str(num_mess)
source = nodes['N' + str(1+(num_cameras)+(x-1)*NODES_PER_ROBOT+4)]
target = nodes['N' + str(1+(num_cameras)+(x-1)*NODES_PER_ROBOT+1)]
size = 1
message = Model.Message(msg_id, source, target, size)
source.add_msg_source(message)
target.add_msg_sink(message)
num_mess += 1
messages[message.id] = message
problem = Model.Problem(nodes=nodes, messages=messages, computers=computers, links=links)
return problem
|
ipab-rad/perf_ros
|
src/tool/problem.py
|
Python
|
mit
| 12,503
|
import numpy as np
def calculate_vertex_normals(verts, tris):
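    """Area-weighted vertex normals: accumulate each triangle's unnormalized
    face normal (whose magnitude is proportional to the triangle's area) onto
    its three vertices, then normalize per vertex."""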
v_array = np.array(verts)
tri_array = np.array(tris, dtype=int)
tri_pts = v_array[tri_array]
n = np.cross( tri_pts[:,1] - tri_pts[:,0],
tri_pts[:,2] - tri_pts[:,0])
v_normals = np.zeros(v_array.shape)
for i in range(tri_array.shape[0]):
for j in tris[i]:
v_normals[j,:] += n[i,:]
nrms = np.sqrt(v_normals[:,0]**2 + v_normals[:,1]**2 + v_normals[:,2]**2)
v_normals = v_normals / nrms.reshape((-1,1))
return v_normals
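# A quick usage sketch (the tetrahedron below is an added example, not part
# of the original module): each returned row is a unit-length vertex normal.
if __name__ == '__main__':
    verts = [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)]
    tris = [(0, 2, 1), (0, 1, 3), (0, 3, 2), (1, 2, 3)]
    print(calculate_vertex_normals(verts, tris))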
|
jfozard/pyvol
|
pyvol/mesh/algo.py
|
Python
|
mit
| 602
|
"""
问题描述:给定一个矩阵matrix,其中的值有正、负和0,返回子矩阵的最大累加和.
例如,矩阵matrix为
-90 48 78
64 -40 64
-81 -7 66
其中,最大累加和的子矩阵为:
48 78
-40 64
-7 66
所以返回累加和209.
例如,matrix为:
-1 -1 -1
-1 2 2
-1 -1 -1
其中,最大累加和的子矩阵为:
2 2
所以返回累加和为4.
"""
import sys
from arrandmatrix.q16 import MaxSum
class MaxMatrixSum:
    @classmethod
    def get_max_sum(cls, matrix):
        """For every pair of rows (i, j), collapse rows i..j into one array
        of column sums, reducing the 2-D problem to the 1-D maximum
        subarray problem solved by MaxSum.get_max_sum."""
        if not matrix:
            return 0
        max_value = -sys.maxsize
        for i in range(len(matrix)):
            j = i
            pre_arr = [0 for _ in range(len(matrix[0]))]
            while j < len(matrix):
                # Column sums of the row band matrix[i..j].
                arr = cls.arr_add(matrix[j], pre_arr)
                max_value = max([MaxSum.get_max_sum(arr), max_value])
                j += 1
                pre_arr = arr
        return max_value
    @classmethod
    def arr_add(cls, arr1, arr2):
        # Element-wise sum of two equal-length arrays.
        return [arr1[i]+arr2[i] for i in range(len(arr1))]
if __name__ == '__main__':
my_matrix = [
[-90, 48, 78],
[64, -40, 64],
[-81, -7, 66]
]
print(MaxMatrixSum.get_max_sum(my_matrix))
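# For reference (an assumption, since MaxSum is imported from
# arrandmatrix/q16.py and not shown here): get_max_sum is expected to be the
# classic 1-D maximum-subarray reduction, e.g. Kadane's algorithm.
def kadane_max_sum(arr):
    best = cur = arr[0]
    for value in arr[1:]:
        cur = max(value, cur + value)
        best = max(best, cur)
    return best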
|
ResolveWang/algrithm_qa
|
arrandmatrix/q17.py
|
Python
|
mit
| 1,207
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 19 11:45:20 2016
@author: johnguttag
"""
import random, pylab, numpy
#set line width
pylab.rcParams['lines.linewidth'] = 4
#set font size for titles
pylab.rcParams['axes.titlesize'] = 20
#set font size for labels on axes
pylab.rcParams['axes.labelsize'] = 20
#set size of numbers on x-axis
pylab.rcParams['xtick.labelsize'] = 16
#set size of numbers on y-axis
pylab.rcParams['ytick.labelsize'] = 16
#set size of ticks on x-axis
pylab.rcParams['xtick.major.size'] = 7
#set size of ticks on y-axis
pylab.rcParams['ytick.major.size'] = 7
#set size of markers
pylab.rcParams['lines.markersize'] = 10
#set number of examples shown in legends
pylab.rcParams['legend.numpoints'] = 1
def getData(fileName):
dataFile = open(fileName, 'r')
distances = []
masses = []
dataFile.readline() #discard header
for line in dataFile:
d, m = line.split()
distances.append(float(d))
masses.append(float(m))
dataFile.close()
return (masses, distances)
def labelPlot():
pylab.title('Measured Displacement of Spring')
pylab.xlabel('|Force| (Newtons)')
pylab.ylabel('Distance (meters)')
def plotData(fileName):
xVals, yVals = getData(fileName)
xVals = pylab.array(xVals)
yVals = pylab.array(yVals)
xVals = xVals*9.81 #acc. due to gravity
pylab.plot(xVals, yVals, 'bo',
label = 'Measured displacements')
labelPlot()
plotData('springData.txt')
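# An added sketch, not in the original notes: a plausible next step is to
# fit a line to the same data with pylab.polyfit and overlay it. For a
# spring, displacement = |force|/k, so the spring constant k is 1/slope.
def fitData(fileName):
    xVals, yVals = getData(fileName)
    xVals = pylab.array(xVals)*9.81 #convert mass to |force|
    yVals = pylab.array(yVals)
    a, b = pylab.polyfit(xVals, yVals, 1) #slope and intercept
    estYVals = a*xVals + b
    pylab.plot(xVals, estYVals, 'r',
               label = 'Linear fit, k = ' + str(round(1/a, 5)))
    pylab.legend(loc = 'best')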
|
johntauber/MITx6.00.2x
|
Unit4/Lecture10Video1Notes.py
|
Python
|
mit
| 1,486
|
# Generated by Django 2.1.7 on 2019-03-09 11:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("account", "0018_auto_20190309_1153"),
]
operations = [
migrations.AddField(
model_name="taggeduser",
name="count",
field=models.PositiveIntegerField(default=1),
),
migrations.AddField(
model_name="taggeduser",
name="tag_new",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="tagged_users",
to="account.UserTag",
),
),
]
|
fin/froide
|
froide/account/migrations/0019_auto_20190309_1223.py
|
Python
|
mit
| 755
|
# -*- coding: utf-8 -*-
#
# Jetlibs documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 23 16:22:13 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Jetlibs'
copyright = u'2015, Marius Messerschmidt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Jetlibsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Jetlibs.tex', u'Jetlibs Documentation',
u'Marius Messerschmidt', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jetlibs', u'Jetlibs Documentation',
[u'Marius Messerschmidt'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Jetlibs', u'Jetlibs Documentation',
u'Marius Messerschmidt', 'Jetlibs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Jetlibs'
epub_author = u'Marius Messerschmidt'
epub_publisher = u'Marius Messerschmidt'
epub_copyright = u'2015, Marius Messerschmidt'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'Jetlibs'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
jetspace/jetlibs
|
docs/source/conf.py
|
Python
|
mit
| 10,239
|
"""engine.SCons.Tool.msvc
Tool-specific initialization for Microsoft Visual C/C++.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os.path
import re
import sys
import SCons.Action
import SCons.Builder
import SCons.Defaults  # needed for the Static/SharedObjectEmitter references below
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvs
import SCons.Util
import SCons.Warnings
import SCons.Scanner.RC
from .MSCommon import msvc_exists, msvc_setup_env_once
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def validate_vars(env):
"""Validate the PCH and PCHSTOP construction variables."""
if 'PCH' in env and env['PCH']:
if 'PCHSTOP' not in env:
raise SCons.Errors.UserError("The PCHSTOP construction must be defined if PCH is defined.")
if not SCons.Util.is_String(env['PCHSTOP']):
raise SCons.Errors.UserError("The PCHSTOP construction variable must be a string: %r"%env['PCHSTOP'])
def pch_emitter(target, source, env):
"""Adds the object file target."""
validate_vars(env)
pch = None
obj = None
for t in target:
if SCons.Util.splitext(str(t))[1] == '.pch':
pch = t
if SCons.Util.splitext(str(t))[1] == '.obj':
obj = t
if not obj:
obj = SCons.Util.splitext(str(pch))[0]+'.obj'
target = [pch, obj] # pch must be first, and obj second for the PCHCOM to work
return (target, source)
def object_emitter(target, source, env, parent_emitter):
"""Sets up the PCH dependencies for an object file."""
validate_vars(env)
parent_emitter(target, source, env)
# Add a dependency, but only if the target (e.g. 'Source1.obj')
# doesn't correspond to the pre-compiled header ('Source1.pch').
# If the basenames match, then this was most likely caused by
# someone adding the source file to both the env.PCH() and the
# env.Program() calls, and adding the explicit dependency would
# cause a cycle on the .pch file itself.
#
# See issue #2505 for a discussion of what to do if it turns
# out this assumption causes trouble in the wild:
# http://scons.tigris.org/issues/show_bug.cgi?id=2505
if 'PCH' in env:
pch = env['PCH']
if str(target[0]) != SCons.Util.splitext(str(pch))[0] + '.obj':
env.Depends(target, pch)
return (target, source)
def static_object_emitter(target, source, env):
return object_emitter(target, source, env,
SCons.Defaults.StaticObjectEmitter)
def shared_object_emitter(target, source, env):
return object_emitter(target, source, env,
SCons.Defaults.SharedObjectEmitter)
pch_action = SCons.Action.Action('$PCHCOM', '$PCHCOMSTR')
pch_builder = SCons.Builder.Builder(action=pch_action, suffix='.pch',
emitter=pch_emitter,
source_scanner=SCons.Tool.SourceFileScanner)
# Logic to build .rc files into .res files (resource files)
res_scanner = SCons.Scanner.RC.RCScan()
res_action = SCons.Action.Action('$RCCOM', '$RCCOMSTR')
res_builder = SCons.Builder.Builder(action=res_action,
src_suffix='.rc',
suffix='.res',
src_builder=[],
source_scanner=res_scanner)
def msvc_batch_key(action, env, target, source):
"""
Returns a key to identify unique batches of sources for compilation.
If batching is enabled (via the $MSVC_BATCH setting), then all
target+source pairs that use the same action, defined by the same
environment, and have the same target and source directories, will
be batched.
Returning None specifies that the specified target+source should not
be batched with other compilations.
"""
# Fixing MSVC_BATCH mode. Previous if did not work when MSVC_BATCH
# was set to False. This new version should work better.
# Note we need to do the env.subst so $MSVC_BATCH can be a reference to
# another construction variable, which is why we test for False and 0
# as strings.
if not 'MSVC_BATCH' in env or env.subst('$MSVC_BATCH') in ('0', 'False', '', None):
# We're not using batching; return no key.
return None
t = target[0]
s = source[0]
if os.path.splitext(t.name)[0] != os.path.splitext(s.name)[0]:
# The base names are different, so this *must* be compiled
# separately; return no key.
return None
return (id(action), id(env), t.dir, s.dir)
def msvc_output_flag(target, source, env, for_signature):
"""
Returns the correct /Fo flag for batching.
If batching is disabled or there's only one source file, then we
return an /Fo string that specifies the target explicitly. Otherwise,
we return an /Fo string that just specifies the first target's
directory (where the Visual C/C++ compiler will put the .obj files).
"""
# Fixing MSVC_BATCH mode. Previous if did not work when MSVC_BATCH
# was set to False. This new version should work better. Removed
# len(source)==1 as batch mode can compile only one file
# (and it also fixed problem with compiling only one changed file
# with batch mode enabled)
if not 'MSVC_BATCH' in env or env.subst('$MSVC_BATCH') in ('0', 'False', '', None):
return '/Fo$TARGET'
else:
# The Visual C/C++ compiler requires a \ at the end of the /Fo
# option to indicate an output directory. We use os.sep here so
# that the test(s) for this can be run on non-Windows systems
# without having a hard-coded backslash mess up command-line
# argument parsing.
return '/Fo${TARGET.dir}' + os.sep
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR",
batch_key=msvc_batch_key,
targets='$CHANGED_TARGETS')
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR",
batch_key=msvc_batch_key,
targets='$CHANGED_TARGETS')
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR",
batch_key=msvc_batch_key,
targets='$CHANGED_TARGETS')
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR",
batch_key=msvc_batch_key,
targets='$CHANGED_TARGETS')
def generate(env):
"""Add Builders and construction variables for MSVC++ to an Environment."""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
# TODO(batch): shouldn't reach in to cmdgen this way; necessary
# for now to bypass the checks in Builder.DictCmdGenerator.__call__()
# and allow .cc and .cpp to be compiled in the same command line.
static_obj.cmdgen.source_ext_match = False
shared_obj.cmdgen.source_ext_match = False
for suffix in CSuffixes:
static_obj.add_action(suffix, CAction)
shared_obj.add_action(suffix, ShCAction)
static_obj.add_emitter(suffix, static_object_emitter)
shared_obj.add_emitter(suffix, shared_object_emitter)
for suffix in CXXSuffixes:
static_obj.add_action(suffix, CXXAction)
shared_obj.add_action(suffix, ShCXXAction)
static_obj.add_emitter(suffix, static_object_emitter)
shared_obj.add_emitter(suffix, shared_object_emitter)
env['CCPDBFLAGS'] = SCons.Util.CLVar(['${(PDB and "/Z7") or ""}'])
env['CCPCHFLAGS'] = SCons.Util.CLVar(['${(PCH and "/Yu%s \\\"/Fp%s\\\""%(PCHSTOP or "",File(PCH))) or ""}'])
env['_MSVC_OUTPUT_FLAG'] = msvc_output_flag
env['_CCCOMCOM'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS $CCPCHFLAGS $CCPDBFLAGS'
env['CC'] = 'cl'
env['CCFLAGS'] = SCons.Util.CLVar('/nologo')
env['CFLAGS'] = SCons.Util.CLVar('')
env['CCCOM'] = '${TEMPFILE("$CC $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $CFLAGS $CCFLAGS $_CCCOMCOM","$CCCOMSTR")}'
env['SHCC'] = '$CC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
env['SHCFLAGS'] = SCons.Util.CLVar('$CFLAGS')
env['SHCCCOM'] = '${TEMPFILE("$SHCC $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $SHCFLAGS $SHCCFLAGS $_CCCOMCOM","$SHCCCOMSTR")}'
env['CXX'] = '$CC'
env['CXXFLAGS'] = SCons.Util.CLVar('$( /TP $)')
env['CXXCOM'] = '${TEMPFILE("$CXX $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $CXXFLAGS $CCFLAGS $_CCCOMCOM","$CXXCOMSTR")}'
env['SHCXX'] = '$CXX'
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
env['SHCXXCOM'] = '${TEMPFILE("$SHCXX $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $SHCXXFLAGS $SHCCFLAGS $_CCCOMCOM","$SHCXXCOMSTR")}'
env['CPPDEFPREFIX'] = '/D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '/I'
env['INCSUFFIX'] = ''
# env.Append(OBJEMITTER = [static_object_emitter])
# env.Append(SHOBJEMITTER = [shared_object_emitter])
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
env['RC'] = 'rc'
env['RCFLAGS'] = SCons.Util.CLVar('')
env['RCSUFFIXES']=['.rc','.rc2']
env['RCCOM'] = '$RC $_CPPDEFFLAGS $_CPPINCFLAGS $RCFLAGS /fo$TARGET $SOURCES'
env['BUILDERS']['RES'] = res_builder
env['OBJPREFIX'] = ''
env['OBJSUFFIX'] = '.obj'
env['SHOBJPREFIX'] = '$OBJPREFIX'
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
# Set-up ms tools paths
msvc_setup_env_once(env)
env['CFILESUFFIX'] = '.c'
env['CXXFILESUFFIX'] = '.cc'
env['PCHPDBFLAGS'] = SCons.Util.CLVar(['${(PDB and "/Yd") or ""}'])
env['PCHCOM'] = '$CXX /Fo${TARGETS[1]} $CXXFLAGS $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Yc$PCHSTOP /Fp${TARGETS[0]} $CCPDBFLAGS $PCHPDBFLAGS'
env['BUILDERS']['PCH'] = pch_builder
if 'ENV' not in env:
env['ENV'] = {}
if 'SystemRoot' not in env['ENV']: # required for dlls in the winsxs folders
env['ENV']['SystemRoot'] = SCons.Platform.win32.get_system_root()
def exists(env):
return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
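# A usage sketch (an added example; the SConstruct below is hypothetical):
# enable batch compilation so sources that share target and source
# directories are compiled by a single cl invocation.
#
#   env = Environment(tools=['msvc'])
#   env['MSVC_BATCH'] = True
#   env.Program('app', ['a.c', 'b.c', 'c.c'])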
|
timj/scons
|
src/engine/SCons/Tool/msvc.py
|
Python
|
mit
| 11,390
|
import fresh_tomatoes
import media
toy_story = media.Movie("Toy Story",
"A story of a boy and his toys that come to life",
"http://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg",
"https://www.youtube.com/watch?v=vwyZH85NQC4")
#print(toy_story.storyline)
avatar = media.Movie("Avatar","A marine on an alien planet",
"http://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg",
"http://www.youtube.com/watch?v=5PSNL1qE6VY")
dawn = media.Movie("Dawn Of The Planet Of The Apes",
"A story about an ape",
"http://upload.wikimedia.org/wikipedia/en/7/77/Dawn_of_the_Planet_of_the_Apes.jpg",
"http://www.youtube.com/watch?v=eq1sTNGDXo0")
gonegirl = media.Movie("Gone Girl",
"A sad story",
"http://upload.wikimedia.org/wikipedia/en/0/05/Gone_Girl_Poster.jpg",
"http://www.youtube.com/watch?v=Ym3LB0lOJ0o")
avenger = media.Movie("Avenger",
"A story about superheroes",
"http://upload.wikimedia.org/wikipedia/en/3/37/Captain_America_The_First_Avenger_poster.jpg",
"http://www.youtube.com/watch?v=hIR8Ar-Z4hw")
dark_knight = media.Movie("Dark knight rises",
"A story about batman",
"http://upload.wikimedia.org/wikipedia/en/8/83/Dark_knight_rises_poster.jpg",
"http://www.youtube.com/watch?v=g8evyE9TuYk")
movies = [toy_story, avatar, dawn, gonegirl, avenger, dark_knight]
#fresh_tomatoes.open_movies_page(movies)
#print (media.Movie.VALID_RATINGS)
print (media.Movie.__doc__)
|
tuanvu216/udacity-course
|
programming_foudations_with_python/entertainment_center.py
|
Python
|
mit
| 1,734
|
import os, sys, shutil
if "SGE_ROOT" not in os.environ:
print "scramble(): Please set SGE_ROOT to the path of your SGE installation"
print "scramble(): before scrambling DRMAA_python"
sys.exit(1)
# change back to the build dir
if os.path.dirname( sys.argv[0] ) != "":
os.chdir( os.path.dirname( sys.argv[0] ) )
# find setuptools
scramble_lib = os.path.join( "..", "..", "..", "lib" )
sys.path.append( scramble_lib )
import get_platform # fixes fat python 2.5
try:
from setuptools import *
import pkg_resources
except:
from ez_setup import use_setuptools
use_setuptools( download_delay=8, to_dir=scramble_lib )
from setuptools import *
import pkg_resources
# clean, in case you're running this by hand from a dirty module source dir
for dir in [ "build", "dist", "gridengine" ]:
if os.access( dir, os.F_OK ):
print "scramble_it.py: removing dir:", dir
shutil.rmtree( dir )
# patch
file = "setup.py"
print "scramble(): Patching", file
if not os.access( "%s.orig" %file, os.F_OK ):
shutil.copyfile( file, "%s.orig" %file )
i = open( "%s.orig" %file, "r" )
o = open( file, "w" )
for line in i.readlines():
if line == 'SGE6_ROOT="/scratch_test02/SGE6"\n':
line = 'SGE6_ROOT="%s"\n' % os.environ["SGE_ROOT"]
if line.startswith('link_args ='):
line = 'link_args = [ "-L%s" % os.path.join(SGE6_ROOT, "lib", SGE6_ARCH), "-ldrmaa" ]\n'
print >>o, line,
i.close()
o.close()
# go
me = sys.argv[0]
sys.argv = [ me ]
sys.argv.append( "build" )
execfile( "setup.py", globals(), locals() )
# fix _cDRMAA.so rpath
so = "build/lib.%s-%s/_cDRMAA.so" % ( pkg_resources.get_platform(), sys.version[:3] )
libdrmaa = os.path.join(SGE6_ROOT, "lib", SGE6_ARCH, "libdrmaa.dylib.1.0" )
os.system( "install_name_tool -change libdrmaa.dylib.1.0 %s %s" % ( libdrmaa, so ) )
sys.argv = [ me ]
sys.argv.append( "bdist_egg" )
execfile( "setup.py", globals(), locals() )
|
volpino/Yeps-EURAC
|
scripts/scramble/scripts/DRMAA_python-macosx.py
|
Python
|
mit
| 1,938
|
# -*- coding:utf-8 -*-
def length_of_last_word(str_):
    # split() without an argument collapses runs of whitespace and drops
    # leading/trailing blanks, so trailing spaces don't produce an empty
    # final element (str_.split(" ") would).
    words = str_.split()
    if not words:
        return 0
    return len(words[-1])
if __name__ == '__main__':
result = length_of_last_word("hello world")
print(result)
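    # Added checks (not in the original) showing the whitespace handling:
    assert length_of_last_word("hello world  ") == 5
    assert length_of_last_word("") == 0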
|
xudongyangwork/algo
|
day43/xudy.py
|
Python
|
mit
| 251
|
# -*- coding: utf-8 -*-
from distutils.core import setup
from setuptools import find_packages
LONGDOC = """
A very simple python library, used to format datetime with *** time ago statement.
Install
pip install timeago
Usage
import timeago, datetime
d = datetime.datetime.now() + datetime.timedelta(seconds = 60 * 3.4)
# locale
print (timeago.format(d, locale='zh_CN')) # will print 3分钟后
"""
setup(name = 'timeago',
version = '1.0.7',
description = 'A very simple python library, used to format datetime with `*** time ago` statement. eg: "3 hours ago".',
long_description = LONGDOC,
author = 'hustcc',
author_email = 'i@hust.cc',
url = 'https://github.com/hustcc/timeago',
license = 'MIT',
install_requires = [],
classifiers = [
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Natural Language :: Chinese (Simplified)',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities'
],
keywords = 'timeago, seconds ago, minutes ago, hours ago, just now',
packages = find_packages('src'),
package_dir = {'':'src'},
)
|
avrong/timeago
|
setup.py
|
Python
|
mit
| 1,607
|
import subprocess
import os
"""
What are the differences and similarities between ffmpeg, libav, and avconv?
https://stackoverflow.com/questions/9477115
ffmpeg encoders, highest to lowest quality:
libopus > libvorbis >= libfdk_aac > aac > libmp3lame
libfdk_aac, due to licensing restrictions, needs to be compiled by the end user;
on MacOS `brew install ffmpeg --with-fdk-aac` will do just that. Other OS?
https://trac.ffmpeg.org/wiki/Encode/AAC
"""
def song(input_song, output_song, folder, avconv=False, verbose=False):
"""Do the audio format conversion."""
if not input_song == output_song:
print('Converting {0} to {1}'.format(
input_song, output_song.split('.')[-1]))
if avconv:
exit_code = convert_with_avconv(input_song, output_song, folder, verbose)
else:
exit_code = convert_with_ffmpeg(input_song, output_song, folder, verbose)
return exit_code
return 0
def convert_with_avconv(input_song, output_song, folder, verbose):
"""Convert the audio file using avconv."""
if verbose:
level = 'debug'
else:
level = '0'
command = ['avconv',
'-loglevel', level,
'-i', os.path.join(folder, input_song),
'-ab', '192k',
os.path.join(folder, output_song)]
return subprocess.call(command)
def convert_with_ffmpeg(input_song, output_song, folder, verbose):
"""Convert the audio file using FFmpeg."""
ffmpeg_pre = 'ffmpeg -y '
if not verbose:
ffmpeg_pre += '-hide_banner -nostats -v panic '
    input_ext = input_song.split('.')[-1]
    output_ext = output_song.split('.')[-1]
    # Default to no extra flags so ffmpeg_params is always defined, even for
    # an extension pair not handled below (avoids an UnboundLocalError).
    ffmpeg_params = ''
if input_ext == 'm4a':
if output_ext == 'mp3':
ffmpeg_params = '-codec:v copy -codec:a libmp3lame -q:a 2 '
elif output_ext == 'webm':
ffmpeg_params = '-c:a libopus -vbr on -b:a 192k -vn '
elif input_ext == 'webm':
if output_ext == 'mp3':
ffmpeg_params = ' -ab 192k -ar 44100 -vn '
elif output_ext == 'm4a':
ffmpeg_params = '-cutoff 20000 -c:a libfdk_aac -b:a 192k -vn '
command = '{0}-i {1} {2}{3}'.format(
ffmpeg_pre, os.path.join(folder, input_song), ffmpeg_params, os.path.join(folder, output_song)).split(' ')
return subprocess.call(command)
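# A minimal usage sketch (hypothetical file names, not part of the module):
# convert an m4a download in /tmp/music to mp3 using ffmpeg.
if __name__ == '__main__':
    song('track.m4a', 'track.mp3', '/tmp/music')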
|
AndreaMordenti/spotydowny
|
core/convert.py
|
Python
|
mit
| 2,314
|
import os
import io
import stat
import time
import threading
import sublime
import sublime_plugin
# Set of IDs of view that are being monitored.
TAILF_VIEWS = set()
STATUS_KEY = 'tailf'
class TailF(sublime_plugin.TextCommand):
'''
Start monitoring file in `tail -f` line style.
'''
def __init__(self, *args, **kwargs):
super(TailF, self).__init__(*args, **kwargs)
self.prev_file_size = -1
self.prev_mod_time = -1
def run(self, edit):
self.view.set_read_only(True)
t = threading.Thread(target=self.thread_handler)
TAILF_VIEWS.add(self.view.id())
self.view.set_status(STATUS_KEY, 'TailF mode')
t.start()
def thread_handler(self):
while True:
if self.view.id() in TAILF_VIEWS:
if self.view.file_name() is None:
                    sublime.error_message('File not saved on disk')
return
else:
file_stat = os.stat(self.view.file_name())
new_size = file_stat[stat.ST_SIZE]
new_mod_time = file_stat[stat.ST_MTIME]
if (new_mod_time > self.prev_mod_time or
new_size != self.prev_file_size):
self.view.run_command('update_file')
self.view.run_command('move_to',
args={'to': 'eof', 'extend': False})
self.prev_file_size = new_size
self.prev_mod_time = new_mod_time
                # Fall back to a 1-second poll if the setting is missing.
                time.sleep(self.view.settings().get('tailf_pull_rate', 1))
else:
return
def description(self):
return 'Starts monitoring file on disk'
class StopTailF(sublime_plugin.TextCommand):
'''
Stop monitoring file command.
'''
def run(self, edit):
TAILF_VIEWS.remove(self.view.id())
# restore view to previous state
self.view.set_read_only(False)
self.view.set_scratch(False)
self.view.erase_status(STATUS_KEY)
def description(self):
return 'Stops monitoring file on disk'
class UpdateFile(sublime_plugin.TextCommand):
'''
Reloads content of the file and replaces view content with it.
'''
def run(self, edit):
read_only = self.view.is_read_only()
self.view.set_read_only(False)
with io.open(self.view.file_name(), 'r', encoding='utf-8-sig') as f:
content = f.read()
whole_file = sublime.Region(0, self.view.size())
self.view.replace(edit, whole_file, content)
self.view.set_read_only(read_only)
        # don't ask the user whether to save changes to disk
self.view.set_scratch(True)
class TailFEventListener(sublime_plugin.EventListener):
'''
Listener that removes files from monitored files once file is
about to be closed.
'''
def on_pre_close(self, view):
if view.id() in TAILF_VIEWS:
TAILF_VIEWS.remove(view.id())
|
delicb/SublimeConfig
|
tailf.py
|
Python
|
mit
| 3,015
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
import logging
from json import loads, dumps
from datetime import datetime, timedelta
from redis import Redis, RedisError
from thumbor.storages import BaseStorage
from thumbor.utils import on_exception
from tornado.concurrent import return_future
logger = logging.getLogger('thumbor')
class Storage(BaseStorage):
storage = None
def __init__(self, context, shared_client=True):
        '''Initialize the RedisStorage
        :param thumbor.context.Context context: Current context
        :param boolean shared_client: When set to True a singleton client will
            be used.
        '''
BaseStorage.__init__(self, context)
self.shared_client = shared_client
self.storage = self.reconnect_redis()
def get_storage(self):
'''Get the storage instance.
:return Redis: Redis instance
'''
if self.storage:
return self.storage
self.storage = self.reconnect_redis()
return self.storage
def reconnect_redis(self):
if self.shared_client and Storage.storage:
return Storage.storage
storage = Redis(
port=self.context.config.REDIS_STORAGE_SERVER_PORT,
host=self.context.config.REDIS_STORAGE_SERVER_HOST,
db=self.context.config.REDIS_STORAGE_SERVER_DB,
password=self.context.config.REDIS_STORAGE_SERVER_PASSWORD
)
if self.shared_client:
Storage.storage = storage
return storage
def on_redis_error(self, fname, exc_type, exc_value):
'''Callback executed when there is a redis error.
:param string fname: Function name that was being called.
:param type exc_type: Exception type
:param Exception exc_value: The current exception
:returns: Default value or raise the current exception
'''
if self.shared_client:
Storage.storage = None
else:
self.storage = None
if self.context.config.REDIS_STORAGE_IGNORE_ERRORS is True:
logger.error("[REDIS_STORAGE] %s" % exc_value)
if fname == '_exists':
return False
return None
else:
raise exc_value
def __key_for(self, url):
return 'thumbor-crypto-%s' % url
def __detector_key_for(self, url):
return 'thumbor-detector-%s' % url
@on_exception(on_redis_error, RedisError)
def put(self, path, bytes):
storage = self.get_storage()
storage.set(path, bytes)
storage.expireat(
path, datetime.now() + timedelta(
seconds=self.context.config.STORAGE_EXPIRATION_SECONDS
)
)
@on_exception(on_redis_error, RedisError)
def put_crypto(self, path):
if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
return
if not self.context.server.security_key:
raise RuntimeError(
"STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be True if no "
"SECURITY_KEY specified"
)
key = self.__key_for(path)
self.get_storage().set(key, self.context.server.security_key)
@on_exception(on_redis_error, RedisError)
def put_detector_data(self, path, data):
key = self.__detector_key_for(path)
self.get_storage().set(key, dumps(data))
@return_future
def get_crypto(self, path, callback):
callback(self._get_crypto(path))
@on_exception(on_redis_error, RedisError)
def _get_crypto(self, path):
if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
return None
crypto = self.get_storage().get(self.__key_for(path))
if not crypto:
return None
return crypto
@return_future
def get_detector_data(self, path, callback):
callback(self._get_detector_data(path))
@on_exception(on_redis_error, RedisError)
def _get_detector_data(self, path):
data = self.get_storage().get(self.__detector_key_for(path))
if not data:
return None
return loads(data)
@return_future
def exists(self, path, callback):
callback(self._exists(path))
@on_exception(on_redis_error, RedisError)
def _exists(self, path):
return self.get_storage().exists(path)
@on_exception(on_redis_error, RedisError)
def remove(self, path):
self.get_storage().delete(path)
@return_future
def get(self, path, callback):
@on_exception(self.on_redis_error, RedisError)
def wrap():
return self.get_storage().get(path)
callback(wrap())
|
wking/thumbor
|
thumbor/storages/redis_storage.py
|
Python
|
mit
| 4,944
|
import re
import string
import nltk
from bs4 import BeautifulSoup
__author__ = 'nolram'
class NewsItem:
def __init__(self, news, stop_words):
self.all_words = []
self.stop_words = stop_words
self.regex = re.compile('[%s]' % re.escape(string.punctuation))
if "titulo" in news and "categoria" in news:
self.add_words(news["titulo"])
self.title = news["titulo"]
if "subcategoria" in news:
self.category = news["subcategoria"].lower()
else:
self.category = news["categoria"].lower()
if "texto" in news:
self.add_words(" ".join(news["texto"]))
self.url = news["url"]
def normalized_words(self, s):
words = []
oneline = s.replace('\n', ' ')
soup = BeautifulSoup(oneline.strip(), 'html.parser')
cleaned = soup.get_text()
toks1 = cleaned.split()
for t1 in toks1:
translated = self.regex.sub('', t1)
toks2 = translated.split()
for t2 in toks2:
t2s = t2.strip()
if len(t2s) > 1:
words.append(t2s.lower())
return words
def word_count(self):
return len(self.all_words)
def word_freq_dist(self):
freqs = nltk.FreqDist() # class nltk.probability.FreqDist
for w in self.all_words:
freqs.inc(w, 1)
return freqs
def add_words(self, s):
words = self.normalized_words(s)
for w in words:
if w not in self.stop_words:
self.all_words.append(w)
def features(self, top_words):
word_set = set(self.all_words)
features = {}
features['url'] = self.url
for w in top_words:
features["w_%s" % w] = (w in word_set)
return features
def normalized_frequency_power(self, word, freqs, largest_count):
n = self.normalized_frequency_value(word, freqs, largest_count)
return pow(n, 2)
def normalized_frequency_value(self, word, freqs, largest_count):
count = freqs.get(word)
n = 0
if count is None:
n = float(0)
else:
n = ((float(count) * float(largest_count)) / float(freqs.N())) * 100.0
return n
def normalized_boolean_value(self, word, freqs, largest_count):
count = freqs.get(word)
if count is None:
return float(0)
else:
return float(1)
def knn_data(self, top_words):
data_array = []
freqs = self.word_freq_dist()
largest_count = freqs.values()[0]
features = {}
features['url'] = self.url
for w in top_words:
data_array.append(self.normalized_boolean_value(w, freqs, largest_count))
print "knn_data: %s" % data_array
return data_array
def as_debug_array(self, guess):
l = []
l.append('---')
#l.append('lookup_key: %s' % (self.lookup_key()))
l.append('Categoria: %s' % (self.category))
l.append('Palpite: %s' % (guess))
l.append('URL: %s' % (self.url))
l.append('Titulos: %s' % (self.title))
l.append('')
l.append('Todas as palavras por contagem')
freqs = nltk.FreqDist([w.lower() for w in self.all_words])
for w in freqs.keys():
l.append("%-20s %d" % (w, freqs.get(w)))
l.append('')
l.append('all_words, sequentially:')
for w in self.all_words:
l.append(w)
return l
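# A construction sketch (hypothetical input, not from the original crawler):
# NewsItem expects a crawled-document dict plus a stop-word collection.
#
#   item = NewsItem({"titulo": "Bolsa sobe", "categoria": "Economia",
#                    "texto": ["Mercado em alta"], "url": "http://example.com"},
#                   set(["em", "de"]))
#   print item.word_count()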
|
nolram/news_crawler
|
classificador/news_item.py
|
Python
|
mit
| 3,574
|
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
LOGIN_REDIRECT_URL = 'reviews'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'  # allauth expects 'mandatory', 'optional' or 'none', not a boolean
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_PASSWORD_MIN_LENGTH = 10
ALLOW_NEW_REGISTRATIONS = True
|
borfast/housing-reviews
|
housing_reviews/settings/auth.py
|
Python
|
mit
| 476
|
import re
from six.moves import zip
def check_tag(root, expected):
pattern = re.compile(r"{.*}([a-zA-Z]+)")
for tag, el in zip(expected, root.iter()):
m = pattern.match(el.tag)
assert m is not None
assert m.group(1) == tag, "Expect tag=%s, get %s" % (tag, m.group(1))
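# A minimal self-check (hypothetical XML, an added example):
if __name__ == "__main__":
    import xml.etree.ElementTree as ET
    root = ET.fromstring('<w:document xmlns:w="urn:x"><w:body/></w:document>')
    check_tag(root, ["document", "body"])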
|
kunxi/docxgen
|
tests/__init__.py
|
Python
|
mit
| 302
|
"""
Format and compress XML documents
"""
import getopt
import re
import sys
import xml.parsers.expat
__version__ = "0.2.4"
DEFAULT_BLANKS = False
DEFAULT_COMPRESS = False
DEFAULT_SELFCLOSE = False
DEFAULT_CORRECT = True
DEFAULT_INDENT = 2
DEFAULT_INDENT_CHAR = " "
DEFAULT_INLINE = True
DEFAULT_ENCODING_INPUT = None
DEFAULT_ENCODING_OUTPUT = None
DEFAULT_EOF_NEWLINE = False
class Formatter:
# Use internal encoding:
encoding_internal = None
def __init__(
self,
indent=DEFAULT_INDENT,
preserve=[],
blanks=DEFAULT_BLANKS,
compress=DEFAULT_COMPRESS,
selfclose=DEFAULT_SELFCLOSE,
indent_char=DEFAULT_INDENT_CHAR,
encoding_input=DEFAULT_ENCODING_INPUT,
encoding_output=DEFAULT_ENCODING_OUTPUT,
inline=DEFAULT_INLINE,
correct=DEFAULT_CORRECT,
eof_newline=DEFAULT_EOF_NEWLINE,
):
# Minify the XML document:
self.compress = compress
# Use self-closing tags
self.selfclose = selfclose
# Correct text nodes
self.correct = correct
# Decode the XML document:
self.encoding_input = self.enc_normalize(encoding_input)
        # Encode output by:
self.encoding_output = self.enc_normalize(encoding_output)
# Insert indent = indent*level*indent_char:
self.indent = int(indent)
# Indent by char:
self.indent_char = indent_char
# Format inline objects:
self.inline = inline
        # Don't compress these elements and their descendants:
        self.preserve = preserve
        # Preserve blank lines (collapse multiple into one):
        self.blanks = blanks
# Always add a newline character at EOF
self.eof_newline = eof_newline
@property
def encoding_effective(self, enc=None):
if self.encoding_output:
return self.encoding_output
elif self.encoding_internal:
return self.encoding_internal
elif self.encoding_input:
return self.encoding_input
else:
return "UTF-8"
def enc_normalize(self, string):
""" Format an Encoding identifier to upper case. """
if isinstance(string, str):
return string.upper()
return None
def enc_encode(self, strg):
""" Encode a formatted XML document in target"""
if sys.version_info > (3, 0):
return strg.encode(self.encoding_effective) # v3
return strg.decode("utf-8").encode(self.encoding_effective) # v2
def enc_output(self, path, strg):
""" Output according to encoding """
fh = sys.stdout
if strg is not None:
if path is not None:
open(path, "w+b").write(strg)
elif sys.version_info > (3, 0):
fh.buffer.write(strg)
else:
fh.write(strg)
def format_string(self, xmldoc=""):
""" Format a XML document given by xmldoc """
token_list = Formatter.TokenList(self)
token_list.parser.Parse(xmldoc)
return self.enc_encode(str(token_list))
def format_file(self, file):
""" Format a XML document given by path name """
fh = open(file, "rb")
token_list = Formatter.TokenList(self)
token_list.parser.ParseFile(fh)
fh.close()
return self.enc_encode(str(token_list))
class TokenList:
# Being in a cdata section:
cdata_section = False
# Lock deletion of leading whitespace:
desc_mixed_level = None
# Lock indenting:
indent_level = None
# Reference the Formatter:
formatter = None
# Count levels:
level_counter = 0
# Lock deletion of whitespaces:
preserve_level = None
def __init__(self, formatter):
# Keep tokens in a list:
self._list = []
self.formatter = formatter
self.parser = xml.parsers.expat.ParserCreate(
encoding=self.formatter.encoding_input
)
self.parser.specified_attributes = 1
self.parser.buffer_text = True
# Push tokens to buffer:
for pattern in [
"XmlDecl%s",
"ElementDecl%s",
"AttlistDecl%s",
"EntityDecl%s",
"StartElement%s",
"EndElement%s",
"ProcessingInstruction%s",
"CharacterData%s",
"Comment%s",
"Default%s",
"StartDoctypeDecl%s",
"EndDoctypeDecl%s",
"StartCdataSection%s",
"EndCdataSection%s",
"NotationDecl%s",
]:
setattr(
self.parser, pattern % "Handler", self.xml_handler(pattern % "")
)
def __iter__(self):
return iter(self._list)
def __len__(self):
return len(self._list)
def __getitem__(self, pos):
if 0 <= pos < len(self._list):
return self._list[pos]
else:
raise IndexError
def __setitem__(self, pos, value):
if 0 <= pos < len(self._list):
self._list[pos] = value
else:
raise IndexError
def __str__(self):
""" Returns the formatted XML document in UTF-8. """
for step in ["configure", "pre_operate", "post_operate"]:
for tk in iter(self):
getattr(tk, step)()
result = ""
for tk in iter(self):
result += str(tk)
if self.formatter.eof_newline and not result.endswith("\n"):
result += "\n"
return result
def append(self, tk):
""" Add token to tokenlist. """
tk.pos = len(self._list)
self._list.append(tk)
def level_increment(self):
""" Increment level counter. """
self.level_counter += 1
def level_decrement(self):
""" Decrement level counter. """
self.level_counter -= 1
def token_descendant_mixed(self, tk):
""" Mark descendants of mixed content. """
if tk.name == "StartElement":
# Mark every descendant:
if tk.content_model in [2, 3] and self.desc_mixed_level is None:
self.desc_mixed_level = tk.level
return False
return self.desc_mixed_level is not None
elif tk.name == "EndElement":
# Stop marking every descendant:
if tk.level is self.desc_mixed_level:
self.desc_mixed_level = None
elif self.desc_mixed_level is not None:
return True
return False
elif self.desc_mixed_level is None:
return False
return self.desc_mixed_level >= tk.level - 1
def sequence(self, tk, scheme=None):
"""Returns sublist of token list.
None: next to last
EndElement: first to previous"""
if scheme == "EndElement" or (scheme is None and tk.end):
return reversed(self._list[: tk.pos])
return self._list[(tk.pos + 1) :]
        def token_indent(self, tk):
            """ Indent outside of text and mixed content. """
            if self.formatter.inline:
                return self.token_indent_inline(tk)
            if tk.name == "StartElement":
# Block indenting for descendants of text and mixed content:
if tk.content_model in [2, 3] and self.indent_level is None:
self.indent_level = tk.level
elif self.indent_level is not None:
return False
return True
elif tk.name == "EndElement":
# Unblock indenting for descendants of text and mixed content:
if tk.level == self.indent_level:
self.indent_level = None
elif self.indent_level is None:
return True
return False
return self.indent_level is None
def token_indent_inline(self, tk):
""" Indent every element content - no matter enclosed by text or mixed content. """
for itk in iter(self.sequence(tk, "EndElement")):
if itk.level < tk.level and itk.name == "StartElement":
if itk.content_model == 1:
return True
return False
if (
itk.level == tk.level
and tk.name == "EndElement"
and itk.name == "StartElement"
):
if itk.content_model == 1:
return True
return False
return True
def token_model(self, tk):
"""Returns code for content model.
0: empty
1: element
2: text
3: mixed"""
eflag = tflag = 0
for itk in iter(self.sequence(tk)):
# Element boundary found:
if itk.level <= tk.level:
break
# Direct child found:
elif (itk.level - 1) == tk.level:
if itk.start:
eflag = 1
elif itk.not_empty:
tflag = 2
return eflag + tflag
def token_preserve(self, tk):
"""Preseve eyery descendant of an preserved element.
0: not locked
1: just (un)locked
2: locked"""
            # Lock preserving for StartElements:
if tk.name == "StartElement":
if self.preserve_level is not None:
return 2
if tk.arg[0] in self.formatter.preserve:
self.preserve_level = tk.level
return 1
return 0
# Unlock preserving for EndElements:
elif tk.name == "EndElement":
if (
tk.arg[0] in self.formatter.preserve
and tk.level == self.preserve_level
):
self.preserve_level = None
return 1
elif self.preserve_level is None:
return 0
return 2
return self.preserve_level is not None
def whitespace_append_trailing(self, tk):
""" Add a trailing whitespace to previous character data. """
if self.formatter.correct and tk.leading and tk.not_empty:
self.whitespace_append(tk, "EndElement", "StartElement", True)
def whitespace_append_leading(self, tk):
""" Add a leading whitespace to previous character data. """
if self.formatter.correct and tk.trailing and tk.not_empty:
self.whitespace_append(tk)
def whitespace_append(
self, tk, start="StartElement", stop="EndElement", direct=False
):
""" Add a whitspace to token list. """
for itk in self.sequence(tk, start):
if (
itk.empty
or (itk.name == stop and itk.descendant_mixed is False)
or (itk.name == start and abs(tk - itk) == 1)
):
break
elif itk.not_empty or (itk.name == start and itk.descendant_mixed):
self.insert_empty(itk, direct)
break
def whitespace_delete_leading(self, tk):
""" Returns True, if no next token or all empty (up to next end element)"""
if (
self.formatter.correct
and tk.leading
and not tk.preserve
and not tk.cdata_section
):
for itk in self.sequence(tk, "EndElement"):
if itk.trailing:
return True
elif itk.name in ["EndElement", "CharacterData", "EndCdataSection"]:
return False
return True
return False
def whitespace_delete_trailing(self, tk):
"""Returns True, if no next token or all empty (up to next end element)"""
if (
self.formatter.correct
and tk.trailing
and not tk.preserve
and not tk.cdata_section
):
for itk in self.sequence(tk, "StartElement"):
if itk.end:
return True
elif (
itk.name in ["StartElement", "StartCdataSection"]
or itk.not_empty
):
return False
return True
return False
def insert_empty(self, tk, before=True):
""" Insert an Empty Token into token list - before or after tk. """
if not (0 < tk.pos < (len(self) - 1)):
return False
ptk = self[tk.pos - 1]
ntk = self.formatter.CharacterData(self, [" "])
ntk.level = max(ptk.level, tk.level)
ntk.descendant_mixed = tk.descendant_mixed
ntk.preserve = ptk.preserve * tk.preserve
ntk.cdata_section = ptk.cdata_section or tk.cdata_section
if before:
self._list.insert(tk.pos + 1, ntk)
else:
self._list.insert(tk.pos, ntk)
for i in range((tk.pos - 1), len(self._list)):
self._list[i].pos = i
def xml_handler(self, key):
""" Returns lambda function which adds token to token list"""
return lambda *arg: self.append(getattr(self.formatter, key)(self, arg))
class Token(object):
def __init__(self, tklist, arg):
# Reference Token List:
self.list = tklist
# Token datas:
self.arg = list(arg)
# Token is placed in an CDATA section:
self.cdata_section = False
# Token has content model:
self.content_model = None
            # Remove trailing whitespaces:
self.delete_trailing = False
# Remove leading whitespaces:
self.delete_leading = False
# Token is descendant of text or mixed content element:
self.descendant_mixed = False
# Reference to formatter:
self.formatter = tklist.formatter
# Insert indenting white spaces:
self.indent = False
# N-th generation of roots descendants:
self.level = self.list.level_counter
# Token class:
self.name = self.__class__.__name__
# Preserve white spaces within enclosed tokens:
self.preserve = False
# Position in token list:
self.pos = None
def __sub__(self, other):
return self.pos - other.pos
def __unicode__(self):
return ""
# Workaround, see http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/:
if sys.version_info > (3, 0):
__str__ = lambda x: x.__unicode__()
else:
__str__ = lambda x: unicode(x).encode("utf-8")
@property
def end(self):
return self.name == "EndElement"
@property
def empty(self):
return self.name == "CharacterData" and re.match(
r"^[\t\s\n]*$", self.arg[0]
)
@property
def leading(self):
return self.name == "CharacterData" and re.search(
r"^[\t\s\n]+", self.arg[0]
)
@property
def not_empty(self):
return (
self.name == "CharacterData"
and not self.cdata_section
and not re.match(r"^[\t\s\n]+$", self.arg[0])
)
@property
def trailing(self):
return self.name == "CharacterData" and re.search(
r"[\t\s\n]+$", self.arg[0]
)
@property
def start(self):
return self.name == "StartElement"
@property
def correct(self):
return self.formatter.correct
def attribute(self, key, value):
if key and value:
return ' %s="%s"' % (key, value)
elif key:
return ' %s=""' % (key)
return ""
def indent_insert(self):
""" Indent token. """
# Child of root and no empty node
if (
self.level > 0 and not (self.end and self.list[self.pos - 1].start)
) or ( # not empty node:
self.end and not self.list[self.pos - 1].start
):
return self.indent_create(self.level)
return ""
def indent_create(self, times=1):
""" Returns indent string. """
if not self.formatter.compress and self.formatter.indent:
return "\n%s" % (
(times * self.formatter.indent) * self.formatter.indent_char
)
return ""
def identifier(self, systemid, publicid):
# TODO add base parameter:
if publicid and systemid:
return ' PUBLIC "%s" "%s"' % (publicid, systemid)
elif publicid:
return ' PUBLIC "%s"' % publicid
elif systemid:
return ' SYSTEM "%s"' % systemid
return ""
def configure(self):
""" Set token properties. """
self.descendant_mixed = self.list.token_descendant_mixed(self)
self.preserve = self.list.token_preserve(self)
self.cdata_section = self.list.cdata_section
def pre_operate(self):
pass
def post_operate(self):
pass
class AttlistDecl(Token):
def __unicode__(self):
str = self.indent_create()
str += "<!ATTLIST %s %s" % (self.arg[0], self.arg[1])
if self.arg[2] is not None:
str += " %s" % self.arg[2]
if self.arg[4] and not self.arg[3]:
str += " #REQUIRED"
elif self.arg[3] and self.arg[4]:
str += " #FIXED"
elif not self.arg[4] and not self.arg[3]:
str += " #IMPLIED"
if self.arg[3]:
str += ' "%s"' % self.arg[3]
str += ">"
return str
class CharacterData(Token):
def __unicode__(self):
str = self.arg[0]
if not self.preserve and not self.cdata_section:
# remove empty tokens always in element content!
if self.empty and not self.descendant_mixed:
if self.formatter.blanks and not self.formatter.compress and re.match(r"\s*\n\s*\n\s*", str):
str = "\n"
else:
str = ""
else:
if self.correct:
str = re.sub(r"\r\n", "\n", str)
str = re.sub(r"\r|\n|\t", " ", str)
str = re.sub(r"\s+", " ", str)
if self.delete_leading:
str = re.sub(r"^\s", "", str)
if self.delete_trailing:
str = re.sub(r"\s$", "", str)
if not self.cdata_section:
str = re.sub(r"&", "&", str)
str = re.sub(r"<", "<", str)
return str
def pre_operate(self):
self.list.whitespace_append_trailing(self)
self.list.whitespace_append_leading(self)
def post_operate(self):
self.delete_leading = self.list.whitespace_delete_leading(self)
self.delete_trailing = self.list.whitespace_delete_trailing(self)
class Comment(Token):
def __unicode__(self):
str = ""
if self.preserve in [0, 1] and self.indent:
str += self.indent_insert()
str += "<!--%s-->" % re.sub(
r"^[\r\n]+$", "\n", re.sub(r"^[\r\n]+", "\n", self.arg[0])
)
return str
def configure(self):
super(Formatter.Comment, self).configure()
self.indent = self.list.token_indent(self)
class Default(Token):
pass
class EndCdataSection(Token):
def __unicode__(self):
return "]]>"
def configure(self):
self.list.cdata_section = False
class ElementDecl(Token):
def __unicode__(self):
str = self.indent_create()
str += "<!ELEMENT %s%s>" % (self.arg[0], self.evaluate_model(self.arg[1]))
return str
def evaluate_model(self, model, modelStr="", concatStr=""):
childSeq = []
mixed = model[0] == xml.parsers.expat.model.XML_CTYPE_MIXED
hasChilds = len(model[3]) or mixed
if model[0] == xml.parsers.expat.model.XML_CTYPE_EMPTY: # 1
modelStr += " EMPTY"
elif model[0] == xml.parsers.expat.model.XML_CTYPE_ANY: # 2
modelStr += " ANY"
elif model[0] == xml.parsers.expat.model.XML_CTYPE_NAME: # 4
modelStr = "%s" % model[2] # new start
elif model[0] in (
xml.parsers.expat.model.XML_CTYPE_CHOICE,
xml.parsers.expat.model.XML_CTYPE_MIXED,
): # 5
concatStr = "|"
elif model[0] == xml.parsers.expat.model.XML_CTYPE_SEQ: # 6
concatStr = ","
if hasChilds:
modelStr += " ("
if mixed:
childSeq.append("#PCDATA")
for child in model[3]:
childSeq.append(self.evaluate_model(child))
modelStr += concatStr.join(childSeq)
if hasChilds:
modelStr += ")"
modelStr += {
xml.parsers.expat.model.XML_CQUANT_NONE: "",
xml.parsers.expat.model.XML_CQUANT_OPT: "?",
xml.parsers.expat.model.XML_CQUANT_PLUS: "+",
xml.parsers.expat.model.XML_CQUANT_REP: "*",
}[model[1]]
return modelStr
class EndDoctypeDecl(Token):
def __unicode__(self):
str = ""
if self.list[self.pos - 1].name != "StartDoctypeDecl":
str += self.indent_create(0)
str += "]"
str += ">"
str += self.indent_create(0)
return str
class EndElement(Token):
def __init__(self, list, arg):
list.level_decrement()
super(Formatter.EndElement, self).__init__(list, arg)
def __unicode__(self):
str = ""
# Don't close empty nodes on compression mode:
if (
not (self.formatter.compress or self.formatter.selfclose)
or self.list[self.pos - 1].name != "StartElement"
):
if self.preserve in [0] and self.indent:
str += self.indent_insert()
str += "</%s>" % self.arg[0]
return str
def configure(self):
self.descendant_mixed = self.list.token_descendant_mixed(self)
self.preserve = self.list.token_preserve(self)
self.indent = self.list.token_indent(self)
class EntityDecl(Token):
def __unicode__(self):
str = self.indent_create()
str += "<!ENTITY "
if self.arg[1]:
str += "% "
str += "%s " % self.arg[0]
if self.arg[2]:
str += '"%s"' % self.arg[2]
else:
str += "%s " % self.identifier(self.arg[4], self.arg[5])
if self.arg[6]:
str += "NDATA %s" % self.arg[6]
str += ">"
return str
class NotationDecl(Token):
def __unicode__(self):
str = self.indent_create()
str += "<!NOTATION %s%s>" % (
self.arg[0],
self.identifier(self.arg[2], self.arg[3]),
)
return str
class ProcessingInstruction(Token):
def __unicode__(self):
str = ""
if self.preserve in [0, 1] and self.indent:
str += self.indent_insert()
str += "<?%s %s?>" % (self.arg[0], self.arg[1])
return str
def configure(self):
super(Formatter.ProcessingInstruction, self).configure()
self.indent = self.list.token_indent(self)
class StartCdataSection(Token):
def __unicode__(self):
return "<![CDATA["
def configure(self):
self.list.cdata_section = True
class StartDoctypeDecl(Token):
def __unicode__(self):
str = "<!DOCTYPE %s" % (self.arg[0])
if self.arg[1]:
str += self.identifier(self.arg[1], self.arg[2])
if self.arg[3]:
str += " ["
return str
class StartElement(Token):
def __init__(self, list, arg):
super(Formatter.StartElement, self).__init__(list, arg)
self.list.level_increment()
def __unicode__(self):
str = ""
if self.preserve in [0, 1] and self.indent:
str += self.indent_insert()
str += "<%s" % self.arg[0]
for attr in sorted(self.arg[1].keys()):
str += self.attribute(attr, self.arg[1][attr])
if self.list[self.pos + 1].end and (self.formatter.compress or self.formatter.selfclose):
str += "/>"
else:
str += ">"
return str
def configure(self):
self.content_model = self.list.token_model(self)
self.descendant_mixed = self.list.token_descendant_mixed(self)
self.preserve = self.list.token_preserve(self)
self.indent = self.list.token_indent(self)
class XmlDecl(Token):
def __init__(self, list, arg):
super(Formatter.XmlDecl, self).__init__(list, arg)
if len(self.arg) > 1:
self.formatter.encoding_internal = self.arg[1]
def __unicode__(self):
str = "<?xml%s%s" % (
self.attribute("version", self.arg[0]),
self.attribute("encoding", self.formatter.encoding_effective),
)
if self.arg[2] > -1:
str += self.attribute("standalone", "yes")
str += "?>\n"
return str
def cli_usage(msg=""):
""" Output usage for command line tool. """
sys.stderr.write(msg + "\n")
sys.stderr.write(
'Usage: xmlformat [--preserve "pre,literal"] [--blanks]\
[--compress] [--selfclose] [--indent num] [--indent-char char]\
[--outfile file] [--encoding enc] [--outencoding enc]\
[--disable-inlineformatting] [--overwrite] [--disable-correction]\
[--eof-newline]\
[--help] <--infile file | file | - >\n'
)
sys.exit(2)
def cli():
""" Launch xmlformatter from command line. """
res = None
indent = DEFAULT_INDENT
indent_char = DEFAULT_INDENT_CHAR
outfile = None
overwrite = False
preserve = []
blanks = False
compress = DEFAULT_COMPRESS
selfclose = DEFAULT_SELFCLOSE
infile = None
encoding = DEFAULT_ENCODING_INPUT
outencoding = DEFAULT_ENCODING_OUTPUT
inline = DEFAULT_INLINE
correct = DEFAULT_CORRECT
eof_newline = DEFAULT_EOF_NEWLINE
try:
opts, args = getopt.getopt(
sys.argv[1:],
"",
[
"compress",
"selfclose",
"disable-correction",
"disable-inlineformatting",
"encoding=",
"help",
"infile=",
"indent=",
"indent-char=",
"outfile=",
"outencoding=",
"overwrite",
"preserve=",
"blanks",
"eof-newline"
],
)
except getopt.GetoptError as err:
cli_usage(str(err))
for key, value in opts:
if key in ["--indent"]:
indent = value
elif key in ["--preserve"]:
preserve = value.replace(",", " ").split()
elif key in ["--blanks"]:
blanks = True
elif key in ["--help"]:
cli_usage()
elif key in ["--compress"]:
compress = True
elif key in ["--selfclose"]:
selfclose = True
elif key in ["--outfile"]:
outfile = value
elif key in ["--infile"]:
infile = value
elif key in ["--encoding"]:
encoding = value
elif key in ["--outencoding"]:
outencoding = value
elif key in ["--indent-char"]:
indent_char = value
elif key in ["--disable-inlineformatting"]:
inline = False
elif key in ["--disable-correction"]:
correct = False
elif key in ["--overwrite"]:
overwrite = True
elif key in ["--eof-newline"]:
eof_newline = True
try:
formatter = Formatter(
indent=indent,
preserve=preserve,
blanks=blanks,
compress=compress,
selfclose=selfclose,
encoding_input=encoding,
encoding_output=outencoding,
indent_char=indent_char,
inline=inline,
correct=correct,
eof_newline=eof_newline,
)
input_file = None
if infile:
input_file = infile
res = formatter.format_file(input_file)
elif len(args) > 0:
if args[0] == "-":
res = formatter.format_string("".join(sys.stdin.readlines()))
else:
input_file = args[0]
res = formatter.format_file(input_file)
except xml.parsers.expat.ExpatError as err:
cli_usage("XML error: %s" % err)
except IOError as err:
cli_usage("IO error: %s" % err)
    except Exception:
        cli_usage("Unknown error")
if overwrite:
formatter.enc_output(input_file, res)
else:
formatter.enc_output(outfile, res)
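# Usage sketch for the Formatter API defined above (argument values are
# illustrative, not defaults):
#
#   import xmlformatter
#   formatter = xmlformatter.Formatter(indent=2, preserve=["pre"], correct=True)
#   print(formatter.format_string("<root>  <item>text</item>  </root>"))
#   print(formatter.format_file("/path/to/file.xml"))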
|
pamoller/xmlformatter
|
xmlformatter.py
|
Python
|
mit
| 30,777
|
import os
import sys
import django
def main():
"""
Standalone django model test with a 'memory-only-django-installation'.
You can play with a django model without a complete django app installation.
http://www.djangosnippets.org/snippets/1044/
"""
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
os.environ["DJANGO_SETTINGS_MODULE"] = "django.conf.global_settings"
from django.conf import global_settings
global_settings.INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.contenttypes',
'websettings',
)
global_settings.DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
global_settings.MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
global_settings.SECRET_KEY = "secret_key_for_testing"
global_settings.ROOT_URLCONF = "websettings.urls"
global_settings.WEBSETTINGS_MODULE = 'websettings.tests.settingstore'
from django.test.utils import get_runner
test_runner = get_runner(global_settings)
test_runner = test_runner()
failures = test_runner.run_tests(['websettings'])
sys.exit(failures)
if __name__ == '__main__':
main()
|
hirokiky/django-websettings
|
runtest.py
|
Python
|
mit
| 1,437
|
import Network
from time import sleep
from threading import Thread
CALL_ROOMLIST = 0
CALL_WEAPLIST = 1
CALL_PLAYERLIST = 2
CALL_NEWPLAYER = 3
CALL_PLAYERLEFT = 4
CALL_CHAT = 5
CALL_PLAYERDAT = 6
CALL_ROOMSTAT = 7
CALL_LEAVEROOM = 8
CALL_SHOOT = 9
CALL_SCORE = 10
class GameClient(Network.Client):
CONNECTING = 0
JOINING_ROOM = 1
LEAVING_ROOM = 2
rooms = []
players = []
    weapList = []
scores = {}
response = {}
currRoomInfo = None
main = None
status = -1
charId = 0
roomState = -1
roomId = 0
roomName = ""
stateDict = {
"WAITING":0,
"PLAYING":1,
"DEAD":99
}
invStateDict = {
0:"WAITING",
1:"PLAYING",
99:"DEAD"
}
winnerId = -1
def __init__(self, main):
super(GameClient, self).__init__()
self.main = main
self.rooms = []
self.scores = {}
        self.players = []
self.weapList = []
self.response = {}
def connect(self, name, addr, evt=False): #Blocks
self.status = self.CONNECTING
super(GameClient, self).connect(name, addr)
if evt:
self.onConnect(self.complete(self.CONNECTING))
else:
return self.complete(self.CONNECTING)
def connect_async(self, name, addr): #Doesn't block
t = Thread(target=self.connect, args=[name, addr, True])
t.start()
# NETWORK FUNCTIONS
def complete(self, event, timeout = 2):
waited = 0
while event == self.status and waited <= timeout:
sleep(.1)
waited += .1
if waited >= timeout:
return False
return self.response[event]
def done(self, event, response):
self.response[event] = response
self.status = -1
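    # Binary search over self.players, which is kept sorted by player id
    # (sorted in onPlayerList/onNewPlayer); returns the list index or None.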
def playerById(self, pId):
low = 0
high = len(self.players) - 1
while low <= high:
mid = (low + high) >> 1
midId = self.players[mid][0]
if midId < pId:
low = mid + 1
elif midId > pId:
high = mid - 1
else:
return mid
return None
def getPlayers(self):
return self.players
def getRooms(self):
return self.rooms
def clearScores(self):
self.scores = {}
# EVENT FUNCTIONS
def onConnect(self, result):
self.main.onConnect(result)
def onRoomList(self, data):
self.rooms = data
self.main.handleNetworkCall(CALL_ROOMLIST, (self.rooms,))
def onWeapList(self, data):
self.weapList = data
self.main.handleNetworkCall(CALL_WEAPLIST, (self.weapList,))
def onPlayerList(self, playerList, roomId, roomState, yourId):
self.players = playerList
self.playerId = yourId
self.players.sort()
self.roomId = roomId
self.roomState = roomState
if self.status in [self.CONNECTING, self.JOINING_ROOM, self.LEAVING_ROOM]:
self.done(self.status, True)
self.main.handleNetworkCall(CALL_PLAYERLIST, (self.players,))
def onNewPlayer(self, player):
#playername = player[0][:player[0].find('\00')]
self.players.append(player)
self.players.sort()
self.main.handleNetworkCall(CALL_NEWPLAYER, (player,))
def onPlayerLeft(self, data):
playerPos = self.playerById(data[0])
player = self.players[playerPos]
del self.players[playerPos]
if data[2] != -1:
self.players[self.playerById(data[2])] = self.changeTuple(self.players[self.playerById(data[2])], 4, True)
self.main.handleNetworkCall(CALL_PLAYERLEFT, (player,))
def changeTuple(self, tup, key, value):
flist = list(tup)
flist[key] = value
return tuple(flist)
def onChat(self, data):
self.main.handleNetworkCall(CALL_CHAT, (data,))
def onPlayerData(self, data):
self.main.handleNetworkCall(CALL_PLAYERDAT, (data,))
def onRoomStat(self, data):
self.winnerId = data[1]
self.main.handleNetworkCall(CALL_ROOMSTAT, (data,))
#if data[0] == 0:
# self.main.endGame()
#elif data[0] == 1:
# print "starting game"
# self.main.startGame()
def onRoomSwitch(self, action, result):
self.main.onRoomSwitch(action, result)
return result
def onLeaveRoom(self):
if self.status in [self.JOINING_ROOM]:
self.done(self.status, False)
def onShoot(self, bulletdata):
self.main.handleNetworkCall(CALL_SHOOT, (bulletdata,))
def onScore(self, score):
self.scores[score[0]] = score[1], score[2]
self.scores[score[3]] = score[4], score[5]
self.main.handleNetworkCall(CALL_SCORE, (score,))
def onChangeChar(self, charId, playerId):
playerPos = self.playerById(playerId)
player = self.players[playerPos]
self.players[playerPos] = self.changeTuple(self.players[playerPos], 3, charId)
def onDisconnect(self):
self.main.onDisconnect()
## SENDING FUNCTIONS
def joinRoom(self, roomid, roomName, block=False):
if block:
self.status = self.JOINING_ROOM
self.sendDataReliable(Network.Structs.joinRoom.dataType, Network.Structs.joinRoom.pack(roomid)).join()
# This function blocks...
return self.onRoomSwitch(self.JOINING_ROOM, self.complete(self.JOINING_ROOM))
else:
self.winnerId = -1
self.roomName = roomName
Thread(target=self.joinRoom, args=[roomid, roomName, True]).start()
def makeRoom(self, roomName, block=False):
if block:
self.status = self.JOINING_ROOM
self.sendDataReliable(Network.Structs.makeRoom.dataType, Network.Structs.makeRoom.pack(len(roomName))+roomName)
return self.onRoomSwitch(self.JOINING_ROOM, self.complete(self.JOINING_ROOM))
else:
self.winnerId = -1
self.roomName = roomName
Thread(target=self.makeRoom, args=[roomName, True]).start()
def leaveRoom(self, block=False):
if block:
self.status = self.LEAVING_ROOM
self.sendDataReliable(Network.Structs.leaveRoom.dataType, Network.Structs.leaveRoom.pack())
return self.onRoomSwitch(self.LEAVING_ROOM, self.complete(self.LEAVING_ROOM))
else:
self.winnerId = -1
Thread(target=self.leaveRoom, args=[True]).start()
def startGame(self):
self.sendDataReliable(Network.Structs.startGame.dataType, Network.Structs.startGame.pack(0))
def sendGameData(self, gameData):
self.sendData(Network.Structs.playerDat.dataType, gameData)
def sendShoot(self, bullet):
self.sendDataReliable(Network.Structs.shoot.dataType, Network.Structs.shoot.pack(-1, bullet.x, bullet.y, bullet.angle, bullet.type))
def setCharacter(self, charId):
self.sendDataReliable(Network.Structs.setCharacter.dataType, Network.Structs.setCharacter.pack(charId, 0))
self.charId = charId
def sendDeath(self, killerid):
self.sendDataReliable(Network.Structs.onDeath.dataType, Network.Structs.onDeath.pack(killerid))
def sendPicked(self, serverId):
self.sendDataReliable(Network.Structs.takeWeap.dataType, Network.Structs.takeWeap.pack(serverId))
def sendChat(self, data):
self.sendDataReliable(Network.Structs.preChat.dataType, Network.Structs.preChat.pack(len(data)) + data)
def __del__(self):
super(GameClient, self).__del__()
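# Usage sketch (names, address, and port are illustrative; the address is
# whatever Network.Client.connect expects):
#
#   client = GameClient(main_app)
#   client.connect_async('player1', ('127.0.0.1', 9000))
#   # main_app.onConnect(result) is invoked once the connection completes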
|
nemothekid/Colosseum--Year-3XXX
|
GameClient.py
|
Python
|
mit
| 7,613
|
import functools
import itertools
import json
import multiprocessing
import os
import shutil
import sys
import time
import cv2
import numpy
import utility.config
import utility.cv
import utility.geometry
import utility.gui
import utility.image
import utility.log
# Explicitly disable OpenCL. Querying for OpenCL support breaks when multiprocessing.
cv2.ocl.setUseOpenCL(False)
# Create multiprocessing pool. Uses `multiprocessing.cpu_count()` processes by default.
pool = multiprocessing.Pool()
# Load all templates
template_refs = utility.cv.load_template_refs()
template_game_over = utility.cv.load_template_game_over()
# Setup empty trace directory
trace_directory = "trace"
if os.path.exists(trace_directory):
shutil.rmtree(trace_directory)
os.mkdir(trace_directory)
# Wait for game to start
while True:
screenshot = utility.image.downscale(utility.image.screenshot())
if utility.cv.match_template(screenshot, template_game_over)["score"] < 0.5:
# Game over screen cleared
utility.log.separator()
break
utility.log.info("Waiting for game to start...")
time.sleep(1)
# Begin player run loop
while True:
start = time.time()
# Grab screenshot
screenshot_original = utility.image.screenshot()
screenshot = utility.image.downscale(screenshot_original)
utility.log.performance("screenshot", start)
# Calculate character and jump matches
#
# See http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
matches = []
map_fn = functools.partial(utility.cv.multi_match_template, screenshot)
map_args = template_refs
map_results = pool.map_async(map_fn, map_args).get(1)
utility.log.performance("multi_match_template", start)
for (idx, match_template_multiple_results) in enumerate(map_results):
for result in match_template_multiple_results:
# Adjust vertical center for character type towards bottom
if result["type"] == "character":
result["center"] = {
"x": result["center"]["x"],
"y": result["y1"] + ((result["y2"] - result["y1"]) * utility.config.character_vertical_center)
}
            # Filter any conflicts from existing matches, collecting the
            # conflicting matches so the best-scoring one can be kept below
            conflicting_matches = []
            def keep(match):
                if match["type"] != result["type"]:
                    # Not conflicting by type
                    return True
                if match["type"] == "jump" and match["action"] != result["action"]:
                    # Not conflicting by jump action
                    return True
                if not utility.geometry.rects_overlap(match, result):
                    # Not conflicting by overlap
                    return True
                # Conflicts with result; remember it for best-match selection
                conflicting_matches.append(match)
                return False
            matches = [m for m in matches if keep(m)]
# Determine best match to keep
best_match = result
for match in conflicting_matches:
if match["score"] > best_match["score"]:
# Conflicting match has higher score
best_match = match
continue
# Save best match
matches.append(best_match)
utility.log.performance("matches", start)
# Determine action
possible_actions = utility.geometry.calculate_actions(matches)
utility.log.performance("calculate_actions", start)
for action in possible_actions:
if action["action"] == "double" and action["distance"] <= utility.config.double_jump_action_distance:
# Double jump
utility.log.info("double click")
utility.gui.mouse_double_click()
break
elif action["action"] == "single" and action["distance"] <= utility.config.single_jump_action_distance:
# Single jump
utility.log.info("single click")
utility.gui.mouse_click()
break
else:
# Try next action
continue
utility.log.performance("execute action", start)
# Highlight results
composite_image = utility.image.highlight_regions(screenshot, matches)
utility.log.performance("highlight_regions", start)
# Present composite image
# utility.image.show(composite_image)
# utility.log.performance("show", start)
# Log trace
utility.log.trace(trace_directory, screenshot_original, composite_image, matches, possible_actions)
utility.log.performance("trace", start)
# Match game over
game_over = (len(matches) == 0 and utility.cv.match_template(screenshot, template_game_over)["score"] > 0.5)
# Log total
utility.log.performance("total", start)
utility.log.separator()
# Check exit condition
if game_over:
# Game ended
break
|
joeydong/endless-lake-player
|
player.py
|
Python
|
mit
| 4,891
|
import datetime
import os

from tsc.models import *
def test_get_new_reservable_schedules():
old = [
Schedule(1, datetime.datetime(2015, 11, 1, 22, 00), ScheduleStatus.reservable),
Schedule(1, datetime.datetime(2015, 11, 1, 23, 00), ScheduleStatus.reservable),
]
new = [
Schedule(1, datetime.datetime(2015, 11, 1, 22, 00), ScheduleStatus.reserved),
Schedule(1, datetime.datetime(2015, 11, 1, 23, 00), ScheduleStatus.reservable),
Schedule(1, datetime.datetime(2015, 11, 2, 11, 00), ScheduleStatus.reservable),
Schedule(1, datetime.datetime(2015, 11, 2, 11, 30), ScheduleStatus.reserved),
]
schedules = Schedule.get_new_reservable_schedules(old, new)
assert schedules == [
Schedule(1, datetime.datetime(2015, 11, 2, 11, 00), ScheduleStatus.reservable)
]
def test_github_get_latest_tag():
gh = GitHub(os.environ.get("GITHUB_API_TOKEN"))
assert gh.get_latest_version().split(".") >= "1.0.0".split(".")
|
oinume/dmm-eikaiwa-tsc
|
tests/test_models.py
|
Python
|
mit
| 978
|
# -*- coding: utf-8 -*-
"""The application's model objects"""
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
# from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
# Global session manager: DBSession() returns the Thread-local
# session object appropriate for the current web request.
maker = sessionmaker( autoflush = True, autocommit = False,
extension = ZopeTransactionExtension() )
DBSession = scoped_session( maker )
# Base class for all of our model classes: By default, the data model is
# defined with SQLAlchemy's declarative extension, but if you need more
# control, you can switch to the traditional method.
DeclarativeBase = declarative_base()
# There are two convenient ways for you to spare some typing.
# You can have a query property on all your model classes by doing this:
# DeclarativeBase.query = DBSession.query_property()
# Or you can use a session-aware mapper as it was used in TurboGears 1:
# DeclarativeBase = declarative_base(mapper=DBSession.mapper)
# Global metadata.
# The default metadata is the one from the declarative base.
metadata = DeclarativeBase.metadata
# If you have multiple databases with overlapping table names, you'll need a
# metadata for each database. Feel free to rename 'metadata2'.
# metadata2 = MetaData()
#####
# Generally you will not want to define your table's mappers and data objects
# here in __init__, but will want to create modules for them in the model directory
# and import them at the bottom of this file.
#
######
def init_model( engine ):
"""Call me before using any of the tables or classes in the model."""
DBSession.configure( bind = engine )
# If you are using reflection to introspect your database and create
# table objects for you, your tables must be defined and mapped inside
# the init_model function, so that the engine is available if you
# use the model outside tg2, you need to make sure this is called before
# you use the model.
#
# See the following example:
# global t_reflected
# t_reflected = Table("Reflected", metadata,
# autoload=True, autoload_with=engine)
# mapper(Reflected, t_reflected)
# Import your model modules here.
from auth import User, Group, Permission
from logic import *
from sysutil import *
from fileutil import *
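# Example wiring (a sketch; assumes an SQLAlchemy engine created elsewhere):
#
#   from sqlalchemy import create_engine
#   from budget.model import init_model, DBSession
#   init_model(create_engine("sqlite:///:memory:"))
#   # DBSession is now bound; queries run inside the Zope transaction machinery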
|
LamCiuLoeng/budget
|
budget/model/__init__.py
|
Python
|
mit
| 2,408
|
# This script generates GeoTiff files based Corine land cover data
# Usage: python generateGeotiff.py berryName
# berryName is optional. If not provided all output layers are generated.
# Licensed under the MIT license
from osgeo import gdal, ogr, gdalconst
import sys
gdal.UseExceptions()
gdal.AllRegister()
# Paths for input and output. These may be adjusted as needed.
src_filename = "../../aineisto/Clc2012_FI20m.tif"
dstPath = "../../output"
berries = ["mustikka", "puolukka", "karpalo", "vadelma"]
if len(sys.argv) > 1:
berries = [sys.argv[1]]
# WARNING: these values are not based on scientific research.
corineToBerryIndex = dict()
corineToBerryIndex["mustikka"] = dict()
corineToBerryIndex["mustikka"][24] = 70
corineToBerryIndex["mustikka"][25] = 80
corineToBerryIndex["mustikka"][27] = 50
corineToBerryIndex["mustikka"][28] = 60
corineToBerryIndex["puolukka"] = dict()
corineToBerryIndex["puolukka"][24] = 80
corineToBerryIndex["puolukka"][25] = 60
corineToBerryIndex["karpalo"] = dict()
corineToBerryIndex["karpalo"][40] = 50
corineToBerryIndex["karpalo"][42] = 80
corineToBerryIndex["vadelma"] = dict()
corineToBerryIndex["vadelma"][36] = 80
corineToBerryIndex["vadelma"][35] = 60
# Normalize values so that the highest value in output is always 100
normalizationFactor = 100.0 / 80.0
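# e.g. the highest berry index above is 80, so int(80 * 100.0 / 80.0) = 100 in the output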
srcDs = gdal.Open(src_filename)
corineBand = srcDs.GetRasterBand(1)
xSize = corineBand.XSize
ySize = corineBand.YSize
print "Input raster size is ", xSize, ySize
for berry in berries:
driver = srcDs.GetDriver()
dstDs = driver.Create(dstPath + "/" + berry + ".tif", xSize, ySize, 1, gdal.GDT_UInt16, options = ['COMPRESS=LZW'])
dstDs.SetGeoTransform(srcDs.GetGeoTransform())
dstDs.SetProjection(srcDs.GetProjection())
array = corineBand.ReadAsArray(0, 0, xSize, ySize)
for x in range(0, xSize):
indexes = corineToBerryIndex[berry]
if x % 500 == 0:
            print str(round(100.0 * x / xSize)) + " % of " + berry + " done"
for y in range(0, ySize):
origVal = array[y,x]
if origVal in indexes:
finalVal = int(indexes[origVal] * normalizationFactor)
else:
finalVal = 0
array[y,x] = finalVal
dstBand = dstDs.GetRasterBand(1)
dstBand.WriteArray(array, 0, 0)
# Once we're done, close properly the dataset
dstBand = None
dstDs = None
corineBand = None
srcDs = None
|
lukefi/missamustikka
|
backend/generateGeotiff.py
|
Python
|
mit
| 2,298
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'git',
'recipe_engine/context',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
]
def RunSteps(api):
url = 'https://chromium.googlesource.com/chromium/src.git'
# git.checkout can optionally dump GIT_CURL_VERBOSE traces to a log file,
# useful for debugging git access issues that are reproducible only on bots.
curl_trace_file = None
if api.properties.get('use_curl_trace'):
curl_trace_file = api.path['start_dir'].join('curl_trace.log')
submodule_update_force = api.properties.get('submodule_update_force', False)
submodule_update_recursive = api.properties.get('submodule_update_recursive',
True)
# You can use api.git.checkout to perform all the steps of a safe checkout.
retVal = api.git.checkout(
url,
ref=api.properties.get('revision'),
recursive=True,
submodule_update_force=submodule_update_force,
set_got_revision=api.properties.get('set_got_revision'),
curl_trace_file=curl_trace_file,
remote_name=api.properties.get('remote_name'),
display_fetch_size=api.properties.get('display_fetch_size'),
file_name=api.properties.get('checkout_file_name'),
submodule_update_recursive=submodule_update_recursive,
use_git_cache=api.properties.get('use_git_cache'))
assert retVal == "deadbeef", (
"expected retVal to be %r but was %r" % ("deadbeef", retVal))
# count_objects shows number and size of objects in .git dir.
api.git.count_objects(
name='count-objects',
can_fail_build=api.properties.get('count_objects_can_fail_build'),
git_config_options={'foo': 'bar'})
# Get the remote URL.
api.git.get_remote_url(
step_test_data=lambda: api.raw_io.test_api.stream_output('foo'))
api.git.get_timestamp(test_data='foo')
# You can use api.git.fetch_tags to fetch all tags from the remote
api.git.fetch_tags(api.properties.get('remote_name'))
# If you need to run more arbitrary git commands, you can use api.git itself,
# which behaves like api.step(), but automatically sets the name of the step.
with api.context(cwd=api.path['checkout']):
api.git('status')
api.git('status', name='git status can_fail_build',
can_fail_build=True)
api.git('status', name='git status cannot_fail_build',
can_fail_build=False)
# You should run git new-branch before you upload something with git cl.
api.git.new_branch('refactor') # Upstream is origin/master by default.
# And use upstream kwarg to set up different upstream for tracking.
api.git.new_branch('feature', upstream='refactor')
# You can use api.git.rebase to rebase the current branch onto another one
api.git.rebase(name_prefix='my repo', branch='origin/master',
dir_path=api.path['checkout'],
remote_name=api.properties.get('remote_name'))
if api.properties.get('cat_file', None):
step_result = api.git.cat_file_at_commit(api.properties['cat_file'],
api.properties['revision'],
stdout=api.raw_io.output())
if 'TestOutput' in step_result.stdout:
pass # Success!
# Bundle the repository.
api.git.bundle_create(
api.path['start_dir'].join('all.bundle'))
def GenTests(api):
yield api.test('basic')
yield api.test('basic_ref') + api.properties(revision='refs/foo/bar')
yield api.test('basic_branch') + api.properties(revision='refs/heads/testing')
yield api.test('basic_hash') + api.properties(
revision='abcdef0123456789abcdef0123456789abcdef01')
yield api.test('basic_file_name') + api.properties(checkout_file_name='DEPS')
yield api.test('basic_submodule_update_force') + api.properties(
submodule_update_force=True)
yield api.test('platform_win') + api.platform.name('win')
yield api.test('curl_trace_file') + api.properties(
revision='refs/foo/bar', use_curl_trace=True)
yield (
api.test('can_fail_build') +
api.step_data('git status can_fail_build', retcode=1)
)
yield (
api.test('cannot_fail_build') +
api.step_data('git status cannot_fail_build', retcode=1)
)
yield (
api.test('set_got_revision') +
api.properties(set_got_revision=True)
)
yield (
api.test('rebase_failed') +
api.step_data('my repo rebase', retcode=1)
)
yield api.test('remote_not_origin') + api.properties(remote_name='not_origin')
yield (
api.test('count-objects_delta') +
api.properties(display_fetch_size=True))
yield (
api.test('count-objects_failed') +
api.step_data('count-objects', retcode=1))
yield (
api.test('count-objects_with_bad_output') +
api.step_data(
'count-objects',
stdout=api.raw_io.output(api.git.count_objects_output('xxx'))))
yield (
api.test('count-objects_with_bad_output_fails_build') +
api.step_data(
'count-objects',
stdout=api.raw_io.output(api.git.count_objects_output('xxx'))) +
api.properties(count_objects_can_fail_build=True))
yield (
api.test('cat-file_test') +
api.step_data('git cat-file abcdef12345:TestFile',
stdout=api.raw_io.output('TestOutput')) +
api.properties(revision='abcdef12345', cat_file='TestFile'))
yield (
api.test('git-cache-checkout') +
api.properties(use_git_cache=True))
|
Shouqun/node-gn
|
tools/depot_tools/recipes/recipe_modules/git/examples/full.py
|
Python
|
mit
| 5,612
|
from collections import defaultdict
from zipfile import ZipFile
from datetime import datetime
from itertools import izip
import logging
import sys
import shelve
from backtest import constants
def main():
PRICES_DATA = constants.PRICES_DATA
performances = shelve.open(constants.CACHE_PERFS, protocol=2)
with ZipFile(PRICES_DATA, 'r') as prices_data:
securities = prices_data.namelist()
for index, dataset_name in enumerate(securities):
#if index == 100: break
batch_count = index / 100 + 1
if index % 100 == 0:
logging.info('processing batch %d/%d' % (batch_count, len(securities) / 100 + 1))
security_code = dataset_name.split('/')[-1][:-4]
security_performances = dict()
dataset = prices_data.open(dataset_name).readlines()
dates = list()
prices = list()
for row in dataset:
items = row.strip().split(',')
px_date = datetime.strptime(items[0], '%Y-%m-%d')
if items[4].startswith('#N/A'):
continue
px_last = float(items[4])
dates.append(px_date)
prices.append(px_last)
for date, price, price_prev in izip(dates[1:], prices[1:], prices[:-1]):
perf = (price / price_prev) - 1.0
security_performances[date.strftime('%Y%m%d')] = perf
performances[security_code] = security_performances
performances.close()
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='%(levelname)s %(asctime)s %(module)s - %(message)s'
)
main()
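# Reading the cache back (a sketch; the security code and date are examples):
#
#   import shelve
#   perfs = shelve.open(constants.CACHE_PERFS, protocol=2)
#   daily_return = perfs['SOME_CODE']['20150102']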
|
chris-ch/us-equities
|
create-stats-perfs-db.py
|
Python
|
mit
| 1,963
|
from functools import partial
def build_tag_filter(args):
"""
Returns a filter which selects entries with all of the given
tags only.
@param list(str) args, e.g. ["+tag1", "unrelated"]
@return (callable filter, list remaining_args)
"""
remaining_args = []
tags = set()
for arg in args:
if arg.startswith('+'):
tags.add(arg[1:])
else:
remaining_args.append(arg)
    entry_filter = partial(get_entries_with_tags, tags)
    return entry_filter, remaining_args
def get_entries_with_tags(tags, entries):
"""
Returns all entries which match all of the given tags.
@param set tags
    @param iterable entries
    @return generator[Entry]
"""
for entry in entries:
skip = False
for wanted_tag in tags:
if wanted_tag.lower() not in entry.get_tags():
skip = True
break
if not skip:
yield entry
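# Example (a sketch; Entry objects are anything exposing get_tags()):
#
#   entry_filter, remaining = build_tag_filter(["+work", "2021"])
#   # remaining == ["2021"]; entry_filter(entries) yields entries tagged "work"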
|
hoffie/ripple
|
ripple/filters/tag.py
|
Python
|
mit
| 925
|
# Copyright (c) 2016 Lee Cannon
# Licensed under the MIT License, see included LICENSE File
from collections import Counter
from .filter import at_trigrams, with_words
def count_trigrams(interactions: list, minimum: int = 1, n: int = None, include_unknown: bool = False) -> list:
"""Returns the n most common trigrams in the interactions given.
:param interactions: The interactions to check.
:type interactions: list
    :param minimum: Ignore trigrams that occur minimum times or fewer. Defaults to 1
:type minimum: int
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param include_unknown: Determines if the interactions with unknown trigrams should be included. Default False
:type include_unknown: bool
:return: The list of most common trigrams in the interactions given.
:rtype: list
"""
# The below (if not interaction.trigram == 'OWN') ignores unknown trigrams
if not include_unknown:
trigram_list = [interaction.trigram for interaction in interactions if not interaction.trigram == 'OWN']
else:
trigram_list = [interaction.trigram for interaction in interactions]
return [trigram for trigram in Counter(trigram_list).most_common(n=n) if trigram[1] > minimum]
def count_words(interactions: list, minimum: int = 1, n: int = None, additional_words_to_ignore: list=None) -> list:
"""Returns the n most common words in the interactions given.
:param interactions: The interactions to check.
:type interactions: list
    :param minimum: Ignore words that occur minimum times or fewer. Defaults to 1
:type minimum: int
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param additional_words_to_ignore: List of additional words to ignore
:type additional_words_to_ignore: list
:return: The list of most common words in the interactions given.
:rtype: list
"""
if additional_words_to_ignore is None:
additional_words_to_ignore = []
word_list = [word for interaction in interactions for word in set(interaction.title_words)
if word not in additional_words_to_ignore]
counts = Counter(word_list).most_common(n=n)
counts = [count for count in counts if count[1] > minimum]
return counts
def count_interactions(interactions: list):
return len(interactions)
def count_words_at_trigrams(interactions: list, trigrams: list, n: int = None, minimum: int = 2,
additional_words_to_ignore: list = None) -> list:
"""Returns the list of most common words at the given trigram in order. Ignores words where the number of
occurrences is less than the minimum.
Example of returned list:
| [['modnet', 1234],
| ['password', 123],
| ['outlook', 34],
| ['network', 4]]
:param interactions: The list of interactions to check.
:type interactions: list
:param trigrams: The list of trigrams to check.
:type trigrams: list
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param minimum: Ignores words where the number of occurrences is less than the minimum. Defaults to 2.
:type minimum: int
:param additional_words_to_ignore: List of additional words to ignore
:type additional_words_to_ignore: list
:return: The list of most common words at the given trigram.
:rtype: list
"""
if additional_words_to_ignore is None:
additional_words_to_ignore = []
return [word for word in count_words(at_trigrams(interactions, trigrams), n=n)
if word[1] >= minimum and word[0] not in additional_words_to_ignore]
def count_trigram_with_words(interactions: list, words: list, n: int = None, minimum: int = 2) -> list:
"""Returns the list of most common trigrams for occurrences of the given word in order. Ignores trigrams where the
number of occurrences is less than the minimum.
Example of returned list:
| [['ABW', 1234],
| ['NOW', 123],
| ['YOR', 34],
| ['BRC', 4]]
:param interactions: The list of interactions to check.
:type interactions: list
:param words: The list of words to check.
:type words: list
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param minimum: Ignores words where the number of occurrences is less than the minimum. Defaults to 2.
:type minimum: int
:return: The list of most common words at the given trigram.
:rtype: list
"""
return [trigram for trigram in count_trigrams(with_words(interactions, words), n=n)
if trigram[1] >= minimum]
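# Usage sketch (interaction objects expose .trigram and .title_words as used
# above; the trigram and counts are illustrative):
#
#   top_words = count_words_at_trigrams(interactions, ['ABW'], n=5, minimum=2)
#   # -> e.g. [('password', 123), ('network', 4)]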
|
leecannon/trending
|
trending/count.py
|
Python
|
mit
| 4,854
|
#!/usr/bin/python
# @package model
# @author Attila Borcs
#
# Class for the deep neural net. Each class function is wrapped with
# a decorator built on python's @property, unifying the DNN
# functionalities when the tensorflow graph initializer
# (tf.global_variables_initializer()) is called.
import functools
import tensorflow as tf
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import params as prm
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import input_data
def doublewrap(function):
"""
    A decorator decorator, allowing the decorated decorator to be used without
    parentheses if no arguments are provided. All arguments must be optional.
    credits: https://danijar.github.io/structuring-your-tensorflow-models
"""
@functools.wraps(function)
def decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return function(args[0])
else:
return lambda wrapee: function(wrapee, *args, **kwargs)
return decorator
@doublewrap
def define_scope(function, scope=None, *args, **kwargs):
"""
A decorator for functions that define TensorFlow operations. The wrapped
function will only be executed once. Subsequent calls to it will directly
return the result so that operations are added to the graph only once.
The operations added by the function live within a tf.variable_scope(). If
this decorator is used with arguments, they will be forwarded to the
variable scope. The scope name defaults to the name of the wrapped function.
    credits: https://danijar.github.io/structuring-your-tensorflow-models
"""
attribute = '_cache_' + function.__name__
name = scope or function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(name, *args, **kwargs):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
class Model:
"""
This class responsible to build and wrap all of the functionalities
of the tensor graph. Attributes of prediction, optimization and
loss function will be stored under tensorflow variable scope.
"""
def __init__(self, image, label):
self.image = image
self.label = label
self.prediction
self.optimize
self.error
self.hidden_1
self.hidden_2
self.hidden_3
@define_scope(initializer=slim.xavier_initializer())
def prediction(self):
x = self.image
x_image = tf.reshape(x, [-1, prm.mnist_img_size, prm.mnist_img_size, 1])
self.hidden_1 = slim.conv2d(x_image, 5,
[prm.conv_size, prm.conv_size])
pool_1 = slim.max_pool2d(self.hidden_1,
[prm.max_pool_size, prm.max_pool_size])
self.hidden_2 = slim.conv2d(pool_1, 5, [prm.conv_size, prm.conv_size])
pool_2 = slim.max_pool2d(self.hidden_2,
[prm.max_pool_size, prm.max_pool_size])
hidden_3 = slim.conv2d(pool_2, 20, [prm.conv_size, prm.conv_size])
self.hidden_3 = slim.dropout(hidden_3, 1.0)
x = slim.fully_connected(
slim.flatten(self.hidden_3), 10, activation_fn=tf.nn.softmax)
return x
@define_scope
def optimize(self):
logprob = tf.log(self.prediction + 1e-12)
cross_entropy = -tf.reduce_sum(self.label * logprob)
optimizer = tf.train.AdamOptimizer(1e-4)
return optimizer.minimize(cross_entropy)
@define_scope
def error(self):
mistakes = tf.not_equal(
tf.argmax(self.label, 1), tf.argmax(self.prediction, 1))
return tf.reduce_mean(tf.cast(mistakes, tf.float32))
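# Usage sketch (assumes the params module `prm` defines the sizes used in
# prediction; MNIST shapes are written out explicitly):
#
#   mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
#   image = tf.placeholder(tf.float32, [None, 784])
#   label = tf.placeholder(tf.float32, [None, 10])
#   model = Model(image, label)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       images, labels = mnist.train.next_batch(50)
#       sess.run(model.optimize, {image: images, label: labels})
#       print(sess.run(model.error, {image: images, label: labels}))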
|
attilaborcs/dnn-visualization
|
model.py
|
Python
|
mit
| 3,922
|
#!/usr/bin/env python
"""
File: twitter_analyse.py
Author: Me
Email: 0
Github: 0
Description: Analyse tweets. For details, please refer to the document
```twitter_analyse.notes```
"""
# System lib
from __future__ import division
import json
import os
from math import log
import numpy
# 3-rd party lib
# import nltk
from nltk.classify import NaiveBayesClassifier
from textblob import TextBlob
# Constants
TWEET_DIR = os.path.join('.', 'twitter_data')
OSCAR_DIR = os.path.join(TWEET_DIR, 'oscar')
RAZZIES_DIR = os.path.join(TWEET_DIR, 'razzies')
PREDICT_DIR = os.path.join(TWEET_DIR, 'proof')
CANDIDATE_DIR = os.path.join(TWEET_DIR, 'candidates')
# PREDICT_OSCAR_DIR = os.path.join(PREDICT_DIR, 'oscar')
# PREDICT_RAZZIES_DIR = os.path.join(PREDICT_DIR, 'razzies')
def attribute_to_characteristic(tweet):
"""
Extract attributes from a tweet and form a characteristic of a tweet
@param tweet dict
@return dict
Charateristic of a tweet
"""
ret = {}
text = tweet['text']
retweets = tweet['retweet_count']
favorites = tweet['favorite_count']
followers = tweet['author_followers']
friends = tweet['author_friends']
publishes = tweet['author_num_of_status']
blob = TextBlob(text)
polarity = blob.sentiment.polarity
ret['scaled_polarity'] = calculate_scaled_polarity(
polarity,
int(retweets),
int(favorites),
int(followers),
int(friends),
int(publishes)
)
ret['retweets'] = retweets
ret['favorites'] = favorites
ret['followers'] = followers
ret['friends'] = friends
ret['publishes'] = publishes
ret['polarity'] = polarity
# print 'p=%.2f re=%d fav=%d, fol=%d, fd=%d, pub=%d' % (
# polarity, retweets, favorites, followers, friends, publishes
# )
return ret
def calculate_scaled_polarity(
polarity, retweets, favorites, followers, friends, publishes):
"""
Return a scaled polarity for a tweet
@param polarity float
@param retweets int
@param favorites int
@param followers int
@param friends int
@param publishes int
@return float
"""
# Avoid zero case and negative value
retweets = retweets if retweets > 0 else 1
favorites = favorites if favorites > 0 else 1
followers = followers if followers > 0 else 1
friends = friends if friends > 0 else 1
publishes = publishes if publishes > 0 else 1
# Entropy
ret = polarity * \
(
log(retweets, 2) +
log(favorites, 2) +
log(followers, 2) +
log(friends, 2) +
log(publishes, 2)
)
return round(ret, 2)
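# Worked example (hypothetical numbers): polarity=0.5 with retweets=8,
# favorites=4, followers=2, friends=1, publishes=1 gives
# 0.5 * (log2(8) + log2(4) + log2(2) + log2(1) + log2(1)) = 0.5 * 6 = 3.0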
def tweets2film(tweet_characteristics):
"""
Aggreate tweet's characteristics to form a film's characteristics
@param tweet_characteristics list of dict
@return dict
characteristics of a film
"""
ret = {}
retweets_data = []
favorites_data = []
polarities_data = []
friends_data = []
followers_data = []
for t in tweet_characteristics:
retweets_data.append(t['retweets'])
favorites_data.append(t['favorites'])
polarities_data.append(t['polarity'])
friends_data.append(t['friends'])
followers_data.append(t['followers'])
retweets = numpy.array(retweets_data)
favorites = numpy.array(favorites_data)
polarities = numpy.array(polarities_data)
friends = numpy.array(friends_data)
followers = numpy.array(followers_data)
for data_set in [
('retweets', retweets),
('favorites', favorites),
('polarities', polarities),
('friends', friends),
('followers', followers)
]:
data_name = data_set[0]
data_list = data_set[1]
print '|%s| sd: %f mean: %f min: %d max: %d' % (
data_name,
round(data_list.std(), 2),
round(numpy.average(data_list), 2),
data_list.min(),
data_list.max(),
)
# ret['avg_followers'] = round(numpy.average(followers_data), 2)
# ret['avg_friends'] = round(numpy.average(friends_data), 2)
ret['avg_polarity'] = round(numpy.average(polarities_data), 2)
# ret['avg_retweet'] = round(numpy.average(retweets_data), 2)
# ret['std_friends'] = round(friends.std(), 2)
# ret['std_followers'] = round(followers.std(), 2)
# ret['std_polarity'] = round(polarities.std(), 2)
ret['std_retweet'] = round(retweets.std(), 2)
# ret['log_friends'] = round(log(sum(friends_data)) / log(2), 2)
# ret['log_followers'] = round(log(sum(followers_data)) / log(2), 2)
ret['log_retweets'] = round(log(sum(retweets_data)) / log(2), 2)
ret['log_favorites'] = round(log(sum(favorites_data)) / log(2), 2)
return ret
def construct_film_characteristic(film_name, tweet_characteristics):
"""
Construct featuresets for given parameters
@param film_name string
@param tweet_characteristics list of dict
@return featuresets
"""
ret = {}
# Analyze film's attributes
ret['length_of_film'] = len(film_name)
ret['number_of_words'] = len(film_name.split(' '))
# Analyze tweet's characteristics
    aggregated_characteristic = tweets2film(tweet_characteristics)
    # Merge 2 characteristics
    ret = dict(ret.items() + aggregated_characteristic.items())
return ret
def predictCandidates():
list_of_files = os.listdir(CANDIDATE_DIR)
for fn in list_of_files:
path = os.path.join(CANDIDATE_DIR, fn)
film_name = os.path.splitext(fn)[0]
with open(path, 'r') as f:
tweets = json.load(f)
tweets = json.loads(tweets)
tweet_characteristics = []
for tweet in tweets:
# Per tweet analyze
characteristic = attribute_to_characteristic(tweet)
tweet_characteristics.append(characteristic)
film_characteristic = construct_film_characteristic(
film_name,
tweet_characteristics
)
result = classifier.classify(film_characteristic)
print 'film: |%s| PREDICT: |%s|\n' % (film_name, result)
features = []
for my_dir in [OSCAR_DIR, RAZZIES_DIR]:
label = os.path.basename(my_dir)
print "=========== Training {0} ============".format(label)
for fn in os.listdir(my_dir):
path = os.path.join(my_dir, fn)
film_name = os.path.splitext(fn)[0]
# print 'dir=%s, film_name=%s, path=%s' % (my_dir, film_name, path)
with open(path, 'r') as f:
tweets = json.load(f)
tweets = json.loads(tweets)
tweet_characteristics = []
for tweet in tweets:
# Per tweet analyze
characteristic = attribute_to_characteristic(tweet)
tweet_characteristics.append(characteristic)
try:
film_characteristic = construct_film_characteristic(
film_name,
tweet_characteristics
)
except Exception as e:
print '{0}: {1}'.format(film_name, e)
else:
# print 'film: |%s|' % film_name
# print film_characteristic
feature = (film_characteristic, label)
features.append(feature)
# Train the classifier
classifier = NaiveBayesClassifier.train(features)
classifier.show_most_informative_features(10)
# Predict the film
report = {}
predict_labels = ['oscar', 'razzies']
for predict_label in predict_labels:
my_dir = os.path.join(PREDICT_DIR, predict_label)
list_of_files = os.listdir(my_dir)
report[predict_label] = {
'number_of_match': 0,
'number_of_films': len(list_of_files)
}
for fn in list_of_files:
path = os.path.join(my_dir, fn)
film_name = os.path.splitext(fn)[0]
with open(path, 'r') as f:
tweets = json.load(f)
tweets = json.loads(tweets)
tweet_characteristics = []
for tweet in tweets:
# Per tweet analyze
characteristic = attribute_to_characteristic(tweet)
tweet_characteristics.append(characteristic)
film_characteristic = construct_film_characteristic(
film_name,
tweet_characteristics
)
result = classifier.classify(film_characteristic)
if result == predict_label:
report[predict_label]['number_of_match'] += 1
print film_characteristic
print 'film: |%s| PREDICT: |%s|\n' % (film_name, result)
report['features'] = film_characteristic.keys()
# classifier.show_most_informative_features()
print "# Features in film's characteristic\n"
for f in report['features']:
print '* %s' % f
print '\n# Prediction\n'
for predict_label in predict_labels:
r = report[predict_label]
print '## %s\n' % predict_label
print 'match %d out of %d, accuracy=%d%%\n' % (
r['number_of_match'],
r['number_of_films'],
round(r['number_of_match'] / r['number_of_films'] * 100)
)
print '## overall\n'
print 'match %d out of %d, accuracy=%d%%\n' % (
sum(
[report[p]['number_of_match'] for p in predict_labels]
),
sum(
[report[p]['number_of_films'] for p in predict_labels]
),
round(
sum(
[report[p]['number_of_match'] for p in predict_labels]
) /
sum(
[report[p]['number_of_films'] for p in predict_labels]
) * 100
)
)
predictCandidates()
|
mondwan/ProjectRazzies
|
twitter_analyse.py
|
Python
|
mit
| 9,448
|
# -*- coding: utf-8 -*-
"""Module providing views for the folderish content page type"""
import json
import urllib
from Acquisition import aq_inner
from Products.Five.browser import BrowserView
from plone import api
from plone.i18n.normalizer.interfaces import IIDNormalizer
from zope.component import getUtility
from newe.sitecontent import utils
from newe.sitecontent.showroom import IShowRoom
from newe.sitecontent.project import IProject
class ShowRoomView(BrowserView):
""" Show room default view """
def render(self):
return self.index()
def __call__(self):
self.has_showrooms = len(self.showrooms()) > 0
self.has_subitems = len(self.subitems()) > 0
return self.render()
def showroom_content(self):
context = aq_inner(self.context)
template = context.restrictedTraverse('@@showroom-content')()
return template
def showrooms(self):
context = aq_inner(self.context)
return context.restrictedTraverse('@@folderListing')(
portal_type='newe.sitecontent.showroom',
review_state='published')
def projects(self):
context = aq_inner(self.context)
return context.restrictedTraverse('@@folderListing')(
portal_type='newe.sitecontent.project',
review_state='published')
def subitems(self):
""" A showroom containing other showrooms
should not list contained projects
"""
if self.has_showrooms:
return self.showrooms()
return self.projects()
def _project_assets(self, uuid):
project = api.content.get(UID=uuid)
        data = getattr(project, 'assets', None)
if data is None:
data = dict()
return data
def _assets(self, uuid):
return json.loads(self._project_assets(uuid))
def has_preview_image(self, uuid):
""" Test if we have an available preview image """
if len(self._project_assets(uuid)):
assets = self._assets(uuid)
return len(assets['items']) > 0
return False
def get_preview_container(self, uuid):
data = self._assets(uuid)
items = data['items']
return items[0]
def rendered_preview_image(self, uuid):
item = api.content.get(UID=uuid)
return item.restrictedTraverse('@@stack-preview')()
def normalize_subject(self, subject):
""" Normalizer for project filter categories
This function is called by the isotope filter navigation
"""
normalizer = getUtility(IIDNormalizer)
return normalizer.normalize(subject)
def url_encode_subject_query(self, subject):
""" Quote subject query string """
return urllib.quote(subject)
def computed_class(self, uuid):
item = api.content.get(UID=uuid)
klass = 'app-card-{0}'.format(uuid)
subjects = item.Subject()
for subject in subjects:
pretty_subject = self.normalize_subject(subject)
klass = '{0} {1}'.format(klass, pretty_subject)
return klass
def available_filter(self):
context = aq_inner(self.context)
context_subjects = utils.keywords_filtered_by_context(context)
return context_subjects
def filter_map(self):
idx = 0
mapping = {}
for subject in self.available_filter():
idx += 1
mapping[subject] = idx
return mapping
def filter_map_keys(self):
return self.filter_map().keys()
def item_filter_category(self, uuid):
item = api.content.get(UID=uuid)
subjects = item.Subject()
filter_map = self.filter_map()
if len(subjects) > 1:
item_categories = list()
            for subject in subjects:
                # filter_map values are ints; str() them so join() works
                item_categories.append(str(filter_map[subject]))
            return ', '.join(item_categories)
else:
return filter_map[subjects[0]]
class ShowRoomContentView(BrowserView):
""" Embeddable content card listing """
def __call__(self):
self.has_showrooms = len(self.showrooms()) > 0
return self.render()
def render(self):
return self.index()
@property
def traverse_subpath(self):
return self.subpath
def publishTraverse(self, request, name):
if not hasattr(self, 'subpath'):
self.subpath = []
self.subpath.append(name)
return self
def active_filter_category(self):
try:
active_category = self.traverse_subpath[0]
return active_category
except AttributeError:
return None
def contained_items(self, type_interface):
context = aq_inner(self.context)
query = dict(
context=context,
depth=1,
object_provides=type_interface,
            review_state='published',
sort_on='getObjPositionInParent'
)
if self.active_filter_category():
active_filter = self.active_filter_category()
for key, value in self.filter_map().items():
if str(value) == active_filter:
query['Subject'] = key
items = api.content.find(**query)
return items
def showrooms(self):
return self.contained_items(IShowRoom)
def projects(self):
return self.contained_items(IProject)
def subitems(self):
""" A showroom containing other showrooms
should not list contained projects
"""
if self.has_showrooms:
return self.showrooms()
return self.projects()
def _project_assets(self, uuid):
project = api.content.get(UID=uuid)
data = getattr(project, 'assets', None)
if not data:
data = dict()
return data
def _assets(self, uuid):
return json.loads(self._project_assets(uuid))
def has_preview_image(self, uuid):
""" Test if we have an available preview image """
if len(self._project_assets(uuid)):
assets = self._assets(uuid)
return len(assets['items']) > 0
return False
def get_preview_container(self, uuid):
data = self._assets(uuid)
items = data['items']
return items[0]
def rendered_preview_image(self, uuid):
item = api.content.get(UID=uuid)
return item.restrictedTraverse('@@stack-preview')()
def available_filter(self):
context = aq_inner(self.context)
context_subjects = utils.keywords_filtered_by_context(context)
return context_subjects
def filter_map(self):
idx = 0
mapping = {}
for subject in self.available_filter():
idx += 1
mapping[subject] = idx
return mapping
def filter_map_keys(self):
return self.filter_map().keys()
def normalize_subject(self, subject):
""" Normalizer for project filter categories
This function is called by the isotope filter navigation
"""
normalizer = getUtility(IIDNormalizer)
return normalizer.normalize(subject)
def computed_class(self, uuid):
item = api.content.get(UID=uuid)
klass = 'app-card-{0}'.format(uuid)
subjects = item.Subject()
for subject in subjects:
pretty_subject = self.normalize_subject(subject)
klass = '{0} {1}'.format(klass, pretty_subject)
return klass
|
a25kk/newe
|
src/newe.sitecontent/newe/sitecontent/browser/showroom.py
|
Python
|
mit
| 7,487
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'final.ui'
#
# Created by: PyQt5 UI code generator 5.8.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from forecastiopy import *
import datetime
import sys
from ubidots import ApiClient
import time
import webbrowser
from threading import Thread
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl
import os.path
import serial
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_SSD1306
# Raspberry Pi pin configuration:
RST = 32
# 128x32 display with hardware I2C:
disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
PORT = '/dev/ttyUSB0'
BAUD_RATE = 9600
# Open serial port
ser = serial.Serial(PORT, BAUD_RATE)
class MovieSplashScreen(QSplashScreen):
def __init__(self, movie, parent = None):
movie.jumpToFrame(0)
pixmap = QPixmap(movie.frameRect().size())
QSplashScreen.__init__(self, pixmap)
self.movie = movie
self.movie.frameChanged.connect(self.repaint)
def showEvent(self, event):
self.movie.start()
def hideEvent(self, event):
self.movie.stop()
def paintEvent(self, event):
painter = QPainter(self)
pixmap = self.movie.currentPixmap()
self.setMask(pixmap.mask())
painter.drawPixmap(0, 0, pixmap)
def sizeHint(self):
return self.movie.scaledSize()
def mousePressEvent(self, mouse_event):
pass
class Ui_system(object):
done1 = False
done2 = False
done3 = False
t = 0
c = 0
b = 0
eco = 0
roomt = 0
roomh = 0
def setupUi(self, system):
system.setObjectName("system")
system.resize(800, 600)
system.setToolTip("")
system.setStyleSheet("background-color: rgb(44, 0, 30);")
self.Fuzzy_system = QtWidgets.QWidget()
self.Fuzzy_system.setEnabled(True)
self.Fuzzy_system.setGeometry(QtCore.QRect(0, 0, 800, 538))
self.Fuzzy_system.setObjectName("Fuzzy_system")
self.title_1 = QtWidgets.QLabel(self.Fuzzy_system)
self.title_1.setGeometry(QtCore.QRect(150, -20, 503, 85))
self.title_1.setStyleSheet("font: 36pt \"Peace Sans\";\n"
"color: rgb(233, 84, 32);")
self.title_1.setObjectName("title_1")
self.time_hours = QtWidgets.QLabel(self.Fuzzy_system)
self.time_hours.setGeometry(QtCore.QRect(576, 60, 121, 121))
self.time_hours.setStyleSheet("font: 76pt \"Slim Joe\";\n"
"color:rgb(238, 247, 251);")
self.time_hours.setObjectName("time_hours")
self.time_min = QtWidgets.QLabel(self.Fuzzy_system)
self.time_min.setGeometry(QtCore.QRect(710, 80, 67, 41))
self.time_min.setStyleSheet("font: 26pt \"Big John\";\n"
"color:rgb(238, 247, 251);")
self.time_min.setText("")
self.time_min.setObjectName("time_min")
self.time_hours.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.time_min.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.timer1 = QtCore.QTimer()
self.timer1.setInterval(1000)
self.timer1.timeout.connect(self.Time)
self.timer1.start()
self.date = QtWidgets.QLabel(self.Fuzzy_system)
self.date.setGeometry(QtCore.QRect(700, 130, 101, 21))
self.date.setStyleSheet("font: 10pt \"Big John\";\n"
"color:rgb(238, 247, 251);")
self.date.setText("")
self.date.setObjectName("date")
self.date.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.timer2 = QtCore.QTimer()
self.timer2.setInterval(1000)
self.timer2.timeout.connect(self.Date)
self.timer2.start()
self.run_system = QtWidgets.QPushButton(self.Fuzzy_system)
self.run_system.setGeometry(QtCore.QRect(230, 480, 361, 51))
self.run_system.setStyleSheet("color: rgb(255, 255, 255);\n"
"font: 11pt \"Big John\";")
self.run_system.setObjectName("run_system")
self.run_system.clicked.connect(self.Run_System)
self.timer5 = QtCore.QTimer()
self.timer5.setInterval(1000 * 300)
self.timer5.timeout.connect(self.Run_System)
self.timer5.start()
self.avg_temp_txt = QtWidgets.QLabel(self.Fuzzy_system)
self.avg_temp_txt.setGeometry(QtCore.QRect(0, 100, 121, 51))
self.avg_temp_txt.setStyleSheet("font: 75 32pt \"Moon\";\n"
"color:rgbrgb(85, 85, 255);")
self.avg_temp_txt.setObjectName("avg_temp_txt")
self.avg_temp_txt.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.temp_icon = QtWidgets.QLabel(self.Fuzzy_system)
self.temp_icon.setGeometry(QtCore.QRect(340, 110, 32, 32))
self.temp_icon.setStyleSheet("font: 26pt \"Big John\";\n"
"color:rgb(174, 167, 159)")
self.temp_icon.setObjectName("temp_icon")
self.avg_cc_txt = QtWidgets.QLabel(self.Fuzzy_system)
self.avg_cc_txt.setGeometry(QtCore.QRect(0, 170, 121, 51))
self.avg_cc_txt.setStyleSheet("font: 75 32pt \"Moon\";\n"
"color:rgb(85, 85, 255);")
self.avg_cc_txt.setObjectName("avg_cc_txt")
self.avg_cc_txt.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.avg_batt_txt = QtWidgets.QLabel(self.Fuzzy_system)
self.avg_batt_txt.setGeometry(QtCore.QRect(0, 240, 121, 51))
self.avg_batt_txt.setStyleSheet("font: 75 32pt \"Moon\";\n"
"color:rgb(85, 85, 255);")
self.avg_batt_txt.setObjectName("avg_batt_txt")
self.avg_batt_txt.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.timer3 = QtCore.QTimer()
self.timer3.setInterval(1000 * 900)
self.timer3.timeout.connect(self.Update_Battery)
self.timer3.start()
self.battery_percent_but = QtWidgets.QPushButton(self.Fuzzy_system)
self.battery_percent_but.setGeometry(QtCore.QRect(120, 250, 221, 32))
self.battery_percent_but.setStyleSheet("font: 75 11pt \"Moon\";\n"
"color: rgb(200, 226, 240);")
self.battery_percent_but.clicked.connect(self.Batt_Percent)
self.battery_percent_but.setObjectName("battery_percent_but")
self.batt_icon = QtWidgets.QLabel(self.Fuzzy_system)
self.batt_icon.setGeometry(QtCore.QRect(340, 250, 32, 32))
self.batt_icon.setStyleSheet("font: 26pt \"Big John\";\n"
"color:rgb(174, 167, 159)")
self.batt_icon.setObjectName("batt_icon")
self.cloud_icon = QtWidgets.QLabel(self.Fuzzy_system)
self.cloud_icon.setGeometry(QtCore.QRect(340, 180, 32, 32))
self.cloud_icon.setStyleSheet("font: 26pt \"Big John\";\n"
"color:rgb(174, 167, 159)")
self.cloud_icon.setObjectName("cloud_icon")
self.average_cc_but = QtWidgets.QPushButton(self.Fuzzy_system)
self.average_cc_but.setGeometry(QtCore.QRect(120, 180, 221, 32))
self.average_cc_but.setStyleSheet("font: 75 11pt \"Moon\";\n"
"color: rgb(200, 226, 240);")
self.average_cc_but.setObjectName("average_cc_but")
self.average_cc_but.clicked.connect(self.Avg_CC)
self.defuzz_txt = QtWidgets.QLabel(self.Fuzzy_system)
self.defuzz_txt.setGeometry(QtCore.QRect(240, 380, 161, 71))
self.defuzz_txt.setStyleSheet("font: 40pt \"Big John\";\n"
"color:rgb(238, 247, 251);")
self.defuzz_txt.setObjectName("defuzz_txt")
self.defuzz_but = QtWidgets.QPushButton(self.Fuzzy_system)
self.defuzz_but.setGeometry(QtCore.QRect(50, 400, 179, 32))
self.defuzz_but.setStyleSheet("font: 11pt \"Peace Sans\";\n"
"color: rgb(34, 139, 34)")
self.defuzz_but.setObjectName("defuzz_but")
self.defuzz_but.clicked.connect(self.Defuzz)
self.eco_level_but = QtWidgets.QPushButton(self.Fuzzy_system)
self.eco_level_but.setGeometry(QtCore.QRect(450, 400, 179, 32))
self.eco_level_but.setStyleSheet("font: 11pt \"Peace Sans\";\n"
"color: rgb(34, 139, 34)")
self.eco_level_but.setObjectName("eco_level_but")
self.eco_level_but.clicked.connect(self.Eco)
self.temp_but = QtWidgets.QPushButton(self.Fuzzy_system)
self.temp_but.setGeometry(QtCore.QRect(500, 200, 161, 26))
self.temp_but.setStyleSheet("color:rgb(200, 226, 240);\n"
"font: 75 11pt \"Moon\";")
self.temp_but.setObjectName("temp_but")
self.temp_but.clicked.connect(self.DarkSky)
self.average_temp_but = QtWidgets.QPushButton(self.Fuzzy_system)
self.average_temp_but.setGeometry(QtCore.QRect(120, 110, 221, 32))
self.average_temp_but.setStyleSheet("font: 75 11pt \"Moon\";\n"
"color: rgb(200, 226, 240);")
self.average_temp_but.setObjectName("average_temp_but")
self.average_temp_but.clicked.connect(self.Avg_temp)
self.cloud_cover_but = QtWidgets.QPushButton(self.Fuzzy_system)
self.cloud_cover_but.setGeometry(QtCore.QRect(500, 270, 161, 26))
self.cloud_cover_but.setStyleSheet("color:rgb(200, 226, 240);\n"
"font: 75 11pt \"Moon\";")
self.cloud_cover_but.setObjectName("cloud_cover_but")
self.cloud_cover_but.clicked.connect(self.DarkSky)
self.temp_text = QtWidgets.QLabel(self.Fuzzy_system)
self.temp_text.setGeometry(QtCore.QRect(662, 180, 131, 61))
self.temp_text.setStyleSheet("font: 75 32pt \"Moon\";\n"
"color:rgb(233, 99, 94);")
self.temp_text.setObjectName("temp_text")
self.temp_text.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.eco_level_txt = QtWidgets.QLabel(self.Fuzzy_system)
self.eco_level_txt.setGeometry(QtCore.QRect(640, 380, 61, 71))
self.eco_level_txt.setStyleSheet("font: 40pt \"Big John\";\n"
"color:rgb(238, 247, 251);")
self.eco_level_txt.setObjectName("eco_level_txt")
self.cloud_cover_txt = QtWidgets.QLabel(self.Fuzzy_system)
self.cloud_cover_txt.setGeometry(QtCore.QRect(662, 250, 131, 61))
self.cloud_cover_txt.setStyleSheet("font: 75 32pt \"Moon\";\n"
"color:rgb(233, 99, 94);")
self.cloud_cover_txt.setObjectName("cloud_cover_txt")
self.cloud_cover_txt.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.refresh_current = QtWidgets.QToolButton(self.Fuzzy_system)
self.refresh_current.setGeometry(QtCore.QRect(610, 330, 88, 31))
self.refresh_current.setStyleSheet("font: 11pt \"Peace Sans\";\n"
"color: rgb(34, 139, 34)")
self.refresh_current.setObjectName("refresh_current")
self.refresh_current.clicked.connect(self.loading2)
self.refresh_avg = QtWidgets.QToolButton(self.Fuzzy_system)
self.refresh_avg.setGeometry(QtCore.QRect(150, 300, 88, 31))
self.refresh_avg.setStyleSheet("font: 11pt \"Peace Sans\";\n"
"color: rgb(34, 139, 34)")
self.refresh_avg.setObjectName("refresh_avg")
self.refresh_avg.clicked.connect(self.loading1)
self.timer4 = QtCore.QTimer()
self.timer4.setInterval(1000 * 86400)
self.timer4.timeout.connect(self.loading1)
self.timer4.start()
self.dark_sky_1 = QtWidgets.QToolButton(self.Fuzzy_system)
self.dark_sky_1.setGeometry(QtCore.QRect(640, 510, 158, 23))
self.dark_sky_1.setStyleSheet("font: 25 10pt \"Ubuntu\";\n"
"color: rgb(85, 170, 255)")
self.dark_sky_1.setObjectName("dark_sky_1")
self.dark_sky_1.clicked.connect(self.DarkSky)
self.title_1.raise_()
self.time_hours.raise_()
self.time_min.raise_()
self.date.raise_()
self.run_system.raise_()
self.avg_temp_txt.raise_()
self.avg_cc_txt.raise_()
self.avg_batt_txt.raise_()
self.defuzz_txt.raise_()
self.average_temp_but.raise_()
self.temp_icon.raise_()
self.average_cc_but.raise_()
self.cloud_icon.raise_()
self.battery_percent_but.raise_()
self.batt_icon.raise_()
self.cloud_cover_but.raise_()
self.temp_text.raise_()
self.defuzz_but.raise_()
self.eco_level_but.raise_()
self.eco_level_txt.raise_()
self.temp_but.raise_()
self.cloud_cover_txt.raise_()
self.refresh_current.raise_()
self.refresh_avg.raise_()
self.dark_sky_1.raise_()
system.addItem(self.Fuzzy_system, "")
self.Room_Conditions = QtWidgets.QWidget()
self.Room_Conditions.setGeometry(QtCore.QRect(0, 0, 800, 538))
self.Room_Conditions.setObjectName("Room_Conditions")
self.title_2 = QtWidgets.QLabel(self.Room_Conditions)
self.title_2.setGeometry(QtCore.QRect(130, -20, 521, 85))
self.title_2.setStyleSheet("font: 36pt \"Peace Sans\";\n"
"color: rgb(233, 84, 32);")
self.title_2.setObjectName("title_2")
self.room_temp_txt = QtWidgets.QLabel(self.Room_Conditions)
self.room_temp_txt.setGeometry(QtCore.QRect(2, 90, 131, 61))
self.room_temp_txt.setStyleSheet("font: 75 32pt \"Moon\";\n"
"color:rgb(238, 247, 251);")
self.room_temp_txt.setObjectName("room_temp_txt")
self.room_temp_txt.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.room_hum_but = QtWidgets.QPushButton(self.Room_Conditions)
self.room_hum_but.setGeometry(QtCore.QRect(490, 110, 161, 26))
self.room_hum_but.setStyleSheet("color:rgb(233, 99, 94);\n"
"font: 75 11pt \"Moon\";")
self.room_hum_but.setObjectName("room_hum_but")
self.room_hum_but.clicked.connect(self.Room_hum_browser)
self.room_hum_txt = QtWidgets.QLabel(self.Room_Conditions)
self.room_hum_txt.setGeometry(QtCore.QRect(660, 90, 131, 61))
self.room_hum_txt.setStyleSheet("font: 75 32pt \"Moon\";\n"
"color:rgb(238, 247, 251);")
self.room_hum_txt.setObjectName("room_hum_txt")
self.room_hum_txt.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.room_temp_but = QtWidgets.QPushButton(self.Room_Conditions)
self.room_temp_but.setGeometry(QtCore.QRect(140, 110, 161, 26))
self.room_temp_but.setStyleSheet("color:rgb(233, 99, 94);\n"
"font: 75 11pt \"Moon\";")
self.room_temp_but.setObjectName("room_temp_but")
self.room_temp_but.clicked.connect(self.Room_temp_browser)
self.heater_on = QtWidgets.QLabel(self.Room_Conditions)
self.heater_on.setGeometry(QtCore.QRect(230, 310, 61, 61))
self.heater_on.setStyleSheet("font: 75 26pt \"Moon\";\n"
"color: rgb(0, 255, 0);")
self.heater_on.setObjectName("heater_on")
self.cooler_on = QtWidgets.QLabel(self.Room_Conditions)
self.cooler_on.setGeometry(QtCore.QRect(230, 380, 61, 61))
self.cooler_on.setStyleSheet("font: 75 26pt \"Moon\";\n"
"color: rgb(0, 255, 0);")
self.cooler_on.setObjectName("cooler_on")
self.heater_off = QtWidgets.QLabel(self.Room_Conditions)
self.heater_off.setGeometry(QtCore.QRect(300, 310, 61, 61))
self.heater_off.setStyleSheet("font: 75 26pt \"Moon\";\n"
"color: rgb(255, 0, 0);\n"
"")
self.heater_off.setObjectName("heater_off")
self.cooler_off = QtWidgets.QLabel(self.Room_Conditions)
self.cooler_off.setGeometry(QtCore.QRect(300, 380, 61, 61))
self.cooler_off.setStyleSheet("font: 75 26pt \"Moon\";\n"
"color: rgb(255, 0, 0);")
self.cooler_off.setObjectName("cooler_off")
self.heater = QtWidgets.QLabel(self.Room_Conditions)
self.heater.setGeometry(QtCore.QRect(150, 330, 71, 31))
self.heater.setStyleSheet("font: 11pt \"Peace Sans\";\n"
"color:rgb(85, 85, 255);")
self.heater.setObjectName("heater")
self.cooler = QtWidgets.QLabel(self.Room_Conditions)
self.cooler.setGeometry(QtCore.QRect(150, 400, 71, 31))
self.cooler.setStyleSheet("color:rgb(85, 85, 255);\n"
"font: 11pt \"Peace Sans\";")
self.cooler.setObjectName("cooler")
self.dehumid_on = QtWidgets.QLabel(self.Room_Conditions)
self.dehumid_on.setGeometry(QtCore.QRect(490, 380, 61, 61))
self.dehumid_on.setStyleSheet("font: 75 26pt \"Moon\";\n"
"color: rgb(0, 255, 0);")
self.dehumid_on.setObjectName("dehumid_on")
self.humid_off = QtWidgets.QLabel(self.Room_Conditions)
self.humid_off.setGeometry(QtCore.QRect(420, 310, 61, 61))
self.humid_off.setStyleSheet("font: 75 26pt \"Moon\";\n"
"color: rgb(255, 0, 0);")
self.humid_off.setObjectName("humid_off")
self.humid_on = QtWidgets.QLabel(self.Room_Conditions)
self.humid_on.setGeometry(QtCore.QRect(490, 310, 61, 61))
self.humid_on.setStyleSheet("font: 75 26pt \"Moon\";\n"
"color: rgb(0, 255, 0);")
self.humid_on.setObjectName("humid_on")
self.dehumid_off = QtWidgets.QLabel(self.Room_Conditions)
self.dehumid_off.setGeometry(QtCore.QRect(420, 380, 61, 61))
self.dehumid_off.setStyleSheet("font: 75 26pt \"Moon\";\n"
"color: rgb(255, 0, 0);")
self.dehumid_off.setObjectName("dehumid_off")
self.humidifier = QtWidgets.QLabel(self.Room_Conditions)
self.humidifier.setGeometry(QtCore.QRect(560, 330, 101, 31))
self.humidifier.setStyleSheet("font: 11pt \"Peace Sans\";\n"
"color:rgb(85, 85, 255);")
self.humidifier.setObjectName("humidifier")
self.dehumidifier = QtWidgets.QLabel(self.Room_Conditions)
self.dehumidifier.setGeometry(QtCore.QRect(560, 400, 121, 31))
self.dehumidifier.setStyleSheet("font: 11pt \"Peace Sans\";\n"
"color:rgb(85, 85, 255);")
self.dehumidifier.setObjectName("dehumidifier")
self.running = QtWidgets.QLabel(self.Room_Conditions)
self.running.setGeometry(QtCore.QRect(230, 170, 331, 41))
self.running.setStyleSheet("color: rgb(255, 255, 0);\n"
"font: 14pt \"Big John\";")
self.running.setObjectName("running")
self.run_eco_level = QtWidgets.QLabel(self.Room_Conditions)
self.run_eco_level.setGeometry(QtCore.QRect(350, 220, 81, 61))
self.run_eco_level.setStyleSheet("font: 40pt \"Big John\";\n"
"color: rgb(255, 255, 255);\n"
"")
        self.run_eco_level.setObjectName("run_eco_level")
self.run_eco_level.setText("--")
self.run_eco_level.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.open_ubidots = QtWidgets.QPushButton(self.Room_Conditions)
self.open_ubidots.setGeometry(QtCore.QRect(230, 460, 361, 51))
self.open_ubidots.setStyleSheet("color: rgb(255, 255, 255);\n"
"font: 11pt \"Big John\";")
self.open_ubidots.setObjectName("open_ubidots")
self.open_ubidots.clicked.connect(self.Open_ubidots)
self.dark_sky_2 = QtWidgets.QToolButton(self.Room_Conditions)
self.dark_sky_2.setGeometry(QtCore.QRect(640, 490, 158, 23))
self.dark_sky_2.setStyleSheet("font: 25 10pt \"Ubuntu\";\n"
"color: rgb(85, 170, 255)")
self.dark_sky_2.setObjectName("dark_sky_2")
system.addItem(self.Room_Conditions, "")
self.retranslateUi(system)
system.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(system)
def retranslateUi(self, system):
_translate = QtCore.QCoreApplication.translate
system.setWindowTitle(_translate("system", "ToolBox"))
self.title_1.setText(_translate("system", "SYSTEM VARIABLES"))
self.time_hours.setText(_translate("system", "<html><head/><body><p align=\"right\"><br/></p></body></html>"))
self.run_system.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">RUN SYSTEM IN OBTAINED ECONOMY LEVEL</span></p></body></html>"))
self.run_system.setText(_translate("system", "RUN SYSTEM"))
self.avg_temp_txt.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.temp_icon.setText(_translate("system", "<html><head/><body><p><img src=\":/icons/Icons/thermometer.png\"/></p></body></html>"))
self.avg_cc_txt.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.avg_batt_txt.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.battery_percent_but.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-size:9pt; font-weight:600; color:#e95420;\">VIEW PLOT IN UBIDOTS</span></p></body></html>"))
self.battery_percent_but.setText(_translate("system", "BATTERY PERCENTAGE"))
self.batt_icon.setText(_translate("system", "<html><head/><body><p><img src=\":/icons/Icons/battery.png\"/></p></body></html>"))
self.cloud_icon.setText(_translate("system", "<html><head/><body><p><img src=\":/icons/Icons/cloudy.png\"/></p></body></html>"))
self.average_cc_but.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-size:9pt; font-weight:600; color:#e95420;\">VIEW PLOT IN UBIDOTS</span></p></body></html>"))
self.average_cc_but.setText(_translate("system", "AVERAGE CLOUD COVER"))
self.defuzz_txt.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.defuzz_but.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">DEFUZZIFY THE INPUTS</span></p></body></html>"))
self.defuzz_but.setText(_translate("system", "DEFUZZIFICATION"))
self.eco_level_but.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">Log DATA</span></p></body></html>"))
self.eco_level_but.setText(_translate("system", "ECONOMY LEVEL"))
self.temp_but.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-size:9pt; font-weight:600; color:#e95420;\">WEATHER FORECAST</span></p></body></html>"))
self.temp_but.setText(_translate("system", "TEMPERATURE"))
self.average_temp_but.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-size:9pt; font-weight:600; color:#e95420;\">VIEW PLOT IN UBIDOTS</span></p></body></html>"))
self.average_temp_but.setText(_translate("system", "AVERAGE TEMPERATURE"))
self.cloud_cover_but.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-size:9pt; font-weight:600; color:#e95420;\">WEATHER FORECAST</span></p></body></html>"))
self.cloud_cover_but.setText(_translate("system", "CLOUD COVER"))
self.temp_text.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.eco_level_txt.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.cloud_cover_txt.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.refresh_current.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">REFRESH DATA</span></p></body></html>"))
self.refresh_current.setText(_translate("system", "REFRESH"))
self.refresh_avg.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">REFRESH DATA</span></p></body></html>"))
self.refresh_avg.setText(_translate("system", "REFRESH"))
self.dark_sky_1.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">DARK SKY</span></p></body></html>"))
self.dark_sky_1.setText(_translate("system", "POWERED BY DARK SKY"))
system.setItemText(system.indexOf(self.Fuzzy_system), _translate("system", "Page 1"))
self.title_2.setText(_translate("system", "ROOM CONDITIONS"))
self.room_temp_txt.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.room_hum_but.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-size:9pt; font-weight:600; color:#e95420;\">VIEW PLOT IN UBIDOTS</span></p></body></html>"))
self.room_hum_but.setText(_translate("system", "HUMIDITY"))
self.room_hum_txt.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.room_temp_but.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-size:9pt; font-weight:600; color:#e95420;\">VIEW PLOT IN UBIDOTS</span></p></body></html>"))
self.room_temp_but.setText(_translate("system", "TEMPERATURE"))
self.heater_on.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.cooler_on.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.heater_off.setText(_translate("system", "<html><head/><body><p align=\"right\"><br/></p></body></html>"))
self.cooler_off.setText(_translate("system", "<html><head/><body><p align=\"right\"><br/></p></body></html>"))
self.heater.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">HEATER STATUS</span></p></body></html>"))
self.heater.setText(_translate("system", "<html><head/><body><p align=\"right\">HEATER</p></body></html>"))
self.cooler.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">COOLER STATUS</span></p></body></html>"))
self.cooler.setText(_translate("system", "<html><head/><body><p align=\"right\">COOLER</p></body></html>"))
self.dehumid_on.setText(_translate("system", "<html><head/><body><p align=\"right\"><br/></p></body></html>"))
self.humid_off.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.humid_on.setText(_translate("system", "<html><head/><body><p align=\"right\"><br/></p></body></html>"))
self.dehumid_off.setText(_translate("system", "<html><head/><body><p><br/></p></body></html>"))
self.humidifier.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">HUMIDIFIER STATUS</span></p></body></html>"))
self.humidifier.setText(_translate("system", "HUMIDIFIER"))
self.dehumidifier.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">DEHUMIDIFIER STATUS</span></p></body></html>"))
self.dehumidifier.setText(_translate("system", "DEHUMIDIFIER"))
self.running.setText(_translate("system", "<html><head/><body><p align=\"center\">RUNNING IN ECONOMY LEVEL</p></body></html>"))
self.run_eco_level.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">VIEW PLOT</span></p></body></html>"))
self.run_eco_level.setText(_translate("system", "<html><head/><body><p align=\"center\"><br/></p></body></html>"))
self.open_ubidots.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">OPEN UBIDOTS IN WEB BROWSER</span></p></body></html>"))
self.open_ubidots.setText(_translate("system", "OPEN UBIDOTS"))
self.dark_sky_2.setToolTip(_translate("system", "<html><head/><body><p align=\"center\"><span style=\" font-family:\'Moon\'; font-size:9pt; font-weight:600; color:#e95420;\">DARK SKY</span></p></body></html>"))
self.dark_sky_2.setText(_translate("system", "POWERED BY DARK SKY"))
system.setItemText(system.indexOf(self.Room_Conditions), _translate("system", "Page 2"))
def DarkSky(self):
webbrowser.open('https://darksky.net/poweredby/', new = 2)
def Time(self):
self.time_hours.setText(QtCore.QTime.currentTime().toString("h"))
self.time_min.setText(QtCore.QTime.currentTime().toString("mm"))
def Date(self):
self.date.setText(QtCore.QDate.currentDate().toString("ddd, MMM d"))
def loading1(self):
self.done1 = False
movie = QMovie("Icons/loading.gif")
splash = MovieSplashScreen(movie)
splash.setMask(splash.mask())
splash.show()
        Thread(target=self.Update_Average).start()
while not self.done1:
app.processEvents()
splash.finish(system)
def Update_Average(self):
f = open('Ubidots_APIkey.txt', 'r')
apikey = f.readline().strip()
f.close()
api = ApiClient(token = apikey)
try:
temp = api.get_variable("58d76383762542260cf36d8f")
cloud_cover = api.get_variable("58d76394762542260a851a05")
batt = api.get_variable("58d763aa762542260cf36f24")
except ValueError:
print('Unable to obtain variable')
f = open('DS_APIkey.txt','r')
apikey = f.read()
f.close()
Bangalore = [12.9716, 77.5946]
fio = ForecastIO.ForecastIO(apikey,
units=ForecastIO.ForecastIO.UNITS_SI,
lang=ForecastIO.ForecastIO.LANG_ENGLISH,
latitude=Bangalore[0], longitude=Bangalore[1],
)
tempc = 0
clouds = 0
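        # Average the next 48 hourly forecasts (two days ahead) for
        # temperature and cloud cover.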
if fio.has_hourly() is True:
hourly = FIOHourly.FIOHourly(fio)
for hour in range(0, 48):
tempc = tempc + float(str(hourly.get_hour(hour)['temperature']))
clouds = clouds + float(str(hourly.get_hour(hour)['cloudCover']))
else:
print('No Hourly data')
self.t = round(tempc / 48, 2)
self.c = round(clouds / 48, 2)
self.b = self.Update_Battery()
try:
temp.save_value({'value': self.t})
cloud_cover.save_value({'value': self.c})
batt.save_value({'value': self.b})
time.sleep(1)
except:
print('Value not sent')
self.avg_temp_txt.setText('{:0.01f}°'.format(self.t))
self.avg_cc_txt.setText('{}%'.format(int(self.c*100)))
self.avg_batt_txt.setText('{}%'.format(self.b))
self.done1 = True
def loading2(self):
self.done2 = False
movie = QMovie("Icons/loading.gif")
splash = MovieSplashScreen(movie)
splash.setMask(splash.mask())
splash.show()
        Thread(target=self.Update_Current).start()
while not self.done2:
app.processEvents()
splash.finish(system)
def Batt_Percent(self):
webbrowser.open('https://app.ubidots.com/ubi/getchart/page/R2kbUV5P5DSJVlXdTfMOXflxNtM', new = 2)
def Avg_CC(self):
webbrowser.open('https://app.ubidots.com/ubi/getchart/page/0f62Hh2lV0PMO8-p_X7DYFyNnd4', new = 2)
def Avg_temp(self):
webbrowser.open('https://app.ubidots.com/ubi/getchart/page/DlD6wC0uiipZzD3nbBT_Xty6myk', new = 2)
def Update_Battery(self):
f = open('Ubidots_APIkey.txt', 'r')
apikey = f.readline().strip()
f.close()
api = ApiClient(token = apikey)
try:
batt = api.get_variable("58d763aa762542260cf36f24")
except ValueError:
print('Value Error')
# Initialize library.
disp.begin()
time.sleep(5)
width = disp.width
height = disp.height
# Clear display.
disp.clear()
disp.display()
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Load default font.
font = ImageFont.load_default()
# Alternatively load a TTF font. Make sure the .ttf font file is in the same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
#font = ImageFont.truetype('Minecraftia.ttf', 8)
# Hardware SPI configuration:
SPI_PORT = 0
SPI_DEVICE = 0
mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
# Main program loop.
time.sleep(2)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
value = mcp.read_adc(0)
        volts = (value * 3.3) / 1023.0  # voltage at the divider tap
        volts = volts * 5.7             # actual battery voltage
        volts = round(volts, 2)
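        # The MCP3008 is a 10-bit ADC (0-1023) referenced to 3.3 V; 5.7 is
        # the assumed resistor-divider ratio, and the branches below map
        # 11.6-13.6 V linearly onto 0-100 % charge.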
if (volts >=13.6):
batt = 100
print('100% Battery')
draw.text((0, 0), 'Battery percent at: ',font=font, fill = 255)
draw.text((50, 20),str(batt) , font=font, fill = 255)
disp.image(image)
disp.display()
time.sleep(1)
elif (volts > 11.6):
batt = round ((volts - 11.6) * 50,1)
print(batt,'% Battery')
draw.text((10, 0), 'Battery percent at: ',font=font, fill = 255)
draw.text((45, 20),str(batt) , font=font, fill = 255)
disp.image(image)
disp.display()
time.sleep(1)
else:
batt = 0
print('Connection Error')
draw.text((55, 10),':(' , font=font, fill = 255)
disp.image(image)
disp.display()
            # Pause briefly before returning.
time.sleep(1)
        return batt
def Update_Current(self):
f = open('DS_APIkey.txt','r')
apikey = f.read()
f.close()
Bangalore = [12.9716, 77.5946]
fio = ForecastIO.ForecastIO(apikey,
units=ForecastIO.ForecastIO.UNITS_SI,
lang=ForecastIO.ForecastIO.LANG_ENGLISH,
latitude=Bangalore[0], longitude=Bangalore[1],
)
if fio.has_currently() is True:
currently = FIOCurrently.FIOCurrently(fio)
self.temp_text.setText('{:0.01f}°'.format(currently.temperature))
self.cloud_cover_txt.setText('{}%'.format(int(currently.cloudCover * 100)))
else:
print('No Currently data')
self.done2 = True
def Defuzz(self):
# New Antecedent/Consequent objects hold universe variables and membership
# functions
batt_percent = ctrl.Antecedent(np.arange(0, 100, 1), 'Battery_percentage')
temp = ctrl.Antecedent(np.arange(15, 30, 1), 'Temperature')
cloud_cover = ctrl.Antecedent(np.arange(0, 1, 0.01), 'Cloud_cover')
eco_level = ctrl.Consequent(np.arange(1, 4, 0.01), 'Economy_level')
# Battery membership function population
batt_percent['Low_battery'] = fuzz.trapmf(batt_percent.universe, [0, 0, 20, 30])
batt_percent['Medium_battery'] = fuzz.trapmf(batt_percent.universe, [20, 25, 75, 80])
batt_percent['High_battery'] = fuzz.trapmf(batt_percent.universe, [75, 80, 100, 100])
# Temperature membership function population
temp['Low_temperature'] = fuzz.trapmf(temp.universe, [0, 0, 18, 20])
temp['Medium_temperature'] = fuzz.trapmf(temp.universe, [18, 20, 24, 26])
temp['High_temperature'] = fuzz.trapmf(temp.universe, [24 , 26, 30, 30])
# Cloud_cover membership function population
cloud_cover['Minimum_clouds'] = fuzz.trapmf(cloud_cover.universe, [0, 0, 0.20, 0.25])
cloud_cover['Medium_clouds'] = fuzz.trapmf(cloud_cover.universe, [0.20, 0.25, 0.65, 0.70])
cloud_cover['High_clouds'] = fuzz.trapmf(cloud_cover.universe, [0.65, 0.70, 1, 1])
# Custom membership functions can be built interactively with a familiar,
# Pythonic API
eco_level['Critical'] = fuzz.trimf(eco_level.universe, [0, 1.0, 2.0])
eco_level['Alert'] = fuzz.trimf(eco_level.universe, [1.75, 2.25, 2.75])
eco_level['Normal'] = fuzz.trimf(eco_level.universe, [2.5, 3.0, 3.5])
eco_level['Economyless'] = fuzz.trimf(eco_level.universe, [3.25, 4.0, 5.0])
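        # (fuzz.trapmf(u, [a, b, c, d]) ramps 0->1 over [a, b], holds 1 over
        # [b, c] and ramps back down over [c, d]; fuzz.trimf(u, [a, b, c]) is
        # the triangular special case peaking at b.)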
# Rules
rule1 = ctrl.Rule(batt_percent['Low_battery'] &
(~temp['High_temperature']),
eco_level['Critical'])
rule2 = ctrl.Rule(batt_percent['Low_battery'] &
temp['High_temperature'] &
cloud_cover['High_clouds'],
eco_level['Critical'])
rule3 = ctrl.Rule(batt_percent['Low_battery'] &
temp['High_temperature'] &
(~cloud_cover['High_clouds']),
eco_level['Alert'])
rule4 = ctrl.Rule(batt_percent['Medium_battery'] &
temp['Low_temperature'] &
(~cloud_cover['High_clouds']),
eco_level['Alert'])
rule5 = ctrl.Rule(batt_percent['Medium_battery'] &
temp['Low_temperature'] &
cloud_cover['High_clouds'],
eco_level['Critical'])
rule6 = ctrl.Rule(batt_percent['Medium_battery'] &
(~temp['Low_temperature']) &
(~cloud_cover['High_clouds']),
eco_level['Normal'])
rule7 = ctrl.Rule(batt_percent['Medium_battery'] &
(~temp['Low_temperature']) &
cloud_cover['High_clouds'],
eco_level['Alert'])
rule8 = ctrl.Rule(batt_percent['High_battery'] &
temp['Low_temperature'] &
(~cloud_cover['High_clouds']),
eco_level['Normal'])
rule9 = ctrl.Rule(batt_percent['High_battery'] &
temp['Low_temperature'] &
cloud_cover['High_clouds'],
eco_level['Alert'])
rule10 = ctrl.Rule(batt_percent['High_battery'] &
(~temp['Low_temperature']) &
(~cloud_cover['High_clouds']),
eco_level['Economyless'])
rule11 = ctrl.Rule(batt_percent['High_battery'] &
(~temp['Low_temperature']) &
cloud_cover['High_clouds'],
eco_level['Normal'])
eco_ctrl = ctrl.ControlSystem([rule1, rule2, rule3, rule4,
rule5, rule6, rule7, rule8,
rule9, rule10, rule11])
eco_mode = ctrl.ControlSystemSimulation(eco_ctrl)
# Pass inputs to the ControlSystem using Antecedent labels with Pythonic API
# Note: if you like passing many inputs all at once, use .inputs(dict_of_data)
eco_mode.input['Temperature'] = self.t
eco_mode.input['Cloud_cover'] = self.c
eco_mode.input['Battery_percentage'] = self.b
# Crunch the numbers
eco_mode.compute()
defuzz = eco_mode.output['Economy_level']
self.defuzz_txt.setText(format(defuzz,'.2f'))
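        # Round the crisp defuzzified output to the nearest whole economy level.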
self.eco = int(defuzz + 0.5)
def Eco(self):
if (self.eco < 1):
self.eco = 1
self.eco_level_txt.setNum(self.eco)
self.run_eco_level.setNum(self.eco)
filename1 = datetime.datetime.now().strftime("%Y.%m.%d_%H:%M")
save_path = 'Logs/'
complete_path = os.path.join(save_path, filename1+'.log')
f = open(complete_path, 'w')
if (self.t == 0) or (self.c == 0) or (self.b == 0):
f.write('Data Unavailable, running in economy level 1')
else:
f.write('Average Temperature is: ' + str(self.t) + ' °C' + '\n')
f.write('Average Cloud Cover is: ' + str(self.c) + ' %' + '\n')
f.write('Battery level is: ' + str(self.b) + '%' + '\n')
f.write('Economy Level is: ' + str(self.eco) + '\n')
f.close()
else:
self.eco_level_txt.setNum(self.eco)
self.run_eco_level.setNum(self.eco)
filename1 = datetime.datetime.now().strftime("%Y.%m.%d_%H:%M")
save_path = 'Logs/'
complete_path = os.path.join(save_path, filename1+'.txt')
f = open(complete_path, 'w')
if (self.t == 0) or (self.c == 0) or (self.b == 0):
f.write('Data Unavailable, running in economy level 1')
else:
f.write('Average Temperature is: ' + str(self.t) + ' °C' + '\n')
f.write('Average Cloud Cover is: ' + str(self.c) + ' %' + '\n')
f.write('Battery level is: ' + str(self.b) + ' % ' + '\n')
f.write('Economy Level is: ' + str(self.eco) + '\n')
f.close()
def Room_cond(self):
if ser.isOpen():
ser.close()
ser.open()
ser.isOpen()
ser.write('s'.encode())
time.sleep(2)
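        # The serial reply from the attached controller is assumed to be
        # fixed-width: the first five characters carry humidity and the
        # remainder temperature.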
response = ser.readline().strip().decode()
hum = float(response[:5])
temp = float(response[5:])
f = open('Ubidots_APIkey.txt', 'r')
apikey = f.readline().strip()
f.close()
api = ApiClient(token = apikey)
try:
roomtemp = api.get_variable("58d763b8762542260a851bd1")
roomhumidity = api.get_variable("58d763c57625422609b8d088")
except ValueError:
print('Unable to obtain variable')
self.roomt = temp
self.roomh = hum
try:
roomtemp.save_value({'value': self.roomt})
roomhumidity.save_value({'value': self.roomh})
time.sleep(1)
except:
pass
self.room_temp_txt.setText(format(self.roomt,'.2f'))
self.room_hum_txt.setText(format(self.roomh,'.2f'))
def Room_temp_browser(self):
webbrowser.open('https://app.ubidots.com/ubi/getchart/page/G284654CCK1E77kbBR7zmpBDNkw', new = 2)
def Room_hum_browser(self):
webbrowser.open('https://app.ubidots.com/ubi/getchart/page/qgaJ95jUNq91E3aVxJsNo7NphbU', new = 2)
def Run_System(self):
f = open('Ubidots_APIkey.txt', 'r')
apikey = f.readline().strip()
f.close()
api = ApiClient(token = apikey)
self.cooler_on.setText(' ')
self.heater_on.setText(' ')
self.humid_on.setText(' ')
self.dehumid_on.setText(' ')
self.cooler_off.setText(' ')
self.heater_off.setText(' ')
self.humid_off.setText(' ')
self.dehumid_off.setText(' ')
self.Room_cond()
try:
cooler = api.get_variable("58d768e0762542260a855c7a")
heater = api.get_variable("58d768eb7625422609b91152")
humidifier = api.get_variable("58d768f8762542260cf3b292")
exhaust = api.get_variable("58d76907762542260dfad769")
except ValueError:
print('Unable to obtain variable')
cooler.save_value({'value': 0})
heater.save_value({'value': 0})
humidifier.save_value({'value': 0})
exhaust.save_value({'value': 0})
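        # Each economy level below trades comfort for battery: level 1 keeps
        # the widest bands (15-35 °C, 25-80 % RH) and level 4 the tightest
        # (22-27 °C). The serial bytes 'c', 'f', 'h' and 'e' switch the
        # cooler, heater, humidifier and exhaust on the attached controller.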
if (self.eco < 1):
self.run_eco_level.setText('--')
elif (self.eco == 1):
t = self.roomt
h = self.roomh
if (t >= 35):
ser.write('c'.encode())
self.cooler_on.setText('ON')
self.heater_off.setText('OFF')
cooler.save_value({'value': 1})
heater.save_value({'value': 0})
time.sleep(1)
if (t <= 15):
ser.write('f'.encode())
self.heater_on.setText('ON')
self.cooler_off.setText('OFF')
heater.save_value({'value': 1})
cooler.save_value({'value': 0})
time.sleep(1)
if (h <= 25):
ser.write('h'.encode())
self.humid_on.setText('ON')
self.dehumid_off.setText('OFF')
humidifier.save_value({'value': 1})
exhaust.save_value({'value': 0})
time.sleep(1)
if (h >= 80):
ser.write('e'.encode())
self.dehumid_on.setText('ON')
self.humid_off.setText('OFF')
exhaust.save_value({'value': 1})
humidifier.save_value({'value': 0})
time.sleep(1)
if ((h > 25 and h < 80)):
self.humid_off.setText('OFF')
self.dehumid_off.setText('OFF')
humidifier.save_value({'value': 0})
exhaust.save_value({'value': 0})
time.sleep(1)
if ((t > 15) and (t < 35)):
self.cooler_off.setText('OFF')
self.heater_off.setText('OFF')
cooler.save_value({'value': 0})
heater.save_value({'value': 0})
time.sleep(1)
elif (self.eco == 2):
t = self.roomt
h = self.roomh
if (t >= 32):
ser.write('c'.encode())
self.cooler_on.setText('ON')
self.heater_off.setText('OFF')
cooler.save_value({'value': 1})
heater.save_value({'value': 0})
time.sleep(1)
if (t <= 18):
ser.write('f'.encode())
self.heater_on.setText('ON')
self.cooler_off.setText('OFF')
heater.save_value({'value': 1})
cooler.save_value({'value': 0})
time.sleep(1)
if (h <= 30):
ser.write('h'.encode())
self.humid_on.setText('ON')
self.dehumid_off.setText('OFF')
humidifier.save_value({'value': 1})
exhaust.save_value({'value': 0})
time.sleep(1)
if (h >= 70):
ser.write('e'.encode())
self.dehumid_on.setText('ON')
self.humid_off.setText('OFF')
exhaust.save_value({'value': 1})
humidifier.save_value({'value': 0})
time.sleep(1)
if ((h > 30 and h < 70)):
self.humid_off.setText('OFF')
self.dehumid_off.setText('OFF')
exhaust.save_value({'value': 0})
humidifier.save_value({'value': 0})
time.sleep(1)
if ((t > 18) and (t < 32)):
self.cooler_off.setText('OFF')
self.heater_off.setText('OFF')
cooler.save_value({'value': 0})
heater.save_value({'value': 0})
time.sleep(1)
elif (self.eco == 3):
t = self.roomt
h = self.roomh
if (t >= 30):
ser.write('c'.encode())
self.cooler_on.setText('ON')
self.heater_off.setText('OFF')
cooler.save_value({'value': 1})
heater.save_value({'value': 0})
time.sleep(1)
if (t <= 20):
ser.write('f'.encode())
self.heater_on.setText('ON')
self.cooler_off.setText('OFF')
heater.save_value({'value': 1})
cooler.save_value({'value': 0})
time.sleep(1)
if (h <= 40):
ser.write('h'.encode())
self.humid_on.setText('ON')
self.dehumid_off.setText('OFF')
humidifier.save_value({'value': 1})
exhaust.save_value({'value': 0})
time.sleep(1)
if (h >= 60):
ser.write('e'.encode())
self.dehumid_on.setText('ON')
self.humid_off.setText('OFF')
exhaust.save_value({'value': 1})
humidifier.save_value({'value': 0})
time.sleep(1)
if ((h > 40 and h < 60)):
self.humid_off.setText('OFF')
self.dehumid_off.setText('OFF')
exhaust.save_value({'value': 0})
humidifier.save_value({'value': 0})
time.sleep(1)
if ((t > 20) and (t < 30)):
self.cooler_off.setText('OFF')
self.heater_off.setText('OFF')
cooler.save_value({'value': 0})
heater.save_value({'value': 0})
time.sleep(1)
elif (self.eco == 4):
t = self.roomt
h = self.roomh
if (t >= 27):
ser.write('c'.encode())
self.cooler_on.setText('ON')
self.heater_off.setText('OFF')
cooler.save_value({'value': 1})
heater.save_value({'value': 0})
time.sleep(1)
if (t <= 22):
ser.write('f'.encode())
self.heater_on.setText('ON')
self.cooler_off.setText('OFF')
heater.save_value({'value': 1})
cooler.save_value({'value': 0})
time.sleep(1)
if (h <= 25):
ser.write('h'.encode())
self.humid_on.setText('ON')
self.dehumid_off.setText('OFF')
humidifier.save_value({'value': 1})
exhaust.save_value({'value': 0})
time.sleep(1)
if (h >= 50):
ser.write('e'.encode())
self.dehumid_on.setText('ON')
self.humid_off.setText('OFF')
exhaust.save_value({'value': 1})
humidifier.save_value({'value': 0})
time.sleep(1)
if ((h > 25) and (h < 50)):
self.humid_off.setText('OFF')
self.dehumid_off.setText('OFF')
exhaust.save_value({'value': 0})
humidifier.save_value({'value': 0})
time.sleep(1)
if ((t > 22) and (t < 27)):
self.cooler_off.setText('OFF')
self.heater_off.setText('OFF')
cooler.save_value({'value': 0})
heater.save_value({'value': 0})
time.sleep(1)
def Open_ubidots(self):
webbrowser.open('https://app.ubidots.com/ubi/public/getdashboard/page/P8OAd8cR6dtoL6aO4AQ384euynE', new = 2)
import system_rc
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
system = QtWidgets.QToolBox()
ui = Ui_system()
ui.setupUi(system)
system.move(QApplication.desktop().screen().rect().center() - system.rect().center())
system.show()
sys.exit(app.exec_())
|
kunz07/fyp2017
|
GUI/final.py
|
Python
|
mit
| 52,154
|
"""Store various constants here"""
from enum import Enum
# Maximum file upload size (in bytes).
MAX_CONTENT_LENGTH = 1 * 1024 * 1024 * 1024
# Authentication/account creation constants
PWD_HASH_ALGORITHM = 'pbkdf2_sha256'
SALT_SIZE = 24
MIN_USERNAME_LENGTH = 2
MAX_USERNAME_LENGTH = 32
MIN_PASSWORD_LENGTH = 8
MAX_PASSWORD_LENGTH = 1024
HASH_ROUNDS = 100000
PWD_RESET_KEY_LENGTH = 32
# Length of time before recovery key expires, in minutes.
PWD_RESET_KEY_EXPIRATION = 1 * 24 * 60
CREATE_ACCOUNT_KEY_LENGTH = 32
class Gender(Enum):
"""Value of members.gender if member's gender is unknown"""
NO_GENDER = None
"""Value of members.gender if member is female"""
FEMALE = 0
"""Value of members.gender if member is male"""
MALE = 1
CONTACTS = {
'Administration': [{
'name': 'Kevin Gilmartin',
'role': 'Dean of Undergraduate Students',
'email': 'kmg@hss.caltech.edu'
}, {
'name': 'Lesley Nye',
'role': 'Dean of Undergraduate Students',
'email': 'lnye@caltech.edu'
}, {
'name': 'Kristin Weyman',
'role': 'Associate Dean of Undergraduate Students',
'email': 'kweyman@caltech.edu'
}, {
'name': 'Beth Larranaga',
'role': 'Office Manager',
'email': 'rosel@caltech.edu'
}, {
'name': 'Sara Loredo',
'role': 'Office Assistant',
'email': 'sara@caltech.edu'
}],
'Student Life': [{
'name':
'Tom Mannion',
'role':
'Senior Director, Student Activities and Programs',
'email':
'mannion@caltech.edu'
}, {
'name': 'Joe Shepherd',
'role': 'Vice President for Student Affairs',
'email': 'joseph.e.shepherd@caltech.edu'
}, {
'name':
'Felicia Hunt',
'role':
'Assistant Vice President for Student Affairs and Residential Experience',
'email':
'fhunt@caltech.edu'
}, {
'name': 'Maria Katsas',
'role': 'Director of Housing',
'email': 'maria@caltech.edu'
}, {
'name':
'Allie McIntosh',
'role':
'Community Educator and Deputy Title IX Coordinator',
'email':
'allie@caltech.edu'
}, {
'name': 'Jaime Reyes',
'role': 'Acting Director of Dining Services',
'email': 'reyes@caltech.edu'
}]
}
|
ASCIT/donut-python
|
donut/constants.py
|
Python
|
mit
| 2,372
|
"""
created 09/05/17
For execution of the kallisto quantification step.
To be run with three arguments:
* basedir - top level output directory
* input directory - contains folders with .fastq.gz files
* max_threads - how many threads to allocate to kallisto
Returns kallisto quantifications and associated log files to a directory
within the top level output dir.
An example pair of files is:
25uM_1_R1_trimmed_1P.fastq.gz
25uM_1_R1_trimmed_2P.fastq.gz
Outputs kallisto files for each read pair and
associated log files in a nested directory
"""
# --- packages
import os
import sys
from subprocess import call
# --- variables using sys.argv
basedir = sys.argv[1]
inputdirectory = sys.argv[2]
max_threads = sys.argv[3]
processed = basedir + "kallisto/"
# --- functions
def kallisto_call(read1):
"""
    read1 is the forward read of the pair.
    Calls kallisto quant for the read pair given by the arguments.
    TODO: rewrite this to operate on a single read pair so it can be
    parallelised; the current approach does not actually run in parallel
    (see the hedged sketch after the main loop below).
"""
dividing = read1.split(".")
basename = dividing[0].replace("_1P", "")
read2 = read1.replace("1P", "2P")
call(
"kallisto quant -i " + basedir +
"transcriptome_kallisto.idx -t " +
max_threads + " -o " + processed + basename + " -b 100 " +
inputdirectory + read1 + " " + inputdirectory + read2, shell=True)
# --- __main__ call
if __name__ == "__main__":
    # --- check dirs and create if necessary
if not os.path.exists(processed):
os.makedirs(processed)
# --- create list of read1 pair file names
read_list = []
for fname in os.listdir(inputdirectory):
if "1P" in fname:
read_list.append(fname)
    # --- call kallisto_call on each read pair (currently sequential; see note below)
for read in read_list:
kallisto_call(read)
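    # Note: the loop above runs the read pairs one after another. A hedged
    # sketch of true parallelism (what the kallisto_call docstring asks for)
    # could look like:
    #     from multiprocessing import Pool
    #     with Pool(4) as pool:
    #         pool.map(kallisto_call, read_list)
    # though kallisto already multithreads internally via -t, so this may
    # oversubscribe the CPU.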
|
samleenz/rnaseq_pipe
|
kallisto_quant.py
|
Python
|
mit
| 1,936
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
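    # (NaN is the only float that compares unequal to itself, so the
    #  X[i,j] != X[i,j] test below counts NaN entries.)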
i=0
n=0
while i < 123:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
    Y = (W.T)*B # 'B' is used on this setup; otherwise use the variance-normalised 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Scaled_Features/best_kNN_PCA/objects/test11_cross_validate_objects_1200ms_scaled_method_v.py
|
Python
|
mit
| 4,915
|
# Kata link: https://www.codewars.com/kata/58daa7617332e59593000006
# First solution
def find_longest(arr):
count = [len(str(v)) for v in arr]
max_value = max(count)
max_index = count.index(max_value)
return arr[max_index]
# Another solution
def find_longest(arr):
return max(arr, key=lambda x: len(str(x)))
|
chyumin/Codewars
|
Python/7 kyu/Most Digits.py
|
Python
|
mit
| 330
|
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Modified by Mauy5043 (2016)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This is a direct copy of what's in the Adafruit Python GPIO library:
# https://raw.githubusercontent.com/adafruit/Adafruit_Python_GPIO/master/Adafruit_GPIO/Platform.py
# TODO: Add dependency on Adafruit Python GPIO and use its platform detect
# functions.
import platform
import re
# Platform identification constants.
UNKNOWN = 0
RASPBERRY_PI = 1
BEAGLEBONE_BLACK = 2
def platform_detect():
return BEAGLEBONE_BLACK
|
Mausy5043/bonediagd
|
DHT22/bonediagd_DHT/platform_detect.py
|
Python
|
mit
| 1,586
|
import numpy as _np
import lnls as _lnls
import pyaccel as _pyaccel
from . import lattice as _lattice
default_cavity_on = False
default_radiation_on = False
default_vchamber_on = False
def create_accelerator(optics_mode=_lattice.default_optics_mode, energy=_lattice.energy):
lattice = _lattice.create_lattice(optics_mode=optics_mode, energy=energy)
accelerator = _pyaccel.accelerator.Accelerator(
lattice=lattice,
energy=energy,
harmonic_number=_lattice.harmonic_number,
cavity_on=default_cavity_on,
radiation_on=default_radiation_on,
vchamber_on=default_vchamber_on
)
return accelerator
accelerator_data = dict()
accelerator_data['lattice_version'] = 'BO_V06_01'
accelerator_data['global_coupling'] = 0.0002 # expected corrected value
accelerator_data['pressure_profile'] = _np.array([[0, 496.8], [1.5e-8]*2])  # [s [m], p [mbar]]; 496.8 ~ lattice length of 496.78745 m
|
lnls-fac/sirius
|
pymodels/BO_V06_01/accelerator.py
|
Python
|
mit
| 915
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
from django.conf.urls import url
from django.conf.urls import patterns
from django.shortcuts import redirect
urlpatterns = patterns(
"",
url(r"^$", "n7.web.n7.views.demo", name="demo"),
url(r"^triples/$", "n7.web.n7.views.trainer", name="trainer"),
url(r"^novels/$", "n7.web.n7.views.trainer_add", name="trainer_post"),
)
|
zaycev/n7
|
n7/web/urls.py
|
Python
|
mit
| 586
|
'''
Week-2:Exercise-grader-polysum
A regular polygon has n number of sides. Each side has length s.
The area of a regular polygon is: (0.25∗n∗s^2)/tan(π/n)
The perimeter of a polygon is: length of the boundary of the polygon
Write a function called polysum that takes 2 arguments, n and s. This function should sum the area and square of the perimeter of the regular polygon. The function returns the sum, rounded to 4 decimal places.
'''
#code
import math
def polysum(n,s):
'''
    Input: n - number of sides (should be an integer)
           s - length of each side (can be an integer or a float)
    Output: returns the sum of the area and the square of the perimeter
            of the regular polygon (a float)
'''
#Code
def areaOfPolygon(n,s):
#Pi = 3.1428
area = (0.25 * n * s ** 2)/math.tan(math.pi/n)
return area
def perimeterOfPolygon(n,s):
perimeter = n * s
return perimeter
    total = areaOfPolygon(n,s) + (perimeterOfPolygon(n,s) ** 2)
    return round(total, 4)
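if __name__ == '__main__':
    # Sanity check: a unit square (n=4, s=1) has area 1.0 and perimeter 4,
    # so the expected result is 1.0 + 4 ** 2 = 17.0.
    assert polysum(4, 1) == 17.0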
|
ahmedkareem999/MITx-6.00.1x
|
polySum.py
|
Python
|
mit
| 1,018
|
#!/usr/bin/env python3
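# Project Euler 16: sum of the decimal digits of 2**1000 (answer: 1366).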
print(sum(map(int, str(2**1000))))
|
mazayus/ProjectEuler
|
problem016.py
|
Python
|
mit
| 59
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-01 22:55
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0038_contentnode_author'),
]
operations = [
migrations.AlterField(
model_name='formatpreset',
name='id',
field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), (
'document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail')], max_length=150, primary_key=True, serialize=False),
),
]
|
DXCanas/content-curation
|
contentcuration/contentcuration/migrations/0039_auto_20161101_1555.py
|
Python
|
mit
| 1,022
|
import os
import re
from setuptools import setup
base_path = os.path.dirname(__file__)
def get_long_description():
readme_md = os.path.join(base_path, "README.md")
with open(readme_md) as f:
return f.read()
with open(os.path.join(base_path, "cfscrape", "__init__.py")) as f:
VERSION = re.compile(r'.*__version__ = "(.*?)"', re.S).match(f.read()).group(1)
setup(
name="cfscrape",
packages=["cfscrape"],
version=VERSION,
description='A simple Python module to bypass Cloudflare\'s anti-bot page. See https://github.com/Anorov/cloudflare-scrape for more information.',
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Anorov",
author_email="anorov.vorona@gmail.com",
url="https://github.com/Anorov/cloudflare-scrape",
keywords=["cloudflare", "scraping"],
include_package_data=True,
install_requires=["requests >= 2.23.0"],
)
|
Anorov/cloudflare-scrape
|
setup.py
|
Python
|
mit
| 943
|
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
def home(request):
return render(request, 'home.html', {'context_var': 'expected'})
def withUrlFields(request, value):
return HttpResponse(value)
@login_required
def requiresLogin(request):
return HttpResponse('logged in')
def httpStatus(request, status):
return HttpResponse(status=int(status))
|
tctimmeh/django-testing-base
|
testsite/testapp/views.py
|
Python
|
mit
| 449
|
#!/usr/bin/python
# Made by Sangpil Kim
# June 2016
import json
from google import search
import csv
def searchGoogle(query,dic):
bloomberg = []
forbes = []
# later do forbes as well
for url in search(query, stop=10):
print(url)
if 'bloomberg.com/research/stocks/private/person' in url:
bloomberg.append(url)
if 'bloomberg.com/research/stocks/people/person' in url:
bloomberg.append(url)
if 'forbes.com/lists' in url:
forbes.append(url)
dic['bloomberg'] = bloomberg
dic['forbes'] = forbes
return dic
def scrapInfo(name):
dic = {}
dic['name'] = name
query = dic['name']+ ' bloomberg'
info = searchGoogle(query,dic)
return info
def readCSV(fileName):
with open(fileName,'rU') as csvfile:
names = []
companies = []
reader = csv.DictReader(csvfile)
for row in reader:
names.append(row['ceoname'])
companies.append(row['coname'])
return names, companies
# Read CSV file with colum name by DictReader
tuples = readCSV('ceoname.csv')
# Unpacking tuples
names , _ = tuples
print(names)
conj = []
#Scrap info
for name in names:
conj.append(scrapInfo(name))
print(conj[len(conj)-1])
#Dump as json
dstJson = 'oldBloomberg.json'
with open(dstJson, mode='w', encoding='utf-8') as f:
json.dump(conj,f)
|
spk921/scrapers
|
bloomberg/archive/getOldBloomberg.py
|
Python
|
mit
| 1,395
|
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest.ip_messaging import TwilioIpMessagingClient
# Your Account Sid and Auth Token from twilio.com/user/account
account = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
token = "your_auth_token"
client = TwilioIpMessagingClient(account, token)
service = client.services.get(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
channel = service.channels.get(sid="CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
messages = channel.messages.list()
for m in messages:
print(m)
|
teoreteetik/api-snippets
|
ip-messaging/rest/messages/list-messages/list-messages.5.x.py
|
Python
|
mit
| 533
|
from . import common
import os
import hglib
class test_paths(common.basetest):
def test_basic(self):
f = open('.hg/hgrc', 'a')
f.write('[paths]\nfoo = bar\n')
f.close()
# hgrc isn't watched for changes yet, have to reopen
self.client = hglib.open()
paths = self.client.paths()
self.assertEquals(len(paths), 1)
self.assertEquals(paths['foo'], os.path.abspath('bar'))
self.assertEquals(self.client.paths('foo'), os.path.abspath('bar'))
|
beckjake/python3-hglib
|
tests/test-paths.py
|
Python
|
mit
| 512
|
# -*- coding: utf-8 -*-
# program that asks for a number and prints its next 10 successive triples
chaine = input("enter a number: ")
nombre = int(chaine)
triple = nombre
compteur = 1
while compteur <= 10:
    triple = triple * 3
    print(triple)
    compteur = compteur + 1
|
Straor/Prog
|
Python/prog13.py
|
Python
|
mit
| 255
|
#!/usr/bin/python3
"""
Given a function rand7 which generates a uniform random integer in the range 1
to 7, write a function rand10 which generates a uniform random integer in the
range 1 to 10.
Do NOT use system's Math.random().
"""
# The rand7() API is already defined for you.
def rand7():
return 0
class Solution:
def rand10(self):
"""
generate 7 twice, (rv1, rv2), 49 combination
assign 40 combinations for the 1 to 10 respectively
7-ary system
:rtype: int
"""
while True:
rv1 = rand7()
rv2 = rand7()
s = (rv1 - 1) * 7 + (rv2 - 1) # make it start from 0
if s < 40: # s \in [0, 40)
return s % 10 + 1 # since I make it start from 0
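if __name__ == "__main__":
    # Hedged local sanity check: the judge normally supplies rand7, so this
    # uniform stand-in (and the histogram below) exist only for testing.
    import random

    def rand7():
        return random.randint(1, 7)

    counts = [0] * 10
    sol = Solution()
    for _ in range(100000):
        counts[sol.rand10() - 1] += 1
    print(counts)  # each bucket should land near 10,000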
|
algorhythms/LeetCode
|
470 Implement Rand10() Using Rand7().py
|
Python
|
mit
| 773
|
import question_template
game_type = 'input_output'
source_language = 'C'
parameter_list = [
['$x1','int'],['$x2','int'],['$x3','int'],['$y0','int'],
]
tuple_list = [
['for_continue_',
[0,1,2,None],
[0,2,2,None],
[0,4,2,None],
[0,6,2,None],
[0,7,2,None],
[None,None,2,1],
[None,None,2,2],
[None,None,2,4],
[None,None,2,6],
[None,None,2,7],
[0,1,3,None],
[0,2,3,None],
[0,4,3,None],
[0,6,4,None],
[0,7,5,None],
[None,None,3,1],
[None,None,3,2],
[None,None,3,4],
[None,None,4,6],
[None,None,5,7],
]
]
global_code_template = '''\
d #include <stdio.h>
x #include <stdio.h>
'''
main_code_template = '''\
dx int s = $x1;
dx int i;
dx for (i = 1; i < $x2; i++) {
dx if (i % $x3 == 0)
dx continue;
dx s = s + i;
dx }
dx printf("%d\\n",s);
'''
argv_template = ''
stdin_template = ''
stdout_template = '''\
$y0
'''
question = question_template.Question_template(game_type,source_language,
parameter_list,tuple_list,global_code_template,main_code_template,
argv_template,stdin_template,stdout_template)
|
stryder199/RyarkAssignments
|
Assignment2/ttt/archive/for_loops/for_continue.py
|
Python
|
mit
| 1,067
|
"""
Geofilters
----------
Filters used to detect and filter out incorrect data.
"""
import os
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.neighbors import KDTree
from pySpatialTools.Preprocess.Transformations.Transformation_2d.geo_filters\
import check_in_square_area
def check_correct_spain_coord(coord, radians=False):
"Check if the coordinates given are in Spain or not."
coord = np.array(coord)
lim_points = np.array([[-18.25, 4.5], [27.75, 44]])
if radians:
lim_points = np.pi/180*lim_points
logi = check_in_square_area(coord, lim_points)
return logi
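# Illustrative check (hypothetical lon/lat pairs, assuming the rows of
# lim_points bound longitude and latitude respectively):
#   check_correct_spain_coord(np.array([[-3.7, 40.4], [2.35, 48.85]]))
#   -> array([ True, False])   # Madrid falls inside the box, Paris does not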
def filter_uncorrect_coord_spain(data, coord_vars, radians=False):
"Filter not corrrect spain coordinates."
coord = data[coord_vars].as_matrix()
logi = check_correct_spain_coord(coord, radians)
return data[logi]
def filter_bool_uncorrect_coord_spain(data, coord_vars, radians=False):
"Filter data from pandas dataframe structure."
coord = data[coord_vars].as_matrix()
logi = check_correct_spain_coord(coord, radians)
return logi
def fill_locations_by_region_jittering(locations, uncorrect, regions):
"""Creation random locations for uncorrect locations."""
## 0. Preparing computations
new_locations = locations[:]
u_regs = np.unique(regions)
regs_mean_locs = []
regs_std_locs = []
## 1. Computing statistical correct locations
for reg in u_regs:
logi = np.logical_and(regions == reg, np.logical_not(uncorrect))
reg_mean_locs = np.mean(locations[logi], axis=0)
reg_std_locs = np.std(locations[logi], axis=0)
regs_mean_locs.append(reg_mean_locs)
regs_std_locs.append(reg_std_locs)
## 2. Computing new locations for uncorrect
idxs = np.where(uncorrect)[0]
new_locs = []
for i in idxs:
reg = regions[i]
i_reg = np.where(u_regs == reg)[0][0]
# Random creation
loc = np.random.random(2)*regs_std_locs[i_reg] + regs_mean_locs[i_reg]
new_locs.append(loc)
## 3. Replace
new_locations[uncorrect] = np.array(new_locs)
return new_locations
def get_statistics2fill_locations(locations, regions):
## 0. Preparing computations
correct = check_correct_spain_coord(locations)
    # NaN != NaN, so "e != np.nan" never filters anything; test explicitly
    regions = [e for e in regions if not (isinstance(e, float) and np.isnan(e))]
u_regs = np.unique(regions)
regs_mean_locs = []
regs_std_locs = []
## 1. Computing statistical correct locations
for reg in u_regs:
logi = np.logical_and(regions == reg, correct)
reg_mean_locs = np.mean(locations[logi], axis=0)
reg_std_locs = np.std(locations[logi], axis=0)
regs_mean_locs.append(reg_mean_locs)
regs_std_locs.append(reg_std_locs)
return regs_mean_locs, regs_std_locs, u_regs
def fill_locations(df, loc_vars, reg_var, mean_locs, std_locs, u_regs):
## 0. Preparation computations
locs = df[loc_vars].as_matrix()
regions = df[reg_var].as_matrix()
correct = check_correct_spain_coord(locs)
idxs = np.where(np.logical_not(correct))[0]
## 1. Compute new locations
new_locs = []
for i in idxs:
reg = regions[i]
i_reg = np.where(u_regs == reg)[0][0]
# Random creation
loc = np.random.random(2)*std_locs[i_reg] + mean_locs[i_reg]
new_locs.append(loc)
    # use .loc to avoid pandas chained assignment (writing into a copy)
    df.loc[np.logical_not(correct), loc_vars] = np.array(new_locs)
return df
###############################################################################
############################ Auxiliar to cleanning ############################
###############################################################################
def fill_nulls(df, mean_locs, std_locs, u_cps, raw_muni, raw_cps, raw_locs,
pathdata):
loc_vars, reg_var = ['es-x', 'es-y'], 'cp'
locs = df[loc_vars].as_matrix()
null_locs = np.logical_not(check_correct_spain_coord(locs))
null_cps = pd.isnull(df[reg_var]).as_matrix()
null_possible = np.array([e in u_cps for e in list(df['cp'])]).astype(bool)
null_imp = np.logical_and(np.logical_not(null_possible), null_locs)
null_both = np.logical_or(np.logical_and(null_locs, null_cps), null_imp)
null_neither = np.logical_and(np.logical_not(null_locs),
np.logical_not(null_cps))
# print null_locs.sum(), null_cps.sum(), null_both.sum()
null_cps2locs = np.logical_and(null_locs, np.logical_not(null_cps))
null_cps2locs = np.logical_and(null_cps2locs, null_possible)
null_locs2cps = np.logical_and(null_cps, np.logical_not(null_locs))
# print null_both.sum(), null_cps2locs.sum(), null_locs2cps.sum()
# print null_locs.sum(), null_cps.sum(), null_imp.sum()
## Inputing locations from cp
if null_cps2locs.sum():
new_locs = create_cp2locs(mean_locs, std_locs, u_cps, null_cps2locs,
list(df['cp']))
df_null_locs = pd.DataFrame({'nif': list(df['nif'][null_cps2locs]),
'es-x': new_locs[:, 0],
'es-y': new_locs[:, 1]})
df['es-x'][null_cps2locs] = new_locs[:, 0]
df['es-y'][null_cps2locs] = new_locs[:, 1]
else:
df_null_locs = pd.DataFrame({'nif': [], 'es-x': [], 'es-y': []})
df_null_locs.to_csv(os.path.join(pathdata, 'cps2locs'), sep=';')
## Inputing cp from locations
if null_locs2cps.sum():
new_cps = create_locs2cp(locs, null_locs2cps, raw_locs, raw_cps)
df_null_cps = pd.DataFrame({'nif': list(df['nif'][null_locs2cps]),
'cp': list(new_cps)})
df['cp'][null_locs2cps] = new_cps
else:
df_null_cps = pd.DataFrame({'nif': [], 'cp': []})
df_null_cps.to_csv(os.path.join(pathdata, 'locs2cps'), sep=';')
## Inputing cp and locations from municipio
# localidades = list(df['localidad'][null_both])
# localidades_known = list(df['localidad'][np.logical_not(null_both)])
# cp
# new2_cps, new2_locs = create_locsandcp()
localidades = [e.strip().lower() for e in list(df['localidad'][null_both])]
df_null_both = pd.DataFrame({'nif': list(df['nif'][null_both]),
# 'localidad': localidades,
'cp': list(df['cp'][null_both]),
'es-x': df['es-x'][null_both],
'es-y': df['es-y'][null_both]})
# 'cp': list(new2_cps),
# 'es-x': new2_locs[:, 0],
# 'es-y': new2_locs[:, 1]})
df_null_both.to_csv(os.path.join(pathdata, 'nulllocsandcps'), sep=';')
# df['cp'][null_both] = new2_cps
# df['es-x'][null_both] = new2_locs[:, 0]
# df['es-y'][null_both] = new2_locs[:, 1]
# print df.shape, null_neither.sum()
df = df[null_neither]
return df
def create_cp2locs(mean_locs, std_locs, u_regs, uncorrect, regions):
idxs = np.where(uncorrect)[0]
new_locs = []
for i in idxs:
reg = regions[i]
i_reg = np.where(u_regs == reg)[0][0]
# Random creation
loc = np.random.random(2)*std_locs[i_reg] + mean_locs[i_reg]
new_locs.append(loc)
new_locs = np.array(new_locs)
return new_locs
def create_locs2cp(locs, null_locs2cps, raw_locs, raw_cps):
locs_cp = locs[null_locs2cps]
new_cps = retrieve_7major_cp(locs_cp, raw_locs, raw_cps)
return new_cps
def retrieve_7major_cp(locs, raw_locs, raw_cps):
raw_cps = np.array(raw_cps).astype(int)
ret = KDTree(raw_locs)
new_cps = []
for i in range(len(locs)):
neighs = ret.query(locs[[i]], 7)[1].ravel()
c = Counter([raw_cps[nei] for nei in neighs])
        new_cps.append(c.most_common(1)[0][0])  # majority vote; works on py2 and py3
return new_cps
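# Sketch of the vote above: if the 7 nearest known points carry postal codes
# [28001, 28001, 28002, 28001, 8003, 28001, 28002], Counter gives
# {28001: 4, 28002: 2, 8003: 1} and 28001 wins.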
def create_locsandcp():
pass
|
tgquintela/Mscthesis
|
FirmsLocations/Preprocess/geo_filters.py
|
Python
|
mit
| 7,868
|
# -*- coding: utf-8 -*-
__author__ = "Ildar Bikmamatov"
__email__ = "vistoyn@gmail.com"
__copyright__ = "Copyright 2016"
__license__ = "MIT"
__version__ = "1.0.1"
from . import log
from .lib import *
from .error import *
from .colors import colorf
from .datelib import *
|
vistoyn/python-foruse
|
foruse/__init__.py
|
Python
|
mit
| 274
|
from __future__ import print_function
import sys
import subprocess
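# AutoInstall is an import hook: once appended to sys.meta_path (see the last
# line of this file), any top-level import that the standard finders cannot
# resolve triggers a 'sudo pip install' of that module name, after which the
# normal import machinery retries.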
class AutoInstall(object):
_loaded = set()
@classmethod
def find_module(cls, name, path, target=None):
if path is None and name not in cls._loaded:
cls._loaded.add(name)
print("Installing", name)
try:
out = subprocess.check_output(['sudo', sys.executable, '-m', 'pip', 'install', name])
print(out)
except Exception as e:
print("Failed" + e.message)
return None
sys.meta_path.append(AutoInstall)
|
Liuchang0812/slides
|
pycon2015cn/ex6_auto_install/autoinstall.py
|
Python
|
mit
| 590
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
|
qisanstudio/qstudio-launch
|
src/studio/launch/commands/config.py
|
Python
|
mit
| 69
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-27 15:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lots_admin', '0021_auto_20160927_0941'),
]
operations = [
migrations.AlterField(
model_name='address',
name='ward',
field=models.CharField(max_length=10, null=True),
),
]
|
datamade/large-lots
|
lots_admin/migrations/0022_auto_20160927_1051.py
|
Python
|
mit
| 462
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Allows access to the bot account's watchlist.
The watchlist can be updated manually by running this script.
Syntax:
python pwb.py watchlist [-all | -new]
Command line options:
-all - Reloads watchlists for all wikis where a watchlist is already
present
-new - Load watchlists for all wikis with accounts configured in
       user-config.py
"""
#
# (C) Daniel Herding, 2005
# (C) Pywikibot team, 2005-2016
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import os
import pywikibot
from pywikibot import config
from pywikibot.data.api import CachedRequest
from scripts.maintenance.cache import CacheEntry
def get(site=None):
"""Load the watchlist, fetching it if necessary."""
if site is None:
site = pywikibot.Site()
watchlist = [p.title() for p in site.watched_pages()]
return watchlist
def isWatched(pageName, site=None):
"""Check whether a page is being watched."""
watchlist = get(site)
return pageName in watchlist
def refresh(site, sysop=False):
"""Fetch the watchlist."""
pywikibot.output(u'Retrieving watchlist for %s via API.' % str(site))
return list(site.watched_pages(sysop=sysop, force=True))
def refresh_all(sysop=False):
"""Reload watchlists for all wikis where a watchlist is already present."""
cache_path = CachedRequest._get_cache_dir()
files = os.listdir(cache_path)
seen = []
for filename in files:
entry = CacheEntry(cache_path, filename)
entry._load_cache()
entry.parse_key()
entry._rebuild()
if entry.site not in seen:
if entry._data.get('watchlistraw'):
refresh(entry.site, sysop)
seen.append(entry.site)
def refresh_new(sysop=False):
"""Load watchlists of all wikis for accounts set in user-config.py."""
pywikibot.output(
'Downloading all watchlists for your accounts in user-config.py')
for family in config.usernames:
for lang in config.usernames[family]:
site = pywikibot.Site(lang, family)
refresh(site, sysop=sysop)
for family in config.sysopnames:
for lang in config.sysopnames[family]:
site = pywikibot.Site(lang, family)
refresh(site, sysop=sysop)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
all = False
new = False
sysop = False
for arg in pywikibot.handle_args(args):
if arg in ('-all', '-update'):
all = True
elif arg == '-new':
new = True
elif arg == '-sysop':
sysop = True
if all:
refresh_all(sysop=sysop)
elif new:
refresh_new(sysop=sysop)
else:
site = pywikibot.Site()
watchlist = refresh(site, sysop=sysop)
pywikibot.output(u'%i pages in the watchlist.' % len(watchlist))
for page in watchlist:
try:
pywikibot.stdout(page.title())
except pywikibot.InvalidTitle:
pywikibot.exception()
if __name__ == "__main__":
main()
|
h4ck3rm1k3/pywikibot-core
|
scripts/watchlist.py
|
Python
|
mit
| 3,349
|
"""Graphical user interface to Delta-Elektronika SM-700 Series
controllers."""
import sys
import pyhard2.driver as drv
import pyhard2.driver.virtual as virtual
import pyhard2.driver.deltaelektronika as delta
import pyhard2.ctrlr as ctrlr
def createController():
"""Initialize controller."""
config = ctrlr.Config("deltaelektronika", "SM-700")
if not config.nodes:
config.nodes, config.names = ([1], ["SM700"])
if config.virtual:
driver = virtual.VirtualInstrument()
iface = ctrlr.virtualInstrumentController(config, driver)
else:
driver = delta.Sm700Series(drv.Serial(config.port))
iface = ctrlr.Controller(config, driver)
iface.addCommand(driver.source.voltage, "Voltage", poll=True, log=True)
iface.addCommand(driver.source.current, "Current", poll=True, log=True)
iface.populate()
return iface
def main(argv):
"""Start controller."""
from PyQt4 import QtGui
app = QtGui.QApplication(argv)
app.lastWindowClosed.connect(app.quit)
iface = createController()
iface.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main(sys.argv)
|
Synss/pyhard2
|
pyhard2/ctrlr/deltaelektronika.py
|
Python
|
mit
| 1,159
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# See LICENSE coming with the source of python-quilt for details.
import os
from helpers import make_file
from unittest import TestCase
import quilt.refresh
from quilt.db import Db, Patch
from quilt.utils import TmpDirectory
class Test(TestCase):
def test_refresh(self):
with TmpDirectory() as dir:
old_dir = os.getcwd()
try:
os.chdir(dir.get_name())
db = Db(".pc")
db.create()
backup = os.path.join(".pc", "patch")
os.mkdir(backup)
make_file(b"", backup, "file")
db.add_patch(Patch("patch"))
db.save()
make_file(b"", "patch")
make_file(b"added\n", "file")
cmd = quilt.refresh.Refresh(".", ".pc", ".")
cmd.refresh()
with open("patch", "r") as patch:
self.assertTrue(patch.read(30))
finally:
os.chdir(old_dir)
|
bjoernricks/python-quilt
|
tests/test_refresh.py
|
Python
|
mit
| 1,125
|
# region Description
"""
nmap_scanner.py: Scan local network with NMAP
Author: Vladimir Ivanov
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
from raw_packet.Utils.base import Base
import xml.etree.ElementTree as ET
import subprocess as sub
from tempfile import gettempdir
from os.path import isfile, join
from os import remove
from typing import Union, List, Dict, NamedTuple
from collections import namedtuple
# endregion
# region Authorship information
__author__ = 'Vladimir Ivanov'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = 'Vladimir Ivanov'
__email__ = 'ivanov.vladimir.mail@gmail.com'
__status__ = 'Development'
# endregion
# region Main class - NmapScanner
class NmapScanner:
# region Variables
_base: Base = Base(admin_only=True, available_platforms=['Linux', 'Darwin', 'Windows'])
try:
Info = namedtuple(typename='Info', field_names='vendor, os, mac_address, ipv4_address, ports',
defaults=('', '', '', '', []))
except TypeError:
Info = namedtuple(typename='Info', field_names='vendor, os, mac_address, ipv4_address, ports')
# endregion
# region Init
def __init__(self, network_interface: str):
self._your: Dict[str, Union[None, str]] = \
self._base.get_interface_settings(interface_name=network_interface,
required_parameters=['mac-address', 'ipv4-address',
'first-ipv4-address', 'last-ipv4-address'])
self.local_network: str = \
self._your['first-ipv4-address'] + '-' + \
self._your['last-ipv4-address'].split('.')[3]
if self._base.get_platform().startswith('Darwin'):
self._nmap_scan_result: str = '/tmp/nmap_scan.xml'
else:
self._nmap_scan_result: str = join(gettempdir(), 'nmap_scan.xml')
# endregion
# region Find devices in local network with nmap
def scan(self,
exit_on_failure: bool = True,
quiet: bool = False) -> Union[None, List[NamedTuple]]:
try:
# region Variables
network_devices: List[NamedTuple] = list()
ipv4_address: str = ''
mac_address: str = ''
vendor: str = ''
os: str = ''
ports: List[int] = list()
# endregion
nmap_command: str = 'nmap ' + self.local_network + \
' --open -n -O --osscan-guess -T5 -oX ' + self._nmap_scan_result
if not quiet:
self._base.print_info('Start nmap scan: ', nmap_command)
if self._base.get_platform().startswith('Windows'):
nmap_process = sub.Popen(nmap_command, shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
else:
nmap_process = sub.Popen([nmap_command], shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
nmap_process.wait()
assert isfile(self._nmap_scan_result), \
'Not found nmap scan result file: ' + self._base.error_text(self._nmap_scan_result)
nmap_report = ET.parse(self._nmap_scan_result)
root_tree = nmap_report.getroot()
for element in root_tree:
try:
                    assert element.tag == 'host'
                    state = element.find('status').attrib['state']
                    assert state == 'up'
                    # reset per-host fields; without this every Info would
                    # share (and keep appending to) the same ports list
                    ipv4_address, mac_address, vendor, os = '', '', '', ''
                    ports = []
# region Address
for address in element.findall('address'):
if address.attrib['addrtype'] == 'ipv4':
ipv4_address = address.attrib['addr']
if address.attrib['addrtype'] == 'mac':
mac_address = address.attrib['addr'].lower()
try:
vendor = address.attrib['vendor']
except KeyError:
pass
# endregion
# region Open TCP ports
for ports_info in element.find('ports'):
if ports_info.tag == 'port':
                            ports.append(int(ports_info.attrib['portid']))  # portid is a string in the XML
# endregion
# region OS
for os_info in element.find('os'):
if os_info.tag == 'osmatch':
try:
os = os_info.attrib['name']
except TypeError:
pass
break
# endregion
network_devices.append(self.Info(vendor=vendor, os=os, mac_address=mac_address,
ipv4_address=ipv4_address, ports=ports))
except AssertionError:
pass
remove(self._nmap_scan_result)
assert len(network_devices) != 0, \
'Could not find any devices on interface: ' + self._base.error_text(self._your['network-interface'])
return network_devices
except OSError:
self._base.print_error('Something went wrong while trying to run ', 'nmap')
if exit_on_failure:
exit(2)
except KeyboardInterrupt:
self._base.print_info('Exit')
exit(0)
except AssertionError as Error:
self._base.print_error(Error.args[0])
if exit_on_failure:
exit(1)
return None
# endregion
# endregion
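# Illustrative usage (a sketch; assumes root privileges, an installed nmap
# binary, and that 'eth0' names a real interface on this machine):
#
#   scanner = NmapScanner(network_interface='eth0')
#   for device in scanner.scan(quiet=True) or []:
#       print(device.ipv4_address, device.mac_address, device.os, device.ports)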
|
Vladimir-Ivanov-Git/raw-packet
|
raw_packet/Scanners/nmap_scanner.py
|
Python
|
mit
| 5,743
|
from ipfs_connector import IPFSConnector, IPFSConfig
from nn_loader import NNListener, NNLoader
class ProcessorCallback:
pass
class Processor(NNListener):
def __init__(self, callback: ProcessorCallback, ipfs_config: IPFSConfig):
print("Connecting to IPFS server %s:%d..." % (ipfs_config.server, ipfs_config.port))
try:
self.ipfs_connector = IPFSConnector(ipfs_config)
        except Exception:
            raise IPFSError("Can't connect to IPFS server")
print("IPFS server connected successfully")
self.nn_loader = NNLoader()
def cognition_completed(self, results):
pass
def cognite_batch(self, arch: str, model: str, data: str) -> (str, int):
try:
print("Downloading architecture file %s" % arch)
self.ipfs_connector.download_file(arch)
        except Exception:
raise IPFSError("Architecture file not found")
try:
print("Downloading model file %s" % model)
self.ipfs_connector.download_file(model)
        except Exception:
raise IPFSError("Model file not found")
try:
print("Downloading data file %s" % data)
self.ipfs_connector.download_file(data)
        except Exception:
raise IPFSError("Data file not found")
print("Running model and data..")
self.nn_loader.load_and_run(arch, model, data, self)
return 'task0', 0
def get_time_estimate(self):
# TODO: Implement
return 0
class IPFSError (Exception):
def __init__(self, message: str):
self.message = message
class ModelInconsistencyError (Exception):
pass
class DataInconsistencyError (Exception):
pass
|
Neurochain/neurowrk
|
src/processor.py
|
Python
|
mit
| 1,697
|
from .manager import Manager
__version__ = '0.2.4'
|
chendx79/Python3HandlerSocket
|
pyhs/__init__.py
|
Python
|
mit
| 51
|
#This thread handles the operations of a single user
#and is connected to the matchmaking thread and to the database thread
#The list of operations is as follows:
#userType: 0 for normal, 1 for facebook
# ID | ARGUMENTS
# 0 --- User signup | userType(fb or normal),id,name,email,password
# 1 --- User login | userType,id,name,email,password
# 2 --- Change password | newPassword
# 3 --- Forgot password | email,name
# 4 --- Confirm password change code| email,name,code
# 5 --- Start game | -
#The separator in the messages can be a space and messages are terminated with \n
#so the final form of the messages is:
# 0 0 userType id name email password
# 1 1 userType id name email password
# 2 2 newPassword
# 3 3 email,name
# 4 4 email,name,code
# 5 5
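#Example exchange for a normal-user signup, matching the parsing code below
#(all values are illustrative):
# client -> server: "0 0 42 alice alice@example.com s3cret\n"
#                    (operation, userType, id, name, email, password)
# server -> client: "0\n" on success, "1\n" on failure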
import socket,Queue
from threading import *
PORT = 11337
#This function-thread listens on a port for connections
def listener(queueToDatabase,queueToMatchMaking,setupSocket):
#Configure server Socket
setupSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#Listen on all interfaces
setupSocket.bind(('0.0.0.0',PORT))
setupSocket.setblocking(True)
while True:
setupSocket.settimeout(None)
setupSocket.listen(1)
print 'LISTENING'
replySocket,address = setupSocket.accept()
#now create a new userThread
uThread = Thread(target=userThread,args=(replySocket,queueToDatabase,queueToMatchMaking))
uThread.start()
replySocket.send('0\n')
print 'Created new user thread'
print('Listener Thread ends now')
setupSocket.close()
#dbQueue is for communicating with database thread
#matchQueue is for communicating with matchmaking thread
def userThread(replySocket,dbQueue,matchQueue,userType = None,userId = None,name = None,email = None):
answerQueue = Queue.Queue()
replySocket.settimeout(None)
while True:
message = replySocket.recv(512)
#Connection shut down on other side
if len(message) == 0:
print 'CLIENT SOCKET SHUT DOWN'
break
print "MESSAGE IS " + message
args = message.split()
#After game message
if (len(args) == 1 and args[0] != '5'):
continue
#Now check operation type
if args[0] == '0':
userType = args[1]
userId = args[2]
name = args[3]
email = args[4]
password = args[5]
#Check user type
if userType == '0':#normal user
data = {'operation':0,'answer':answerQueue,'name':name,'email':email,'password':password}
elif userType == '1':#Facebook user
data = {'operation':1,'answer':answerQueue,'id':userId,'name':name,'email':email}
elif args[0] == '1':
userType = args[1]
userId = args[2]
name = None if args[3] == '0' else args[3]
email = None if args[4] == '0' else args[4]
password = args[5]
if userType == '0':#normal user
data = {'operation':2,'answer':answerQueue,'name':name,'email':email,'password':password}
elif userType == '1':#Facebook user
data = {'operation':3,'answer':answerQueue,'id':userId}
elif args[0] == '2':
password = args[1]
data = {'operation':6,'answer':answerQueue,'name':name,'email':email,'newPass':password}
elif args[0] == '3':
email = None if args[1] == '0' else args[1]
name = None if args[2] == '0' else args[2]
data = {'operation':7,'answer':answerQueue,'name':name,'email':email}
elif args[0] == '4':
email = None if args[1] == '0' else args[1]
name = None if args[2] == '0' else args[2]
code = int(args[3])
data = {'operation':8,'answer':answerQueue,'name':name,'email':email,'code':code}
elif args[0] == '5':
if userType == '0':
data = {'operation':9,'answer':answerQueue,'name':name,'email':email}
elif userType == '1':
data = {'operation':10,'answer':answerQueue,'id':userId}
#get user data
dbQueue.put(data)
playerToken = answerQueue.get()
playerToken['type'] = userType
playerToken['socket'] = replySocket
#now send to matchmaking thread
print 'Send data to %s' % name
replySocket.send('0\n')
matchQueue.put(playerToken)
print 'Send data to match making thread'
break
#now send data
dbQueue.put(data)
result = answerQueue.get()
print 'result of operation is %r' % result
if result:
replySocket.send('0\n')
else:
replySocket.send('1\n')
#Terminate thread
print 'User Thread out'
|
Shalantor/Connect4
|
server/userThread.py
|
Python
|
mit
| 5,045
|
# Schema
DB = "db"
Name = "name"
Tables = "tables"
Table = "table"
Columns = "columns"
Column = "column"
Attributes = "attributes"
Initials = "initials"
Initial = "initial"
InitialValue = "initialvalue"
Value = "value"
PrimaryKey = "primarykey"
|
eddiedb6/pdb
|
PDBConst.py
|
Python
|
mit
| 245
|
# -*- coding: utf-8 -*-
"""
oyPivotSwitcher.py by Erkan Ozgur Yilmaz (c) 2009
v10.5.17
Description :
-------------
A tool for easy animating of switching of pivots
Version History :
-----------------
v10.5.17
- modifications for Maya 2011 and PyMel 1.0.2
v9.12.25
- removed oyAxialCorrectionGroup script import
- moved to new versioning scheme
v1.0.1
- setup check: now the objects pivot attributes are checked for safe setup
v1.0.0
- initial working version
v1.0.0.preAlpha
- development version
TODO List :
-----------
----------------------------------------------------------------------------
"""
__version__ = "10.5.17"
import pymel.core as pm
from anima.dcc.mayaEnv import auxiliary
class PivotSwitcher(object):
"""A utility class to help dynamically switch pivot positions in maya"""
def __init__(self, _object):
# the object
self._object = auxiliary.get_valid_dag_node(_object)
assert isinstance(self._object, pm.nodetypes.Transform)
# the data
self._futurePivot = pm.nodetypes.Transform
self._isSetup = False
# read the settings
self._read_settings()
def _read_settings(self):
"""reads the settings from the objects pivotData attribute"""
# check if the object has pivotData attribute
if self._object.hasAttr("pivotData"):
# get the future pivot object
self._futurePivot = auxiliary.get_valid_dag_node(
pm.listConnections(self._object.attr("pivotData.futurePivot"))[0]
)
# set isSetup flag to True
self._isSetup = True
return True
return False
def _save_settings(self):
"""save settings inside objects pivotData attribute"""
# data to be save :
# -----------------
# futurePivot node
# create attributes
self._create_data_attribute()
# connect futurePivot node
pm.connectAttr(
"%s%s" % (self._futurePivot.name(), ".message"),
self._object.attr("pivotData.futurePivot"),
f=True,
)
def _create_data_attribute(self):
"""creates attribute in self._object to hold the data"""
if not self._object.hasAttr("pivotData"):
pm.addAttr(self._object, ln="pivotData", at="compound", nc=1)
if not self._object.hasAttr("futurePivot"):
pm.addAttr(self._object, ln="futurePivot", at="message", p="pivotData")
def _create_future_pivot(self):
"""creates the futurePivot locator"""
if self._isSetup:
return
# create a locator and move it to the current pivot
# parent the locator under the object
locator_name = self._object.name() + "_futurePivotLocator#"
self._futurePivot = auxiliary.get_valid_dag_node(
pm.spaceLocator(n=locator_name)
)
pm.parent(self._futurePivot, self._object)
current_pivot_pos = pm.xform(self._object, q=True, ws=True, piv=True)
pm.xform(self._futurePivot, ws=True, t=current_pivot_pos[0:3])
# change the color
self._futurePivot.setAttr("overrideEnabled", 1)
self._futurePivot.setAttr("overrideColor", 13)
# set translate and visibility to non-keyable
self._futurePivot.setAttr("tx", k=False, channelBox=True)
self._futurePivot.setAttr("ty", k=False, channelBox=True)
self._futurePivot.setAttr("tz", k=False, channelBox=True)
self._futurePivot.setAttr("v", k=False, channelBox=True)
# lock scale and rotate
self._futurePivot.setAttr("rx", lock=True, k=False, channelBox=False)
self._futurePivot.setAttr("ry", lock=True, k=False, channelBox=False)
self._futurePivot.setAttr("rz", lock=True, k=False, channelBox=False)
self._futurePivot.setAttr("sx", lock=True, k=False, channelBox=False)
self._futurePivot.setAttr("sy", lock=True, k=False, channelBox=False)
self._futurePivot.setAttr("sz", lock=True, k=False, channelBox=False)
# hide it
self._futurePivot.setAttr("v", 0)
def setup(self):
"""setups specified object for pivot switching"""
# if it is setup before, don't do anything
if self._isSetup:
return
if not self.is_good_for_setup():
pm.PopupError(
"the objects pivots are connected to something\n"
"THE OBJECT CANNOT BE SETUP!!!"
)
return
# create the parent constraint
self._create_future_pivot()
# create attributes for data holding
self._create_data_attribute()
# save the settings
self._save_settings()
self._isSetup = True
def toggle(self):
"""toggles pivot visibility"""
if not self._isSetup:
return
# toggle the pivot visibility
current_vis = self._futurePivot.getAttr("v")
current_vis = (current_vis + 1) % 2
self._futurePivot.setAttr("v", current_vis)
def switch(self):
"""switches the pivot to the futurePivot"""
if not self._isSetup:
return
# get the current frame
frame = pm.currentTime(q=True)
# get the current position of the object
current_object_pos = pm.xform(self._object, q=True, ws=True, t=True)
current_pivot_pos = pm.xform(self._object, q=True, ws=True, piv=True)
future_pivot_pos = pm.xform(self._futurePivot, q=True, ws=True, t=True)
displacement = (
future_pivot_pos[0] - current_pivot_pos[0],
future_pivot_pos[1] - current_pivot_pos[1],
future_pivot_pos[2] - current_pivot_pos[2],
)
# move the pivot to the future_pivot
pm.xform(self._object, ws=True, piv=future_pivot_pos[0:3])
# set keyframes
pm.setKeyframe(self._object, at="rotatePivotX", t=frame, ott="step")
pm.setKeyframe(self._object, at="rotatePivotY", t=frame, ott="step")
pm.setKeyframe(self._object, at="rotatePivotZ", t=frame, ott="step")
pm.setKeyframe(self._object, at="scalePivotX", t=frame, ott="step")
pm.setKeyframe(self._object, at="scalePivotY", t=frame, ott="step")
pm.setKeyframe(self._object, at="scalePivotZ", t=frame, ott="step")
# set pivot translations
self._object.setAttr("rotatePivotTranslate", -1 * displacement)
self._object.setAttr("scalePivotTranslate", -1 * displacement)
# set keyframes
pm.setKeyframe(self._object, at="rotatePivotTranslateX", t=frame, ott="step")
pm.setKeyframe(self._object, at="rotatePivotTranslateY", t=frame, ott="step")
pm.setKeyframe(self._object, at="rotatePivotTranslateZ", t=frame, ott="step")
pm.setKeyframe(self._object, at="scalePivotTranslateX", t=frame, ott="step")
pm.setKeyframe(self._object, at="scalePivotTranslateY", t=frame, ott="step")
pm.setKeyframe(self._object, at="scalePivotTranslateZ", t=frame, ott="step")
def _set_dg_dirty(self):
"""sets the DG to dirty for _object, currentPivot and futurePivot"""
pm.dgdirty(self._object, self._futurePivot)
def fix_jump(self):
"""fixes the jumps after editing the keyframes"""
pass
def is_good_for_setup(self):
"""checks if the objects rotatePivot, scalePivot, rotatePivotTranslate
and scalePivotTranslate is not connected to anything
"""
attributes = [
"rotatePivot",
"scalePivot",
"rotatePivotTranslate",
"scalePivotTranslate",
]
for attrStr in attributes:
connections = self._object.attr(attrStr).connections()
if len(connections) > 0:
return False
return True
def get_one_switcher():
"""returns a generator that generates a PivotSwitcher object for every
transform node in the selection
"""
for node in pm.ls(sl=True):
try:
node = auxiliary.get_valid_dag_node(node)
if node.type() == "transform":
my_pivot_switcher = PivotSwitcher(node)
yield my_pivot_switcher
except TypeError:
pass
def setup_pivot():
"""setups pivot switching for selected objects"""
for piv_switcher in get_one_switcher():
piv_switcher.setup()
def switch_pivot():
"""switches pivot for selected objects"""
for piv_switcher in get_one_switcher():
piv_switcher.switch()
def toggle_pivot():
"""toggles pivot visibilities for selected objects"""
for piv_switcher in get_one_switcher():
piv_switcher.toggle()
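# Illustrative session in Maya's script editor (a sketch; assumes a transform
# node is selected):
#
#   from anima.dcc.mayaEnv import pivot_switcher
#   pivot_switcher.setup_pivot()   # create and wire the futurePivot locator
#   pivot_switcher.toggle_pivot()  # show the locator, move it to the new pivot
#   pivot_switcher.switch_pivot()  # keyframe the pivot jump at the current frame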
|
eoyilmaz/anima
|
anima/dcc/mayaEnv/pivot_switcher.py
|
Python
|
mit
| 8,729
|
import subprocess
def convert_chinese(text):
    # feeding text via stdin avoids the shell-quoting bug of "echo '%s' | opencc"
    result = subprocess.run(["opencc", "-c", "hk2s.json"],
                            input=text, capture_output=True, text=True)
    return result.stdout.rstrip("\n")
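# Illustrative call (assumes the opencc binary and its hk2s.json profile are
# installed): convert_chinese('漢語') -> '汉语'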
|
josherich/mindynode-parsers
|
mindynode_nltk/utils/opencc.py
|
Python
|
mit
| 117
|
"""
events.py
Defines a simple event handler system similar to that used in C#. Events allow
multicast delegates and arbitrary message passing. They use weak references so
they don't keep their handlers alive if they are otherwise out of scope.
"""
import weakref
import maya.utils
from functools import partial, wraps
import inspect
class Event(object):
"""
Simple event handler, similar to the standard c# event pattern. The object
raising an event calls this event object as a callable; the object will in
turn fire any of the callables it stores in its Handlers list, passing the
args and kwargs provided by the original caller.
sample usage:
test = Event()
> def a ( *args, **kwargs ):
print "A", args, kwargs
> test += a;
> test( 'arg1', 'arg2', e="fred" )
A ('arg1', 'arg2') {'e': 'fred', 'event': <Event object at 0x00000000026892E8>}
    the handlers are stored as weakrefs, so they will not keep their referents alive if those referents exist
in no other scope. For example:
> x = Event()
> def test(*args, **kwargs):
> print "hello world"
> x += test
> x()
hello world
> test = None
> x()
    a hard reference to a handler can be stored on another object when binding to the event; this can be used
    when handlers are defined inside another function's scope. For example:
> x = Event()
> def test(*args, **kwargs):
> print 'hello world'
> class Stash(object):
> pass
> stash = Stash()
> x += test, stash
> del test
> x()
hello world
> del stash
> x()
Handlers must exhibit the *args, **kwargs signature. It's the handler's job
to decide what to do with them but they will be passed.
Events can be given 'metadata' - arguments that are passed in at creation time:
x = Event(name = 'test_event')
def test (*args, *kwargs):
print args, kwargs
x()
{'name': 'test_event', 'event': <Event object at 0x00000000026892E8>}
Metadata added when the Event is first created will be included in every
firing of the event. Arguments and keywords can also be associated with a
particular firing:
x = Event(name = 'test_event')
def test (*args, *kwargs):
print "args:", args
print "kwargs:", kwargs
x('hello')
args: hello
kwargs: {'name': 'test_event', 'event': <Event object at 0x00000000026892E8>}
x('world')
args: world
kwargs: {'name': 'test_event', 'event': <Event object at 0x00000000026892E8>}
"""
def __init__(self, **data):
self._handlers = set()
'''Set list of handlers callables. Use a set to avoid multiple calls on one handler'''
self.data = data
self.data['event'] = self
def _add_handler(self, handler):
"""
Add a handler callable. Raises a ValueError if the argument is not callable
"""
stash = None
if isinstance(handler, tuple):
handler, stash = handler
if not callable(handler):
raise ValueError("%s is not callable", handler)
if stash is not None:
setattr(stash, '_sh_{}'.format(id(handler)), handler)
self._handlers.add(get_weak_reference(handler))
return self
def _remove_handler(self, handler):
"""
Remove a handler. Ignores handlers that are not present.
"""
stash = None
if isinstance(handler, tuple):
handler, stash = handler
try:
delattr(stash, '_sh_{}'.format(id(handler)))
except AttributeError:
pass
wr = get_weak_reference(handler)
delenda = [h for h in self._handlers if h == wr]
self._handlers = self._handlers.difference(set(delenda))
return self
    def metadata(self, kwargs):
        """
        Returns the event's own metadata merged with the per-call kwargs.
        """
md = {}
md.update(self.data)
md.update(kwargs)
return md
def _fire(self, *args, **kwargs):
"""
Call all handlers. Any decayed references will be purged.
"""
delenda = []
for handler in self._handlers:
try:
handler(*args, **self.metadata(kwargs))
except DeadReferenceError:
delenda.append(handler)
self._handlers = self._handlers.difference(set(delenda))
def _handler_count(self):
"""
Returns the count of the _handlers field
"""
return len([i for i in self._handlers])
# hook up the instance methods to the base methods
# doing it this way allows you to override more neatly
# in derived classes
__call__ = _fire
__len__ = _handler_count
__iadd__ = _add_handler
__isub__ = _remove_handler
def __del__(self):
print 'event expired'
class MayaEvent(Event):
"""
Subclass of event that uses Maya.utils.executeDeferred.
"""
def _fire(self, *args, **kwargs):
"""
Call all handlers. Any decayed references will be purged.
"""
delenda = []
for handler in self._handlers:
try:
maya.utils.executeDeferred(partial(handler, *args, **self.metadata(kwargs)))
except DeadReferenceError:
delenda.append(handler)
self._handlers = self._handlers.difference(set(delenda))
__call__ = _fire
class DeadReferenceError(TypeError):
"""
Raised when a WeakMethodBound or WeakMethodFree tries to fire a method that
has been garbage collected. Used by Events to know when to drop dead
references
"""
pass
# # create weak references to both bound and unbound methods
# # hat tip to Frederic Jolliton on ActiveState
class WeakMethodBound(object):
"""
Encapsulates a weak reference to a bound method on an object. Has a
hashable ID so that Events can identify multiple references to the same
method and not duplicate them
"""
__slots__ = ('function', 'referent', 'ID', '_ref_name')
def __init__(self, f):
self.function = f.im_func
self.referent = weakref.ref(f.im_self)
self._ref_name = f.im_func.__name__
self.ID = id(f.im_self) ^ id(f.im_func.__name__)
def __call__(self, *args, **kwargs):
ref = self.referent()
if not ref is False and not ref is None:
return apply(self.function, (self.referent(),) + args, kwargs)
else:
raise DeadReferenceError("Reference to the bound method {0} no longer exists".format(self._ref_name))
def __eq__(self, other):
if not hasattr(other, 'ID'):
return False
return self.ID == other.ID
def __hash__(self):
return self.ID
class WeakMethodFree(object):
"""
Encapsulates a weak reference to an unbound method
"""
__slots__ = ('function', 'ID', '_ref_name')
def __init__(self, f):
self.function = weakref.ref(f)
self.ID = id(f)
self._ref_name = getattr(f, '__name__', "'unnamed'")
def __call__(self, *args, **kwargs):
if self.function():
return apply(self.function(), args, kwargs)
else:
raise DeadReferenceError("Reference to unbound method {0} no longer exists".format(self._ref_name))
def __eq__(self, other):
if not hasattr(other, 'ID'):
return False
return self.ID == other.ID
def __hash__(self):
return self.ID
def get_weak_reference(f):
"""
Returns a WeakMethodFree or a WeakMethodBound for the supplied function, as
appropriate
"""
try:
f.im_func
except AttributeError:
return WeakMethodFree(f)
return WeakMethodBound(f)
def event_handler(fn):
"""
decorator for making event handlers out of functions with no arguments
"""
if inspect.getargspec(fn).varargs and inspect.getargspec(fn).keywords:
return fn
@wraps(fn)
def wrapper(*_, **__):
return fn()
return wrapper
|
theodox/mGui
|
mGui/events.py
|
Python
|
mit
| 8,231
|
from django.conf.urls import url
from api import views
urlpatterns = [
url(r'stations/$', views.get_stations, name='api_stations'),
url(r'entry/(?P<station_id>\d+)/$', views.make_entry, name='api_entry'),
url(r'new/$', views.add_station, name='api_add_station'),
# Booking api
url(r'booking/(?P<resident_id>\d+)/$', views.booking, name='api_booking'),
url(r'book_profile/$', views.book_profile, name='api_book_profile'),
url(r'book_phone/$', views.book_phone, name='api_book_phone'),
url(r'book_code/$', views.book_code, name='api_book_code'),
# Insure api
url(r'insure/$', views.insure, name='api_insure'),
# Drugshare api
url(r'register_pharm/$', views.register_pharm, name='api_register_pharm'),
url(r'make_token/(?P<device_id>\d+)/$',
views.make_token, name='api_make_token'),
url(r'add_device/$', views.add_device, name='api_add_device'),
url(r'get_profile/$', views.get_profile, name='api_get_profile'),
url(r'update_pharm/(?P<device_id>\d+)/$',
views.update_pharm, name='api_update_pharm'),
url(r'add_outlet/(?P<device_id>\d+)/$',
views.add_outlet, name='api_add_outlet'),
url(r'delete_outlet/(?P<id>\d+)/$',
views.delete_outlet, name='api_delete_outlet'),
url(r'add_drug/$', views.add_drug, name='api_add_drug'),
url(r'edit_drug/(?P<id>\d+)/$', views.edit_drug, name='api_edit_drug'),
url(r'search_drug/(?P<device_id>\d+)/$',
views.search_drug, name='api_search_drug'),
url(r'wish_drug/(?P<device_id>\d+)/$',
views.wishlist_drug, name='api_wishlist_drug'),
url(r'stock_drug/(?P<device_id>\d+)/$',
views.stock_drug, name='api_stock_drug'),
url(r'remove_drug/(?P<id>\d+)/$',
views.remove_drug, name='api_remove_drug'),
url(r'recent_drugs/(?P<count>\d+)/$',
views.recent_drugs, name='api_recent_drugs'),
url(r'request_drug/(?P<drug_id>\d+)/$',
views.request_drug, name='api_request_drug'),
url(r'pending/(?P<device_id>\d+)/$',
views.pending_requests, name='api_pending_requests'),
url(r'accept/(?P<request_id>\d+)/$', views.accept, name='api_accept'),
url(r'reject/(?P<request_id>\d+)/$', views.reject, name='api_reject'),
url(r'drug_list/$', views.list_generic_drugs, name='api_drugs_list'),
url(r'feedback/(?P<id>\d+)/$', views.feedback, name='api_feedback'),
]
|
boyombo/django-stations
|
stations/api/urls.py
|
Python
|
mit
| 2,383
|
#-*- coding:Utf-8 -*-
from __future__ import print_function
"""
.. currentmodule:: pylayers.util.project
.. autosummary::
"""
import numpy as np
import os
import sys
import shutil
import pkgutil
import pdb
import seaborn as sns
import logging
class PyLayers(object):
""" Generic PyLayers Meta Class
"""
# sns.set_style("white")
def help(self,letter='az',typ='mt'):
""" generic help
Parameters
----------
txt : string
'mb' | 'mt'
mb :members
mt :methods
"""
members = [ x for x in self.__dict__.keys() if x not in dict.__dict__ ]
lmeth = [ x for x in np.sort(dir(self)) if x not in dict.__dict__]
if typ=='mb':
print(np.sort(self.__dict__.keys()))
if typ=='mt':
for s in lmeth:
if s not in members:
if s[0]!='_':
if len(letter)>1:
if (s[0]>=letter[0])&(s[0]<letter[1]):
try:
doc = eval('self.'+s+'.__doc__').split('\n')
print(s+': '+ doc[0])
except:
pass
else:
if (s[0]==letter[0]):
try:
doc = eval('self.'+s+'.__doc__').split('\n')
print(s+': '+ doc[0])
except:
pass
def _writedotpylayers(typ,path):
""" write .pylayers file
Parameters
----------
typ: string
source : update the path to the pylayers' source directory
project : update the path to the pylayers' project directory
path : string
path to typ
"""
home = os.path.expanduser('~')
# with open(os.path.join(home,'.pylayers'),'r') as f:
# lines = f.readlines()
with open(os.path.join(home,'.pylayers'),'a') as f:
f.write(typ+'\n')
f.write(path+'\n')
# replaceline=False
# for l in lines:
# if replaceline :
# f.write(path+"\n")
# replaceline=False
# elif typ in l:
# f.write(l)
# replaceline=True
# else:
# f.write(l)
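# Illustrative result (hypothetical paths): after
#   _writedotpylayers('source', '/home/user/pylayers')
#   _writedotpylayers('project', '/home/user/pylayers_project')
# ~/.pylayers holds four lines, read back below as lines[1] (source dir)
# and lines[3] (project basename):
#   source
#   /home/user/pylayers
#   project
#   /home/user/pylayers_project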
home = os.path.expanduser('~')
currentdir = os.getcwd()
#if .pylayers exists
if os.path.isfile(os.path.join(home,'.pylayers')):
with open(os.path.join(home,'.pylayers'),'r') as f:
lines = f.readlines()
#''.join... to remove the '\n' character
pylayersdir = ''.join(lines[1].splitlines())
basename = ''.join(lines[3].splitlines())
# BACKWARD COMPATIBILITY MODE (from now on .pylayers is created at each install)
else:
if os.getenv('PYLAYERS') != None:
pylayersdir = os.getenv('PYLAYERS')
_writedotpylayers('source',pylayersdir)
        print('PYLAYERS environment variable detected: ~/.pylayers updated')
else :
raise EnvironmentError('pylayers source path not found. Try to re-run setup.py')
if os.getenv('BASENAME') != None:
basename = os.getenv('BASENAME')
_writedotpylayers('project',basename)
        print('BASENAME environment variable detected: ~/.pylayers updated')
else :
        raise EnvironmentError('pylayers project path (BASENAME) not found. Try to re-run setup.py')
# =======
# # if os.path.isfile(os.path.join(home,'.pylayers')):
# # with open(os.path.join(home,'.pylayers'),'r') as f:
# # lines = f.readlines()
# # #[:-1] to remove the '\n' character
# # pylayersdir = lines[1][:-1]
# # basename = lines[3]
# # else :
# try:
# pylayersdir = os.environ['PYLAYERS']
# except:
# pylayersdir = currentdir.split('pylayers')[0] + 'pylayers'
# if pylayersdir[-1] == '/' or pylayersdir[-1] == '\\':
# pylayersdir = pylayersdir[:-1]
# if len(pylayersdir) == 1:
# raise EnvironmentError('Please verify that pylayers sources are into the "pylayers/" directory')
# try:
# basename = os.environ['BASENAME']
# except:
# raise EnvironmentError('Please position an environement variable $BASENAME where your pylayers project will be hosted')
# >>>>>>> master
try:
mesdir = os.environ['MESDIR']
except:
mesdir = os.path.join(basename ,'meas')
try:
datadir = os.environ['DATADIR']
except:
datadir = os.path.join(basename, 'meas')
try:
os.path.isdir(os.path.join(basename ,'figures'))
except:
os.mkdir(os.path.join(basename,'figures'))
# Dictionary which associates each PYLAYERS environment variable with a
# subdirectory of the project
#
pstruc = {}
pstruc['DIRSIMUL'] ='ini'
pstruc['DIRWRL'] =os.path.join('struc','wrl')
pstruc['DIRLAY'] =os.path.join('struc','lay')
pstruc['DIROSM'] =os.path.join('struc','osm')
pstruc['DIRFUR'] = os.path.join('struc','furnitures')
pstruc['DIRIMAGE'] = os.path.join('struc','images')
pstruc['DIRPICKLE'] = os.path.join('struc','gpickle')
pstruc['DIRRES'] = os.path.join('struc','res')
pstruc['DIRSTR'] = os.path.join('struc','str')
pstruc['DIRSLAB'] = 'ini'
pstruc['DIRSLAB2'] = 'ini'
pstruc['DIRMAT'] = 'ini'
pstruc['DIRMAT2'] = 'ini'
pstruc['DIRANT'] = 'ant'
pstruc['DIRTRA'] = 'output'
pstruc['DIRLCH'] = 'output'
pstruc['DIRTUD'] = 'output'
pstruc['DIRTx'] = os.path.join('output','Tx001')
pstruc['DIRGEOM'] = 'geom'
pstruc['DIRTRA'] = 'output'
pstruc['DIRCIR'] = 'output'
pstruc['DIRMES'] = 'meas'
pstruc['DIRNETSAVE'] = 'netsave'
# pstruc['DIRSIG'] = os.path.join('output','sig')
pstruc['DIRR2D'] = os.path.join('output','r2d')
pstruc['DIRR3D'] = os.path.join('output','r3d')
pstruc['DIRCT'] = os.path.join('output','Ct')
pstruc['DIRH'] = os.path.join('output','H')
pstruc['DIRLNK'] = 'output'
pstruc['DIRBODY'] = 'body'
pstruc['DIRGIS'] = 'gis'
pstruc['DIRC3D'] = os.path.join('body','c3d')
pstruc['DIROOSM'] = os.path.join('gis','osm')
pstruc['DIRWEAR'] = os.path.join('body','wear')
# if basename directory does not exit it is created
try:
os.chdir(basename)
except:
print("Create directory " + basename)
os.mkdir(basename)
#
# write file project.conf
#
fd = open(os.path.join(basename,'project.conf'),'w')
fd.close()
#for nm in pstruc.keys():
for nm,nv in pstruc.items():
dirname = os.path.join(basename , pstruc[nm])
if not 'win' in sys.platform:
spl = nv.split('/') # never again a variable called sp
else:
spl = nv.split('\\') # never again a variable called sp
if len(spl)>1:
if not os.path.isdir(os.path.join(basename ,spl[0])):
os.mkdir(os.path.join(basename ,spl[0]))
os.mkdir(os.path.join(basename,nv))
print("create ",os.path.join(basename ,nv))
else:
if not os.path.isdir(os.path.join(basename ,nv)):
os.mkdir(os.path.join(basename ,nv))
print("create ",os.path.join(basename ,nv))
else :
if not os.path.isdir(dirname):
try:
os.mkdir(dirname)
except:
                # dictionary is not necessarily ordered!
# parent directory may not be created
dirtmp= os.path.dirname(dirname)
os.mkdir(dirtmp)
os.mkdir(dirname)
print("create ",dirname)
# try:
# os.chdir(dirname)
# os.chdir('..')
# except:
# pdb.set_trace()
# sp = nv.split('/')
# if len(sp)>1:
# try:
# os.chdir(basename + '/'+sp[0])
# os.chdir('..')
# except:
# os.mkdir(basename + '/'+sp[0])
# os.chdir(basename + '/'+sp[0])
# os.mkdir(basename + '/'+sp[1])
# os.chdir('..')
# else:
# print "create "+ dirname
# os.mkdir(dirname)
# os.chdir('..')
if nm == 'DIRANT':
antdir = dirname
if nm == 'DIRFUR':
furdir = dirname
if nm == 'DIRGEOM':
geomdir = dirname
if nm == 'DIRLCH':
lchdir = dirname
if nm == 'DIRTUD':
tuddir = dirname
if nm == 'DIRSLAB':
slabdir = dirname
if nm == 'DIRMA':
matdir = dirname
if nm == 'DIRTRA':
tradir = dirname
if nm == 'DIROOSM':
osmdir = dirname
fd = open(os.path.join(basename,'project.conf'),'a')
fd.write(nm+' '+dirname +'\n')
fd.close()
#
# copy files from /data/ini in project directory
#
# IF new file type is added :
# 1 - add the directory path to pstruc['DIRFILETYPE'] = os.path.join('path','to','filetype')
# 2 - add the directory path to dirlist (just below)
if basename != os.path.join(pylayersdir,'data'):
if not 'win' in sys.platform:
dirlist=['ini','struc','struc/furnitures'
,'struc/osm','struc/wrl','struc/res','struc/str'
,'struc/images','struc/lay'
,'ant','output/Tx001','output'
,'geom','output/r2d'
,'output/r3d','body','body/c3d','body/wear']
else :
dirlist=['ini',os.path.join('struc','furnitures')
,os.path.join('struc','osm')
,os.path.join('struc','wrl')
,os.path.join('struc','res')
,os.path.join('struc','str')
,os.path.join('struc','images')
,os.path.join('struc','lay')
,'ant',os.path.join('output','Tx001'),'output'
,'geom'
,os.path.join('output','r2d')
,os.path.join('output','r3d'),'body'
,os.path.join('body','c3d')
,os.path.join('body','wear')]
for dl in dirlist:
filelist = os.listdir(os.path.join(pylayersdir,'data', dl))
for fi in filelist:
if not os.path.isdir(os.path.join(basename,dl,fi)):
if os.path.isfile(os.path.join(basename,dl,fi)): # file already exists
pass
else:
print(dl,fi)
try:
shutil.copy(
os.path.join(pylayersdir,'data',dl,fi),
os.path.join(basename,dl,fi))
except:
pdb.set_trace()
##
os.chdir(currentdir)
## set seaborn style
sns.set_style("white")
LOG_FORMAT = '%(asctime)s ; %(name)s ; %(levelname)s ; %(message)s'
logging.basicConfig(filename = basename+"/PyLayers.log",
level = logging.DEBUG,
format = LOG_FORMAT,
filemode = 'w')
logger = logging.getLogger(__name__)
logger.setLevel(0)
logger.info('INFO')
logger.debug('DEBUG')
logger.error('ERROR')
logger.critical('CRITICAL')
logger.warning('WARNING')
|
pylayers/pylayers
|
pylayers/util/project.py
|
Python
|
mit
| 10,692
|
import flask
import json
import bson
import os
from flask import request, redirect
import sys
from fontana import twitter
import pymongo
DEFAULT_PORT = 2014
DB = 'fontana'
connection = pymongo.Connection("localhost", 27017)
db = connection[DB]
latest_headers = {}
MODERATED_SIZE = 40
class MongoEncoder(json.JSONEncoder):
def default(self, obj, **kwargs):
if isinstance(obj, bson.ObjectId):
return str(obj)
else:
return json.JSONEncoder.default(obj, **kwargs)
app = flask.Flask('fontana')
def twitter_authorisation_begin():
"""
Step 1 and 2 of the Twitter oAuth flow.
"""
callback = absolute_url('twitter_signin')
if 'next' in flask.request.args:
callback = '%s?next=%s' % (callback, flask.request.args['next'])
try:
token = twitter.request_token(app.config, callback)
flask.session['twitter_oauth_token'] = token['oauth_token']
flask.session['twitter_oauth_token_secret'] = token['oauth_token_secret']
return flask.redirect(twitter.authenticate_url(token, callback))
except twitter.TwitterException, e:
return flask.abort(403, str(e))
def twitter_authorisation_done():
"""
Step 3 of the Twitter oAuth flow.
"""
if 'oauth_token' in flask.request.args:
token = flask.request.args
if flask.session['twitter_oauth_token'] != token['oauth_token']:
return flask.abort(403, 'oauth_token mismatch!')
auth = twitter.access_token(app.config, token)
flask.session['twitter_oauth_token'] = auth['oauth_token']
flask.session['twitter_oauth_token_secret'] = auth['oauth_token_secret']
flask.session['twitter_user_id'] = auth['user_id']
flask.session['twitter_screen_name'] = auth['screen_name']
if 'next' in flask.request.args:
return flask.redirect(flask.request.args['next'])
else:
return 'OK'
elif 'denied' in flask.request.args:
return flask.abort(403, 'oauth denied')
else:
return flask.abort(403, 'unknown sign in failure')
@app.route('/api/twitter/session/new/')
def twitter_signin():
"""
Handles the Twitter oAuth flow.
"""
args = flask.request.args
if not args or (len(args) == 1 and 'next' in args):
return twitter_authorisation_begin()
else:
return twitter_authorisation_done()
@app.route('/api/twitter/session/')
def twitter_session():
"""
Check for an active Twitter session. Returns a JSON response with the
    active screen name or a 403 if there is no active session.
"""
if not flask.session.get('twitter_user_id'):
return flask.abort(403, 'no active session')
return (json.dumps({
'screen_name': flask.session['twitter_screen_name']
}), 200, {'content-type': 'application/json'})
@app.route('/api/twitter/search/')
def twitter_search():
"""
Perform a Twitter search
"""
global latest_headers
if not flask.session.get('twitter_user_id'):
return flask.abort(403, 'no active session')
token = {
'oauth_token': flask.session['twitter_oauth_token'],
'oauth_token_secret': flask.session['twitter_oauth_token_secret']
}
# Find out last id
last = db['tweets'].aggregate( { '$group': { '_id':"", 'last': { '$max': "$id" } } } )
since_id = long(flask.request.args.get('since_id'))
params = dict(flask.request.args)
if last.get("ok") == 1 and last['result']:
last = long(last['result'][0]['last'])
params['since_id'] = max(last, since_id)
# Query twitter and cache result into DB
(text, status_code, headers) = twitter.search(app.config, token, params)
data = json.loads(text)
for s in data['statuses']:
s['exclude'] = s['text'].startswith('RT ')
s['classes'] = []
if s['text'].startswith('RT '):
s['classes'].append('RT')
if '?' in s['text']:
s['classes'].append('question')
# Use tweet id as _id so that save will replace existing tweets if necessary
s['_id'] = s['id']
db['tweets'].save(s)
latest_headers = dict(headers)
return (text, status_code, headers)
@app.route('/moderated')
def twitter_moderated():
"""
Return moderated posts
"""
return (json.dumps({ 'statuses': [ s for s in db['tweets'].find({ 'exclude': False }).sort([('id', -1)]).limit(MODERATED_SIZE) ]},
indent=None if request.is_xhr else 2,
cls=MongoEncoder),
200,
{'content-type': 'application/json'})
@app.route('/all')
def twitter_all():
"""
Return all cached posts
"""
since_id = long(request.values.get('since_id', 0))
return (json.dumps({ 'statuses': [ s for s in db['tweets'].find({ 'id': { '$gt': since_id } }).sort([ ('id', -1) ]) ]},
indent=None if request.is_xhr else 2,
cls=MongoEncoder),
200,
latest_headers)
@app.route('/exclude/<path:ident>')
def exclude(ident):
"""Exclude given post.
"""
db['tweets'].update( { 'id_str': ident },
{ '$set': { 'exclude': True } })
return redirect('/admin.html')
@app.route('/set_moderated/<int:length>')
def set_moderated_length(length):
"""Set moderated queue length
"""
global MODERATED_SIZE
if length > 2 and length < 100:
MODERATED_SIZE = length
return redirect('/admin.html')
@app.route('/include/<path:ident>')
def include(ident):
"""Include given post.
"""
db['tweets'].update( { 'id_str': ident },
{ '$set': { 'exclude': False } })
return redirect('/admin.html')
@app.route('/api/session/clear/', methods=['POST'])
def signout():
"""
Perform a sign out, clears the user's session.
"""
flask.session.clear()
return 'OK'
def absolute_url(name):
"""
Flask's url_for with added SERVER_NAME
"""
host = app.config['SERVER_NAME'] or ('localhost:' + str(DEFAULT_PORT))
url = flask.url_for(name)
return 'http://%s%s' % (host, url)
def devserver(extra_conf=None):
"""
Start a development server
"""
from werkzeug.wsgi import SharedDataMiddleware
# Load the "example" conf
root = app.root_path.split(os.path.dirname(__file__))[0]
conf = os.path.join(root, 'backend', 'var', 'conf', 'fontana-example.conf')
app.config.from_pyfile(conf)
if extra_conf:
app.config.from_pyfile(os.path.join(root, extra_conf))
# Serve the frontend files
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
'/': app.config['STATIC_DIR']
})
# Setup a index.html redirect for convenience sake.
app.route('/')(lambda: flask.redirect('index.html'))
# Run the development or production server
if app.config.get('PROD'):
app.run(debug=False, host='0.0.0.0', port=DEFAULT_PORT)
else:
app.run()
if __name__ == "__main__":
# This will get invoked when you run `python backend/src/fontana.py`
if len(sys.argv) == 2:
devserver(sys.argv[1])
else:
devserver()
|
oaubert/TwitterFontana
|
backend/src/app.py
|
Python
|
mit
| 7,176
|
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
#from utils.dataset import Radars
from utils.music import Songs
from model.D import Discriminator
from model.G import Generator
import numpy as np
from scipy import misc
import time
parser = argparse.ArgumentParser(description='train pix2pix model')
parser.add_argument('--batchSize', type=int, default=1, help='with batchSize=1 equivalent to instance normalization.')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--niter', type=int, default=200, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--outf', default='checkpoints/', help='folder to output images and model checkpoints')
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--dataPath', default='/ldata/radar_20d_2000/', help='path to training images')
parser.add_argument('--loadSize', type=int, default=286, help='scale image to this size')
parser.add_argument('--fineSize', type=int, default=256, help='random crop image to this size')
parser.add_argument('--flip', type=int, default=1, help='1 for flipping image randomly, 0 for not')
parser.add_argument('--input_nc', type=int, default=3, help='channel number of input image')
parser.add_argument('--output_nc', type=int, default=3, help='channel number of output image')
parser.add_argument('--lamb', type=int, default=100, help='weight on L1 term in objective')
opt = parser.parse_args()
print(opt)
try:
os.makedirs(opt.outf)
except OSError:
pass
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.benchmark = True
########### DATASET ###########
#facades = Facades(opt.dataPath,opt.loadSize,opt.fineSize,opt.flip)
#dataset = Radars(dataPath=opt.dataPath,length=200)
dataset = Songs(dataPath=opt.dataPath,length=3000)
train_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=2)
########### MODEL ###########
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
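# The DCGAN-style init above assumes every matched Conv layer carries a
# bias term; a layer built with bias=False would raise an AttributeError.
# A defensive variant (a sketch, not used by this script) guards for that:
def weights_init_safe(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
        if m.bias is not None:  # skip bias-free conv layers
            m.bias.data.fill_(0)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)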
ndf = opt.ndf
ngf = opt.ngf
nc = 3
netD = Discriminator(opt.input_nc,opt.output_nc,ndf)
netG = Generator(opt.input_nc, opt.output_nc, opt.ngf)
if opt.cuda:
    netD.cuda()
    netG.cuda()
netG.apply(weights_init)
netD.apply(weights_init)
print(netD)
print(netG)
########### LOSS & OPTIMIZER ##########
criterion = nn.BCELoss()
criterionL1 = nn.L1Loss()
optimizerD = torch.optim.Adam(netD.parameters(),lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = torch.optim.Adam(netG.parameters(),lr=opt.lr, betas=(opt.beta1, 0.999))
########### GLOBAL VARIABLES ###########
input_nc = opt.input_nc
output_nc = opt.output_nc
fineSize = opt.fineSize
real_A = torch.FloatTensor(opt.batchSize, input_nc, fineSize, fineSize)
real_B = torch.FloatTensor(opt.batchSize, input_nc, fineSize, fineSize)
label = torch.FloatTensor(opt.batchSize)
real_A = Variable(real_A)
real_B = Variable(real_B)
label = Variable(label)
if opt.cuda:
    real_A = real_A.cuda()
    real_B = real_B.cuda()
    label = label.cuda()
real_label = 1
fake_label = 0
########### Training ###########
netD.train()
netG.train()
for epoch in range(1,opt.niter+1):
nowtime = time.time()
for i, image in enumerate(train_loader):
########### fDx ###########
netD.zero_grad()
        imgA = image[0]  # input image A (source)
        imgB = image[1]  # target image B
# train with real data
real_A.data.copy_(imgA)
real_B.data.copy_(imgB)
real_AB = torch.cat((real_A, real_B), 1)
output = netD(real_AB)
label.data.resize_(output.size())
label.data.fill_(real_label)
errD_real = criterion(output, label)
errD_real.backward()
# train with fake
fake_B = netG(real_A)
label.data.fill_(fake_label)
fake_AB = torch.cat((real_A, fake_B), 1)
output = netD(fake_AB.detach())
errD_fake = criterion(output,label)
errD_fake.backward()
errD = (errD_fake + errD_real)/2
optimizerD.step()
########### fGx ###########
netG.zero_grad()
label.data.fill_(real_label)
output = netD(fake_AB)
errGAN = criterion(output, label)
errL1 = criterionL1(fake_B,real_B)
errG = errGAN + opt.lamb*errL1
errG.backward()
optimizerG.step()
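        # In equation form, the generator objective just computed is the
        # standard pix2pix loss:
        #     L_G = BCE(D(A, G(A)), 1) + lambda * || G(A) - B ||_1
        # with lambda = opt.lamb (default 100), trading adversarial
        # realism against per-pixel fidelity to the target B.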
########### Logging ##########
        if i % 50 == 0:
            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f Loss_L1: %.4f'
                  % (epoch, opt.niter, i, len(train_loader),
                     errD.data[0], errGAN.data[0], errL1.data[0]))
            print('Time: %.4f' % (time.time() - nowtime))
########## Visualize #########
    if epoch % 1 == 0:  # true every epoch; the modulus is kept so the interval is easy to change
        f_B = fake_B.cpu().data.numpy()
        for n, pic in enumerate(f_B[0]):
            misc.imsave('%s/%d_%d.png' % (opt.outf, epoch, n), pic)
    if epoch % 10 == 0:
        print('save model:', epoch)
        torch.save(netG.state_dict(), '%s/netG.pth' % (opt.outf))
        torch.save(netD.state_dict(), '%s/netD.pth' % (opt.outf))
# Save the final weights once more after training completes.
torch.save(netG.state_dict(), '%s/netG.pth' % (opt.outf))
torch.save(netD.state_dict(), '%s/netD.pth' % (opt.outf))
|
xi-studio/anime
|
music/train.py
|
Python
|
mit
| 6,297
|
import os
def get_template_path(path):
file_path = os.path.join(os.getcwd(), path)
if not os.path.isfile(file_path):
        raise Exception("This is not a valid template path: %s" % file_path)
return file_path
def get_template(path):
file_path = get_template_path(path)
    with open(file_path) as f:  # close the file handle instead of leaking it
        return f.read()
def render_context(template_string, context):
return template_string.format(**context)
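# A quick illustration of render_context (hypothetical values):
#
#     render_context("Hi {name}, total: {total}", {"name": "Ada", "total": 3})
#     # -> 'Hi Ada, total: 3'
#
# Note that str.format raises KeyError if the template references a key
# missing from the context dict.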
file_ = 'templates/email_message.txt'
file_html = 'templates/email_message.html'
template = get_template(file_)
template_html = get_template(file_html)
context = {
"name": "Niraj",
"date": None,
"total": None
}
print(render_context(template, context))
print(render_context(template_html, context))
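# The template files themselves are not part of this snippet; a minimal
# templates/email_message.txt compatible with the context above might
# look like this (an assumption for illustration only):
#
#     Hello {name},
#     Your order from {date} comes to a total of {total}.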
|
nirajkvinit/python3-study
|
30days/day13/templates.py
|
Python
|
mit
| 691
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0011_auto_20151207_0017'),
('roster', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('classroom', models.ForeignKey(to='core.Classroom')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['user__last_name', 'user__first_name'],
},
bases=(models.Model,),
),
migrations.AlterField(
model_name='assignmentgrade',
name='student',
field=models.ForeignKey(to='roster.Student'),
preserve_default=True,
),
]
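# Applying this migration follows the standard Django workflow (nothing
# project-specific is assumed here):
#
#     python manage.py migrate roster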
|
dulrich15/spot
|
apps/roster/migrations/0002_auto_20151207_0017.py
|
Python
|
mit
| 1,095
|