content stringlengths 5 1.05M |
|---|
import os
import sys
import argparse
from roblib import read_fasta
from random import randint, shuffle
__author__ = 'Rob Edwards'
# Build the CLI. Either -f (convert an existing fasta file) or -n (generate
# artificial sequences) must be supplied; -q names the fastq output file.
parser = argparse.ArgumentParser(description='Convert a fasta file to fastq, faking the qual scores. Either -f or -n is required')
parser.add_argument('-f', help='fasta file')
parser.add_argument('-n', help='number of sequences', type=int)
parser.add_argument('-q', help='fastq output file', required=True)
parser.add_argument('-s', help='quality score. Default = 40', default=40, type=int)
parser.add_argument('-l', help='length of sequences for artificial sequences. Default = 150', default=150, type=int)
parser.add_argument('-r', help='random quality scores between 5 and 40', action='store_true')
args = parser.parse_args()

if not args.f and not args.n:
    # Bug fix: exit non-zero so shell pipelines detect the failure
    # (the original exited 0, which signals success).
    sys.stderr.write("FATAL: Either -f or -n is required. Please provide one or the other\n")
    sys.exit(1)

# FASTQ quality strings use the Phred+33 encoding: score q -> chr(q + 33).
# The original wrote chr(args.s), which encodes a score 33 lower than asked.
c = chr(args.s + 33)


def random_qual(length):
    """Return a random Phred+33 quality string with scores between 5 and 40.

    The original used randint(33, 125), which produces scores 0-92 -- far
    outside the valid Phred range and inconsistent with the -r help text.
    """
    return ''.join(chr(randint(5, 40) + 33) for _ in range(length))


if args.f:
    # Convert each fasta record, faking a constant (or random) quality line.
    fa = read_fasta(args.f)
    with open(args.q, 'w') as out:
        for seqid in fa:
            seqlen = len(fa[seqid])
            q = random_qual(seqlen) if args.r else seqlen * c
            out.write("@{}\n{}\n+\n{}\n".format(seqid, fa[seqid], q))
    sys.exit(0)

if args.n:
    # Generate args.n artificial reads of length args.l with random bases.
    bases = ['A', 'T', 'G', 'C']
    with open(args.q, 'w') as out:
        for i in range(args.n):
            seq = ''
            for _ in range(args.l):
                # shuffle-and-take keeps the original uniform base selection
                shuffle(bases)
                seq += bases[0]
            qual = random_qual(args.l)
            out.write("@FakeSeq{}\n{}\n+\n{}\n".format(i, seq, qual))
|
from unittest import mock
import pytest
from behave_django.runner import SimpleTestRunner
from behave.runner import Context, Runner
from django.test.testcases import TestCase
from .util import DjangoSetupMixin
class TestSimpleTestCase(DjangoSetupMixin):
    """Verify that --simple selects SimpleTestRunner and a plain TestCase."""

    @mock.patch('behave_django.management.commands.behave.behave_main', return_value=0)  # noqa
    @mock.patch('behave_django.management.commands.behave.SimpleTestRunner')  # noqa
    def test_use_simple_test_runner(self,
                                    mock_simple_test_runner,
                                    mock_behave_main):
        """The management command wires up the simple runner and behave."""
        self.run_management_command('behave', simple=True)
        mock_behave_main.assert_called_once_with(args=[])
        mock_simple_test_runner.assert_called_once_with()

    def test_simple_test_runner_uses_simple_testcase(self):
        """setup_testclass attaches a Django TestCase to the context."""
        ctx = Context(mock.MagicMock())
        SimpleTestRunner().setup_testclass(ctx)
        assert isinstance(ctx.test, TestCase)

    def test_simple_testcase_fails_when_accessing_base_url(self):
        """base_url must be unavailable under the simple test case."""
        behave_runner = Runner(mock.MagicMock())
        behave_runner.context = Context(behave_runner)
        SimpleTestRunner().patch_context(behave_runner.context)
        SimpleTestRunner().setup_testclass(behave_runner.context)
        with pytest.raises(RuntimeError):
            assert behave_runner.context.base_url == 'should raise an exception!'

    def test_simple_testcase_fails_when_calling_get_url(self):
        """get_url must be unavailable under the simple test case."""
        behave_runner = Runner(mock.MagicMock())
        behave_runner.context = Context(behave_runner)
        SimpleTestRunner().patch_context(behave_runner.context)
        SimpleTestRunner().setup_testclass(behave_runner.context)
        with pytest.raises(RuntimeError):
            behave_runner.context.get_url()
|
from countdown.parser.Parser import Parser
def test_lex_duration_string():
    """_lex_duration_string keeps number+unit tokens and drops bare words."""
    raw = 'ashdf 40m afsda 60hours asdf 5 second 20 hour 5day 2minute'
    parser_under_test = Parser({
        'duration': object(),
        'printer': object(),
        'alarm': object()
    })
    expected = ['40m', '60hours', '5 second', '20 hour', '2minute']
    assert parser_under_test._lex_duration_string(raw) == expected
|
import pandas as pd
import os
#import sed_eval
import gzip
import glob
import pickle
import librosa
import soundfile as sf
import numpy as np
import random
import math
from scipy.signal import hanning
from sklearn.preprocessing import StandardScaler
class DataGenerator():
    """Generates data (audio slices, spectrograms, mel-spectrograms and
    label matrices) for the experiments."""
    def __init__(self, list_IDs, labels, label_list=[],
                 sequence_time=1.0, sequence_hop_time=0.5,frames=False,
                 audio_hop=882, audio_win=1764,n_fft=2048,sr=44100,mel_bands=128,
                 normalize='none',get_annotations=True,dataset='MAVD',normalize_energy=True,convert_to_dB=True):
        """ Initialize the DataGenerator
        Parameters
        ----------
        list_IDs : list
            List of file IDs (i.e wav file names)
        labels : list
            List of file annotations (i.e txt file names)
        label_list : list (list of lists for MAVD)
            List of classes of interest
            If dataset == MAVD, each list is related to each level of classification
        sequence_time : float
            Time in seconds of each network input (i.e 1 second length mel-spectrogram)
        sequence_hop_time : float
            Time in seconds of the sequence hop
        frames : bool
            If True the audio signal is returned in a matrix frames
            (only useful for end-to-end networks)
        audio_win : int
            Number of samples of the analysis window for the STFT calculation
        audio_hop : int
            Number of samples of the hop for the STFT calculation
        n_fft : int
            Number of samples to calculate the FFT
        sr : int
            Sampling rate. If this value is different than the audio files,
            the signals are resampled (not recommended).
        mel_bands : int
            Number of Mel bands.
        normalize : string
            'standard' to use standard normalization (sklearn)
            'minmax' to use minmax normalization
            'none' to not normalize features
        get_annotations : bool
            If True, the annotations are returned
        dataset : 'string'
            Select the dataset 'URBAN-SED' or 'MAVD'
        """
        # NOTE(review): label_list=[] is a mutable default argument; it is
        # only read here, but sharing one list across instances is fragile.
        self.labels = labels
        self.list_IDs = list_IDs
        self.sr = sr
        self.n_fft = n_fft
        self.mel_bands = mel_bands
        self.audio_hop = audio_hop
        self.audio_win = audio_win
        # NOTE(review): positional librosa.filters.mel arguments became
        # keyword-only in librosa 0.10 -- confirm the pinned version.
        self.mel_basis = librosa.filters.mel(sr,n_fft,mel_bands,htk=True)
        self.mel_basis = self.mel_basis**2 #For end-to-end networks
        # Sequence lengths/hops expressed both in STFT frames and in samples.
        self.sequence_frames = int(sequence_time * sr / float(audio_hop))
        self.sequence_hop = int(sequence_hop_time * sr / float(audio_hop))
        self.sequence_samples = int(sequence_time * sr)
        self.sequence_hop_samples = int(sequence_hop_time * sr)
        self.hop_time = audio_hop / float(sr)
        self.label_list = label_list
        self.normalize = normalize
        self.sequence_hop_time = sequence_hop_time
        self.frames = frames
        self.get_annotations = get_annotations
        self.norm_scaler = np.zeros(self.mel_bands)
        self.dataset = dataset #URBAN-SED or MAVD
        self.normalize_energy = normalize_energy
        self.convert_to_dB = convert_to_dB

    def __data_generation(self, list_IDs_temp):
        """ This function generates data with the files in list_IDs_temp
        Parameters
        ----------
        list_IDs_temp : list
            List of file IDs.
        Return
        ----------
        X : array
            Audio signals (only for end-to-end networks)
        S : array
            Spectrograms
        mel : array
            Mel-spectrograms
        yt : array
            Annotations as categorical matrix
        """
        X = []
        # yt is a dict of lists for MAVD (one entry per taxonomy level),
        # a flat list for URBAN-SED and UrbanSound8k.
        if self.dataset == 'MAVD':
            yt = {}
            for index_list,l_list in enumerate(self.label_list):
                yt[index_list] = []
        if self.dataset == 'URBAN-SED':
            yt = []
        if self.dataset == 'UrbanSound8k':
            yt = []
        mel = []
        id_t = []
        S = []
        # NOTE(review): scipy.signal.hanning was removed in SciPy 1.13;
        # newer SciPy needs scipy.signal.windows.hann instead.
        window = hanning(self.audio_win)
        N_IDs = len(list_IDs_temp)
        N_checkpoints = 10
        for i, ID in enumerate(list_IDs_temp):
            # Coarse progress report, roughly every 10% of the files.
            if N_IDs > N_checkpoints:
                if (i % (N_IDs // N_checkpoints)) == 0:
                    print(100*i/float(N_IDs), '%')
            audio,sr_old = sf.read(ID)
            if len(audio.shape) > 1:
                # Keep only the first channel of multi-channel files.
                audio = audio[:,0]
            if self.sr != sr_old:
                print('changing sampling rate')
                audio = librosa.resample(audio, sr_old, self.sr)
            if self.dataset == 'UrbanSound8k':
                # UrbanSound8k encodes the class index in the file name
                # (fold-classID-...), so no annotation file is needed.
                class_ix = int(ID.split('-')[1])
            if (self.dataset == 'MAVD') | (self.dataset == 'URBAN-SED'):
                label_file = self.labels[ID]
                if self.get_annotations:
                    labels = pd.read_csv(label_file, delimiter='\t', header=None)
                    labels.columns = ['event_onset', 'event_offset','event_label']
                else:
                    # Empty frame so the event loops below are no-ops.
                    labels = pd.DataFrame({'event_onset' : []})
            if self.dataset == 'MAVD':
                # One binary event-roll (sequences x classes) per taxonomy level.
                event_rolls = []
                for index_list,l_list in enumerate(self.label_list):
                    event_roll = np.zeros((int(math.ceil((len(audio)-self.sequence_samples+1)/ float(self.sequence_hop_samples))),
                                           len(l_list)))
                    for event in labels.to_dict('records'):
                        # MAVD labels are hierarchical ('a_b/c_d'); c1..c4 are
                        # the candidate names at the different levels.
                        c1 = event['event_label']
                        c2 = c1.split('/')[0]
                        c3 = ""
                        c4 = ""
                        if len(c1.split('/')) > 1:
                            c3 = c1.split('/')[1].split('_')[0]
                        if len(c1.split('/')[0].split('_')) > 1:
                            c4 = c1.split('/')[0].split('_')[1]
                        c1 = c1.split('_')[0]
                        if (c1 in l_list) | (c2 in l_list) | (c3 in l_list) | (c4 in l_list):
                            # Pick the first candidate present in this level's list.
                            if (c1 in l_list):
                                c = c1
                            else:
                                if (c2 in l_list):
                                    c = c2
                                else:
                                    if (c3 in l_list):
                                        c = c3
                                    else:
                                        c = c4
                            pos = l_list.index(c)
                            event_onset = event['event_onset']
                            event_offset = event['event_offset']
                            # Mark every sequence overlapped by the event as active.
                            onset = int(math.floor(event_onset * 1 / float(self.sequence_hop_time)))
                            offset = int(math.ceil(event_offset * 1 / float(self.sequence_hop_time)))
                            event_roll[onset:offset, pos] = 1
                    event_rolls.append(event_roll)
            if self.dataset == 'URBAN-SED':
                # Single flat event-roll (sequences x classes).
                event_roll = np.zeros((int(math.ceil((len(audio)-self.sequence_samples+1)/ float(self.sequence_hop_samples))),
                                       len(self.label_list)))
                for event in labels.to_dict('records'):
                    pos = self.label_list.index(event['event_label'])
                    event_onset = event['event_onset']
                    event_offset = event['event_offset']
                    onset = int(math.floor(event_onset * 1 / float(self.sequence_hop_time)))
                    offset = int(math.ceil(event_offset * 1 / float(self.sequence_hop_time)))
                    event_roll[onset:offset, pos] = 1
            # Slide a window of sequence_samples over the audio.
            # NOTE(review): this inner `i` (sample offset) shadows the outer
            # enumerate counter; enumerate reassigns it each file, so the
            # progress check above still works, but the shadowing is fragile.
            for i in np.arange(0,len(audio)-self.sequence_samples+1,self.sequence_hop_samples):
                audio_slice = audio[i:i+self.sequence_samples]
                #### Normalize by slices
                if self.normalize == 'minmax':
                    audio_slice = audio[i:i+self.sequence_samples]/np.amax(audio[i:i+self.sequence_samples])
                else:
                    audio_slice = audio[i:i+self.sequence_samples]
                # Silent slices divide by zero above; zero out inf/nan.
                audio_slice[np.isinf(audio_slice)] = 0
                audio_slice[np.isnan(audio_slice)] = 0
                audio_slice_pad = np.pad(audio_slice, int(self.n_fft // 2), mode='reflect')
                if self.frames:
                    # Return windowed frames for end-to-end networks.
                    f = librosa.util.frame(audio_slice_pad, frame_length=self.audio_win, hop_length=self.audio_hop)
                    W = np.zeros_like(f)
                    for j in range(W.shape[1]):
                        W[:,j] = window
                    f = f*W
                    X.append(f.T)
                else:
                    X.append(audio_slice)
                #### Normalize by slicess #audio_slice_pad
                stft = np.abs(librosa.core.stft(audio_slice, n_fft=self.n_fft, hop_length=self.audio_hop,
                                                win_length=self.audio_win, center=True))**2
                #print(stft.shape)
                if self.normalize_energy:
                    stft = stft/(self.n_fft/2+1)
                S.append(stft)
                melspec = self.mel_basis.dot(stft)
                #melspec = melspec*self.alpha
                if self.convert_to_dB:
                    melspec = librosa.core.power_to_db(melspec)
                mel.append(melspec.T)
                # Get y
                # j indexes the event-roll row that corresponds to this slice.
                j = int(i/float(self.sequence_hop_samples))
                if self.dataset == 'MAVD':
                    for index_list,l_list in enumerate(self.label_list):
                        y = event_rolls[index_list][j, :]
                        assert y.shape == (len(l_list),)
                        yt[index_list].append(y)
                if self.dataset == 'URBAN-SED':
                    y = event_roll[j, :]
                    assert y.shape == (len(self.label_list),)
                    yt.append(y)
                if self.dataset == 'UrbanSound8k':
                    # One-hot label from the class index in the file name.
                    y = np.zeros(len(self.label_list))
                    y[class_ix] = 1
                    yt.append(y)
                # Get id
                id = [ID, i]
                id_t.append(id)
        X = np.asarray(X)
        if self.dataset == 'MAVD':
            for index_list,l_list in enumerate(self.label_list):
                yt[index_list] = np.asarray(yt[index_list])
        if self.dataset == 'URBAN-SED':
            yt = np.asarray(yt)
        if self.dataset == 'UrbanSound8k':
            yt = np.asarray(yt)
        mel = np.asarray(mel)
        S = np.asarray(S)
        # (batch, freq, time) -> (batch, time, freq), matching mel's layout.
        S = np.transpose(S,(0,2,1))
        X = np.expand_dims(X, -1)
        return X,S,mel,yt

    def return_all(self):
        """ This function generates data with all the files in self.list_IDs
        Return
        ----------
        X : array
            Audio signals (only for end-to-end networks)
        S : array
            Spectrograms
        mel : array
            Mel-spectrograms
        yt : array
            Annotations as categorical matrix
        """
        X,S,mel,y = self.__data_generation(self.list_IDs)
        return X,S,mel,y

    def return_random(self):
        """ This function generates data for a random file in self.list_IDs
        Return
        ----------
        X : array
            Audio signals (only for end-to-end networks)
        S : array
            Spectrograms
        mel : array
            Mel-spectrograms
        yt : array
            Annotations as categorical matrix
        """
        j = random.randint(0,len(self.list_IDs)-1)
        X,S,mel,y = self.__data_generation([self.list_IDs[j]])
        return X,S,mel,y
class Scaler():
    """Feature scaler supporting 'standard', 'minmax' or no normalization."""

    def __init__(self, normalizer='standard'):
        """ Initialize the Scaler
        Parameters
        ----------
        normalizer : string
            'standard' to use standard normalization (sklearn)
            'minmax' to use minmax normalization
            'none' to not normalize features
        """
        self.normalizer = normalizer
        if normalizer == 'standard':
            self.scaler = StandardScaler()
        if normalizer == 'minmax':
            # Filled with [min, max] by fit().
            self.scaler = []

    def fit(self, mel):
        """ Fit the Scaler
        Parameters
        ----------
        mel : array
            Data to fit the scaler
        """
        if self.normalizer == 'standard':
            mel_bands = mel.shape[-1]
            # Collapse all leading dims so sklearn fits one mean/std per band.
            self.scaler.fit(np.reshape(mel, (-1, mel_bands)))
            assert len(self.scaler.mean_) == mel_bands
        if self.normalizer == 'minmax':
            min_v = np.amin(mel)
            max_v = np.amax(mel)
            self.scaler = [min_v, max_v]

    def transform(self, mel):
        """ Transform data using the fitted Scaler
        Parameters
        ----------
        mel : array
            Data to transform
        Return
        ----------
        mel : array
            Transformed data.
        """
        if self.normalizer == 'standard':
            mel_dims = mel.shape
            mel_bands = mel.shape[-1]
            mel_temp = mel.reshape(-1, mel_bands)
            mel_temp = self.scaler.transform(mel_temp)
            mel = mel_temp.reshape(mel_dims)
        if self.normalizer == 'minmax':
            # Map [min, max] linearly onto [-1, 1].
            mel = 2*((mel-self.scaler[0])/(self.scaler[1]-self.scaler[0])-0.5)
        return mel

    def antitransform(self, mel):
        """ Invert transform() using the fitted Scaler
        Parameters
        ----------
        mel : array
            Data to antitransform
        Return
        ----------
        mel : array
            Antitransformed data.
        """
        if self.normalizer == 'standard':
            # Bug fix: the original silently returned the input unchanged for
            # the 'standard' normalizer; invert via sklearn's inverse_transform.
            mel_dims = mel.shape
            mel_bands = mel.shape[-1]
            mel_temp = mel.reshape(-1, mel_bands)
            mel_temp = self.scaler.inverse_transform(mel_temp)
            mel = mel_temp.reshape(mel_dims)
        if self.normalizer == 'minmax':
            mel = (self.scaler[1]-self.scaler[0])*(mel/2. + 0.5) + self.scaler[0]
        return mel

    def get_scaler(self):
        """Return the underlying scaler object (sklearn scaler or [min, max])."""
        return self.scaler

    def set_scaler(self, scaler):
        """Replace the underlying scaler object (e.g. one loaded from disk)."""
        self.scaler = scaler
"""
Collection of Numpy general functions, wrapped to fit Ivy syntax and signature.
"""
# global
import logging
import numpy as np
import math as _math
from operator import mul as _mul
from functools import reduce as _reduce
import multiprocessing as _multiprocessing
# local
import ivy
from ivy.functional.ivy import default_dtype
from ivy.functional.backends.numpy.device import _dev_callable, to_dev
# Helpers #
# --------#
def _to_dev(x, dev):
    """Validate a device string for the numpy backend and return x unchanged.

    Numpy only supports CPU placement; a 'gpu' device raises, and any string
    containing neither 'gpu' nor 'cpu' is rejected as malformed.
    """
    if dev is None:
        return x
    if 'gpu' in dev:
        raise Exception('Native Numpy does not support GPU placement, consider using Jax instead')
    if 'cpu' not in dev:
        raise Exception('Invalid device specified, must be in the form [ "cpu:idx" | "gpu:idx" ],'
                        'but found {}'.format(dev))
    return x
# Thin numpy-backend shims: conversions are identity functions because the
# backend's native array type already is np.ndarray.
def copy_array(x):
    """Return a copy of the array."""
    return x.copy()


array_equal = np.array_equal


def floormod(x, y):
    """Elementwise x mod y, always returned as an ndarray."""
    return np.asarray(x % y)


def to_numpy(x):
    """Identity: the numpy backend's arrays already are numpy arrays."""
    return x


def to_scalar(x):
    """Extract the single element of a 0-d/1-element array as a Python scalar."""
    return x.item()


def to_list(x):
    """Convert the array to a (possibly nested) Python list."""
    return x.tolist()


def container_types():
    """Numpy has no backend-specific container types."""
    return []


def inplace_arrays_supported():
    """Numpy ndarrays support in-place updates."""
    return True


def inplace_variables_supported():
    """Numpy has no separate variable type; in-place updates are supported."""
    return True
def inplace_update(x, val):
    """Overwrite x's buffer with val in place and return x.

    NOTE(review): this assigns to ``ndarray.data``; on modern NumPy that
    attribute is read-only for many arrays, so this may raise -- confirm the
    NumPy versions this backend supports before relying on it.
    """
    x.data = val
    return x
def is_array(x, exclusive=False):
    """Return True if x is a numpy ndarray.

    The ``exclusive`` flag is accepted for API parity with other backends
    (where it distinguishes arrays from trainable variables); numpy has no
    separate variable type, so it has no effect here.
    """
    return isinstance(x, np.ndarray)
def unstack(x, axis, keepdims=False):
    """Split x into a list of sub-arrays along ``axis``.

    A 0-d input cannot be split and is returned as a singleton list.  Unless
    ``keepdims`` is set, the split axis is squeezed out of each piece.
    """
    if x.shape == ():
        return [x]
    pieces = np.split(x, x.shape[axis], axis)
    return pieces if keepdims else [np.squeeze(piece, axis) for piece in pieces]
def inplace_decrement(x, val):
    """Subtract ``val`` from ``x`` in place and return ``x``."""
    x.__isub__(val)
    return x
def inplace_increment(x, val):
    """Add ``val`` to ``x`` in place and return ``x``."""
    x.__iadd__(val)
    return x
cumsum = np.cumsum
def cumprod(x, axis=0, exclusive=False):
    """Cumulative product along ``axis``.

    With ``exclusive=True`` each position holds the product of all *preceding*
    entries (the first position becomes 1), mirroring tf.math.cumprod.
    """
    if not exclusive:
        return np.cumprod(x, axis)
    # Move the target axis last, prepend ones and drop the final element,
    # then cumprod and move the axis back.
    moved = np.swapaxes(x, axis, -1)
    shifted = np.concatenate((np.ones_like(moved[..., -1:]), moved[..., :-1]), -1)
    return np.swapaxes(np.cumprod(shifted, -1), axis, -1)
def scatter_flat(indices, updates, size=None, tensor=None, reduction='sum', dev=None):
    """Scatter ``updates`` into a 1-D target at positions ``indices``.

    The target is ``tensor`` when given, otherwise a fresh array of length
    ``size``.  Duplicate indices are combined according to ``reduction``:
    'sum', 'replace' (last write wins), 'min' or 'max'.  For min/max without
    an explicit target, untouched cells (sentinel +-1e12) are reset to 0.
    Returns the target after the _to_dev placement check.
    """
    target = tensor
    target_given = ivy.exists(target)
    if ivy.exists(size) and ivy.exists(target):
        # When both are supplied they must agree.
        assert len(target.shape) == 1 and target.shape[0] == size
    if dev is None:
        # Default to the device of the updates array.
        dev = _dev_callable(updates)
    if reduction == 'sum':
        if not target_given:
            target = np.zeros([size], dtype=updates.dtype)
        # ufunc.at accumulates correctly for repeated indices.
        np.add.at(target, indices, updates)
    elif reduction == 'replace':
        if not target_given:
            target = np.zeros([size], dtype=updates.dtype)
        # Copy + setflags so a read-only input tensor can still be written.
        target = np.asarray(target).copy()
        target.setflags(write=1)
        target[indices] = updates
    elif reduction == 'min':
        if not target_given:
            # Large sentinel so any real update wins the minimum.
            target = np.ones([size], dtype=updates.dtype) * 1e12
        np.minimum.at(target, indices, updates)
        if not target_given:
            # Cells never updated still hold the sentinel; zero them.
            target = np.where(target == 1e12, 0., target)
    elif reduction == 'max':
        if not target_given:
            target = np.ones([size], dtype=updates.dtype) * -1e12
        np.maximum.at(target, indices, updates)
        if not target_given:
            target = np.where(target == -1e12, 0., target)
    else:
        raise Exception('reduction is {}, but it must be one of "sum", "min" or "max"'.format(reduction))
    return _to_dev(target, dev)
# noinspection PyShadowingNames
def scatter_nd(indices, updates, shape=None, tensor=None, reduction='sum', dev=None):
    """N-dimensional scatter of ``updates`` into a target array.

    The last axis of ``indices`` addresses the leading axes of the target
    (``tensor`` when given, else a fresh zero/sentinel array of ``shape``);
    any remaining target axes are assigned whole slices.  Duplicates combine
    per ``reduction`` ('sum', 'replace', 'min', 'max'), as in scatter_flat.
    """
    target = tensor
    target_given = ivy.exists(target)
    if ivy.exists(shape) and ivy.exists(target):
        # When both are supplied they must agree.
        assert ivy.shape_to_tuple(target.shape) == ivy.shape_to_tuple(shape)
    if dev is None:
        dev = _dev_callable(updates)
    shape = list(shape) if ivy.exists(shape) else list(tensor.shape)
    # Convert (..., num_index_dims) integer indices into an advanced-indexing
    # tuple: one flat coordinate array per indexed axis, Ellipsis for the rest.
    indices_flat = indices.reshape(-1, indices.shape[-1]).T
    indices_tuple = tuple(indices_flat) + (Ellipsis,)
    if reduction == 'sum':
        if not target_given:
            target = np.zeros(shape, dtype=updates.dtype)
        np.add.at(target, indices_tuple, updates)
    elif reduction == 'replace':
        if not target_given:
            target = np.zeros(shape, dtype=updates.dtype)
        # Copy + setflags so a read-only input tensor can still be written.
        target = np.asarray(target).copy()
        target.setflags(write=1)
        target[indices_tuple] = updates
    elif reduction == 'min':
        if not target_given:
            # Large sentinel so any real update wins the minimum.
            target = np.ones(shape, dtype=updates.dtype) * 1e12
        np.minimum.at(target, indices_tuple, updates)
        if not target_given:
            # Cells never updated still hold the sentinel; zero them.
            target = np.where(target == 1e12, 0., target)
    elif reduction == 'max':
        if not target_given:
            target = np.ones(shape, dtype=updates.dtype) * -1e12
        np.maximum.at(target, indices_tuple, updates)
        if not target_given:
            target = np.where(target == -1e12, 0., target)
    else:
        raise Exception('reduction is {}, but it must be one of "sum", "min" or "max"'.format(reduction))
    return _to_dev(target, dev)
def gather(params, indices, axis=-1, dev=None):
    """Gather values from ``params`` along ``axis`` at ``indices``.

    Defaults ``dev`` to the device of ``params``; a plain wrapper around
    np.take_along_axis followed by the backend's device placement check.
    """
    dev = _dev_callable(params) if dev is None else dev
    gathered = np.take_along_axis(params, indices, axis)
    return _to_dev(gathered, dev)
def gather_nd(params, indices, dev=None):
    """N-dimensional gather (tf.gather_nd equivalent) in pure numpy.

    The last axis of ``indices`` indexes the leading ``num_index_dims`` axes
    of ``params``; the remaining ``params`` axes are carried through whole.
    Implemented by flattening ``params`` and computing row-major flat offsets.
    """
    if dev is None:
        dev = _dev_callable(params)
    indices_shape = indices.shape
    params_shape = params.shape
    num_index_dims = indices_shape[-1]
    # Row-major stride (in elements) of every params axis, with a trailing 1.
    result_dim_sizes_list = [_reduce(_mul, params_shape[i + 1:], 1) for i in range(len(params_shape) - 1)] + [1]
    result_dim_sizes = np.array(result_dim_sizes_list)
    # Number of contiguous elements in each gathered sub-array.
    implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())
    flat_params = np.reshape(params, (-1,))
    new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]
    indices_scales = np.reshape(result_dim_sizes[0:num_index_dims], new_shape)
    # Base flat offset of each indexed sub-array, tiled across its length...
    indices_for_flat_tiled = np.tile(np.reshape(np.sum(indices * indices_scales, -1, keepdims=True), (-1, 1)), (1, implicit_indices_factor))
    # ...plus the within-sub-array offsets 0..factor-1.
    implicit_indices = np.tile(np.expand_dims(np.arange(implicit_indices_factor), 0), (indices_for_flat_tiled.shape[0], 1))
    indices_for_flat = indices_for_flat_tiled + implicit_indices
    flat_indices_for_flat = np.reshape(indices_for_flat, (-1,)).astype(np.int32)
    flat_gather = np.take(flat_params, flat_indices_for_flat, 0)
    # Result shape: indices batch dims followed by the un-indexed params dims.
    new_shape = list(indices_shape[:-1]) + list(params_shape[num_index_dims:])
    res = np.reshape(flat_gather, new_shape)
    return _to_dev(res, dev)
multiprocessing = lambda context=None: _multiprocessing if context is None else _multiprocessing.get_context(context)
def indices_where(x):
    """Return the coordinates of truthy entries of ``x`` as an (n, ndim) array."""
    coords = np.where(x)
    if len(coords) == 1:
        # 1-D input: a single column of positions.
        return np.expand_dims(coords[0], -1)
    # Stack the per-axis coordinate vectors into rows of full coordinates.
    return np.stack(coords, -1)
# noinspection PyUnusedLocal
def one_hot(indices, depth, dev=None):
    """One-hot encode ``indices`` into a new trailing dimension of size ``depth``.

    ``dev`` is accepted for backend API parity but unused (numpy is CPU-only).
    Rows of an identity matrix are selected by the flattened indices, then the
    result is reshaped back to indices.shape + [depth].
    """
    flat = np.array(indices).reshape(-1)
    encoded = np.eye(depth)[flat]
    return encoded.reshape(list(indices.shape) + [depth])
|
#
# Contributed by Rodrigo Tobar <rtobar@icrar.org>
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2019
# Copyright by UWA (in the framework of the ICRAR)
#
'''
Benchmarking utility for ijson
'''
import argparse
import collections
import io
import os
import sys
import time
import ijson
from ijson import compat
# Registry of benchmark data generators, keyed by function name in
# definition order.
_benchmarks = collections.OrderedDict()


def benchmark(f):
    """Decorator: register *f* in the benchmark table under its name."""
    _benchmarks[f.__name__] = f
    return f


@benchmark
def long_list(n):
    """A JSON array of n literal 1s."""
    return b'[' + b','.join(b'1' for _ in range(n)) + b']'


@benchmark
def big_int_object(n):
    """A JSON object with n integer values."""
    return b'{' + b',\n'.join(b'"key_%d": %d' % (i, i) for i in range(n)) + b'}'


@benchmark
def big_decimal_object(n):
    """A JSON object with n decimal values."""
    return b'{' + b',\n'.join(b'"key_%d": %d.0' % (i, i) for i in range(n)) + b'}'


@benchmark
def big_null_object(n):
    """A JSON object with n null values."""
    return b'{' + b',\n'.join(b'"key_%d": null' % (i,) for i in range(n)) + b'}'


@benchmark
def big_bool_object(n):
    """A JSON object with n alternating true/false values."""
    entries = []
    for i in range(n):
        literal = b"true" if i % 2 == 0 else b"false"
        entries.append(b'"key_%d": %s' % (i, literal))
    return b'{' + b',\n'.join(entries) + b'}'


@benchmark
def big_str_object(n):
    """A JSON object with n short string values."""
    return b'{' + b',\n'.join(b'"key_%d": "value_%d"' % (i, i) for i in range(n)) + b'}'


@benchmark
def big_longstr_object(n):
    """A JSON object with n long string values (less parser overhead per byte)."""
    str_template = b"value that is very long and should cause a bit less of JSON parsing"
    return b'{' + b',\n'.join(b'"key_%d": "%s"' % (i, str_template) for i in range(n)) + b'}'


@benchmark
def object_with_10_keys(n):
    """A JSON array of n identical 10-key objects."""
    template = b'{' + b',\n'.join(b'"key_%d": "value_%d"' % (i, i) for i in range(10)) + b'}'
    return b'[' + b',\n'.join(template for _ in range(n)) + b']'


@benchmark
def empty_lists(n):
    """A JSON array of n empty arrays."""
    return b'[' + b', '.join(b'[]' for _ in range(n)) + b']'


@benchmark
def empty_objects(n):
    """A JSON array of n empty objects."""
    return b'[' + b', '.join(b'{}' for _ in range(n)) + b']'


def parse_benchmarks(s):
    """Resolve a comma-separated list of benchmark names to their functions."""
    names = s.split(',')
    return [_benchmarks[name] for name in names]
# Candidate ijson backends, in preference order.
BACKEND_NAMES = 'python', 'yajl', 'yajl2', 'yajl2_cffi', 'yajl2_c'


def load_backends():
    """Import every known ijson backend, silently skipping unavailable ones."""
    available = collections.OrderedDict()
    for name in BACKEND_NAMES:
        try:
            available[name] = ijson.get_backend(name)
        except ImportError:
            pass
    return available


_backends = load_backends()


def parse_backends(s):
    """Resolve a comma-separated backend list against the loaded backends."""
    selected = collections.OrderedDict()
    for name in s.split(','):
        selected[name] = _backends[name]
    return selected
def _stdout_tty_write_flush(message):
    """Write *message* to stdout (flushing) only when stdout is a terminal."""
    out = sys.stdout
    if not out.isatty():
        return
    out.write(message)
    out.flush()
class progress_message(object):
    """Context manager that shows a transient status line on a tty.

    The message is printed on entry and erased on exit, so it only appears
    while the wrapped work is in progress (and only when stdout is a tty).
    """
    def __init__(self, message):
        # Text to display while the with-block runs.
        self.message = message
    def __enter__(self):
        _stdout_tty_write_flush(self.message)
        return self
    def __exit__(self, *args):
        # CR + ANSI clear-to-end-of-line erases the progress text.
        _stdout_tty_write_flush('\r\033[K')
# On Python 3.5+ define the asyncio helpers used by --async runs.  They are
# built with exec() so this module still *parses* on interpreters where
# async/await syntax would be a SyntaxError (e.g. Python 2).
if compat.IS_PY35:
    exec('''
class AsyncReader(object):
    def __init__(self, data):
        self.data = io.BytesIO(data)
    async def read(self, n=-1):
        return self.data.read(n)
    def close(self):
        self.data.close()
async def _run_async(method, reader, *method_args, **method_kwargs):
    async for _ in method(reader, *method_args, **method_kwargs):
        pass
''')
def run_benchmarks(args, benchmark_func=None, fname=None):
    """Run the selected parsing method over one benchmark, once per backend.

    Exactly one of ``benchmark_func`` (a registered data generator) or
    ``fname`` (a JSON file on disk) must be given.  Prints one CSV row per
    backend: mbytes, method, test_case, backend, time, mb_per_sec.
    """
    if bool(benchmark_func) == bool(fname):
        raise ValueError("Either benchmark_func or fname must be given")
    if benchmark_func:
        bname = benchmark_func.__name__
        with progress_message('Generating data for benchmark %s...' % (bname,)):
            data = benchmark_func(args.size)
        size = len(data)
    else:
        bname = fname
        size = os.stat(args.input).st_size
    for backend_name, backend in args.backends.items():
        # Get correct method and prepare its arguments
        method = args.method
        if args.run_async:
            method += '_async'
        elif args.run_coro:
            method += '_coro'
        method = getattr(backend, method)
        method_args = ()
        if args.method in ('items', 'kvitems'):
            # items/kvitems additionally take the JSON prefix to extract.
            method_args = args.prefix,
        method_kwargs = {
            'multiple_values': args.multiple_values,
            'use_float': args.use_float
        }
        if not args.run_coro:
            # Coroutine methods are fed chunks directly, so no buffer size.
            method_kwargs['buf_size'] = args.bufsize
        # Prepare reader
        reader = None
        if not benchmark_func:
            reader = open(fname, 'rb')
        else:
            reader = AsyncReader(data) if args.run_async else io.BytesIO(data)
        # Prepare function that will run the benchmark
        if args.run_async:
            import asyncio
            loop = asyncio.new_event_loop()
            def run():
                try:
                    loop.run_until_complete(_run_async(method, reader, *method_args, **method_kwargs))
                finally:
                    loop.close()
        elif args.run_coro:
            def run():
                # Push chunks into the parsing coroutine and discard the
                # events it appends to the sendable list.
                from ijson.utils import sendable_list
                events = sendable_list()
                coro = method(events, *method_args, **method_kwargs)
                if reader:
                    chunks = iter(lambda: reader.read(args.bufsize), b'')
                else:
                    chunks = (data[pos:pos + args.bufsize]
                              for pos in range(0, len(data), args.bufsize))
                for chunk in chunks:
                    coro.send(chunk)
                    del events[:]
                coro.close()
        else:
            def run():
                # Plain generator API: just drain it.
                for _ in method(reader, *method_args, **method_kwargs):
                    pass
        # Go, go, go!
        start = time.time()
        run()
        duration = time.time() - start
        megabytes = size / 1024. / 1024.
        print("%.3f, %s, %s, %s, %.3f, %.3f" %
              (megabytes, args.method, bname, backend_name, duration,
               megabytes / duration))
        reader.close()
def main():
    """Parse CLI options and run the selected benchmarks/backends."""
    DEFAULT_N = 100000
    DEFAULT_BUFSIZE = 64 * 1024
    ALL_BENCHMARKS = ','.join(_benchmarks)
    ALL_BACKENDS = ','.join(_backends)
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--size', type=int,
                        help='Size of JSON content; actual size in bytes might differ, defaults to %d' % DEFAULT_N,
                        default=DEFAULT_N)
    parser.add_argument('-S', '--bufsize', type=int,
                        help='Buffer size used during parsing; defaults to %d' % DEFAULT_BUFSIZE,
                        default=DEFAULT_BUFSIZE)
    parser.add_argument('-b', '--benchmarks', type=parse_benchmarks,
                        help='Comma-separated list of benchmarks to include, defaults to %s' % ALL_BENCHMARKS,
                        default=ALL_BENCHMARKS)
    parser.add_argument('-B', '--backends', type=parse_backends,
                        help='Comma-separated list of backends to include, defaults to %s' % ALL_BACKENDS,
                        default=ALL_BACKENDS)
    parser.add_argument('-l', '--list', action='store_true',
                        help='List available benchmarks and backends')
    parser.add_argument('-i', '--input',
                        help='File to use for benchmarks rather than built-in benchmarking functions')
    parser.add_argument('-m', '--multiple-values', action='store_true', default=False,
                        help='Content has multiple JSON values, useful when used with -i')
    parser.add_argument('-f', '--use-float', action='store_true', default=False,
                        help='Parse non-integer numbers as float instead of Decimal')
    parser.add_argument('-M', '--method', choices=['basic_parse', 'parse', 'kvitems', 'items'],
                        help='The method to benchmark', default='basic_parse')
    parser.add_argument('-c', '--coro', action='store_true', default=False,
                        dest='run_coro', help='Benchmark coroutine methods')
    # The asyncio flag only exists where the async helpers were defined above.
    if compat.IS_PY35:
        parser.add_argument('-a', '--async', action='store_true', default=False,
                            dest='run_async', help='Benchmark asyncio-enabled methods')
    parser.add_argument('-p', '--prefix', help='Prefix (used with -M items|kvitems)', default='')
    args = parser.parse_args()
    if args.list:
        msg = 'Backends:\n'
        msg += '\n'.join(' - %s' % name for name in _backends)
        msg += '\nBenchmarks:\n'
        msg += '\n'.join(' - %s' % name for name in _benchmarks)
        print(msg)
        return
    # CSV header matching the rows printed by run_benchmarks.
    print("#mbytes,method,test_case,backend,time,mb_per_sec")
    if args.input:
        run_benchmarks(args, fname=args.input)
    else:
        for benchmark in args.benchmarks:
            run_benchmarks(args, benchmark)


if __name__ == '__main__':
    main()
|
import os
import yaml
def read_config():
    """Load ../config.yml (relative to this module) with all values as strings."""
    config_path = os.path.join(os.path.dirname(__file__), "../config.yml")
    with open(config_path) as ymlfile:
        return yaml.load(ymlfile, Loader=yaml.BaseLoader)
def chromedriver_path():
    """Publish the configured chromedriver path via the environment and return it."""
    driver_path = read_config()["paths"]["webdriver"]["chromedriver"]
    os.environ["webdriver.chrome.driver"] = driver_path
    return driver_path
def geckodriver_path():
    """Publish the configured geckodriver path via the environment and return it."""
    driver_path = read_config()["paths"]["webdriver"]["geckodriver"]
    os.environ["webdriver.gecko.driver"] = driver_path
    return driver_path
def default_browser():
    """Return the browser name configured in config.yml."""
    config = read_config()
    return config["browser"]
|
from .base import ObjectModel
from .event_log import EventLog
from .handler_log import HandlerLog
from .sync_task import SyncTask
|
class Solution:
    def maxProfit(self, prices: "list[int]") -> int:
        """Return the maximum profit from a single buy/sell transaction.

        Kadane's algorithm over day-to-day price differences: keep a running
        profit that resets to 0 whenever it goes negative, while tracking the
        best profit seen so far.  O(n) time, O(1) space; returns 0 for empty
        or single-element input.

        Fixes vs. the original: removed the leftover debug print(), and
        replaced the un-imported `List[int]` annotation (which raised a
        NameError at class-definition time) with a quoted builtin generic.
        """
        best_ending_here = 0
        best = 0
        for i in range(1, len(prices)):
            best_ending_here = max(0, best_ending_here + prices[i] - prices[i - 1])
            best = max(best_ending_here, best)
        return best
#-------------------------#
#Author: Albert Bagdasarov
#Date 16.08.20
#-------------------------#
import os
import time
import pyttsx3 as x3
import speech_recognition as sr
from Commands import *
import random
import re
import webbrowser
import wikipedia
import json
import request
from datetime import datetime
#Settings of Voice
r = sr.Recognizer()
mic = sr.Microphone(device_index=1)
engine = x3.init()
en_voice_id = "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0"
voices = engine.getProperty('voices')
engine.setProperty('voice',voices[1].id)
#All Assistent'opportunities
class Time():
    """Announce the current date and time via text-to-speech.

    Instantiating the class performs the whole action: it speaks an
    acknowledgement phrase, prints the formatted timestamp and reads it out.
    """
    def __init__(self):
        self.engine = x3.init()
        self.now = datetime.now()
        # Acknowledgement phrases come from Commands.py via the star import.
        self.engine.say(random.choice(Phrases_after_Actions))
        self.time_now = self.now.strftime("%d %B, %A, %H Hours %M Minutes, %Y ")
        print(self.time_now)
        self.engine.say(self.time_now)
        self.engine.runAndWait()
class System():
    """Launch local applications in response to voice commands."""

    def __init__(self):
        # Absolute paths of the launchable applications on this machine.
        self.Chrome = "C:/Program Files (x86)/Google/Chrome/Application/chrome.exe"
        self.cmd = 'C:/Users/LENOVO/AppData/Roaming/Microsoft/Windows/Start Menu/Programs/System Tools/cmd.exe'
        self.Telegram = "C:/Users/LENOVO/AppData/Roaming/Telegram Desktop/Telegram.exe"
        self.HoneyPot = 'C:/Users/LENOVO/Desktop/Win_Locker/dist/main.exe'
        self.Desktop = 'C:/Users/LENOVO/Desktop'
        self.Vs = "C:/Program Files/Microsoft VS Code/Code.exe"

    def AppMannager(self):
        """Acknowledge the spoken command and start the matching program."""
        self.engine = x3.init()
        self.ask = Answer
        print(self.ask)
        self.engine.say(random.choice(Phrases_after_Actions))
        time.sleep(1)
        # Dispatch table replacing the original if/elif chain.
        launchers = {
            "open telegram": self.Telegram,
            "open browser": self.Chrome,
            "open cmd": self.cmd,
            "open honeypot": self.HoneyPot,
            "open visual studio": self.Vs,
        }
        target = launchers.get(self.ask)
        if target is not None:
            os.startfile(target)
class Web():
    """Open a URL or a Google search for the spoken query."""
    def __init__(self):
        # Raw spoken text (module-level result of the last recognition).
        self.url = Answer
        self.new_list = ''
    def OpenUrl(self):
        """Open the query in the default browser.

        The character loop below keeps only the text after the LAST space:
        new_list is reset to '' whenever a space is seen, then the current
        character (including that space) is appended; strip() removes it.
        NOTE(review): this discards every word before the final one
        (e.g. "search python docs" -> "docs"); confirm this is intended.
        """
        self.url = list(str(self.url))
        self.engine = x3.init()
        for i in self.url:
            if i == ' ':
                self.new_list = ''
            self.new_list+=i
        self.url = self.new_list.strip()
        print(self.url)
        self.engine.say(random.choice(Phrases_after_Actions))
        time.sleep(1)
        # A dot suggests a hostname; otherwise fall back to a Google search.
        if re.search(r'\.', self.url):
            webbrowser.open_new_tab('https://' + self.url)
        elif re.search(r'\ ', self.url):
            webbrowser.open_new_tab('https://www.google.com/search?=&ei=&q=bu'+ self.url +'&oq=')
        else:
            webbrowser.open_new_tab('https://www.google.com/search?=&ei=&q='+ self.url +'&oq=')
class WikiPedia():
    """Fetch a short Wikipedia summary for the spoken question and read it aloud."""

    def __init__(self):
        self.Question = Answer
        # 2-4 sentences keeps the spoken answer short.
        self.Wiki_Request = wikipedia.summary(self.Question, sentences=random.randint(2,4))
        self.New_list = list(str(self.Wiki_Request))
        self.count = 0

    def Wiki_Search(self):
        """Reformat the summary (one sentence per line), print and speak it."""
        self.engine = x3.init()
        # Count the full stops so every '.' can be turned into a line break.
        self.count = self.count + self.New_list.count('.')
        self.engine.say(random.choice(Phrases_after_Actions))
        self.Wiki_Request = self.Wiki_Request.replace('.','\n',self.count)
        print(self.Wiki_Request)
        self.engine.say(self.Wiki_Request)
class BoringStuff():
    """Tell a joke or play a random local MP3, depending on the command."""
    def __init__(self):
        # Music files are expected to be named 1.mp3 .. 14.mp3.
        self.music_directory = 'C:/Users/LENOVO/Downloads/MUSIC/'
        self.random_music_id = random.randint(1,14)
        self.command = Answer
    def Joke(self):
        """Dispatch the stored command against the boring-stuff phrase list."""
        self.engine = x3.init()
        self.engine.say(random.choice(Phrases_after_Actions))
        for i in Commands_Boring_Stuff:
            # Joke branch: the command is cleared afterwards so the music
            # branch below cannot also fire for the same phrase.
            if self.command.startswith('tell me a joke'):
                self.joke = random.choice(Jokes)
                self.engine.say(self.joke)
                self.command = ''
            if self.command.startswith(i):
                # NOTE(review): the path is extended on every matching
                # phrase; a second match in this loop would corrupt it.
                self.music_directory = self.music_directory + str(self.random_music_id) + '.mp3'
                os.startfile(self.music_directory)
# First request
def Greeting():
    """Speak the assistant's opening line through the TTS engine."""
    tts = x3.init()
    tts.say('Hello, how can I help you, Sir?')
    tts.runAndWait()


Greeting()
Answer = 'a'  # last recognized phrase; non-empty sentinel before first recognition

# Launch
if __name__ == "__main__":
    # Listen/recognize/dispatch loop; saying "stop" ends the assistant.
    while Answer != 'stop':
        with mic as source:
            r.adjust_for_ambient_noise(source)
            engine = x3.init()
            print('Command Please')
            engine.say("Command Please")
            engine.runAndWait()
            audio = r.listen(source)
            try:
                # Google speech recognition; normalized to lowercase for matching.
                query = r.recognize_google(audio)
                Answer = query.lower()
                print(Answer)
            except sr.UnknownValueError:
                print("The voice wasn't recognized!")
                Answer = ''
                engine.say("The voice wasn't recognized!")
            except sr.RequestError as e:
                engine.say("[log] Undefined Error,Please check your Internet connection!")
        # Check the meaning in Massives from Commands.py
        # NOTE(review): re-defined on every loop iteration; it reads the
        # module-level Answer set by the recognition above.
        def check_Meanings():
            for i in Commands_Time:
                if Answer == i:
                    Time()
            for i in Commands_System:
                if Answer == i:
                    system_1 = System()
                    system_1.AppMannager()
            for i in Commands_Wikipedia:
                if Answer.startswith(i):
                    print(Answer)
                    wiki_1 = WikiPedia()
                    wiki_1.Wiki_Search()
            for i in Commands_Boring_Stuff:
                if Answer.startswith(i):
                    bore_1 = BoringStuff()
                    bore_1.Joke()
            for i in Commands_Detection_System:
                if Answer.startswith(i):
                    os.startfile("/Detec/l.py")
            if Answer.startswith('search'):
                web_1 = Web()
                web_1.OpenUrl()
        check_Meanings()
|
from sns_boomerang.api.resources.jobs import Jobs
from sns_boomerang.common.items import Job
import pytest
from werkzeug.exceptions import NotFound
def test_get_job(monkeypatch):
    """Jobs.get returns the serialized job and HTTP 200 when the job exists."""
    expected_id = 'test-id'

    def fake_get(job_id):
        assert job_id == expected_id
        return Job('topic', 'payload', 123, id=expected_id)

    monkeypatch.setattr(Job, 'get', fake_get)

    body, status_code = Jobs.get(expected_id)
    assert body['id'] == expected_id
    assert status_code == 200
def test_get_no_job(monkeypatch):
    """Jobs.get raises NotFound when no job matches the requested id."""
    expected_id = 'test-id'

    def fake_get(job_id):
        assert job_id == expected_id
        return None

    monkeypatch.setattr(Job, 'get', fake_get)

    with pytest.raises(NotFound):
        Jobs.get(expected_id)
|
"""
Just a simple sample
"""
import sys
import time
from twitter_requests.generator import TwitterBearerGenerator
from twitter_requests.twitter import Twitter, TwitterQueryBuilder
# Fix: the generator variable was misspelled "GEBERATOR" throughout, and the
# caught exception was bound to an unused name.
QUERY = sys.argv[1]
EPOCH = 0
KEYS = {
    'api_key': 'YOUR_API_KEY',
    'api_secret_key': 'YOUR_API_SECRET_KEY'
}
# Lazily (re)created bearer-token generator and token; both are reset to None
# on failure so the next iteration fetches a fresh token.
TWITTER_BEARER_GENERATOR = None
TOKEN = None

while True:
    EPOCH = EPOCH + 1
    # Recreate the generator (and token) on first run or after a failure.
    if not isinstance(TWITTER_BEARER_GENERATOR, TwitterBearerGenerator):
        TWITTER_BEARER_GENERATOR = TwitterBearerGenerator(KEYS)
        TOKEN = TWITTER_BEARER_GENERATOR.generate_token()
    QUERY_BUILDER = TwitterQueryBuilder.builder().set_count(10)\
        .set_lang('id').build()
    TWITTER = Twitter(TOKEN, QUERY_BUILDER)
    try:
        TWITTER.find_tweets(QUERY)
        for tweet in TWITTER.get_tweets_statuses():
            print(f"{tweet['user']['name']} ==> {tweet['text']} ")
        print(f"====================== END LINE OF EPOCH {EPOCH} ======================")
        time.sleep(60)
    except Exception:
        # On any failure (expired token, network error) drop the token so it
        # is regenerated next iteration, then back off for a minute.
        TWITTER_BEARER_GENERATOR = None
        TOKEN = None
        time.sleep(60)
|
# -*- coding: utf-8 -*-
# Copyright (2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Python libs
import ipaddress
import logging
import os
from threading import Thread
# 3rd party libs
from flask import abort
from flask import Flask
from flask import g
from flask import request
from flask import Response
from flask_api import status
# own libs
from oneview_redfish_toolkit.api.redfish_error import RedfishError
from oneview_redfish_toolkit.api import scmb
from oneview_redfish_toolkit.blueprints.chassis import chassis
from oneview_redfish_toolkit.blueprints.chassis_collection \
import chassis_collection
from oneview_redfish_toolkit.blueprints.computer_system import computer_system
from oneview_redfish_toolkit.blueprints.computer_system_collection \
import computer_system_collection
from oneview_redfish_toolkit.blueprints.event_service import event_service
from oneview_redfish_toolkit.blueprints.manager import manager
from oneview_redfish_toolkit.blueprints.manager_collection \
import manager_collection
from oneview_redfish_toolkit.blueprints.metadata import metadata
from oneview_redfish_toolkit.blueprints.network_adapter \
import network_adapter
from oneview_redfish_toolkit.blueprints.network_adapter_collection \
import network_adapter_collection
from oneview_redfish_toolkit.blueprints.network_device_function \
import network_device_function
from oneview_redfish_toolkit.blueprints.network_device_function_collection \
import network_device_function_collection
from oneview_redfish_toolkit.blueprints.network_interface \
import network_interface
from oneview_redfish_toolkit.blueprints.network_interface_collection \
import network_interface_collection
from oneview_redfish_toolkit.blueprints.network_port import network_port
from oneview_redfish_toolkit.blueprints.network_port_collection \
import network_port_collection
from oneview_redfish_toolkit.blueprints.odata import odata
from oneview_redfish_toolkit.blueprints.redfish_base import redfish_base
from oneview_redfish_toolkit.blueprints.service_root import service_root
from oneview_redfish_toolkit.blueprints.session import session
from oneview_redfish_toolkit.blueprints.storage import storage
from oneview_redfish_toolkit.blueprints.storage_collection \
import storage_collection
from oneview_redfish_toolkit.blueprints.subscription\
import subscription
from oneview_redfish_toolkit.blueprints.subscription_collection \
import subscription_collection
from oneview_redfish_toolkit.blueprints.thermal import thermal
from oneview_redfish_toolkit import util
# Configure logging before anything else so startup failures are captured.
util.configure_logging(os.getenv("LOGGING_FILE", "logging.conf"))

if __name__ == '__main__':
    # Load config file, schemas and creates a OV connection
    try:
        util.load_config('redfish.conf')
    except Exception as e:
        logging.exception('Failed to load app configuration')
        logging.exception(e)
        exit(1)

    # Check auth mode.  Fix: previously this only logged the error and kept
    # starting up; abort like every other startup validation does, instead of
    # running with a mode the rest of the code cannot interpret.
    if util.config["redfish"]["authentication_mode"] not in \
            ["conf", "session"]:
        logging.error(
            "Invalid authentication_mode. Please check your conf"
            " file. Valid values are 'conf' or 'session'")
        exit(1)

    # Flask application
    app = Flask(__name__)

    # Register blueprints
    app.register_blueprint(redfish_base, url_prefix="/redfish/")
    app.register_blueprint(service_root, url_prefix='/redfish/v1/')
    app.register_blueprint(chassis_collection)
    app.register_blueprint(computer_system_collection)
    app.register_blueprint(computer_system)
    app.register_blueprint(chassis)
    app.register_blueprint(manager_collection)
    app.register_blueprint(manager)
    app.register_blueprint(metadata)
    app.register_blueprint(odata)
    app.register_blueprint(storage)
    app.register_blueprint(thermal)
    app.register_blueprint(storage_collection)
    app.register_blueprint(network_adapter_collection)
    app.register_blueprint(network_interface_collection)
    app.register_blueprint(network_port_collection)
    app.register_blueprint(network_device_function_collection)
    app.register_blueprint(network_device_function)
    app.register_blueprint(network_interface)
    app.register_blueprint(network_adapter)
    app.register_blueprint(network_port)
    app.register_blueprint(session)
    app.register_blueprint(event_service)
    app.register_blueprint(subscription_collection)
    app.register_blueprint(subscription)
@app.before_request
def check_authentication():
"""Checks authentication before serving the request"""
# If authentication_mode = conf don't need auth
auth_mode = util.config["redfish"]["authentication_mode"]
if auth_mode == "conf":
g.oneview_client = util.get_oneview_client()
return None
else:
# ServiceRoot don't need auth
if request.path.rstrip("/") in {"/redfish/v1",
"/redfish",
"/redfish/v1/odata",
"/redfish/v1/$metadata"}:
g.oneview_client = util.get_oneview_client(None, True)
return None
# If authenticating we do nothing
if request.path == "/redfish/v1/SessionService/Sessions" and \
request.method == "POST":
return None
# Any other path we demand auth
x_auth_token = request.headers.get('x-auth-token')
if not x_auth_token:
abort(
status.HTTP_401_UNAUTHORIZED,
"x-auth-token header not found")
else:
try:
oneview_client = util.get_oneview_client(x_auth_token)
g.oneview_client = oneview_client
except Exception:
abort(status.HTTP_401_UNAUTHORIZED, "invalid auth token")
@app.before_request
def has_odata_version_header():
"""Deny request that specify a different OData-Version than 4.0"""
odata_version_header = request.headers.get("OData-Version")
if odata_version_header is None:
pass
elif odata_version_header != "4.0":
abort(status.HTTP_412_PRECONDITION_FAILED,
"The request specify a different OData-Version "
"header then 4.0. This server also responds "
"to requests without the OData-Version header")
@app.after_request
def set_odata_version_header(response):
"""Set OData-Version header for all responses"""
response.headers["OData-Version"] = "4.0"
return response
@app.errorhandler(status.HTTP_400_BAD_REQUEST)
def bad_request(error):
"""Creates a Bad Request Error response"""
redfish_error = RedfishError(
"PropertyValueNotInList", error.description)
redfish_error.add_extended_info(
message_id="PropertyValueNotInList",
message_args=["VALUE", "PROPERTY"],
related_properties=["PROPERTY"])
error_str = redfish_error.serialize()
return Response(
response=error_str,
status=status.HTTP_400_BAD_REQUEST,
mimetype='application/json')
@app.errorhandler(status.HTTP_401_UNAUTHORIZED)
def unauthorized_error(error):
"""Creates a Unauthorized Error response"""
redfish_error = RedfishError(
"GeneralError", error.description)
error_str = redfish_error.serialize()
return Response(
response=error_str,
status=status.HTTP_401_UNAUTHORIZED,
mimetype='application/json')
@app.errorhandler(status.HTTP_404_NOT_FOUND)
def not_found(error):
"""Creates a Not Found Error response"""
redfish_error = RedfishError(
"GeneralError", error.description)
error_str = redfish_error.serialize()
return Response(
response=error_str,
status=status.HTTP_404_NOT_FOUND,
mimetype='application/json')
@app.errorhandler(status.HTTP_500_INTERNAL_SERVER_ERROR)
def internal_server_error(error):
"""Creates an Internal Server Error response"""
redfish_error = RedfishError(
"InternalError",
"The request failed due to an internal service error. "
"The service is still operational.")
redfish_error.add_extended_info("InternalError")
error_str = redfish_error.serialize()
return Response(
response=error_str,
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
mimetype="application/json")
@app.errorhandler(status.HTTP_501_NOT_IMPLEMENTED)
def not_implemented(error):
"""Creates a Not Implemented Error response"""
redfish_error = RedfishError(
"ActionNotSupported", error.description)
redfish_error.add_extended_info(
message_id="ActionNotSupported",
message_args=["action"])
error_str = redfish_error.serialize()
return Response(
response=error_str,
status=status.HTTP_501_NOT_IMPLEMENTED,
mimetype='application/json')
if util.config['redfish']['authentication_mode'] == 'conf':
# Loading scmb connection
if scmb.check_cert_exist():
logging.info('SCMB certs already exists testing connection...')
else:
logging.info('SCMB certs not found. Generating/getting certs....')
scmb.get_cert()
logging.info('Got certs. Testing connection...')
if not scmb.is_cert_working_with_scmb():
logging.error('Failed to connect to scmb. Aborting...')
exit(1)
scmb_thread = Thread(target=scmb.listen_scmb)
scmb_thread.daemon = True
scmb_thread.start()
else:
logging.warning("Authentication mode set to session. SCMB events will "
"be disabled")
config = util.config
try:
host = config["redfish"]["redfish_host"]
# Gets the correct IP type based on the string
ipaddress.ip_address(host)
except ValueError:
logging.error("Informed IP is not valid. Check the "
"variable 'redfish_host' on your config file.")
exit(1)
try:
port = int(config["redfish"]["redfish_port"])
except Exception:
logging.exception(
"Port must be an integer number between 1 and 65536.")
exit(1)
# Checking port range
if port < 1 or port > 65536:
logging.error("Port must be an integer number between 1 and 65536.")
exit(1)
if config["ssl"]["SSLType"] in ("self-signed", "adhoc"):
logging.warning("Server is starting with a self-signed certificate.")
if config["ssl"]["SSLType"] == "disabled":
logging.warning(
"Server is starting in HTTP mode. This is an insecure mode. "
"Running the server with HTTPS enabled is highly recommended.")
ssl_type = config["ssl"]["SSLType"]
# Check SSLType:
if ssl_type not in ('disabled', 'adhoc', 'certs', 'self-signed'):
logging.error(
"Invalid SSL type: {}. Must be one of: disabled, adhoc, "
"self-signed or certs".
format(ssl_type))
exit(1)
if ssl_type == 'disabled':
app.run(host=host, port=port, debug=True)
elif ssl_type == 'adhoc':
app.run(host=host, port=port, debug=True, ssl_context="adhoc")
else:
# We should use certs file provided by the user
ssl_cert_file = config["ssl"]["SSLCertFile"]
ssl_key_file = config["ssl"]["SSLKeyFile"]
# Generating cert files if they don't exists
if ssl_type == "self-signed":
if not os.path.exists(ssl_cert_file) and not \
os.path.exists(ssl_key_file):
logging.warning("Generating self-signed certs")
# Generate certificates
util.generate_certificate("certs", "self-signed", 2048)
else:
logging.warning("Using existing self-signed certs")
if ssl_cert_file == "" or ssl_key_file == "":
logging.error(
"SSL type: is 'cert' but one of the files are missing on"
"the config file. SSLCertFile: {}, SSLKeyFile: {}.".
format(ssl_cert_file, ssl_key_file))
ssl_context = (ssl_cert_file, ssl_key_file)
app.run(host=host, port=port, debug=True, ssl_context=ssl_context)
|
import tkinter as tk
from browserHandler import browserHandler
from GUI import MainMenu
from GUI import helper
class Application:
    def __init__(self):
        """
        Combines the created modules and create the main application
        :return: None
        """
        web_handler = browserHandler.WebHandler()  # open browser

        # create and configure root window
        root = tk.Tk()
        root.title('Float chat menu')
        root.attributes('-topmost', True)  # keep the menu above other windows
        # Intercept the window-manager close button so we can confirm quitting.
        root.protocol('WM_DELETE_WINDOW', self.at_close)

        # create and configure main menu
        main_menu = MainMenu.MainMenu(root)
        main_menu.pack()
        main_menu.align_window()
        main_menu.connect_browser_handler(web_handler)

        root.resizable(False, False)  # root is not resizable
        self.root = root  # to facilitate closing
        root.mainloop()  # blocks here until the window is destroyed

    def at_close(self):
        """
        Called when close button is pressed
        :return: None
        """
        if helper.quit_popup():  # if quit is confirmed
            self.root.destroy()


if __name__ == '__main__':
    Application()
|
#!/usr/bin/env python3
import argparse
import asyncio
import math
import os
import sys
import traceback as tb
import warnings
from pathlib import Path
import aiohttp
from aiohttp.web import Application, HTTPBadGateway, Response, WebSocketResponse, WSMsgType, hdrs, json_response
from devtools import VERSION
# Optional ujson/uvloop acceleration, both enabled via environment variables.
WITH_UJSON = os.environ.get('DTP_UJSON', '').lower() == 'true'
if WITH_UJSON:
    import ujson as json
else:
    import json

WITH_UVLOOP = os.environ.get('DTP_UVLOOP', '').lower() == 'true'
if WITH_UVLOOP:
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

# https://pythonhosted.org/PyInstaller/runtime-information.html
# NOTE(review): CHROME_WRAPPER_PATH is only defined when NOT running from a
# PyInstaller bundle; any frozen-mode use of this name would raise NameError
# -- confirm nothing references it when frozen.
if not getattr(sys, 'frozen', False):
    CHROME_WRAPPER_PATH = str(Path(__file__, '../chrome-wrapper.sh').resolve())
async def the_handler(request):
    """Route a request to the WebSocket pump or the plain HTTP proxy."""
    probe = WebSocketResponse()
    if probe.can_prepare(request):
        return await ws_handler(request)
    return await proxy_handler(request)
async def ws_handler(request):
    """Accept a DevTools WebSocket client and start the browser-side pump."""
    app = request.app
    tab_id = request.path_qs.split('/')[-1]

    # Lazily create the per-tab bookkeeping entry.
    if app['tabs'].get(tab_id) is None:
        app['tabs'][tab_id] = {}

    # https://aiohttp.readthedocs.io/en/v1.0.0/faq.html#how-to-receive-an-incoming-events-from-different-sources-in-parallel
    browser_task = app.loop.create_task(ws_browser_handler(request))
    app['tasks'].append(browser_task)

    return await ws_client_handler(request)
async def ws_client_handler(request):
    """Pump messages from one DevTools client into the browser's WebSocket.

    Each client gets a numeric id; outgoing message ids are re-encoded
    (client_id packed into the high bits) so ws_browser_handler can route
    responses back to the right client.
    """
    app = request.app
    path_qs = request.path_qs
    tab_id = path_qs.split('/')[-1]
    url = f'ws://{app["chrome_host"]}:{app["chrome_port"]}{path_qs}'
    encode_id = app['f']['encode_id']
    # Client ids are never reused: the id is the running count of clients.
    client_id = len(app['clients'])
    log_prefix = f'[CLIENT {client_id}]'
    log_msg = app['f']['print']

    ws_client = WebSocketResponse()
    await ws_client.prepare(request)

    # Reject clients beyond the id-encoding capacity (see encode_decode_id).
    if client_id >= app['max_clients']:
        log_msg(log_prefix, 'CONNECTION FAILED')
        return ws_client

    app['clients'][ws_client] = {
        'id': client_id,
        'tab_id': tab_id,
        'subscriptions': set(),  # TODO: Move subscriptions to separate entity
    }
    log_msg(log_prefix, 'CONNECTED')

    # Open (or reopen) the shared per-tab connection to the browser.
    if app['tabs'][tab_id].get('ws') is None or app['tabs'][tab_id]['ws'].closed:
        session = aiohttp.ClientSession(loop=app.loop)
        app['sessions'].append(session)
        try:
            app['tabs'][tab_id]['ws'] = await session.ws_connect(url)
        except aiohttp.WSServerHandshakeError:
            log_msg(log_prefix, f'CONNECTION ERROR: {tab_id}')
            return ws_client

    async for msg in ws_client:
        if msg.type == WSMsgType.TEXT:
            if app['tabs'][tab_id]['ws'].closed:
                log_msg(log_prefix, 'RECONNECTED')
                break
            data = msg.json(loads=json.loads)
            # Stamp the client id into the request id for response routing.
            data['id'] = encode_id(client_id, data['id'])
            log_msg(log_prefix, '>>', data)
            # Track Domain.enable/.disable so broadcast events are only
            # forwarded to clients subscribed to that domain.
            if data.get('method', '').endswith('.enable'):
                domain = data['method'].split('.')[0]
                app['clients'][ws_client]['subscriptions'].add(domain)
            elif data.get('method', '').endswith('.disable'):
                domain = data['method'].split('.')[0]
                if domain in app['clients'][ws_client]['subscriptions']:
                    app['clients'][ws_client]['subscriptions'].remove(domain)
            # NOTE(review): send_json is not awaited -- valid for the old
            # aiohttp API this file targets (app.make_handler/app.loop era);
            # newer aiohttp makes send_json a coroutine.
            app['tabs'][tab_id]['ws'].send_json(data, dumps=json.dumps)
        else:
            log_msg(log_prefix, 'DISCONNECTED')
    return ws_client
async def ws_browser_handler(request):
    """Pump messages from the browser's WebSocket back to DevTools clients.

    Waits up to 10s for ws_client_handler to establish the per-tab browser
    connection, then fans broadcast events out to subscribed clients and
    routes id'd responses back via the client id packed into the message id.
    """
    log_prefix = '<<'
    app = request.app
    tab_id = request.path_qs.split('/')[-1]
    decode_id = app['f']['decode_id']
    log_msg = app['f']['print']

    # Poll until the browser-side socket appears (opened by
    # ws_client_handler) or give up after `timeout` seconds.
    timeout = 10
    interval = 0.1
    for _ in range(math.ceil(timeout / interval)):
        if app['tabs'][tab_id].get('ws') is not None and not app['tabs'][tab_id]['ws'].closed:
            log_msg(f'[BROWSER {tab_id}]', 'CONNECTED')
            break
        await asyncio.sleep(interval)
    else:
        log_msg(f'[BROWSER {tab_id}]', 'DISCONNECTED')
        return

    async for msg in app['tabs'][tab_id]['ws']:
        if msg.type == WSMsgType.TEXT:
            data = msg.json(loads=json.loads)
            if data.get('id') is None:
                # No id -> broadcast event: forward to every open client of
                # this tab that subscribed to the event's domain.
                clients = {
                    k: v for k, v in app['clients'].items()
                    if v.get('tab_id') == tab_id and data.get('method', '').split('.')[0] in v['subscriptions']
                }
                for client in clients.keys():
                    if not client.closed:
                        client_id = app['clients'][client]['id']
                        log_msg(f'[CLIENT {client_id}]', log_prefix, msg.data)
                        client.send_str(msg.data)
            else:
                # Response to a specific request: unpack the client id that
                # ws_client_handler stamped into the id, restore the original
                # request id, and deliver to that client's socket.
                client_id, request_id = decode_id(data['id'])
                log_msg(f'[CLIENT {client_id}]', log_prefix, data)
                data['id'] = request_id
                ws = next(ws for ws, client in app['clients'].items() if client['id'] == client_id)
                ws.send_json(data, dumps=json.dumps)
        else:
            log_msg(f'[BROWSER {tab_id}]', 'DISCONNECTED')
            return
def update_tab(tab, host, port, log_msg):
    """Return a copy of *tab* whose DevTools URLs point at this proxy."""
    updated = dict(tab)  # It is safe enough — all values are strings
    tab_id = updated.get('id')
    if tab_id is None:
        log_msg('[ERROR]', f'Got a tab without id (which is improbable): {updated}')
        return updated  # Maybe it should raise an error?
    devtools_url = f'{host}:{port}/devtools/page/{tab_id}'
    updated['webSocketDebuggerUrl'] = f'ws://{devtools_url}'
    updated['devtoolsFrontendUrl'] = f'/devtools/inspector.html?ws={devtools_url}'
    return updated
async def proxy_handler(request):
    """Forward a plain HTTP request to Chrome and relay the response.

    For the /json tab-listing endpoints the DevTools URLs in the payload
    are rewritten (via update_tab) to point at this proxy instead of Chrome.
    """
    app = request.app
    method = request.method
    path_qs = request.path_qs
    session = aiohttp.ClientSession(loop=request.app.loop)
    url = f'http://{app["chrome_host"]}:{app["chrome_port"]}{path_qs}'
    log_msg = app['f']['print']
    log_msg(f'[HTTP {method}] {path_qs}')
    try:
        response = await session.request(method, url)
        headers = response.headers.copy()
        if request.path in ('/json', '/json/list', '/json/new'):
            data = await response.json(loads=json.loads)
            proxy_host = request.url.host
            proxy_port = request.url.port
            if isinstance(data, list):
                data = [update_tab(tab, proxy_host, proxy_port, log_msg) for tab in data]
            elif isinstance(data, dict):
                data = update_tab(data, proxy_host, proxy_port, log_msg)
            else:
                log_msg('[WARN]', f'JSON data neither list nor dict: {data}')
            # Re-serialize and fix Content-Length to match the rewritten body.
            body, text = None, json.dumps(data)
            headers[hdrs.CONTENT_LENGTH] = str(len(text))
        else:
            body, text = await response.read(), None
        return Response(
            body=body,
            text=text,
            status=response.status,
            reason=response.reason,
            headers=headers,
        )
    except aiohttp.ClientError as exc:
        return HTTPBadGateway(text=str(exc))
    finally:
        # NOTE(review): close() is not awaited -- fine for the old aiohttp
        # API this file targets, where ClientSession.close() was synchronous;
        # newer aiohttp makes it a coroutine.
        session.close()
async def status_handler(request):
    """Return a JSON snapshot of the proxy's public configuration."""
    exposed = {
        'chrome_host',
        'chrome_port',
        'debug',
        'internal',
        'max_clients',
        'proxy_hosts',
        'proxy_ports',
        'version',
    }
    payload = {key: value for key, value in request.app.items() if key in exposed}
    return json_response(data=payload, dumps=json.dumps)
async def init(loop, args):
    """Create the aiohttp application and bind one server per proxy port."""
    app = Application(debug=args['debug'])
    app.update(args)  # expose the parsed settings as app[...] entries
    log_msg = app['f']['print']

    app['clients'] = {}  # WebSocketResponse -> {'id', 'tab_id', 'subscriptions'}
    app['tabs'] = {}     # tab id -> {'ws': browser-side WebSocket}
    # TODO: Move session and task handling to proper places
    app['sessions'] = []
    app['tasks'] = []

    # Everything except /status.json goes to the WebSocket/HTTP dispatcher.
    app.router.add_route('*', '/{path:(?!status.json).*}', the_handler)
    app.router.add_route('*', '/status.json', status_handler)

    handler = app.make_handler()
    srvs = [await loop.create_server(handler, app['proxy_hosts'], proxy_port) for proxy_port in app['proxy_ports']]
    log_msg(
        f'DevTools Proxy started at {app["proxy_hosts"]}:{app["proxy_ports"]}\n'
        f'Use --remote-debugging-port={app["chrome_port"]} --remote-debugging-address={app["chrome_host"]} for Chrome',
    )
    return app, srvs, handler
async def finish(app, srvs, handler):
    """Gracefully close sockets, sessions, tasks and servers on shutdown."""
    # Close every client WebSocket plus each tab's browser-side WebSocket.
    for ws in list(app['clients'].keys()) + [tab['ws'] for tab in app['tabs'].values() if tab.get('ws') is not None]:
        if not ws.closed:
            await ws.close()
    for session in app['sessions']:
        if not session.closed:
            await session.close()
    for task in app['tasks']:
        task.cancel()
    # Give cancelled tasks a moment to unwind before tearing servers down.
    await asyncio.sleep(0.1)
    for srv in srvs:
        srv.close()
    await handler.shutdown()
    for srv in srvs:
        await srv.wait_closed()
    app['f']['close_log']()
def encode_decode_id(max_clients):
    """Build a pair of functions packing (client_id, request_id) into 31 bits.

    The high bits carry the client id, the low bits the request id.  Returns
    (encode_id, decode_id, effective_max_clients), where the client count is
    rounded up to the next power of two.
    """
    total_bits = 31
    client_bits = math.ceil(math.log2(max_clients))
    request_bits = total_bits - client_bits
    effective_max_clients = 2 ** client_bits
    max_request_id = 2 ** request_bits - 1

    def encode_id(client_id, request_id):
        if request_id > max_request_id:
            raise OverflowError
        return (client_id << request_bits) | request_id

    def decode_id(encoded_id):
        return encoded_id >> request_bits, encoded_id & max_request_id

    return encode_id, decode_id, effective_max_clients
def default_or_flatten_and_uniq(arg, default):
    """Flatten a nested ``action='append'`` + ``nargs='+'`` argument and dedupe it.

    Returns *default* when the option was never given.  Deduplication goes
    through a set, so the result order is unspecified (matching the original
    behaviour).
    """
    if arg is None:
        return default
    flattened = {item for group in arg for item in group}
    return list(flattened)
def main():
    """Parse CLI arguments, assemble the shared app config dict and run the
    proxy event loop until interrupted."""
    parser = argparse.ArgumentParser(
        prog='devtools-proxy',
        description='DevTools Proxy'
    )
    default_host = ['127.0.0.1']
    parser.add_argument(
        '--host',
        type=str, nargs='+', action='append',
        help=f'Hosts to serve on (default: {default_host})',
    )
    default_port = [9222]
    parser.add_argument(
        '--port',
        type=int, nargs='+', action='append',
        help=f'Ports to serve on (default: {default_port})',
    )
    parser.add_argument(
        '--chrome-host',
        type=str, default='127.0.0.1',
        help=('Host on which Chrome is running, '
              'it corresponds with --remote-debugging-address Chrome argument (default: %(default)r)'),
    )
    parser.add_argument(
        '--chrome-port',
        type=int, default=12222,
        help=('Port which Chrome remote debugger is listening, '
              'it corresponds with --remote-debugging-port Chrome argument (default: %(default)r)'),
    )
    parser.add_argument(
        '--max-clients',
        type=int, default=8,
        help='Number of clients which proxy can handle during life cycle (default: %(default)r)',
    )
    parser.add_argument(
        '--log',
        default=sys.stdout, type=argparse.FileType('w'),
        help='Write logs to file',
    )
    parser.add_argument(
        '--version',
        action='version',
        version=VERSION,
        help='Print DevTools Proxy version',
    )
    parser.add_argument(
        '--debug',
        action='store_true', default=False,
        help='Turn on debug mode (default: %(default)r)',
    )
    args = parser.parse_args()

    # max_clients is rounded up to a power of two by the id-packing scheme.
    encode_id, decode_id, max_clients = encode_decode_id(args.max_clients)
    args.port = default_or_flatten_and_uniq(args.port, default_port)
    args.host = default_or_flatten_and_uniq(args.host, default_host)

    arguments = {
        'f': {
            'encode_id': encode_id,
            'decode_id': decode_id,
            # All logging funnels through this writer (stdout or --log file).
            'print': lambda *a: args.log.write(' '.join(str(v) for v in a) + '\n'),
            'close_log': lambda: args.log.close(),
        },
        'max_clients': max_clients,
        'debug': args.debug,
        'proxy_hosts': args.host,
        'proxy_ports': args.port,
        'chrome_host': args.chrome_host,
        'chrome_port': args.chrome_port,
        'internal': {
            'ujson': WITH_UJSON,
            'uvloop': WITH_UVLOOP,
        },
        'version': VERSION,
    }

    # Route uncaught exceptions (and, in debug mode, warnings) to the log.
    def _excepthook(exctype, value, traceback):
        return arguments['f']['print'](*tb.format_exception(exctype, value, traceback))
    sys.excepthook = _excepthook

    loop = asyncio.get_event_loop()
    if args.debug:
        def _showwarning(message, category, filename, lineno, file=None, line=None):
            return arguments['f']['print'](warnings.formatwarning(message, category, filename, lineno, line))
        warnings.showwarning = _showwarning
        warnings.simplefilter("always")
        loop.set_debug(True)

    application, srvs, handler = loop.run_until_complete(init(loop, arguments))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C triggers an orderly shutdown of sockets, tasks and servers.
        loop.run_until_complete(finish(application, srvs, handler))


if __name__ == '__main__':
    main()
|
import serial
from datetime import datetime, timedelta
from random import uniform
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Float, DateTime, String
Base = declarative_base()
class Data(Base):
    """A single timestamped sensor reading for a plant (SQLAlchemy model)."""

    __tablename__ = 'data'

    id = Column(Integer, primary_key=True)
    plant_name = Column(String)   # name of the monitored plant
    value = Column(Float)         # measured value; units not visible here -- TODO confirm
    timestamp = Column(DateTime)  # when the reading was taken

    def __init__(self, name, value, timestamp):
        self.plant_name = name
        self.value = value
        self.timestamp = timestamp
|
from flask import Flask
from flask import render_template
from flask import session
app = Flask(__name__)
app.config['SECRET_KEY'] = 'top secret'
@app.route('/')
def index():
    """Render the index page with a per-session visit counter."""
    # Initialise to 1 on first visit, otherwise increment.
    session['count'] = session.get('count', 0) + 1
    return render_template('index12.html', count=session['count'])


if __name__ == '__main__':
    app.run(debug=True)
|
# Generated by Django 2.2.2 on 2019-07-31 09:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow Organization.logo to be blank/null (auto-generated migration)."""

    dependencies = [
        ('core', '0064_auto_20190731_0950'),
    ]

    operations = [
        migrations.AlterField(
            model_name='organization',
            name='logo',
            field=models.ImageField(blank=True, null=True, upload_to='organizations'),
        ),
    ]
|
class Solution(object):
    def majorityElement(self, nums):
        """
        Boyer-Moore voting generalised to two candidates: returns the
        elements occurring more than len(nums) // 3 times (at most two).

        :type nums: List[int]
        :rtype: List[int]
        """
        first = None   # (value, vote count) or None
        second = None

        for value in nums:
            if first and value == first[0]:
                first = (value, first[1] + 1)
            elif second and value == second[0]:
                second = (value, second[1] + 1)
            elif first is None:
                first = (value, 1)
            elif second is None:
                second = (value, 1)
            else:
                # value matches neither candidate: both lose a vote, and a
                # candidate whose votes reach zero is dropped.
                first = (first[0], first[1] - 1)
                if first[1] == 0:
                    first = None
                second = (second[0], second[1] - 1)
                if second[1] == 0:
                    second = None

        # Verification pass: only candidates that really exceed n//3 survive.
        threshold = len(nums) // 3
        candidates = (first[0] if first else None, second[0] if second else None)
        return [value for value in candidates if nums.count(value) > threshold]
# Fix: Python-2 print statement is a SyntaxError on Python 3; the call form
# behaves identically on both interpreters for a single argument.
print(Solution().majorityElement([6,5,5]))
import plotly.plotly as py
import plotly.graph_objs as go
# plotly
# import plotly
# plotly.tools.set_credentials_file(
# username='houterm', api_key='putYaOwnKey')
def plot(sphero, step_count):
    """Upload the sphero controller's error traces to Plotly (once).

    Fires only when step_count reaches 40000.  Expects `sphero` to carry
    plot_time_list plus the filtered and unfiltered x/y error lists
    collected during the run (exact shapes/units not visible here --
    TODO confirm against the caller).
    """
    if step_count == 40000:
        # if step_count == 1000:
        # One scatter trace per error series, all sharing the time axis.
        x_error_behaviour = go.Scatter(
            x=sphero.plot_time_list,
            y=sphero.plot_x_error_list,
            name='x direction'
        )
        y_error_behaviour = go.Scatter(
            x=sphero.plot_time_list,
            y=sphero.plot_y_error_list,
            name='y direction'
        )
        x_unfiltered_error_behaviour = go.Scatter(
            x=sphero.plot_time_list,
            y=sphero.plot_x_unfiltered_error_list,
            name='unfiltered x direction'
        )
        y_unfiltered_error_behaviour = go.Scatter(
            x=sphero.plot_time_list,
            y=sphero.plot_y_unfiltered_error_list,
            name='unfiltered y direction'
        )
        layout = go.Layout(
            title=go.layout.Title(
                # text='Error Behaviour 3: Maze',
                xref='paper',
                x=0
            ),
            xaxis=go.layout.XAxis(
                title=go.layout.xaxis.Title(
                    text='Time (s)',
                    font=dict(
                        family='Courier New, monospace',
                        size=28,
                        color='#000'
                    )
                ),
                # showticklabels=False
            ),
            yaxis=go.layout.YAxis(
                title=go.layout.yaxis.Title(
                    text='Error (pixels)',
                    font=dict(
                        family='Courier New, monospace',
                        size=28,
                        color='#000'
                    )
                ),
                # showticklabels=False
            ),
            # Horizontal legend placed above/right of the plot area.
            legend=dict(
                # x=0,
                # y=0,
                x= 1.1,
                y= 1.2,
                traceorder='normal',
                font=dict(
                    family='sans-serif',
                    size=20,
                    color='#000'
                ),
                orientation="h",
                # bgcolor='#E2E2E2',
                # bordercolor='#FFFFFF',
                # borderwidth=2
            )
        )
        # print(sphero.plot_time_list)
        # print(sphero.plot_x_error_list)
        data = [x_error_behaviour, y_error_behaviour, x_unfiltered_error_behaviour, y_unfiltered_error_behaviour]
        fig = go.Figure(data=data, layout=layout)
        # py.iplot(fig, filename='styling-names')
        # py.plot(data, filename='error behaviour', auto_open=True)
        # Uploads the figure to the configured Plotly account and opens it.
        py.plot(fig, filename='unfiltered_error', auto_open=True)
|
import shutil
import errno
import os
import time
import codecs
from django.http import HttpResponse
from django.utils.encoding import smart_str
from katana.wui.core.core_utils.app_info_class import AppInformation
from katana.utils import date_time_stamp_utils as dtutils
def readlines_from_file(path, start=None, end=None):
    """
    This function uses the readlines() method to read a file.
    A subsection of the file can be returned by giving the start and end parameters.
    Args:
        path: Absolute path to the file
        start: String after which the file should be read
        end: String at which file reading should be stopped
    Returns:
        data: list of lines read from the file, or None if the file
        could not be opened
    """
    try:
        with open(path, "r") as f:
            lines = f.readlines()
    except IOError:
        print("--Error-- {0} does not exist".format(path))
        return None

    # A subsection is only extracted when BOTH markers are supplied.
    if start is None or end is None:
        return lines

    section = []
    capturing = False
    for line in lines:
        if capturing:
            # The end marker line itself is excluded from the result.
            if line == end:
                break
            section.append(line)
        elif line.startswith(start):
            # The start marker line itself is also excluded.
            capturing = True
    return section
def copy_dir(src, dest):
    """Recursively copy *src* to *dest*, falling back to a plain file copy.

    Returns True on success, False if the copy raised an unexpected OSError.
    """
    try:
        shutil.copytree(src, dest)
        return True
    except OSError as exc:
        # src was a plain file rather than a directory: copy it directly.
        if exc.errno == errno.ENOTDIR:
            shutil.copy(src, dest)
            return True
        print("-- An Error Occurred -- {0}".format(exc))
        return False
def write_to_file(path, data):
    """Overwrite *path* with *data*; return True on success, False on error."""
    try:
        with open(path, 'w') as fh:
            fh.write(data)
        return True
    except Exception as exc:
        print("-- An Error Occurred -- {0}".format(exc))
        return False
def get_new_filepath(filename, path, ext='.log'):
    """Build path/filename+ext; de-duplicate with a timestamp if it exists."""
    candidate = path + os.sep + filename + ext
    if os.path.isfile(candidate):
        candidate = add_time_date(candidate)
    return candidate
def add_time_date(path):
    """ add time and date to a path (file/dir)"""
    if not os.path.isfile(path):
        return path
    # The sleep guarantees two consecutive calls cannot collide on the stamp.
    time.sleep(1)
    stamp = dtutils.get_current_datetime_stamp(time_format="%y-%m-%d_%H-%M-%S-%f")
    root, extension = os.path.splitext(path)
    return root + "_" + stamp + extension
def download(filepath, content_type):
    """
    Make a file in downloadable HttpResponse
    Args:
        filepath: absolute path or relative path of the file from __file__ directory
        content_type: application/pdf, application/json etc
    Return: http response, with the file as an attachment
    """
    # NOTE(review): the handle is not closed here -- presumably Django
    # consumes/owns it when building the response; confirm the response
    # construction reads the file eagerly, otherwise this leaks a handle.
    f = open(filepath, "rb")
    response = HttpResponse(f, content_type=content_type)
    # smart_str guards the header value against non-ASCII filenames.
    response['Content-Disposition'] = 'attachment; filename={}'.format(smart_str(filepath.split('/')[-1]))
    return response
|
"""Constants for Mosoblgaz module"""
CONF_CONTRACTS = "contracts"
CONF_METER_NAME = "meter_name"
CONF_CONTRACT_NAME = "contract_name"
CONF_METERS = "meters"
CONF_INVOICES = "invoices"
CONF_INVOICE_NAME = "invoice_name"
CONF_INVERT_INVOICES = "invert_invoices"
CONF_PRIVACY_LOGGING = "privacy_logging"
DOMAIN = "mosoblgaz"
DATA_CONFIG = DOMAIN + "_config"
DATA_API_OBJECTS = DOMAIN + "_api_objects"
DATA_ENTITIES = DOMAIN + "_entities"
DATA_UPDATERS = DOMAIN + "_updaters"
DATA_OPTIONS_LISTENERS = DOMAIN + "_options_listeners"
DEFAULT_SCAN_INTERVAL = 60 * 60 # 1 hour
DEFAULT_TIMEOUT = 30 # 30 seconds
DEFAULT_CONTRACT_NAME_FORMAT = "MOG Contract {contract_code}"
DEFAULT_METER_NAME_FORMAT = "MOG Meter {meter_code}"
DEFAULT_INVOICE_NAME_FORMAT = "MOG {group} Invoice {contract_code}"
DEFAULT_INVERT_INVOICES = False
DEFAULT_ADD_INVOICES = True
DEFAULT_ADD_METERS = True
DEFAULT_ADD_CONTRACTS = True
DEFAULT_PRIVACY_LOGGING = False
ATTRIBUTION = "Data provided by Mosoblgaz"
RUB_CURRENCY = "руб."
ENTITIES_CONTRACT = "contract"
ENTITIES_METER_TARIFF = "meter_tariff"
ATTR_INDICATIONS = "indications"
ATTR_IGNORE_PERIOD = "ignore_period"
# Common attributes
ATTR_CONTRACT_CODE = "contract_code"
ATTR_METER_CODE = "meter_code"
# Meter attributes
ATTR_SERIAL = "serial"
# Contract attributes
ATTR_ADDRESS = "address"
ATTR_PERSON = "person"
ATTR_DEPARTMENT = "department"
ATTR_COLLECTED_AT = "collected_at"
ATTR_LAST_VALUE = "last_value"
ATTR_LAST_COST = "last_cost"
ATTR_LAST_CHARGED = "last_charged"
ATTR_PREVIOUS_VALUE = "previous_value"
ATTR_INVOICE_GROUP = "invoice_group"
ATTR_PERIOD = "period"
ATTR_TOTAL = "total"
ATTR_PAID = "paid"
ATTR_BALANCE = "balance"
ATTR_PAYMENTS_COUNT = "payments_count"
ATTR_PREVIOUS_PERIOD = "previous_period"
ATTR_PREVIOUS_TOTAL = "previous_total"
ATTR_PREVIOUS_PAID = "previous_paid"
ATTR_PREVIOUS_BALANCE = "previous_balance"
ATTR_PREVIOUS_PAYMENTS_COUNT = "previous_payments_count"
DEFAULT_MAX_INDICATIONS = 3
|
import numpy as np
import torch
def record(t, X, S, record_times, dt):
    """Sample simulated trajectories at the requested recording times.

    t: (M, num_t) tensor of simulation times per individual.
    X: (sims, M, num_t, N) tensor of state trajectories.
    S: (sims, M, num_t) tensor of scalar trajectories.
    record_times: per-individual sequence of times to sample at; the first
        entry is assumed to coincide with the start of the simulation.
    dt: simulation time step used to bracket each recording time.

    Returns a pair of lists (one entry per individual m): the sampled X
    values of shape (sims, len(record_times[m]), N) and the sampled S
    values of shape (sims, len(record_times[m])).
    """
    n_sims, n_individuals, n_steps, n_state = X.shape
    sampled_X = []
    sampled_S = []
    for m in range(n_individuals):
        times_m = record_times[m]
        xs = torch.zeros(n_sims, len(times_m), n_state)
        ss = torch.zeros(n_sims, len(times_m))
        # The first recorded slot is always the initial state.
        xs[:, 0, :] = X[:, m, 0]
        ss[:, 0] = S[:, m, 0]
        next_idx = 1
        for step in range(n_steps):
            if next_idx < len(times_m):
                target = times_m[next_idx]
                # Record at the first simulation step whose time reaches the
                # target within one dt window.
                if t[m, step] >= target > t[m, step] - dt:
                    xs[:, next_idx, :] = X[:, m, step, :]
                    ss[:, next_idx] = S[:, m, step]
                    next_idx = next_idx + 1
        sampled_X.append(xs)
        sampled_S.append(ss)
    # Lists of recorded individuals.
    return sampled_X, sampled_S
|
# -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.exceptions import ValidationError
from CadVlan.Util.utility import is_valid_cn, is_valid_command, is_valid_phone
def validate_cn(value):
    """Django validator: reject CN values with characters outside alphanumerics, '_' and '-'."""
    if is_valid_cn(value):
        return
    raise ValidationError(
        u"Este campo permite apenas caracteres alfanuméricos e os caracteres '_' e '-'.")
def validate_commands(lst):
    """Django validator: check every command name in *lst*, raising on the first invalid one."""
    for command in lst:
        if is_valid_command(command):
            continue
        raise ValidationError(
            u"O nome dos comandos permite apenas caracteres alfanuméricos sem acento e alguns símbolos especiais.")
def validate_phone(value):
    """Django validator: reject phone values with characters other than digits, parentheses and '-'."""
    if is_valid_phone(value):
        return
    raise ValidationError(
        u"Este campo permite apenas caracteres numéricos, parênteses e '-'.")
|
# -*- encoding: utf-8 -*-
from openerp.report import report_sxw
import datetime
import logging
class diario_reporte(report_sxw.rml_parse):
    """RML report parser for the Guatemalan journal/ledger ('diario') report.

    Exposes helper callables (``lineas``, ``folio``, ``saldo_inicial``) to the
    RML template through ``localcontext``.
    """
    def __init__(self, cr, uid, name, context):
        super(diario_reporte, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'datetime': datetime,
            'lineas': self.lineas,
            'folio': self.folio,
            'saldo_inicial': self.saldo_inicial,
        }),
        # NOTE(review): the trailing comma above makes the statement a
        # throwaway 1-tuple — harmless at runtime, but likely a typo.
        self.folioActual = -1  # -1 means the folio counter is not initialised yet
        self.context = context
        self.cr = cr
        self.uid = uid
    def folio(self, datos):
        """Return the next folio (sheet) number.

        Seeds the counter from ``datos[0].folio_inicial`` on the first call
        (falling back to 1 when that value is not positive), then increments
        on each subsequent call.
        """
        if self.folioActual < 0:
            if datos[0].folio_inicial <= 0:
                self.folioActual = 1
            else:
                self.folioActual = datos[0].folio_inicial
        else:
            self.folioActual += 1
        return self.folioActual
    def saldo_inicial(self, datos, cuenta_id):
        """Compute the opening balance (debit - credit) of *cuenta_id* at
        ``datos.fecha_desde``.

        Accounts flagged ``include_initial_balance`` accumulate from the
        beginning of time; other accounts (P&L-style) only accumulate from
        January 1st of the starting year.
        """
        fecha_inicial = datos.fecha_desde
        cuenta = self.pool.get('account.account').browse(self.cr, self.uid, cuenta_id, self.context)
        if cuenta.user_type_id.include_initial_balance:
            self.cr.execute("\
            select coalesce(sum(l.debit) - sum(l.credit), 0) as saldo \
            from account_move_line l join account_move m on(l.move_id = m.id) \
            join account_account a on(l.account_id = a.id) \
            where m.state = 'posted' and \
            a.id = %s and \
            l.date < %s", (cuenta_id, fecha_inicial))
        else:
            # Only movements within the same calendar year count.
            anio = fecha_inicial.split('-')[0]
            self.cr.execute("\
            select coalesce(sum(l.debit) - sum(l.credit), 0) as saldo \
            from account_move_line l join account_move m on(l.move_id = m.id) \
            join account_account a on(l.account_id = a.id) \
            where m.state = 'posted' and \
            a.id = %s and \
            %s <= l.date and l.date < %s", (cuenta_id, anio+'-01-01', fecha_inicial))
        result = self.cr.dictfetchall()
        return result[0]['saldo']
    def lineas(self, datos):
        """Fetch posted journal items for the selected journals and date range,
        grouped by date (journal report) or by account (ledger report).

        Returns a list of group dicts sorted by the grouping key, each with
        ``llave``, ``lineas_detalladas`` and running debit/credit totals.
        """
        # NOTE(review): journal ids are interpolated into the SQL via join();
        # they come from browse records (integers), so this is not
        # user-controlled, but parameterizing would be safer.
        diarios = [str(x.id) for x in datos.diarios_id]
        self.cr.execute("\
            select j.name as descr, j.code as doc, l.date, a.code, a.name, a.id as account_id, a.code||' '||a.name as full_name, sum(l.debit) as debit, sum(l.credit) as credit \
            from account_move_line l join account_move m on(l.move_id = m.id) \
            join account_account a on(l.account_id = a.id) \
            join account_journal j on(l.journal_id = j.id) \
            where m.state = 'posted' and l.journal_id in ("+','.join(diarios)+") and l.date between %s and %s \
            group by j.name, j.code, l.date, a.code, a.id, a.name order by l.date, a.code", (datos.fecha_desde, datos.fecha_hasta))
        lineas = self.cr.dictfetchall()
        lineas_agrupadas = {}
        llave = 'date'
        if datos.tipo == 'mayor':
            llave = 'full_name'
        for l in lineas:
            if l[llave] not in lineas_agrupadas:
                lineas_agrupadas[l[llave]] = {'llave': l[llave], 'lineas_detalladas': [], 'total_debe': 0, 'total_haber': 0}
            lineas_agrupadas[l[llave]]['lineas_detalladas'].append(l)
        # Accumulate debit/credit totals per group.
        for la in lineas_agrupadas.values():
            for l in la['lineas_detalladas']:
                la['total_debe'] += l['debit']
                la['total_haber'] += l['credit']
        return sorted(lineas_agrupadas.values(), key=lambda x: x['llave'])
report_sxw.report_sxw('report.diario_reporte', 'l10n_gt_extra.asistente_diario_reporte', 'addons/l10n_gt_extra/report/diario_reporte.rml', parser=diario_reporte, header=False)
|
"""General utilites."""
from . import marshmallow, specs, sqla
def get_all_subclasses(cls):
    """Recursively return all subclasses, depth-first with each direct
    child immediately followed by its own descendants."""
    return [
        descendant
        for child in cls.__subclasses__()
        for descendant in (child, *get_all_subclasses(child))
    ]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests
import xml2ass
import os
import io
import shutil
def Danmuku(cid, path='D:/ass'):
    """Download the danmaku (bullet-comment) XML for *cid* and convert it to ASS.

    cid: numeric cid of the Bilibili video (part).
    path: working directory; recreated from scratch on every call.

    Side effects: deletes and recreates *path*, writes ``<cid>.ass`` into it,
    and removes the intermediate ``<cid>.xml``.
    Raises whatever ``requests`` raises on network failure.

    Note: a video link of the form https://www.bilibili.com/video/BV*******
    (no trailing slash) can be resolved to its cid(s) via the
    web-interface/getPageList APIs; this function expects the cid directly.
    """
    # Start from an empty working directory (single makedirs instead of the
    # previous duplicated call in both branches).
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)
    headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
    danmuku = requests.get('https://comment.bilibili.com/' + str(cid) + '.xml', headers=headers)
    # The comment endpoint serves UTF-8 XML.
    danmuku.encoding = 'utf-8'
    xml_path = f'{path}/{cid}.xml'
    ass_path = f'{path}/{cid}.ass'
    # The context manager closes the file; no explicit close() needed.
    with io.open(xml_path, 'w', encoding='utf-8') as file:
        file.write(danmuku.text)
    xml2ass.Danmaku2ASS(xml_path, ass_path, 1920, 540)
    os.remove(xml_path)
|
"""
WSGI config for project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import logging
import os
import platform
from django.core.wsgi import get_wsgi_application
from django.core.management import execute_from_command_line
class NoHealthFilter(logging.Filter):
    """Logging filter that silences OpenShift 'GET /health' access-log records."""
    def filter(self, record):
        # Keep every record except health-check requests.
        return 'GET /health' not in record.getMessage()
# disable logging of the Openshift health checks
# (only when imported by a WSGI server, not when run as a script)
if __name__ != "__main__":
    gunicorn_logger = logging.getLogger("gunicorn.access")
    gunicorn_logger.addFilter(NoHealthFilter())
is_local = False
# check if the app is running on OpenShift: the build namespace env var is
# only present inside an OpenShift pod
if not os.environ.get('OPENSHIFT_BUILD_NAMESPACE', False):
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "edivorce.settings.local")
    is_local = True
else:
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "edivorce.settings.openshift")
if os.environ.get('POD_INIT_COMPLETE', "") != "True":
    # gunicorn starts multiple threads and runs wsgi.py once for each thread. We only want
    # these commands to run ONCE.
    # NOTE(review): this env-var guard only covers this process's environment;
    # separate worker *processes* started fresh would each run it — confirm
    # the deployment uses threads, not preforked workers.
    os.environ["POD_INIT_COMPLETE"] = "True"
    # compress the static assets
    execute_from_command_line(['manage.py', 'compress', '--force'])
    question_fixture_path = '/opt/app-root/src/edivorce/fixtures/Question.json'
    platform_name = platform.system()
    if platform_name == "Windows":
        # local Windows dev: resolve the fixture relative to the checkout
        question_fixture_path = os.path.realpath("./edivorce/fixtures/Question.json")
    # load the Question fixture
    if not is_local:
        execute_from_command_line(['manage.py', 'loaddata', question_fixture_path])
application = get_wsgi_application()
|
import os
import click
from dehinter.font import dehint
from fontTools.ttLib import TTCollection
from fontTools.ttLib import TTFont
from fontTools.ttLib.removeOverlaps import removeOverlaps
from ftcli.Lib.Font import Font
from ftcli.Lib.utils import getFontsList, makeOutputFileName, getSourceString
# add-dsig
# Empty click group wrapping the add-dsig command; collected into the
# module-level CommandCollection `cli` at the bottom of the file.
@click.group()
def dsig_add():
    pass
@dsig_add.command()
@click.argument('input_path', type=click.Path(exists=True, resolve_path=True))
@click.option('-o', '--output-dir', type=click.Path(resolve_path=True),
              help='The output directory where the output files are to be created. If it doesn\'t exist, will be'
                   'created. If not specified, files are saved to the same folder.')
@click.option('--recalc-timestamp/--no-recalc-timestamp', default=False,
              help='Keeps the original font \'modified\' timestamp (head.modified) or set it to current time. By '
                   'default, original timestamp is kept.')
@click.option('--overwrite/--no-overwrite', default=True,
              help='Overwrites existing output files or save them to a new file (numbers are appended at the end of'
                   'filename). By default, files are overwritten.')
def add_dsig(input_path, recalc_timestamp, output_dir, overwrite):
    """Adds a dummy DSIG to the font, if it's not present.
    """
    files = getFontsList(input_path)
    for f in files:
        try:
            font = Font(f, recalcTimestamp=recalc_timestamp)
            if 'DSIG' not in font:
                font.addDummyDSIG()
                # BUG FIX: resolve the destination per file in a local.
                # Previously `output_dir` itself was reassigned inside the
                # loop, so with no -o option every font after the first was
                # saved into the FIRST font's directory.
                if output_dir is None:
                    out_dir = os.path.dirname(f)
                else:
                    out_dir = output_dir
                    if not os.path.exists(out_dir):
                        os.mkdir(out_dir)
                output_file = makeOutputFileName(f, outputDir=out_dir, overWrite=overwrite)
                font.save(output_file)
                click.secho(f'{os.path.basename(output_file)} --> saved', fg='green')
            else:
                click.secho(f'No changes made, DSIG table is already present in {os.path.basename(f)}', fg='yellow')
        except Exception as e:
            click.secho('ERROR: {}'.format(e), fg='red')
# dehinter
# Empty click group wrapping the dehinter command; collected into the
# module-level CommandCollection `cli` at the bottom of the file.
@click.group()
def remove_hinting():
    pass
@remove_hinting.command()
@click.argument('input_path', type=click.Path(exists=True, resolve_path=True))
@click.option('--keep-cvar', is_flag=True, default=False, help="keep cvar table")
@click.option('--keep-cvt', is_flag=True, default=False, help="keep cvt table")
@click.option('--keep-fpgm', is_flag=True, default=False, help="keep fpgm table")
@click.option('--keep-hdmx', is_flag=True, default=False, help="keep hdmx table")
@click.option('--keep-ltsh', is_flag=True, default=False, help="keep LTSH table")
@click.option('--keep-prep', is_flag=True, default=False, help="keep prep table")
@click.option('--keep-ttfa', is_flag=True, default=False, help="keep ttfa table")
@click.option('--keep-vdmx', is_flag=True, default=False, help="keep vdmx table")
@click.option('--keep-glyf', is_flag=True, default=False, help="do not modify glyf table")
@click.option('--keep-gasp', is_flag=True, default=False, help="do not modify gasp table")
@click.option('--keep-maxp', is_flag=True, default=False, help="do not modify maxp table")
@click.option('--keep-head', is_flag=True, default=False, help="do not head glyf table")
# NOTE(review): the --keep-head help text above reads "do not head glyf
# table" — looks like a typo for "do not modify head table".
@click.option('--verbose', is_flag=True, default=False, help="display standard output")
@click.option('-o', '--output-dir', type=click.Path(file_okay=False, resolve_path=True), default=None,
              help='Specify the output directory where the output files are to be saved. If output_directory doesn\'t '
                   'exist, will be created. If not specified, files are saved to the same folder.')
@click.option('--recalc-timestamp/--no-recalc-timestamp', default=False, show_default=True,
              help='Keep the original font \'modified\' timestamp (head.modified) or set it to current time. By '
                   'default, original timestamp is kept.')
@click.option('--overwrite/--no-overwrite', default=True, show_default=True,
              help='Overwrite existing output files or save them to a new file (numbers are appended at the end of '
                   'file name). By default, files are overwritten.')
def dehinter(input_path, keep_cvar, keep_cvt, keep_fpgm, keep_hdmx, keep_ltsh, keep_prep, keep_ttfa, keep_vdmx,
             keep_glyf, keep_gasp, keep_maxp, keep_head, verbose, output_dir=None, recalc_timestamp=False,
             overwrite=True):
    """Drops hinting from all glyphs.
    Currently, this only works with TrueType fonts with 'glyf' table.
    This is a CLI for dehinter by Source Foundry: https://github.com/source-foundry/dehinter
    """
    files = getFontsList(input_path)
    for f in files:
        try:
            font = TTFont(f, recalcTimestamp=recalc_timestamp)
            # sfntVersion 'OTTO' marks CFF-flavored (OpenType) fonts; dehint
            # only supports glyf-based TrueType fonts.
            if not font.sfntVersion == 'OTTO':
                dehint(font, keep_cvar=keep_cvar, keep_cvt=keep_cvt, keep_fpgm=keep_fpgm, keep_gasp=keep_gasp,
                       keep_glyf=keep_glyf, keep_head=keep_head, keep_ltsh=keep_ltsh, keep_maxp=keep_maxp,
                       keep_prep=keep_prep, keep_ttfa=keep_ttfa, keep_vdmx=keep_vdmx, verbose=verbose)
                output_file = makeOutputFileName(f, outputDir=output_dir, overWrite=overwrite)
                font.save(output_file)
                click.secho(f'{os.path.basename(output_file)} --> saved', fg='green')
            else:
                click.secho(f'ERROR: {os.path.basename(f)} is not a TrueType file', fg='red')
        except Exception as e:
            # Keep processing the remaining fonts on any per-file failure.
            click.secho(f'ERROR: {e}', fg='red')
# Empty click group wrapping the remove-overlaps command; collected into the
# module-level CommandCollection `cli` at the bottom of the file.
@click.group()
def overlaps_remove():
    pass
@overlaps_remove.command()
@click.argument('input_path', type=click.Path(exists=True, resolve_path=True))
@click.option('-o', '--output-dir', type=click.Path(file_okay=False, resolve_path=True), default=None,
              help='Specify the output directory where the output files are to be saved. If output_directory doesn\'t '
                   'exist, will be created. If not specified, files are saved to the same folder.')
@click.option('--recalc-timestamp/--no-recalc-timestamp', default=False, show_default=True,
              help='Keep the original font \'modified\' timestamp (head.modified) or set it to current time. By '
                   'default, original timestamp is kept.')
@click.option('--overwrite/--no-overwrite', default=True, show_default=True,
              help='Overwrite existing output files or save them to a new file (numbers are appended at the end of '
                   'file name). By default, files are overwritten.')
def remove_overlaps(input_path, output_dir=None, recalc_timestamp=False, overwrite=True):
    """Simplify glyphs in TTFont by merging overlapping contours.
    Overlapping components are first decomposed to simple contours, then merged.
    Currently, this only works with TrueType fonts with 'glyf' table.
    Note that removing overlaps invalidates the hinting. Hinting is dropped from all glyphs whether or not overlaps are
    removed from a given one, as it would look weird if only some glyphs are left (un)hinted.
    """
    files = getFontsList(input_path)
    for f in files:
        try:
            font = TTFont(f, recalcTimestamp=recalc_timestamp)
            # 'OTTO' marks CFF-flavored fonts, which have no glyf table.
            if not font.sfntVersion == 'OTTO':
                removeOverlaps(font)
                output_file = makeOutputFileName(f, outputDir=output_dir, overWrite=overwrite)
                font.save(output_file)
                click.secho(f'{os.path.basename(output_file)} --> saved', fg='green')
            else:
                click.secho(f'{os.path.basename(f)} is not a TrueType file', fg='red')
        except Exception as e:
            # Keep processing the remaining fonts on any per-file failure.
            click.secho(f'ERROR: {e}', fg='red')
# Empty click group wrapping the font-renamer command; collected into the
# module-level CommandCollection `cli` at the bottom of the file.
@click.group()
def rename_fonts():
    pass
@rename_fonts.command()
@click.argument('input_path', type=click.Path(exists=True, resolve_path=True))
@click.option('-s', '--source-string', type=click.Choice(
    choices=['1_1_2', '1_4', '1_6', '1_16_17', '1_18', '3_1_2', '3_4', '3_6', '3_16_17', 'cff_1', 'cff_2']),
              default='3_6',
              help="""
The source string is read from a namerecord or from a combination of two namerecords, and the font file is renamed
according to it.
The first number in the sequence is the platformID, while the following numbers represent the nameID(s) numbers.
For example:
-s 1_1_2: reads the strings contained in PlatformID 1 (Macintosh) nameID 1 and nameID 2 values and concatenates them.
-s 3_6: reads the platformID 3 (Windows) nameID 6 (PostScript name).
If the font is CFF flavored, the cff_1 or cff_2 options can be used.
""")
def font_renamer(input_path, source_string):
    """Renames font files according to the provided source string.
    """
    files = getFontsList(input_path)
    for f in files:
        d = os.path.dirname(f)
        n = os.path.basename(f)
        font = TTFont(f)
        isCFF = 'CFF ' in font
        # cff_* source strings only make sense for CFF-flavored fonts.
        if not isCFF and source_string in ('cff_1', 'cff_2'):
            click.secho(f'Invalid option: {source_string}. {n} does not seem to be a CFF font', fg='yellow')
            continue
        string = getSourceString(f, source_string)
        # Fall back to the current file name (without extension) when the
        # requested namerecord is missing.
        if not string:
            string = os.path.splitext(n)[0]
        # Pick the extension from the font flavor: woff/woff2 for web fonts,
        # otf/ttf for uncompressed CFF/TrueType.
        new_ext = None
        if font.flavor == 'woff':
            new_ext = '.woff'
        if font.flavor == 'woff2':
            new_ext = '.woff2'
        if font.flavor is None:
            if isCFF:
                new_ext = '.otf'
            else:
                new_ext = '.ttf'
        new_file_name = string + new_ext
        new_file = makeOutputFileName(os.path.join(d, new_file_name), overWrite=True)
        if new_file != f:
            try:
                os.rename(f, new_file)
                click.secho("{} --> {}".format(n, new_file_name), fg='green')
            except FileExistsError:
                # Target exists: fall back to a numbered, non-overwriting name.
                new_file = makeOutputFileName(new_file, overWrite=False)
                os.rename(f, new_file)
                click.secho(f'{n} --> {os.path.basename(new_file)}', fg='green')
            except Exception as e:
                click.secho(f'ERROR: {e}', fg='red')
        else:
            click.secho(f'{f} --> skipped', fg='yellow')
# Empty click group wrapping the ttc-extractor command; collected into the
# module-level CommandCollection `cli` at the bottom of the file.
@click.group()
def extract_ttc():
    pass
@extract_ttc.command()
@click.argument('input_path', type=click.Path(exists=True, resolve_path=True, dir_okay=False))
@click.option('-o', '--output-dir', type=click.Path(file_okay=False, resolve_path=True), default=None,
              help='Specify the output directory where the output files are to be saved. If output_directory doesn\'t '
                   'exist, will be created. If not specified, files are saved to the same folder.')
@click.option('--recalc-timestamp/--no-recalc-timestamp', default=False, show_default=True,
              help='Keep the original font \'modified\' timestamp (head.modified) or set it to current time. By '
                   'default, original timestamp is kept.')
@click.option('--overwrite/--no-overwrite', default=True, show_default=True,
              help='Overwrite existing output files or save them to a new file (numbers are appended at the end of file'
                   'name). By default, files are overwritten.')
def ttc_extractor(input_path, output_dir=None, recalc_timestamp=False, overwrite=True):
    """Extracts .ttc fonts to otf/ttf fonts.
    """
    try:
        TTCfont = TTCollection(input_path)
        fonts = TTCfont.fonts
        for font in fonts:
            font.recalcTimestamp = recalc_timestamp
            # Name each extracted font after its Windows PostScript name
            # (platformID 3, nameID 6, langID 0x409).
            filename = str(font['name'].getName(6, 3, 1, 0x409))
            # 'OTTO' sfntVersion marks CFF-flavored fonts.
            ext = '.otf' if font.sfntVersion == 'OTTO' else '.ttf'
            if not output_dir:
                output_dir = os.path.dirname(input_path)
            output_file = makeOutputFileName(filename + ext, outputDir=output_dir, overWrite=overwrite)
            font.save(output_file)
            click.secho(f'{os.path.basename(output_file)} --> saved', fg='green')
    except Exception as e:
        click.secho('ERROR: {}'.format(e), fg='red')
# Merge all command groups above into a single CLI entry point.
cli = click.CommandCollection(sources=[dsig_add, remove_hinting, rename_fonts, overlaps_remove, extract_ttc],
                              help="Miscellaneous utilities.")
|
import logging
import random
import threading
import time
from counters import Counters
from filter.filter import Filter
from filter.profanity import Profanity
from match import Match
from rate_limiters import RateLimiters
from replies import Replies
from sleep_wake import SleepWake
import telegram
from telegram.ext import CallbackContext
from telegram.ext import CommandHandler, MessageHandler, Filters
from telegram.ext import Updater
from telegram.error import TelegramError
logger = logging
#
# Our main Cheetah Bot class.
#
class CheetahBot():
    """Telegram bot that replies to group chats with cheetah quotes and
    pictures, subject to a per-chat rate limit.  Call start() to run the
    bot; it never returns.
    """
    counters = None
    filter = None
    match = None
    profanity = None
    rate_limiters = None
    replies = None
    sleep_wake = None
    # NOTE(review): this empty value is immediately overwritten by the
    # template below — likely leftover.
    about_text = ""
    about_text = """I am Cheetah Bot -- a cybernetic organism: living spots and fur over a metal endoskeleton.\n
My mission is to chirp at you. Add me to a Telegram group for cheetah sounds and pictures.\n
My directives are as follows:\n
{chee_text}
- I notice profanity and respond to it.
- If you @ me, I respond with cheetah pictures and noises.
- I have a quota of sending {actions} messages per {period} seconds.
{post_every_text}
- I can say {stats_total} different things, choosing from {stats_quotes} quotes and {stats_images} pictures.
@ me with 'help' to see this message again.
Made with 🙀 by Leopards.
"""
    def __init__(self):
        # All real setup happens in start().
        pass
    #
    # Our main entry point to start bot. This function will never exit.
    #
    def start(self, token, posts_file, group_ids, group_names,
        actions, period, post_every, profanity_reply = False):
        """Wire up helpers, authenticate with Telegram and start polling.

        token: Telegram bot token.
        posts_file: source of quotes/images for Replies.
        group_ids/group_names: allow-lists of chats (empty = open to all).
        actions/period: rate limit of `actions` messages per `period` seconds.
        post_every: reply to the last message every N posts (0/None disables).
        profanity_reply: when True, respond to profanity instead of ignoring it.
        """
        self.counters = Counters(post_every)
        self.filter = Filter()
        self.match = Match()
        self.profanity = Profanity(ignore = not profanity_reply)
        self.rate_limiters = RateLimiters(actions, period)
        self.replies = Replies(posts_file)
        self.sleep_wake = SleepWake()
        post_every_text = ""
        if post_every:
            post_every_text = f"- I reply to the last message every {post_every} messages posted."
        chee_text = "- I respond to messages that are just '/chee'."
        stats = self.replies.getStats()
        # Fill in the help/about template once, at startup.
        self.about_text = self.about_text.format(actions = actions, period = period,
            post_every_text = post_every_text, chee_text = chee_text,
            stats_total = stats["total"], stats_quotes = stats["quotes"], stats_images = stats["images"]
            )
        #print("DEBUG: ", self.about_text) # Debugging
        self.allowed_group_ids = self.getAllowedIds(group_ids)
        self.allowed_group_names = self.getAllowedIds(group_names)
        if self.allowed_group_ids or self.allowed_group_names:
            logger.info(f"Allowed Group IDs: {self.allowed_group_ids}")
            logger.info(f"Allowed Group Names: {self.allowed_group_names}")
        else:
            logger.info("No Group IDs or Group Names specified, the bot is open to ALL groups!")
        bot = telegram.Bot(token = token)
        logger.info(f"Successfully authenticated! {bot.get_me()}")
        self.my_username = bot.get_me().username
        self.my_id = bot.get_me().id
        logger.info(f"My usernamne: {self.my_username}, My ID: {self.my_id}")
        updater = Updater(token = token)
        dispatcher = updater.dispatcher
        #
        # Catch errors
        #
        dispatcher.add_error_handler(self.errorHandler)
        #
        # We're just gonna reply to everything.
        #
        echo_handler = MessageHandler(Filters.all, self.echo)
        dispatcher.add_handler(echo_handler)
        updater.start_polling()
    #
    # Our handler that is fired when a message comes in
    #
    def echo(self, update, context):
        """Top-level message handler: parse the update and, if it qualifies,
        compose and send a reply, tolerating missing send rights."""
        results = self.echoParseMessage(update, context)
        if not results:
            return(None)
        (message, text, chat_id) = results[0], results[1], results[2]
        try:
            self.echoComposeReply(context, update, message, text, chat_id)
        except telegram.error.BadRequest as e:
            # The bot may be muted in a chat; don't crash on that.
            if "Have no rights to send a message" in str(e):
                logger.warning(f"echo(): Unable to send message to chat_id={chat_id}: {str(e)}")
            else:
                raise(e)
    #
    # Parse our message and figure out if we should reply.
    #
    def echoParseMessage(self, update, context):
        """Return [message, text, chat_id] when the update deserves a reply,
        or None to ignore it (too old, DM, ignorable, disallowed group)."""
        # Set some defaults
        reply = ""
        # Filter newlines out of our message for logging and matching purposes
        message = update.message
        text = ""
        if update.message.text:
            text = update.message.text.replace("\r", " ").replace("\n", " ")
        # How old is the message?
        age = time.time() - message.date.timestamp()
        logger.info(f"New Message: age={age:.3f}, chat_id={update.effective_chat.id}, chat_name='{update.effective_chat.title}', text={text[0:30]}...")
        #logger.info(f"Update: {update}") # Debugging
        #logger.info(f"Message: {message}") # Debugging
        #logger.info(f"Effective chat: {update.effective_chat}") # Debugging
        # Bail if the message is too old, otherwise we risk spamming groups after a hiatus
        if age > 10:
            logger.info(f"Message is {age} > 10 seconds old, ignoring.")
            return(None)
        # Was this a bot add/remove?
        if self.filter.messageIsIgnorable(update, message, self.my_id):
            return(None)
        # Was this a DM?
        if self.filter.messageIsDm(update):
            logger.info("This is a DM, talk about ourself and bail out (for now...)")
            context.bot.send_message(chat_id = update.effective_chat.id, text = self.about_text)
            return(None)
        #
        # Bail out if we're not in an allowed group
        #
        chat_id = update.effective_chat.id
        chat_name = update.effective_chat.title
        if not self.match.doesGroupMatch(self.allowed_group_ids, self.allowed_group_names,
            chat_id, chat_name):
            return(None)
        return([message, text, chat_id])
    #
    # Compose our reply
    #
    def echoComposeReply(self, context, update, message, text, chat_id):
        """Decide what (if anything) to send back: help/stats, '/chee',
        a profanity response, or a scheduled/random reply."""
        reply = ""
        message_to_me = False
        if self.match.doesUserMatch(self.my_id, self.my_username, update.message, text):
            message_to_me = True
        #
        # Announce ourself it added to a group
        #
        #if self.filter.botWasAddedToGroup(update, message, self.my_id):
        #	logger.info(f"I was added to the chat '{chat_name}' ({chat_id}), let's say hi!")
        #	reply = self.about_text
        #
        # Get our rate limiter for this chat
        #
        limiter = self.rate_limiters.getRateLimiter(chat_id)
        #
        # Did the user ask us for help?
        #
        if message_to_me:
            #
            # Did the user ask us for help?
            #
            if self.filter.messageContainsHelp(text):
                logger.info("User asked us for help, give it.")
                reply = self.about_text
            elif self.filter.messageContainsStats(text):
                logger.info("User wants to know the bot stats")
                reply = self.getStats(limiter)
        #
        # See if anyone in the chat said "/chee"
        #
        if self.filter.messageIsChee(text):
            reply = "chee"
            logger.info("String '/chee' is exact match.")
        #
        # I'm not thrilled about calling checkForFoulLanguage() twice, but Python
        # doesn't let me do "if (value = func())" syntax like other languages do.
        # Once this goes into a class, I can have the function just set a classwide value instead.
        #
        if self.profanity.hasFoulLanguage(update, text):
            reply = self.profanity.getReply()
            logger.info("Profanity detected")
        #
        # If the message wasn't to the bot, and we're not replying to a user, stop.
        #
        if not reply:
            if not message_to_me:
                #
                # See if we should reply and do so.
                #
                should_post = self.counters.update(chat_id)
                if should_post:
                    delay = 10
                    #delay = 1 # Debugging
                    threading.Timer(delay, self.sendRandomMessageFromThread,
                        args = (context.bot, limiter, chat_id,)
                        ).start()
                    logger.info(f"We hit our num message threshold for posting, scheduled group post in {delay} seconds...")
                return(None)
        #
        # If we made it here, we're sending SOME kind of reply.
        #
        #
        # If we already have a reply, then send it out.
        # Otherwise, we have no idea what the user is talking about, so let's just
        # grab a random string of text or URL.
        #
        if reply:
            self.sendMessage(context.bot, limiter, chat_id, reply = reply,
                message_id = message.message_id)
        else:
            self.sendRandomReply(context.bot, limiter, chat_id,
                message_id = message.message_id)
    #
    # Handle our errors
    #
    def errorHandler(self, update: object, context: CallbackContext):
        """Dispatcher error handler: downgrade duplicate-getUpdates noise to a
        warning, log everything else with traceback."""
        error_string = str(context.error)
        if "terminated by other getUpdates request" in error_string:
            logger.warning("Looks like another instance of this bot is running. Stop doing that.")
        else:
            logger.error(msg="Exception while handling an update:", exc_info=context.error)
            #
            # Based on an example at
            # https://github.com/python-telegram-bot/python-telegram-bot/blob/master/examples/errorhandlerbot.py#L30
            #
            #errors = traceback.format_exception(None, context.error, context.error.__traceback__)
            #error= ''.join(errors)
            #logger.info(error)
    #
    # Split up our comma-delimited list of IDs, and filter out empty strings.
    #
    def getAllowedIds(self, group_ids):
        # NOTE(review): the loop variable shadows the builtin `id` — works,
        # but worth renaming.
        retval = [ id for id in group_ids if id != "" ]
        return(retval)
    #
    # Figure out which reply function to use, get a reply, and send it off!
    #
    def sendRandomReply(self, bot, limiter, chat_id, message_id):
        """Send a random quote or image as a reply to message_id."""
        reply = self.replies.getRandomMessage()
        if "url" in reply:
            caption = ""
            if "caption" in reply:
                caption = reply["caption"]
            self.sendMessage(bot, limiter, chat_id,
                image_url = reply["url"], caption = caption,
                message_id = message_id)
        elif "caption" in reply:
            self.sendMessage(bot, limiter, chat_id,
                reply = reply["caption"], message_id = message_id)
        else:
            logger.warn(f"Reply '{reply}' has neither caption not URL. Probably a blank line! I warned you at startup!")
            # NOTE(review): logger.warn is deprecated in favor of
            # logger.warning.
    #
    # Send a random message to the group.
    #
    def sendRandomMessage(self, bot, limiter, chat_id):
        """Send a random quote or image to the group (not as a reply)."""
        # NOTE(review): unlike sendRandomReply(), the url branch assumes a
        # caption is present — a url-only entry would raise KeyError here.
        reply = self.replies.getRandomMessage()
        if "url" in reply:
            self.sendMessage(bot, limiter, chat_id,
                image_url = reply["url"], caption = reply["caption"])
        else:
            self.sendMessage(bot, limiter, chat_id,
                reply = reply["caption"])
    #
    # Callback to send a random message from a thread, where there was a delay.
    #
    def sendRandomMessageFromThread(self, bot, limiter, chat_id):
        """threading.Timer callback: forward to sendRandomMessage()."""
        self.sendRandomMessage(bot, limiter, chat_id)
    #
    # Return the current stats for the bot in the current channel.
    #
    def getStats(self, limiter):
        """Format quota and content stats for the chat served by *limiter*."""
        stats = self.replies.getStats()
        # NOTE(review): there is no space/newline between "seconds." and
        # "I have" when these adjacent f-strings are concatenated.
        retval = (f"I can send {limiter.actions} messages every {limiter.period} seconds."
            + f"I have {limiter.getQuota()-1:.1f} more messages left in my quota.\n\n"
            + f"I can say {stats['total']} different things, choosing from {stats['quotes']} quotes and {stats['images']} pictures."
            )
        return(retval)
    #
    # Send a message in a way that honors our rate limiter's quota.
    # All messages are either a reply or an image.
    #
    def sendMessage(self, bot, limiter, chat_id,
        reply = None, image_url = None, caption = None, message_id = None
        ):
        """Send text (*reply*) or a photo (*image_url*) to *chat_id*, honoring
        the sleep state and the rate-limit quota; puts the chat to sleep when
        the quota is exhausted."""
        if self.sleep_wake.isAsleep(chat_id):
            logger.info(f"We're asleep, not sending reply to chat {chat_id}...")
            return(None)
        if limiter.action():
            if reply:
                logger.info(f"Sending reply: {reply[0:40]}..., reply_to={message_id} quota_left={limiter.getQuota():.3f}")
                if not message_id:
                    bot.send_message(chat_id = chat_id, text = reply)
                else:
                    bot.send_message(chat_id = chat_id, text = reply,
                        reply_to_message_id = message_id)
            elif image_url:
                newline = "\n"
                if not message_id:
                    bot.send_photo(chat_id = chat_id, photo = image_url, caption = caption)
                    logger.info(f"Sending image: {image_url}, caption: {caption.replace(newline, ' ')[0:20]}... quota_left={limiter.getQuota():.3f}")
                else:
                    logger.info(f"Sending image: {image_url}, caption: {caption.replace(newline, ' ')[0:20]}... reply_to={message_id} quota_left={limiter.getQuota():.3f}")
                    bot.send_photo(chat_id = chat_id, photo = image_url, caption = caption,
                        reply_to_message_id = message_id)
            else:
                raise Exception(f"Not sure what to send with a message that is not a reply and doesn't have a URL")
            #
            # Let the group know that we've gone over our quota.
            # Yes, this will only work with one group, even if we are listening in multiple groups.
            #
            if limiter.isQuotaExhausted():
                self.sleep_wake.goToSleep(bot, limiter, chat_id)
        else:
            logger.info(f"Not sending message, quota currently exhausted. quota_left={limiter.getQuota():.3f}")
|
from sklearn.base import BaseEstimator, TransformerMixin
from pyyawt.denoising import wden
import numpy as np
from sklearn.utils.validation import check_is_fitted
class WaveletDenoiser(BaseEstimator, TransformerMixin):
    """Row-wise wavelet denoiser with a scikit-learn style interface.

    Each row of the 2-D input ``X`` is denoised independently with
    :func:`pyyawt.denoising.wden`; ``fit`` caches the denoised signals and
    ``transform`` returns them for the same ``X``.
    """
    def __init__(self, threshold_rule='sqtwolog', threshold_type='s', scale='one', level=1, wavelet_name='sym4'):
        # Hyper-parameters stored verbatim (sklearn convention).
        self.threshold_rule = threshold_rule
        self.threshold_type = threshold_type
        self.scale = scale
        self.level = level
        self.wavelet_name = wavelet_name
        # Fitted state, populated by fit():
        self.denoised_ = None  # denoised signals, same shape as the fitted X
        self.coef_ = None      # per-row wavelet coefficients
        self.length_ = None    # per-row coefficient lengths
    def fit(self, X, y=None):
        """Denoise every row of X and cache the results.

        X: 2-D array-like, one signal per row.
        y: ignored; accepted for sklearn pipeline compatibility.
        """
        # BUG FIX: np.vsplit(X) was called without the mandatory second
        # argument (indices_or_sections) and always raised TypeError.
        # Iterate the rows directly instead, handing wden one 1-D signal
        # at a time.
        results = [
            wden(X=row,
                 TPTR=self.threshold_rule,
                 SORH=self.threshold_type,
                 SCAL=self.scale,
                 N=self.level,
                 wname=self.wavelet_name)
            for row in np.asarray(X)
        ]
        self.denoised_ = np.row_stack([result[0] for result in results])
        self.coef_ = [result[1] for result in results]
        self.length_ = [result[2] for result in results]
        return self
    def transform(self, X):
        """Return the denoised signals computed in fit().

        Only valid for the exact X the estimator was fitted with (verified
        by shape); raises ValueError otherwise.
        """
        check_is_fitted(self, ['denoised_'])
        if self.denoised_.shape == X.shape:
            return self.denoised_
        else:
            raise ValueError('Passed X is different shape from that used in fit. This function can only be applied to '
                             'the same X the estimator was fitted with.')
|
from threading import Thread
from config import mismatch_cron_interval_sec, mismatch_email_service_url
import logging
from src.db import ModelRepo
import requests
from logging.config import dictConfig
log = logging.getLogger('file')
from logging.config import dictConfig
repo = ModelRepo()
class AlertCronProcessor(Thread):
    """Background thread that periodically POSTs to the mismatch e-mail
    service until its stop event is set.
    """
    def __init__(self, event):
        # *event* is a threading.Event used both as the stop flag and as
        # the interval timer (via Event.wait in run()).
        Thread.__init__(self)
        self.stopped = event
    # Periodic job: trigger the data-mismatch e-mail check.
    def run(self):
        """Loop every `mismatch_cron_interval_sec` seconds, calling the
        mismatch e-mail service; exits when the stop event is set."""
        run = 0
        while not self.stopped.wait(mismatch_cron_interval_sec):
            log.info(f'Mismatch identifier cron Processor run :{run}')
            try:
                headers = {"Content-Type": "application/json"}
                # Empty email list: the service falls back to its own
                # recipient configuration — TODO confirm.
                body = {"emails":[]}
                request_url = mismatch_email_service_url
                log.info("Intiating request to check data mismatch %s"%request_url)
                result=requests.post(url=request_url, headers = headers, json = body)
                log.info(result.content)
                run += 1
            except Exception as e:
                # Never let one failed run kill the cron thread.
                run += 1
                log.exception(f'Exception on Metric Cron Processor on run : {run} , exception : {e}')
# Log config: DEBUG everywhere, mirrored to info.log and stdout.
dictConfig({
    'version': 1,
    'formatters': {'default': {
        'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
    }},
    'handlers': {
        'info': {
            'class': 'logging.FileHandler',
            'level': 'DEBUG',
            'formatter': 'default',
            'filename': 'info.log'
        },
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'formatter': 'default',
            'stream': 'ext://sys.stdout',
        }
    },
    'loggers': {
        'file': {
            'level': 'DEBUG',
            'handlers': ['info', 'console'],
            # BUG FIX: was '' (empty string). Propagation must be disabled
            # with an explicit boolean: the 'file' logger has its own
            # handlers, so propagating to root would double-log every record.
            'propagate': False
        }
    },
    'root': {
        'level': 'DEBUG',
        'handlers': ['info', 'console']
    }
})
|
from tailow.operators.base import Operator
class SizeOperator(Operator):
    """Query operator matching arrays by element count via MongoDB's ``$size``."""

    def to_query(self, field_name, value):
        """Build the ``$size`` query fragment for the given field."""
        fragment = {"$size": value}
        return fragment

    def get_value(self, field, value):
        """Serialise *value* through the field's BSON converter."""
        converted = field.to_son(value)
        return converted
|
import pandas as pd
import torch
from PIL import Image, ImageFile
from torch.utils.data import Dataset
import os
device = torch.device("cuda:0")
ImageFile.LOAD_TRUNCATED_IMAGES = True
class CollectionsDataset(Dataset):
    """Training dataset: loads PNG images by id and builds multi-hot label vectors.

    The CSV must have columns ``id`` (file stem), ``attribute_ids``
    (space-separated class indices) and, when *folds* is given, ``fold``.
    """

    def __init__(self, csv_file, root_dir, num_classes, image_size, folds=None, transform=None):
        frame = pd.read_csv(csv_file)
        # An empty/None folds argument means "use every row".
        if folds:
            frame = frame[frame.fold.isin(folds)].reset_index(drop=True)
        self.data = frame
        self.root_dir = root_dir
        self.transform = transform
        self.num_classes = num_classes
        self.image_size = image_size

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        row = self.data.loc[idx]
        image = Image.open(os.path.join(self.root_dir, row['id'] + '.png'))
        # Multi-hot encode the space-separated attribute indices.
        target = torch.zeros(self.num_classes)
        for attr in row['attribute_ids'].split():
            target[int(attr)] = 1
        if self.transform:
            image = self.transform(image)
        return {'image': image,
                'labels': target
                }
class CollectionsDatasetTest(Dataset):
    """Inference-time dataset: yields (optionally transformed) images only."""

    def __init__(self, csv_file, root_dir, image_size, transform=None):
        self.data = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform
        # Kept for API parity with CollectionsDataset; not used internally.
        self.image_size = image_size

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, idx):
        path = os.path.join(self.root_dir, self.data.loc[idx, 'id'] + '.png')
        img = Image.open(path)
        if self.transform:
            img = self.transform(img)
        return {'image': img}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MemberRule import MemberRule
class ZhimaCreditPeZmgoTemplateCreateModel(object):
    """Request model for the Zhima Credit PE "Zmgo" template-create API.

    Follows the auto-generated Alipay SDK pattern: private backing fields,
    property accessors, and dict (de)serialisation helpers used by the client.
    """
    def __init__(self):
        # All fields default to None; unset fields are skipped on serialisation.
        self._benefit_url = None
        self._biz_no = None
        self._consume_pid_list = None
        self._contact = None
        self._ext_info = None
        self._member_agreement = None
        self._member_mode = None
        self._member_rule = None
        self._partner_id = None
        self._template_name = None
    @property
    def benefit_url(self):
        return self._benefit_url
    @benefit_url.setter
    def benefit_url(self, value):
        self._benefit_url = value
    @property
    def biz_no(self):
        return self._biz_no
    @biz_no.setter
    def biz_no(self, value):
        self._biz_no = value
    @property
    def consume_pid_list(self):
        return self._consume_pid_list
    @consume_pid_list.setter
    def consume_pid_list(self, value):
        # Only list inputs are accepted; the list is shallow-copied element by
        # element (non-list inputs are silently ignored — generated SDK behavior).
        if isinstance(value, list):
            self._consume_pid_list = list()
            for i in value:
                self._consume_pid_list.append(i)
    @property
    def contact(self):
        return self._contact
    @contact.setter
    def contact(self, value):
        self._contact = value
    @property
    def ext_info(self):
        return self._ext_info
    @ext_info.setter
    def ext_info(self, value):
        self._ext_info = value
    @property
    def member_agreement(self):
        return self._member_agreement
    @member_agreement.setter
    def member_agreement(self, value):
        self._member_agreement = value
    @property
    def member_mode(self):
        return self._member_mode
    @member_mode.setter
    def member_mode(self, value):
        self._member_mode = value
    @property
    def member_rule(self):
        return self._member_rule
    @member_rule.setter
    def member_rule(self, value):
        # Accept either a ready MemberRule instance or a plain dict from JSON.
        if isinstance(value, MemberRule):
            self._member_rule = value
        else:
            self._member_rule = MemberRule.from_alipay_dict(value)
    @property
    def partner_id(self):
        return self._partner_id
    @partner_id.setter
    def partner_id(self, value):
        self._partner_id = value
    @property
    def template_name(self):
        return self._template_name
    @template_name.setter
    def template_name(self, value):
        self._template_name = value
    def to_alipay_dict(self):
        """Serialise to a plain dict for the API request body.

        NOTE: truthiness checks mean falsy values ('' / 0 / []) are skipped
        along with None — standard behavior for this generated SDK code.
        """
        params = dict()
        if self.benefit_url:
            if hasattr(self.benefit_url, 'to_alipay_dict'):
                params['benefit_url'] = self.benefit_url.to_alipay_dict()
            else:
                params['benefit_url'] = self.benefit_url
        if self.biz_no:
            if hasattr(self.biz_no, 'to_alipay_dict'):
                params['biz_no'] = self.biz_no.to_alipay_dict()
            else:
                params['biz_no'] = self.biz_no
        if self.consume_pid_list:
            # List elements are serialised in place before the list is emitted.
            if isinstance(self.consume_pid_list, list):
                for i in range(0, len(self.consume_pid_list)):
                    element = self.consume_pid_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.consume_pid_list[i] = element.to_alipay_dict()
            if hasattr(self.consume_pid_list, 'to_alipay_dict'):
                params['consume_pid_list'] = self.consume_pid_list.to_alipay_dict()
            else:
                params['consume_pid_list'] = self.consume_pid_list
        if self.contact:
            if hasattr(self.contact, 'to_alipay_dict'):
                params['contact'] = self.contact.to_alipay_dict()
            else:
                params['contact'] = self.contact
        if self.ext_info:
            if hasattr(self.ext_info, 'to_alipay_dict'):
                params['ext_info'] = self.ext_info.to_alipay_dict()
            else:
                params['ext_info'] = self.ext_info
        if self.member_agreement:
            if hasattr(self.member_agreement, 'to_alipay_dict'):
                params['member_agreement'] = self.member_agreement.to_alipay_dict()
            else:
                params['member_agreement'] = self.member_agreement
        if self.member_mode:
            if hasattr(self.member_mode, 'to_alipay_dict'):
                params['member_mode'] = self.member_mode.to_alipay_dict()
            else:
                params['member_mode'] = self.member_mode
        if self.member_rule:
            if hasattr(self.member_rule, 'to_alipay_dict'):
                params['member_rule'] = self.member_rule.to_alipay_dict()
            else:
                params['member_rule'] = self.member_rule
        if self.partner_id:
            if hasattr(self.partner_id, 'to_alipay_dict'):
                params['partner_id'] = self.partner_id.to_alipay_dict()
            else:
                params['partner_id'] = self.partner_id
        if self.template_name:
            if hasattr(self.template_name, 'to_alipay_dict'):
                params['template_name'] = self.template_name.to_alipay_dict()
            else:
                params['template_name'] = self.template_name
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a model instance from a response dict; returns None if d is empty."""
        if not d:
            return None
        o = ZhimaCreditPeZmgoTemplateCreateModel()
        if 'benefit_url' in d:
            o.benefit_url = d['benefit_url']
        if 'biz_no' in d:
            o.biz_no = d['biz_no']
        if 'consume_pid_list' in d:
            o.consume_pid_list = d['consume_pid_list']
        if 'contact' in d:
            o.contact = d['contact']
        if 'ext_info' in d:
            o.ext_info = d['ext_info']
        if 'member_agreement' in d:
            o.member_agreement = d['member_agreement']
        if 'member_mode' in d:
            o.member_mode = d['member_mode']
        if 'member_rule' in d:
            o.member_rule = d['member_rule']
        if 'partner_id' in d:
            o.partner_id = d['partner_id']
        if 'template_name' in d:
            o.template_name = d['template_name']
        return o
|
# Copyright 2019 Megagon Labs, Inc. and the University of Edinburgh. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import csv
import os
import json
import commentjson
from random import shuffle
import re
import sys
from tqdm import tqdm
import nltk
from nltk.stem import PorterStemmer
from nltk.tokenize.treebank import TreebankWordDetokenizer
from nltk import word_tokenize as tokenizer
class Prepare:
    """Prepare train/dev/test.csv files"""
    def __init__(self, basepath, p_name, source_file, s_min=1, s_max=20, e_min=2, e_max=1000, t_max=150,
                 filter_empty=True, do_stemming=True, num_shuffle=0, split=(0.8, 0.1, 0.1), unk_ratio=-1,
                 word_as_vocab=False):
        self.basepath = basepath
        self.name = p_name
        self.source_file = os.path.join(self.basepath, source_file)
        self.s_min = s_min  # minimum num of sents in a review
        self.s_max = s_max  # maximum num of sents in a review
        self.e_min = e_min  # minimum num of extractions in a review
        self.e_max = e_max  # maximum num of extractions in a review
        self.t_max = t_max  # maximum num of tokens in a review
        self.filter_empty = filter_empty  # filter sent with no extraction
        self.do_stemming = do_stemming
        self.num_shuffle = num_shuffle  # num of shuffles of extractions
        self.split = split  # entity split between train/dev/test
        self.unk_ratio = unk_ratio  # ratio of unknown vocab in test
        self.word_as_vocab = word_as_vocab  # vocab as word (or extraction span)
        # Output directory: <basepath>/data/<p_name>
        self.target_path = os.path.join(self.basepath,
                                        "data",
                                        self.name)
        # create path if not exist
        if not os.path.exists(self.target_path):
            os.makedirs(self.target_path)
        self.stemmer = PorterStemmer() if self.do_stemming else None
        self.detokenizer = TreebankWordDetokenizer()
    def run(self):
        """Read, split by entity, and write train/dev/test CSVs (the full pipeline)."""
        # Read reviews
        print("*****Reading reviews from file*****")
        self.reviews, self.entities = self._read_reviews(self.source_file)
        # Split entities into train/dev/test dataset.
        print("*****Split entities into train/dev/test*****")
        train, dev, test = self._split_train_dev_test()
        # Write to csv files
        print("*****Write files into train/dev/test*****")
        wr_train = csv.writer(open(os.path.join(self.target_path, "train.csv"), "w", encoding="utf-8", newline=""))
        wr_dev = csv.writer(open(os.path.join(self.target_path, "dev.csv"), "w", encoding="utf-8", newline=""))
        wr_test = csv.writer(open(os.path.join(self.target_path, "test.csv"), "w", encoding="utf-8", newline=""))
        # Write header
        wr_train.writerow(["eid", "rid", "review", "extraction", "input_text"])
        wr_dev.writerow(["eid", "rid", "review", "extraction", "input_text"])
        wr_test.writerow(["eid", "rid", "review", "extraction", "input_text"])
        # Each review is routed to the file owning its entity ("ty_id").
        for r_id, review in enumerate(tqdm(self.reviews, desc="reviews")):
            lists = self._review_to_lists(review["ty_id"], r_id, review)
            for row in lists:
                if review["ty_id"] in train:
                    wr_train.writerow(row)
                elif review["ty_id"] in dev:
                    wr_dev.writerow(row)
                else:
                    wr_test.writerow(row)
    def _read_reviews(self, source_file):
        """ Read reviews from file and conduct initial pruning
        """
        entities = set([])
        reviews = []
        num_exts = 0
        with open(source_file, "r", encoding="utf-8") as file:
            for _, line in enumerate(tqdm(file, desc="reviews")):
                # One JSON review object per line.
                review = json.loads(str(line))
                # Process sentences & extractions
                sents = review["sentences"]
                exts = review["extractions"]
                # Filter sentences with NO extractions
                # NOTE(review): iterating a set here may not preserve the
                # original sentence order — consider sorted(...); confirm.
                if self.filter_empty:
                    sents = [sents[i] for i in set([e["sid"] for e in exts])]
                # Prune by number of sentences
                if len(sents) < self.s_min or len(sents) > self.s_max:
                    continue
                # Prune by number of extractions
                if len(exts) < self.e_min or len(exts) > self.e_max:
                    continue
                # Process extractions & sentences
                for ext in review["extractions"]:
                    ext["opinion"] = self._process_span(ext["opinion"])
                    ext["aspect"] = self._process_span(ext["aspect"])
                sents = [self.detokenizer.detokenize(toks) for toks in sents]
                # Validate number of tokens per review
                num_tokens = len(tokenizer(" ".join(sents)))
                if num_tokens > self.t_max:
                    continue
                review["sentences"] = sents
                reviews.append(review)
                entities.add(review["ty_id"])
                num_exts += len(exts)
        # NOTE(review): raises ZeroDivisionError if every review was pruned.
        print("Average number of extractions per review: {}".format(num_exts / (0.0 + len(reviews))))
        return reviews, entities
    def _split_train_dev_test(self):
        """ Split training, validating, and testing dataset.
        """
        train, dev, test = [], [], []
        num_train = int(len(self.entities) * self.split[0])
        num_dev = int(len(self.entities) * self.split[1])
        num_test = len(self.entities) - num_train - num_dev
        # Negative unk_ratio: plain random entity split by the configured ratios.
        if self.unk_ratio < 0:
            entities = list(self.entities)
            shuffle(entities)
            train = entities[:num_train]
            dev = entities[num_train:num_train + num_dev]
            test = entities[num_train + num_dev:]
            return train, dev, test
        # Otherwise: pick test entities greedily so the test set holds roughly
        # unk_ratio of the low-frequency vocabulary.
        # Get statistics
        vocab_size, entity_freq = self._create_stats(self.word_as_vocab)
        test_vocab = int(vocab_size * self.unk_ratio)
        # Select entity in test set in greedy fashion
        entities = list(self.entities)
        shuffle(entities)
        cur_test_vocab = 0
        # NOTE(review): `len(test) <= num_test` admits num_test + 1 entities,
        # and cur_test_vocab is never incremented — confirm both are intended.
        for e_id in entities:
            freq = entity_freq[e_id]
            if cur_test_vocab + freq <= test_vocab and len(test) <= num_test:
                test.append(e_id)
            else:
                train.append(e_id)
        # Select dev from train set
        dev = train[:num_dev]
        train = train[num_dev:]
        return train, dev, test
    def _extraction2input(self, extraction, sep="[SEP]"):
        """Join each extraction's opinion+aspect (first two comma fields) with `sep`."""
        return " {} ".format(sep).join([" ".join(e.split(",")[0:2]) for e in extraction.split(";")])
    def _review_to_lists(self, e_id, r_id, review):
        """Flatten one review into CSV rows; adds num_shuffle shuffled variants."""
        sents = review["sentences"]
        exts = []
        for ext in review["extractions"]:
            opinion = self.detokenizer.detokenize(ext["opinion"])
            aspect = self.detokenizer.detokenize(ext["aspect"])
            # Comma-separated extraction record: opinion,aspect,attribute,sentiment
            ext_item = [opinion, aspect, ext["attribute"], ext["sentiment"]]
            exts.append(",".join(ext_item))
        lists = []
        # lists.append([str(e_id), str(r_id), " ".join(sents), ";".join(exts)])
        lists.append([str(e_id), str(r_id), " ".join(sents), ";".join(exts), self._extraction2input(";".join(exts))])
        # Data augmentation: re-emit the review with the extractions reordered.
        for i in range(self.num_shuffle):
            shuffle(exts)
            # lists.append([str(e_id), str(r_id), " ".join(sents), ";".join(exts)])
            lists.append(
                [str(e_id), str(r_id), " ".join(sents), ";".join(exts), self._extraction2input(";".join(exts))])
        return lists
    def _create_stats(self, split_word=False):
        """ Create vocab frequency statistics for entities.
        Args:
            split_word (True or False): when set to True, "vocab" is based on
                extractions; otherwise, "vocab" is based on individual word
        Returns:
            vocab_size (int): total vocabulary size
            entity_low_freq_count (dict): low freq vocab count for each entity
        """
        # Count vocab frequency: "vocab": frequency & initialize entity freq
        vocab_freq = {}
        entity_freq = {}
        for _, review in enumerate(tqdm(self.reviews, desc="build_vocab")):
            if review["ty_id"] not in entity_freq:
                entity_freq[review["ty_id"]] = 0
            for ext in review["extractions"]:
                for vocab in self._extraction_to_vocab(ext, split_word):
                    if vocab not in vocab_freq:
                        vocab_freq[vocab] = 0
                    vocab_freq[vocab] += 1
        # Update entity low freq count: "entity": low freq vocab count
        for _, review in enumerate(tqdm(self.reviews, desc="update_stats")):
            # Collect vocab for all extractions
            low_freq_vocab = 0
            for ext in review["extractions"]:
                for vocab in self._extraction_to_vocab(ext, split_word):
                    # A vocab item seen exactly once overall counts as "low frequency".
                    low_freq_vocab += 1 if vocab_freq[vocab] <= 1 else 0
            entity_freq[review["ty_id"]] += low_freq_vocab
        return len(vocab_freq), entity_freq
    def _extraction_to_vocab(self, extraction, split_word):
        """ Create vocabulary for each extraction
        """
        vocabs = extraction["opinion"] + extraction["aspect"]
        if split_word:
            return [self.detokenizer.detokenize(vocabs)]
        return vocabs
    def _process_span(self, span):
        """ Tokenize a span and stemming tokens if required.
        """
        # Strip the characters used as record/field separators downstream.
        span = re.sub(r',', '', span)
        span = re.sub(r';', '', span)
        tokens = nltk.word_tokenize(span)
        if self.do_stemming:
            tokens = [self.stemmer.stem(token) for token in tokens]
        return tokens
if __name__ == "__main__":
    assert len(sys.argv) > 1, "Please specify prepare configuration file!"
    config_file = sys.argv[1]
    # commentjson allows // and # comments inside the JSON config.
    with open(config_file, "r") as file:
        configs = commentjson.loads(file.read())
    # Default to the current directory when BASEPATH is not set.
    basepath = os.environ.get("BASEPATH", ".")
    # BUG FIX: the original compared .lower() output against "True"
    # (capital T), which can never match, so all three flags were always False.
    filter_empty = configs["filter_empty"].lower() == "true"
    do_stemming = configs["do_stemming"].lower() == "true"
    word_as_vocab = configs["word_as_vocab"].lower() == "true"
    preparer = Prepare(basepath,
                       configs["p_name"], configs["source_file"],
                       s_min=configs["s_min"], s_max=configs["s_max"],
                       e_min=configs["e_min"], e_max=configs["e_max"],
                       t_max=configs["t_max"],
                       filter_empty=filter_empty,
                       do_stemming=do_stemming,
                       num_shuffle=configs["num_shuffle"],
                       split=configs["split"],
                       unk_ratio=configs["unk_ratio"],
                       word_as_vocab=word_as_vocab)
    preparer.run()
|
from random import randint
from collections import OrderedDict
# from collections module: demonstrate an insertion-ordered dictionary
my_dict = OrderedDict([
    ('isim', 'ali'),
    ('cinsiyet', 'erkek'),
    ('yas', 123),
])
print(my_dict)
# from random module: draw and show a random integer in [1, 19]
x = randint(1, 19)
print(x)
|
from tensorflow_asr.featurizers.text_featurizers import CharFeaturizer
# Build a character-level featurizer with no decoder config; the blank token
# is placed at index 0 (blank_at_zero=True).
txf = CharFeaturizer(None, blank_at_zero=True)
# Encode a sample transcript (presumably into per-character label indices —
# see tensorflow_asr docs to confirm) and print the result.
a = txf.extract("fkaff aksfbfnak kcjhoiu")
print(a)
|
from azureml.core import Workspace, Experiment, Environment, ScriptRunConfig
# Load workspace settings from the local AzureML config (.azureml/config.json).
ws = Workspace.from_config()
experiment = Experiment(workspace=ws, name='pre-process')
# Run preprocess.py from the pipeline source folder on the 'cpu-cluster' target.
config = ScriptRunConfig(source_directory='src/azure_pipeline/src/', script='preprocess.py', compute_target='cpu-cluster')
# set up environment
env = Environment.from_conda_specification(name='env', file_path='.azureml/env.yml')
config.run_config.environment = env
# Submit the run and print the Azure portal URL for monitoring it.
run = experiment.submit(config)
aml_url = run.get_portal_url()
print(aml_url)
|
# farm_animals = {"sheep","cow","hen"}
#
# print(farm_animals)
#
# for animal in farm_animals:
# print(animal)
#
# print("="*40)
#
# wild_animals = set(["lion","tiger","panther","elephant","fox"])
#
# print(wild_animals)
# for animal in wild_animals:
# print(animal)
#
# farm_animals.add("horse")
# wild_animals.add("horse")
#
# print(farm_animals)
# print(wild_animals)
#
# #creating an empty set as empty_set={} would return a dictionary
#
#
# empty_set = set()
# empty_set2 = {}
#
# empty_set.add("a")
# #empty_set2.add("a") will throw an error saying that no add attribute to the dict object
#
# #an iterative object can be passed as arguments to the set function
# #though we used lists in the above code, we can also use ranges, tuples
#
#
# even = range(0,40,2)
# even_set = set(even)
# print(even_set)
#
# squares_tuple = (4,9,6,16,25)
#
# print(set(squares_tuple))
#
# Union, intersection, difference, and subsets of sets
even = set(range(0,40,2))
print(even)
print(len(even))
squares = set((4,9,6,16,25))
print(squares)
print(len(squares))
# union syntax (union is commutative, so both orders give the same set)
print(even.union(squares))
print(len(even.union(squares)))
#or
print(squares.union(even))
#intersection
print(even.intersection((squares)))
#or
print(squares.intersection((even)))
# alternatively, the & operator computes the intersection
print(squares & even)
# to implement set A \ set B
print("="*100)
print(sorted(even))
print(sorted(squares))
# two methods to find the difference (note: difference is NOT commutative)
print(sorted(even.difference(squares)))
print(sorted(squares.difference(even)))
print(sorted(squares - even))
print(sorted(even - squares))
print("="*40)
# difference_update method changes the even set in place
#print(sorted(even))
#even.difference_update(squares)
#print(sorted(even))
# print("symmetric difference")
# symmetric difference is nothing but elements which are in either one set or the other but not both
# print(sorted(even.symmetric_difference(squares)))
#
# print(sorted(squares.symmetric_difference(even)))
# Similar to difference_update symmetric_difference_update behaves likewise, but its just that
# it performs symmetric difference
# There are two methods to remove an item from a set: discard and remove.
# remove raises a KeyError if the item to be removed doesn't exist;
# discard won't raise an error
squares.discard(4)
squares.remove(16)
squares.discard(8)
print(squares)
# use try/except to handle the KeyError raised by remove()
try:
    squares.remove(8)
except KeyError:
    print("the item 8 is not a member of the set")
# Subset and Superset
print("="*40)
even = set(range(0,40,2))
print(even)
squares_tuple = (4,6,16)
squares = set(squares_tuple)
print(squares)
if squares.issubset(even):
    print("squares is a subset of even")
if even.issuperset(squares):
    print("even is a superset of squares")
# Another type of set called frozen set whose items cannot be changed i.e. immutable
#frozen set
print("="*40)
even = frozenset(range(0,100,2))
print(even)
|
import numpy as np
import _pickle as pickle
import os
from Sliding_Block import *
from LQR import dlqr
import sys
sys.path.insert(0,'./../')
from Housekeeping import *
def generate_demonstrator_logs():
    """Roll out an LQR demonstrator on every (block mass, initial state) pair
    and pickle the observation/control/cost logs to LOGS_DIRECTORY.

    The grid of masses/states and the step budget come from the shared
    Housekeeping constants.
    """
    if not os.path.exists(LOGS_DIRECTORY):
        os.makedirs(LOGS_DIRECTORY)
    file_to_save_logs = LOGS_DIRECTORY + 'demonstrator_logs.pkl'
    # Nested dict: {block_mass: {initial_state: {obs/controls/costs arrays}}}
    logs_for_all_blocks = {}
    for block_mass in ALL_BLOCK_MASSES_TO_VALIDATE:
        logs_for_a_block_and_initial_state = {}
        for initial_state in INITIALIZATION_STATES_TO_VALIDATE:
            all_observations = []
            all_controls = []
            all_costs = []
            env = Sliding_Block(mass=block_mass, initial_state=initial_state)
            # Discrete-time LQR gain for this block's dynamics; control law u = -K x.
            K, X, eigVals = dlqr(env.A, env.B, env.Q, env.R)
            observation = env.state
            control = -1. * np.dot(K, observation)
            step_limit = 0
            # NOTE(review): the env's `finish` flag is ignored — every rollout
            # runs for exactly MAXIMUM_NUMBER_OF_STEPS; confirm this is intended.
            while (step_limit < MAXIMUM_NUMBER_OF_STEPS):
                step_limit += 1
                all_observations.append(observation)
                all_controls.append(control)
                observation, cost, finish = env.step(control)
                all_costs.append(cost)
                control = -1. * np.dot(K, observation)
            logs_for_a_block_and_initial_state[str(initial_state)] = {OBSERVATIONS_LOG_KEY: np.concatenate(all_observations), DEMONSTRATOR_CONTROLS_LOG_KEY: np.concatenate(all_controls),
                                                                     DEMONSTRATOR_COSTS_LOG_KEY: np.concatenate(all_costs)}
        logs_for_all_blocks[str(block_mass)] = logs_for_a_block_and_initial_state
    # protocol=-1: use the highest pickle protocol available.
    with open(file_to_save_logs, 'wb') as f:
        pickle.dump(logs_for_all_blocks, f, protocol=-1)
if __name__ == '__main__':
    generate_demonstrator_logs()
from datetime import datetime
import json
import decimal
from server.models.associations import Associations
from datetime import datetime
from pytz import timezone
import pytz
def comma_num(num):
    """Return *num* truncated to an int and formatted with thousands separators."""
    return f"{int(num):,}"
def pst_time(date):
    """Format a naive UTC datetime as 'MM/DD/YYYY HH:MM PST' in US/Pacific time.

    NOTE(review): the suffix is always 'PST' even when the converted time is
    actually in PDT — confirm this display behavior is intentional.
    """
    # Interpret the naive input as UTC, then convert to US/Pacific.
    utc_dt = pytz.utc.localize(date)
    pst_tz = timezone('US/Pacific')
    pst_dt = pst_tz.normalize(utc_dt.astimezone(pst_tz))
    return "{} PST".format(pst_dt.strftime('%m/%d/%Y %H:%M'))
def pst_date(date):
    """Format a naive UTC datetime as 'MM/DD/YYYY' in US/Pacific time."""
    # Interpret the naive input as UTC, then convert to US/Pacific.
    utc_dt = pytz.utc.localize(date)
    pst_tz = timezone('US/Pacific')
    pst_dt = pst_tz.normalize(utc_dt.astimezone(pst_tz))
    return "{}".format(pst_dt.strftime('%m/%d/%Y'))
def get_property_types():
    """Return the supported rental property types."""
    return "apartment condo duplex".split()
def get_status_types():
    """Return the listing status labels, in display order."""
    statuses = [
        'Application Pending',
        'Application Approved',
        'Ready for move-in',
        'Not Available',
        'Ready On',
    ]
    return statuses
def get_associations():
    """Return every association as a list of {'acn_name', 'acn_loc'} dicts."""
    return [
        {'acn_name': assoc.acn_name, 'acn_loc': assoc.acn_loc}
        for assoc in Associations.query.all()
    ]
def get_associations_by_loc():
    """Group association link dicts by their location.

    NOTE(review): each entry's 'acn_loc' key holds the association *name*
    (original behavior, preserved as-is) — confirm the key name is intended.
    """
    grouped = {}
    for assoc in Associations.query.all():
        entry = {
            'acn_loc': assoc.acn_name,
            'acn_url': assoc.acn_url
        }
        grouped.setdefault(assoc.acn_loc, []).append(entry)
    return grouped
def serialize(object, classname):
    """Serialise a SQLAlchemy-style model instance into a JSON-friendly dict.

    Args:
        object: model instance whose column attributes are read.
        classname: model class providing ``__table__.columns`` (each column
            exposes a ``.key`` attribute name).

    Returns:
        dict mapping column key -> value, with datetimes as ISO strings and
        Decimals as int (when whole) or float.
    """
    # NOTE: parameter names are kept (despite shadowing the builtin `object`)
    # because renaming them would break keyword-argument callers.
    result = {}
    for column in classname.__table__.columns:
        value = getattr(object, column.key)
        if isinstance(value, datetime):
            result[column.key] = value.isoformat()
        elif isinstance(value, decimal.Decimal):
            as_float = float(value)
            # Emit whole-number decimals as ints so JSON shows 2, not 2.0.
            result[column.key] = int(as_float) if as_float % 1 == 0 else as_float
        else:
            result[column.key] = value
    return result
def jinjaf_init(app):
    """Register the template helper functions as Jinja globals on *app*."""
    app.jinja_env.globals.update(pst_time=pst_time, comma_num=comma_num)
|
from rest_framework import routers
from app.apiviews import (
EntryViewSet,
NotificationViewSet,
PrivateMessageViewSet,
TagViewSet,
)
# Register the API viewsets on DRF's default router (also serves the API root).
router = routers.DefaultRouter()
# EntryViewSet declares a queryset, so its basename is derived automatically.
router.register(r"entries", EntryViewSet)
# The remaining viewsets get explicit basenames (presumably because they
# build their querysets dynamically — confirm in app.apiviews).
router.register(r"notifications", NotificationViewSet, basename="notifications")
router.register(r"privatemessages", PrivateMessageViewSet, basename="privatemessages")
router.register(r"tags", TagViewSet, basename="tags")
|
def main():
    """Demo of dict.get() on nested name/year count data.

    Prints the 1910 count for the target name, or '**' when absent.
    """
    name_data = {'Mick': {2010: 2, 2000: 1, 1910: 10}, 'Sam': 2021, 'Tim': 2022}
    target = 'Mick'
    # Single lookup via .get(); the original fetched the same key twice and
    # also kept an unused `matching_names` list.
    count = name_data[target].get(1910)
    if count is None:
        print('**')
    else:
        print(count)


if __name__ == "__main__":
    main()
import base64
import json
import logging
import time
import traceback
import sys
from apiclient import errors
from docopt import docopt
from googleapiclient import discovery
from google.cloud import pubsub_v1
logger = logging.getLogger('resurrect.prere')
PROJECT_ID = ' YOUR PROJECTID '
class GoogleCloud:
    """Helper class for interacting with Google Cloud compute API."""
    def __init__(self, project_id):
        # disable cache discovery
        # https://github.com/google/google-api-python-client/issues/299#issuecomment-268915510
        # NOTE(review): no credentials passed — presumably relies on Application
        # Default Credentials from the runtime environment; confirm.
        compute = discovery.build('compute', 'v1', cache_discovery=False)
        self.inst_api = compute.instances()
        self.project_id = project_id
    def get_instance(self, zone, inst_name):
        """Return a dictionary describing a GCE instance, if it exists."""
        return self.inst_api.get(project=self.project_id, zone=zone,
                                 instance=inst_name).execute()
    def start_instance(self, zone, inst_name):
        """Call the start GCE instance API, returning the operation response."""
        return self.inst_api.start(project=self.project_id, zone=zone,
                                   instance=inst_name).execute()
def resurrect_instance(project_id, instance_desc):
    """Try resurrecting a terminated (preempted) GCE instance.
    Input `instance_desc`: dictionary with the instance 'name' and 'zone'.
    Ignores instance if: it doesn't exist; it's already running.
    Retry if: instance not yet terminated.
    """
    try:
        inst_name, zone = instance_desc['name'], instance_desc['zone']
    except KeyError:
        logger.error('Parsed message missing mandatory fields: %r', instance_desc)
        return
    except TypeError:
        logger.error('Parsed message not valid dictionary: %r', instance_desc)
        return
    logger.info('Got resurrection request for instance "%s" in zone "%s"',
                inst_name, zone)
    gcloud = GoogleCloud(project_id)
    still_running_count = 0
    # Poll until the instance reaches TERMINATED (then start it) or we give up.
    while True:
        try:
            gce_inst = gcloud.get_instance(zone, inst_name)
        except (errors.HttpError, TypeError):
            logger.warning('No instance named "%s" in zone "%s"', inst_name, zone)
            return
        if gce_inst['status'] == 'TERMINATED':
            logger.info('Attempting to start terminated instance "%s" in zone "%s"',
                        inst_name, zone)
            response = gcloud.start_instance(zone, inst_name)
            logger.debug('Started GCE operation: %r', response)
            return
        elif gce_inst['status'] == 'STOPPING':
            # Shutdown in progress: short poll until it reaches TERMINATED.
            logger.info('Instance "%s/%s" is stopping - waiting for termination',
                        zone, inst_name)
            time.sleep(10.0)
        elif gce_inst['status'] == 'RUNNING':
            # Give up after 6 polls at 30s (~3 minutes of continuous RUNNING).
            still_running_count += 1
            if still_running_count > 6:
                logger.warning('Instance "%s/%s" has been running for the last 3 '
                               'minutes - assuming it\'s not about to terminate',
                               zone, inst_name)
                return
            logger.info('Instance "%s/%s" still running - waiting for termination',
                        zone, inst_name)
            time.sleep(30.0)
        else:
            # Any other status (STAGING, SUSPENDED, ...) is not handled here.
            logger.warning('Not sure how to handle instance "%s/%s" status: "%s" '
                           '-- ignoring', zone, inst_name, gce_inst['status'])
def configure_logging():
    """Configure DEBUG-level logging with console output."""
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.setLevel(logging.DEBUG)
    logger.addHandler(console_handler)
def cloudfunc_entry(event, context):
    """Triggered from a message on a Cloud Pub/Sub topic.
    Args:
         event (dict): Event payload; 'data' holds a base64-encoded JSON body.
         context (google.cloud.functions.Context): Metadata for the event.
    """
    try:
        print(event)
        # Pub/Sub delivers the payload base64-encoded under 'data'; direct
        # invocations may pass the dict itself.
        if 'data' in event:
            pubsub_message = base64.b64decode(event['data']).decode('utf-8')
        else:
            pubsub_message = event
        # resurrect instance based on message data
        try:
            if 'data' in event:
                instance_desc = json.loads(pubsub_message)
            else:
                instance_desc = pubsub_message
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only parse/shape errors are expected here
        # (json.JSONDecodeError is a subclass of ValueError).
        except (TypeError, ValueError):
            logger.exception('Failed parsing JSON message - ignoring it\n%s', pubsub_message)
        else:
            resurrect_instance(PROJECT_ID, instance_desc)
    except Exception as err:
        # Last-resort guard: a Cloud Function should not crash on bad input.
        print(f'There was an error {err}')
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import asyncio
import logging
from typing import Any, Callable, List, NoReturn, Optional, Sequence, Union
import torch
import torch.multiprocessing as mp
from rich.console import Console
import moolib
import rlmeta.utils.asycio_utils as asycio_utils
from rlmeta.core.launchable import Launchable
from rlmeta.core.remote import Remotable
console = Console()
class Server(Launchable):
    """A moolib RPC server that runs a set of Remotable services in a child process.

    Lifecycle: add_service() -> start() (spawns a process that listens on
    `addr` and drains one queue per remote method) -> join()/terminate().
    """
    def __init__(self, name: str, addr: str, timeout: float = 60) -> None:
        self._name = name
        self._addr = addr
        self._timeout = timeout
        self._services = []
        # Populated lazily: child process, moolib.Rpc endpoint, asyncio loop/tasks.
        self._process = None
        self._server = None
        self._loop = None
        self._tasks = None
    def __repr__(self):
        return f'Server(name={self._name} addr={self._addr})'
    @property
    def name(self) -> str:
        return self._name
    @property
    def addr(self) -> str:
        return self._addr
    @property
    def timeout(self) -> float:
        return self._timeout
    def add_service(self, service: Union[Remotable,
                                         Sequence[Remotable]]) -> None:
        # Accepts a single service or a list/tuple of them.
        if isinstance(service, (list, tuple)):
            self._services.extend(service)
        else:
            self._services.append(service)
    def start(self) -> None:
        # init_launching runs in the parent before the fork; run() executes in the child.
        self.init_launching()
        self._process = mp.Process(target=self.run)
        self._process.start()
    def join(self) -> None:
        self._process.join()
    def terminate(self) -> None:
        if self._process is not None:
            self._process.terminate()
    def run(self) -> NoReturn:
        # Child-process entry point: finish initialisation, then serve forever.
        self.init_execution()
        self._start_services()
    def init_launching(self) -> None:
        # Pre-fork hook for each Launchable service.
        for service in self._services:
            if isinstance(service, Launchable):
                service.init_launching()
    def init_execution(self) -> None:
        # Post-fork hook: initialise services, then bind the RPC endpoint.
        for service in self._services:
            if isinstance(service, Launchable):
                service.init_execution()
        self._server = moolib.Rpc()
        self._server.set_name(self._name)
        self._server.set_timeout(self._timeout)
        console.log(f"Server={self.name} listening to {self._addr}")
        try:
            self._server.listen(self._addr)
        except:
            # Logged for context about which server failed, then re-raised.
            console.log(f"ERROR on listen({self._addr}) from: server={self}")
            raise
    def _start_services(self) -> NoReturn:
        # Register one queue-draining task per remote method, then run the loop.
        self._loop = asyncio.get_event_loop()
        self._tasks = []
        console.log(f"Server={self.name} starting services: {self._services}")
        for service in self._services:
            for method in service.remote_methods:
                method_impl = getattr(service, method)
                # __batch_size__ is an attribute set on batched methods; None => unbatched.
                batch_size = getattr(method_impl, "__batch_size__", None)
                self._add_server_task(service.unique_name(method), method_impl,
                                      batch_size)
        try:
            if not self._loop.is_running():
                self._loop.run_forever()
        except Exception as e:
            logging.error(e)
            raise
        finally:
            # Cancel outstanding tasks before tearing the loop down.
            for task in self._tasks:
                task.cancel()
            self._loop.stop()
            self._loop.close()
        # NOTE(review): only reached after the loop exits, despite the message text.
        console.log(f"Server={self.name} services started")
    def _add_server_task(self, func_name: str, func_impl: Callable[..., Any],
                         batch_size: Optional[int]) -> None:
        # Create the moolib queue (optionally with dynamic batching) and its drain task.
        if batch_size is None:
            que = self._server.define_queue(func_name)
        else:
            que = self._server.define_queue(func_name,
                                            batch_size=batch_size,
                                            dynamic_batching=True)
        task = asycio_utils.create_task(self._loop,
                                        self._async_process(que, func_impl))
        self._tasks.append(task)
    async def _async_process(self, que: moolib.Queue,
                             func: Callable[..., Any]) -> None:
        # Drain the queue forever: each item is (return-callback, args, kwargs).
        try:
            while True:
                ret_cb, args, kwargs = await que
                ret = func(*args, **kwargs)
                ret_cb(ret)
        except asyncio.CancelledError:
            # Normal shutdown path (tasks are cancelled in _start_services).
            pass
        except Exception as e:
            logging.error(e)
            raise e
class ServerList:
    """A thin container that broadcasts lifecycle calls to a group of Servers."""

    def __init__(self, servers: Optional[Sequence[Server]] = None) -> None:
        self._servers = []
        if servers is not None:
            self._servers.extend(servers)

    def __getitem__(self, index: int) -> Server:
        return self._servers[index]

    @property
    def servers(self) -> List[Server]:
        return self._servers

    def append(self, server: Server) -> None:
        self._servers.append(server)

    def extend(self, servers: Union[ServerList, Sequence[Server]]) -> None:
        # Accept either another ServerList or any plain sequence of servers.
        source = servers.servers if isinstance(servers, ServerList) else servers
        self._servers.extend(source)

    def start(self) -> None:
        for srv in self._servers:
            srv.start()

    def join(self) -> None:
        for srv in self._servers:
            srv.join()

    def terminate(self) -> None:
        for srv in self._servers:
            srv.terminate()
ServerLike = Union[Server, ServerList]
|
import requests

# Read the LIFX API token from a local file (strips any newlines).
with open('lifx_token.txt', 'r') as myfile:
    token = myfile.read().replace('\n', '')

headers = {
    "Authorization": "Bearer %s" % token,
}

#response = requests.get('https://api.lifx.com/v1/lights/all', headers=headers)
#print(response.text)

# "Breathe" effect parameters: pulse from `from_color` to `color`.
data = {
    "period": 2,
    "cycles": 6,
    "color": "green",
    "from_color": "blue"
}

# timeout so the script cannot hang forever on a dead connection
response = requests.post('https://api.lifx.com/v1/lights/all/effects/breathe',
                         data=data, headers=headers, timeout=10)
# Fail loudly on an API error instead of exiting silently.
response.raise_for_status()
|
import pytest
from discovery import api
def sample_payload():
    """Prepared-query creation payload sent to the Consul API."""
    service = {
        "Service": "redis",
        "Failover": {"NearestN": 3, "Datacenters": ["dc1", "dc2"]},
        "Near": "node1",
        "OnlyPassing": False,
        "Tags": ["primary", "!experimental"],
        "NodeMeta": {"instance_type": "m3.large"},
        "ServiceMeta": {"environment": "production"},
    }
    return {
        "Name": "my-query",
        "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
        "Token": "",
        "Service": service,
        "DNS": {"TTL": "10s"},
    }
def sample_response():
    """Body returned by a successful prepared-query create."""
    query_id = "8f246b77-f3e1-ff88-5b48-8ec93abf3e05"
    return {"ID": query_id}
def sample_read_response():
    """List-of-queries body returned when reading prepared queries."""
    service = {
        "Service": "redis",
        "Failover": {"NearestN": 3, "Datacenters": ["dc1", "dc2"]},
        "OnlyPassing": False,
        "Tags": ["primary", "!experimental"],
        "NodeMeta": {"instance_type": "m3.large"},
        "ServiceMeta": {"environment": "production"},
    }
    query = {
        "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05",
        "Name": "my-query",
        "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
        "Token": "<hidden>",
        "Service": service,
        "DNS": {"TTL": "10s"},
        "RaftIndex": {"CreateIndex": 23, "ModifyIndex": 42},
    }
    return [query]
def sample_execute_response():
    """Execution result: the matched service plus one healthy node."""
    node = {
        "ID": "40e4a748-2192-161a-0510-9bf59fe950b5",
        "Node": "foobar",
        "Address": "10.1.10.12",
        "Datacenter": "dc1",
        "TaggedAddresses": {"lan": "10.1.10.12", "wan": "10.1.10.12"},
        "NodeMeta": {"instance_type": "m3.large"},
    }
    service = {
        "ID": "redis",
        "Service": "redis",
        "Tags": None,
        "Meta": {"redis_version": "4.0"},
        "Port": 8000,
    }
    checks = [
        {
            "Node": "foobar",
            "CheckID": "service:redis",
            "Name": "Service 'redis' check",
            "Status": "passing",
            "Notes": "",
            "Output": "",
            "ServiceID": "redis",
            "ServiceName": "redis",
        },
        {
            "Node": "foobar",
            "CheckID": "serfHealth",
            "Name": "Serf Health Status",
            "Status": "passing",
            "Notes": "",
            "Output": "",
            "ServiceID": "",
            "ServiceName": "",
        },
    ]
    return {
        "Service": "redis",
        "Nodes": [
            {
                "Node": node,
                "Service": service,
                "Checks": checks,
                "DNS": {"TTL": "10s"},
                "Datacenter": "dc3",
                "Failovers": 2,
            }
        ],
    }
def sample_explain_response():
    """Explain result for a templated prepared query."""
    template = {
        "Type": "name_prefix_match",
        "Regexp": "^geo-db-(.*?)-([^\\-]+?)$",
    }
    service = {
        "Service": "mysql-customer",
        "Failover": {"NearestN": 3, "Datacenters": ["dc1", "dc2"]},
        "OnlyPassing": True,
        "Tags": ["primary"],
        "Meta": {"mysql_version": "5.7.20"},
        "NodeMeta": {"instance_type": "m3.large"},
    }
    return {
        "Query": {
            "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05",
            "Name": "my-query",
            "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
            "Token": "<hidden>",
            "Template": template,
            "Service": service,
        }
    }
@pytest.fixture
def query(consul_api):
    """Query API client under test.

    Fix: the `@pytest.mark.asyncio` mark was removed — marks have no effect
    on fixtures (pytest warns about them), and this fixture is synchronous.
    """
    return api.Query(client=consul_api)
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_response()])
async def test_create(query, expected):
    """Creating a prepared query returns the new query ID."""
    query.client.expected = expected
    created = await query.create(sample_payload())
    assert await created.json() == sample_response()
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_read_response()])
async def test_read_without_uuid(query, expected):
    """Reading with no ID lists every prepared query."""
    query.client.expected = expected
    listed = await query.read()
    assert await listed.json() == sample_read_response()
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_read_response()])
async def test_read_with_uuid(query, expected):
    """Reading a specific query ID returns that query's definition."""
    query.client.expected = expected
    found = await query.read("8f246b77-f3e1-ff88-5b48-8ec93abf3e05")
    assert await found.json() == sample_read_response()
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [200])
async def test_delete(query, expected):
    """Deleting a prepared query by ID answers HTTP 200."""
    query.client.expected = expected
    result = await query.delete("8f246b77-f3e1-ff88-5b48-8ec93abf3e05")
    assert result.status == 200
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [200])
async def test_update(query, expected):
    """Updating an existing prepared query answers HTTP 200."""
    query.client.expected = expected
    result = await query.update(
        "8f246b77-f3e1-ff88-5b48-8ec93abf3e05", sample_payload()
    )
    assert result.status == 200
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_execute_response()])
async def test_execute(query, expected):
    """Executing a prepared query returns the matched nodes."""
    query.client.expected = expected
    executed = await query.execute("8f246b77-f3e1-ff88-5b48-8ec93abf3e05")
    assert await executed.json() == sample_execute_response()
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_explain_response()])
async def test_explain(query, expected):
    """Explaining a prepared query returns its fully rendered definition."""
    query.client.expected = expected
    explained = await query.explain("8f246b77-f3e1-ff88-5b48-8ec93abf3e05")
    assert await explained.json() == sample_explain_response()
|
import pandas as pd
from sklearn.svm import SVC  # NOTE(review): imported but unused here — presumably intended for a later modelling step
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix

# Import data
df = pd.read_csv("train.csv")

# Inspect data: pairwise scatter plots of all columns, KDE curves on the diagonal.
scatter_matrix(df, alpha=0.2, figsize=(12, 12), diagonal='kde')
plt.show()
|
from .containers import Dynamics, Cost
from .controller import iLQR, MPC
from .utils import GetSyms, Constrain, SoftConstrain, Bounded
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Data Processing for Journal Entries
# Author: Gordon Blake
"""
Import command
mongoimport --db vitaDB --collection entries --jsonArray
< C:\Users\gblak\OneDrive\CodePractice\webdev\vita-app\data\parsed_entries.json
Fixing the date
db.getCollection('entries').find().forEach(function(entry) {
entry.timestamp = new Date(entry.timestamp);
db.getCollection('entries').save(entry);
});
"""
import re, datetime, os, nltk, json
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.util import ngrams
# import cPickle as pickle
import pandas as pd
from collections import defaultdict, Counter
# Month name / common abbreviation -> month number, used when parsing entry
# timestamps ("may" serves as both full name and abbreviation).
MONTH_NAMES = {
    "january": 1,
    "february": 2,
    "march": 3,
    "april": 4,
    "may": 5,
    "june": 6,
    "july": 7,
    "august": 8,
    "september": 9,
    "october": 10,
    "november": 11,
    "december": 12,
    "jan": 1,
    "feb": 2,
    "mar": 3,
    "apr": 4,
    "jun": 6,
    "jul": 7,
    "aug": 8,
    "sep": 9,
    "oct": 10,
    "nov": 11,
    "dec": 12
}
# Entry header line, e.g. "MONDAY, JUNE 18, 2018 AT 9:30 PM":
# groups = (weekday, month name, day, year, "H:MM", hour, minutes, AM/PM).
TIME_PATTERN = re.compile("(\w+), (\w+) (\d+), (\d{4})(?: AT|,) ((\d+):(\d{2})) (AM|PM)")
# "Month 20xx" section stamps between entries; skipped while parsing bodies.
MONTH_PATTERN = re.compile("(" + "|".join(MONTH_NAMES.keys()) + ") (20\d{2})",
    flags=re.IGNORECASE)
# (?:(\d+)(?:\–\d+)? )?([^,]+), ([^,]+), ([^,]+), ([^,•]*[^\s•])(( • (\d+)° (.+))?
#TODO: Include dash in num or no?
# Footer variants, from most to least specific; the optional trailing group
# " • NN° Conditions" carries the weather.
ADDRESS_PATTERN_FULL = re.compile(
    "(?:(\d+)(?:\–\d+)? )?([^,]+), ([^,]+), ([^,]+), ([^,•]*[^\s•])( • (\d+)°F? (.+))?")
ADDRESS_PATTERN_STREET = re.compile("([^,]+), ([^,]+), ([^,]+), ([^,•]*[^\s•])( • (\d+)°F? (.+))?")
ADDRESS_PATTERN_CITY = re.compile("([^,]+), ([^,]+), ([^,•]*[^\s•])( • (\d+)°F? (.+))?")
# "123 Some Street" -> optional house number + street/place name.
ADDRESS_PATTERN_PLACENAME = re.compile("(\d+)?\s*(.+)")
# Text around an embedded Day One photo/moment tag.
DAYONE_MOMENT_PATTERN = re.compile("(.*)!\[\]\(dayone-moment:[^)]+\)(.*)",
    flags=re.DOTALL) #Need to match across lines
# PUNCT_TOKENS = [',', '.', '(', ')', '!', '$', '%', '?', "'", '"']
# Punctuation stripped from tokens before word counting.
PUNCT_PATTERN = re.compile(",|\.|\(|\)|!|\$|%|\?|\"|“|”")
# Capitalised words NLTK tends to misclassify as PERSON entities in this corpus.
NON_PERSONS = frozenset(["Anyway", "Had", "Lots", "Space", "How", "Molten", "Artichoke", "Afterward",
    "Beyond", "Meekly", "Country", "Tears", "Dwarf", "Hmm", "Good", "Trader", "Mi", "Creo",
    "Sal", "Elder", "Kitchen", "Forget", "Smooth", "Brie", "Church", "Lie", "Google", "Okay",
    "Yup", "Tomorrow", "Ten", "Tis", "Basically", "Habla", "Whiny", "Busy", "Brother", "Annoy",
    "Lessons", "Carols", "Econ", "Which", "Chorale", "Works", "Always", "Got", "Break", "Gorgonzola",
    "Gratitute", "Sister", "Heads", "Market", "Pretty", "Ye", "Were", "Catch", "Apples", "Steam",
    "Zion", "Cool", "Micro", "Wealthfront", "Studied", "Psych", "Juntos", "Bro", "Math", "Think",
    "Christmas", "Stanford", "Made", "Played", "Read", "Hope", "Home", "Museum", "Hopefully", "Left",
    "Ungh", "Twitter"])
# IGNORE_TOKENS = [')', '(', '.', 'a', ',', 'the', 'and', 'an', 'of', 'in', 'that', 'for',
# 'on', 'i', 'to', 'from', 'which', 'this', 'with', 'it', 'at', "n't", 'my', 'was', 'we', 'had',
# 'so', 'as', 'about', 'were', 'are', 'is', "'s"]
# LAT_PATTERN = re.compile("(\d+\.\d+)° (?:N|S), (\d+\.\d+)° (?:W|E)")
#TODO: Determine groupings based on API needs
# Module-level function so default-dict is pickleable
# def dd(): return 0
# class MonthGroup:
# def __init__(self, month, year):
# self.month = month
# self.year = year
# self.entries = {}
# def addEntry(entry):
# assert(isinstance(entry, Entry))
# assert(entry.getMonth() == self.month)
# self.entries[entry.getDay] = entry
# def getNumEntries(self):
# return len(self.entries)
# def __str__(self):
# return "{} {} : {} Entries".format(self.month, self.year, self.getNumEntries())
# Shared VADER analyzer used by every Entry's sentiment pass.
sid = SentimentIntensityAnalyzer()
class EntryEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialise this module's model objects."""

    def default(self, obj):
        # Model objects serialise as their attribute dict; datetimes as ISO strings.
        if isinstance(obj, (Entry, Location, Weather)):  # tuple form replaces the chained isinstance calls
            return obj.__dict__
        elif isinstance(obj, datetime.datetime):
            return obj.isoformat()  # TODO: may need different format for mongoose Date
        return json.JSONEncoder.default(self, obj)
class Entry:
    """One journal entry: timestamp, location, weather, plus NLP-derived
    statistics (token counts, named entities, per-sentence sentiment).

    Python 2 code: `text` arrives as a byte string, hence the `.decode` calls.
    """

    def __init__(self, timestamp, loc, weather, text):
        self.timestamp = timestamp
        self.loc = loc
        self.weather = weather
        # The raw text itself is not stored; only derived statistics are kept.
        self.tokens = Entry.tokenize(text)
        self.wordCounts = Entry.countWords(self.tokens)
        self.length = len(self.tokens)
        self.namedEntities = Entry.extractNamedEntities(text)
        self.sentiment = self.extractSentenceSentiment(text)
        # self.timestamp = Entry.parseTimestamp(header)
        # self.loc, self.weather = Entry.parseLoc(footer)
        # # self.text = text #TODO: example sentences?
        # self.tokens = Entry.tokenize(text)
        # self.wordCounts = Entry.countWords(self.tokens)
        # self.length = len(self.tokens)
        # self.namedEntities = Entry.extractNamedEntities(text)
        # self.sentiment = self.extractSentenceSentiment(text)
        #TODO: S
        # self.sentences = nltk.tokenize. TODO

    # Timestamp is in (weekday, M, D, Y, military time) format
    @classmethod
    def fromString(cls, text, header, footer):
        """Build an Entry from the plain-text export: `header` is a
        TIME_PATTERN match, `footer` the location/weather line."""
        timestamp = Entry.parseTimestamp(header)
        loc, weather = Entry.parseLoc(footer)
        return cls(timestamp, loc, weather, text)

    @classmethod
    def fromJSON(cls, json_object):
        """Build an Entry from one object of a Day One JSON export; location
        and weather are optional fields."""
        # timestamp = datetime.datetime(json_object["creationDate"])
        timestamp = json_object["creationDate"]
        if "location" in json_object:
            loc = Location.fromJSON(json_object["location"])
        else:
            loc = None
        if "weather" in json_object:
            weather = Weather.fromJSON(json_object["weather"])
        else:
            weather = None
        text = Entry.parseTextFromJSON(json_object["text"])
        return cls(timestamp, loc, weather, text) #TODO: strip dayone-moment

    def getTimestamp(self):
        """
        Timestamp is a datetime object (or the raw creationDate string when
        built via fromJSON).
        """
        return self.timestamp

    def getLength(self):
        # Length in tokens, recomputed from the token list.
        return len(self.tokens)

    def __str__(self):
        return "Timestamp: {}\nCounts: {}\nLocation: {}".format(self.timestamp, self.wordCounts, self.loc)

    @staticmethod
    def getMonthNum(name):
        # Month name or abbreviation -> 1..12 (KeyError on unknown names).
        name = name.lower()
        return MONTH_NAMES[name]

    @staticmethod
    def countWords(tokens):
        """
        Word attributes are:
        count: Number of times word appears in entry
        pos: part of speech of word
        sentiment: number of sentences with the word with pos, neutral, and neg
        sentiment in the entry
        """
        counts = {}
        for token in tokens:
            # Strip punctuation from the token before counting.
            word = PUNCT_PATTERN.sub("", token["word"])
            if len(word) == 0: continue
            if word in counts:
                counts[word]["count"] += 1
            else:
                counts[word] = {"count": 1,
                    "pos": token["pos"], "sentiment": {"pos": 0, "neu": 0, "neg": 0}}
        return counts

    @staticmethod
    def parseTextFromJSON(text):
        """
        Strips dayone-moment tags from input text
        text utf-8 encoded text string
        """
        match = DAYONE_MOMENT_PATTERN.match(text)
        if match:
            # Exactly one of match groups 1 and 3 should be None and one should be text
            if match.group(1) == "":
                if match.group(2) == "":
                    raise RuntimeError("Found empty text for entry: {}".format(text))
                else:
                    text = match.group(2)
            else:
                if not match.group(2) == "":
                    raise RuntimeError(
                        """Found multiple texts around moment tag for entry: {}\nGroup 1: {}\nGroup 2:{}""".format(
                            text, len(match.group(1)), match.group(2)))
                else:
                    text = match.group(1)
        return text.encode('utf-8')

    @staticmethod
    def parseLoc(line):
        """Parse a footer line into (Location, Weather-or-None), trying the
        address patterns from most to least specific."""
        match = ADDRESS_PATTERN_FULL.match(line)
        weather = None
        if match:
            if match.group(6) != None: weather = Weather(match.group(7), match.group(8))
            return Location(match), weather
        match = ADDRESS_PATTERN_STREET.match(line)
        if match:
            if match.group(5) != None: weather = Weather(match.group(6), match.group(7))
            return Location(match, 4), weather
        match = ADDRESS_PATTERN_CITY.match(line)
        if match:
            if match.group(4) != None: weather = Weather(match.group(5), match.group(6))
            return Location(match, 3), weather
        raise RuntimeError("Footer: {} not parsing".format(line))

    #TODO: determine format based on Google Charts API
    @staticmethod
    def parseTimestamp(res):
        """Convert a TIME_PATTERN match into a datetime (12h -> 24h clock)."""
        month = Entry.getMonthNum(res.group(2))
        hour = int(res.group(6))
        if hour != 12 and res.group(8) == "PM":
            hour = int(res.group(6)) + 12
        # NOTE(review): "12 AM" stays 12 rather than becoming 0 — confirm intent.
        return datetime.datetime(int(res.group(4)), month, int(res.group(3)), hour, int(res.group(7)))

    @staticmethod
    def tokenize(text):
        """Lower-cased word tokens tagged with their part of speech."""
        text = text.strip().lower()
        tokens = nltk.word_tokenize(text.decode("utf-8"))
        tokens = nltk.pos_tag(tokens)
        tokens = [{"word": x[0], "pos": x[1]} for x in tokens]
        return tokens

    @staticmethod
    def extractNamedEntities(text):
        """People mentioned in the entry, with known false positives
        (NON_PERSONS) filtered out."""
        tags = nltk.pos_tag(nltk.word_tokenize(text.decode("utf-8")))
        # for entry in tags:
        # print entry[0]
        # entry[0].decode("utf-8")
        tags = [(x[0].encode("ascii", "ignore"), x[1]) for x in tags]
        chunks = nltk.ne_chunk(tags);
        # print chunks
        # in ['PERSON', 'GPE', 'ORGANIZATION']
        people = set()
        for i in chunks.subtrees(filter = lambda x: x.label() == 'PERSON'):
            if(i[0][0] in NON_PERSONS): continue
            people.add(i[0][0])
        return list(people)

    def extractSentenceSentiment(self, text):
        """VADER sentiment per sentence, aggregated into entry-level counts
        and folded into the per-word sentiment tallies in self.wordCounts."""
        sentences = nltk.sent_tokenize(text.decode("utf-8"))
        entrySentiment = {"pos": 0, "neu": 0, "neg": 0, "score": 0}
        for sentence in sentences:
            scores = sid.polarity_scores(sentence)
            tokens = nltk.word_tokenize(sentence.strip().lower())
            entrySentiment["score"] += scores["compound"]
            if scores["compound"] > 0:
                sentiment = "pos"
            elif scores["compound"] < 0:
                sentiment = "neg"
            else:
                sentiment = "neu"
            entrySentiment[sentiment] += 1
            for word in tokens:
                if word in self.wordCounts:
                    self.wordCounts[word]["sentiment"][sentiment] += 1
        return entrySentiment
# def ddWordCount(elem): #TODO: remove
# return {"count": 1, "pos": elem[1]}
class Weather:
    """Temperature (°F) plus a textual conditions description."""

    def __init__(self, temp, conditions):
        self.temp = temp
        self.conditions = conditions

    @classmethod
    def fromJSON(cls, json_object):
        """Alternate constructor from a Day One JSON weather object (°C input)."""
        fahrenheitTemp = Weather.celsiusToFahrenheit(json_object["temperatureCelsius"])
        return cls(fahrenheitTemp, json_object["conditionsDescription"])

    @staticmethod
    def celsiusToFahrenheit(celsiusTemp):
        """Convert Celsius to Fahrenheit, rounded to the nearest whole degree."""
        fahrenheitTemp = (9.0 / 5) * celsiusTemp + 32
        return int(round(fahrenheitTemp, 0))
class Location:
    """Structured street address parsed either from a regex match (legacy
    plain-text export) or from Day One JSON fields.

    Bug fix: the class previously declared TWO `__init__` methods, so the
    second silently replaced the first and every `Location(match)` /
    `Location(match, n)` call in Entry.parseLoc raised TypeError. A single
    dispatching constructor now supports both historical call styles:
        Location(match)                             # full 5-group address
        Location(match, entries)                    # entries in {3, 4, 5}
        Location(placeName, city, region, country)  # JSON path
    """

    def __init__(self, *args):
        if len(args) == 4:
            # JSON path: (placeName, city, region, country).
            placeName, city, region, country = args
            self.num, self.street = Location.splitPlaceName(placeName)
            self.city = city
            self.region = region
            self.country = country
            return
        # Regex-match path: `entries` is the number of captured address
        # components (5 = full street address, 4 = no house number,
        # 3 = city-level only).
        match = args[0]
        entries = args[1] if len(args) == 2 else 5
        if entries == 5:
            self.num = match.group(1)  # TODO: multiple nums?
            self.street = match.group(2)
            self.city = match.group(3)
            self.region = match.group(4)
            self.country = match.group(5)
        elif entries == 4:
            self.num = None
            self.street = match.group(1)
            self.city = match.group(2)
            self.region = match.group(3)
            self.country = match.group(4)
        elif entries == 3:
            self.num = None
            self.street = None
            self.city = match.group(1)
            self.region = match.group(2)
            self.country = match.group(3)

    @classmethod
    def fromJSON(cls, json_object):
        """Alternate constructor from a Day One JSON location object."""
        return cls(
            json_object["placeName"].encode('utf-8'),
            json_object["localityName"].encode('utf-8'),
            json_object["administrativeArea"].encode('utf-8'),
            json_object["country"].encode('utf-8'))

    def getType(self):
        # NOTE(review): `self.type` is never assigned anywhere in this class,
        # so calling this raises AttributeError — confirm whether it is dead code.
        return self.type

    def __str__(self):
        return "Number: {}\nStreet:{}\nCity:{}\nRegion:{}\nCountry:{}".format(
            self.num, self.street, self.city, self.region, self.country)

    @staticmethod
    def splitPlaceName(placeName):
        """Split '123 Some Street' into ('123', 'Some Street'); names without
        a leading number yield (None, None) only on no match at all."""
        # TODO: Extend regex to match addresses of the form 228-232 Street Name
        match = ADDRESS_PATTERN_PLACENAME.match(placeName)
        if match:
            return (match.group(1), match.group(2))  # TODO: test
        else:
            return (None, None)
def parseJSON(name):
    """Parse a Day One JSON export file into Entry objects and write them to
    parsed_entries.json.

    Fix: the output file is now closed deterministically via a context
    manager (it was previously opened and never closed).
    """
    with open(name, 'r') as read_file:
        data = json.loads(read_file.read())
    entries = [Entry.fromJSON(entry) for entry in data["entries"]]
    with open('parsed_entries.json', "wb") as outfile:
        json.dump(entries, outfile, cls=EntryEncoder)
def parseFile(name):
    """Parse the plain-text Day One export `name` into Entry objects and dump
    them to parsed_entries.json.

    Entries are delimited by TIME_PATTERN header lines; the last non-blank
    line before the next header is treated as the entry's location/weather
    footer and removed from the body.

    Fixes: the output file is closed via a context manager, the `next`
    builtin is no longer shadowed (renamed to `line`), and the dead
    `Footer = ""` assignment was removed.
    """
    entries = []
    with open(name, 'r') as f:
        line = f.readline()
        buf = ""
        curHeader = ""
        prevLine = ""
        while line != "":
            header = TIME_PATTERN.match(line)
            if header:
                if curHeader:
                    # Drop the footer line from the body before saving the entry.
                    buf = buf[0:buf.find(prevLine)]
                    entries.append(Entry.fromString(buf, curHeader, prevLine))
                curHeader = header
                buf = ""
            else:
                if not MONTH_PATTERN.match(line):  # Skip month stamps?
                    buf = ''.join([buf, line])
                if line.strip(): prevLine = line  # TODO: could track buffer index
            line = f.readline()
    # NOTE(review): the final buffered entry (after EOF) is never appended —
    # this matches the original behaviour; confirm whether it is intended.
    with open('parsed_entries.json', "wb") as outfile:
        json.dump(entries, outfile, cls=EntryEncoder)
def generateNGrams(filename):
    """Build word n-gram (n = 1..5) sampling tables from the plain-text export
    and dump them to ngrams.json.

    Each table maps a context to candidate next words, each with a cumulative
    probability range in [0, 1) suitable for roulette-wheel sampling.
    Python 2 code: note `dict.iteritems` and `str.decode`.
    """
    with open(filename, 'r') as f:
        # Same header/footer scan as parseFile, but only the entry bodies are kept.
        next = f.readline()
        buffers = []
        buf = ""
        curHeader = ""
        prevLine = ""
        while next != "":
            header = TIME_PATTERN.match(next)
            if header:
                if curHeader:
                    buf = buf[0:buf.find(prevLine)]
                    buffers.append(buf)
                curHeader = header
                buf = ""
                Footer = ""
            else:
                if not MONTH_PATTERN.match(next): #Skip month stamps?
                    buf = ''.join([buf, next])
                if next.strip(): prevLine = next #TODO: could track buffer index
            next = f.readline()
        text = ''.join(buffers)
        tokens = nltk.word_tokenize(text.decode('utf-8'))
        #TODO: Lowercase tokens or no?
        # trigrams = generateGramsDict(tokens, 3)
        res = []
        unigrams = Counter(tokens)
        totalWords = sum(unigrams.values())
        unigrams = dict(unigrams)
        start = 0
        unigramList = []
        # Unigrams: each word gets a [start, end) slice of the unit interval
        # proportional to its relative frequency.
        for word, count in unigrams.iteritems():
            end = start + float(count) / totalWords
            unigramList.append({"word": word, "range": [start, end]})
            start = end
        res.append({"name": "1-gram", "content": unigramList})
        for size in range(2, 6):
            name = "{}-gram".format(size)
            res.append({"name": name, "content": generateGramsDict(tokens, size)})
        outfile = open('ngrams.json', "wb")
        json.dump(res, outfile)
        # outfile = open('unigrams.json', "wb")
        # json.dump(unigramList, outfile)
def generateGramsDict(tokens, n):
    """Map each (n-1)-word context to its observed next words, each with a
    cumulative-probability range [start, end) for sampling. Python 2 code
    (`dict.iteritems`)."""
    # Note: uncomment to laplace smooth
    # K_SIZE = 0.001 # Size of add-K smoothing factor
    grams = ngrams(tokens, n)
    grams = dict(Counter(grams))
    res = defaultdict(lambda: [])
    # First pass: raw counts keyed by the joined (n-1)-word context.
    for gram, count in grams.iteritems():
        key = ' '.join(gram[0:n-1])
        res[key].append({"word": gram[n-1], "range": count})
    # K_smooth = K_SIZE * len(grams)
    # Second pass: convert counts into cumulative probability ranges.
    for _, endList in res.iteritems():
        # Note: Uncomment to Laplace Smooth
        #endList.append({"word": "<*>", "range": K_smooth}) #TODO: Better wildcard char?
        # WE don't add K to entries because the wildcard is a uniform random draw from all grams
        total = sum(map(lambda x: x["range"], endList))
        start = 0
        for item in endList:
            end = start + float(item["range"]) / total
            item["range"] = [start, end] #Note this range is open on top end [start, end)
            start = end
    return res
def processHealthData(filename):
    """Convert a health-data CSV into health.json, rounding distances to two
    decimals and parsing dates into real datetimes.

    Fix: the output file is now closed via a context manager (it was
    previously opened and never closed).
    """
    output = []
    healthData = pd.read_csv(filename)
    for _, row in healthData.iterrows():
        item = dict(row)
        item["distance"] = round(item["distance"], 2)
        item["date"] = datetime.datetime.strptime(row["date"], "%m/%d/%y")
        output.append(item)
    with open("health.json", "wb") as outfile:
        json.dump(output, outfile, cls=EntryEncoder)
def main():
    # Python 2 entry point; earlier processing stages are kept below,
    # commented out, so individual steps can be re-run by hand.
    print os.getcwd()
    os.chdir("C:\Users\gblak\OneDrive\CodePractice\webdev\\vita-app\data")
    # with open("parsed_entries.p", 'rb') as f:
    # entries = pickle.load(f)
    # for entry in entries: print entry
    # parseFile("problemChildren.txt")
    # parseFile("testText.txt")
    # parseFile("all-entries-2018-03-09.txt")
    # generateNGrams("smallTest.txt")
    # generateNGrams("all-entries-2018-03-09.txt")
    # processHealthData("Health Data.csv")
    parseJSON("all-entries-6-18-18.json")
    # generateNGrams("all-entries-2018-03-09.txt")
if __name__ == '__main__': main() |
# -- coding: utf-8 --
import tensorflow as tf
import numpy as np
import argparse
from model.hyparameter import parameter
import os
import pandas as pd
train_path=r'data/train.csv'
class DataIterator():
    """Sliding-window iterator over the site/time CSV, feeding a tf.data
    pipeline. Note (translated from Chinese): keep using the same object for
    the training and the test period, otherwise the data must be reloaded."""
    def __init__(self,
                 site_id=0,
                 is_training=True,
                 time_size=3,
                 prediction_size=1,
                 data_divide=0.9,
                 window_step=1,
                 normalize=False,
                 hp=None):
        '''
        :param is_training: while is_training is True,the model is training state
        :param field_len:
        :param time_size:
        :param prediction_size:
        :param target_site:
        '''
        self.min_value=0.000000000001  # epsilon used in normalization to avoid division by zero
        self.site_id=site_id  # ozone ID
        self.time_size=time_size  # time series length of input
        self.prediction_size=prediction_size  # the length of prediction
        self.is_training=is_training  # true or false
        self.data_divide=data_divide  # the divide between in training set and test set ratio
        self.window_step=window_step  # windows step
        self.para=hp
        self.source_data=self.get_source_data(train_path)
        # self.data=self.source_data.loc[self.source_data['ZoneID']==self.site_id]
        self.id_dict = dict()
        self.data=self.source_data
        self.id_index=dict()
        # Road dictionary: records whether a road exists between two toll
        # stations (translated from Chinese). Keyed by (origin, destination).
        for line in self.data.values:
            if (line[0], line[1]) not in self.id_dict and line[0] != line[1]:
                self.id_dict[(int(line[0]), int(line[1]))] = 1
        self.length=self.data.values.shape[0]  # data length
        self.max_list,self.min_list=self.get_max_min(self.data)  # max and min are list type, used for the later normalization
        self.normalize=normalize
        if self.normalize:
            self.normalization(data=self.data,index=6,max_list=self.max_list,min_list=self.min_list)  # normalization

    def get_source_data(self,file_path=None):
        '''
        Load the raw CSV into a pandas DataFrame.
        :return:
        '''
        data = pd.read_csv(file_path, encoding='utf-8')
        return data

    def get_max_min(self,data=None):
        '''
        :return: the max and min value of input features (one entry per column)
        '''
        min_list=[]
        max_list=[]
        # print('the shape of features is :',self.data.values.shape[1])
        for i in range(data.values.shape[1]):
            min_list.append(min(data[list(data.keys())[i]].values))
            max_list.append(max(data[list(data.keys())[i]].values))
        print('the max feature list is :',max_list)
        print('the min feature list is :', min_list)
        return max_list,min_list

    def normalization(self,data=None,index=1,max_list=[],min_list=[]):
        # Min-max scale every column from `index` onwards, in place.
        # NOTE(review): the epsilon is added to min_list[i] INSIDE the
        # subtraction, i.e. denominator = max - (min + eps); confirm whether
        # (max - min) + eps was intended.
        keys=list(data.keys())
        for i in range(index,len(keys)):
            data[keys[i]]=(data[keys[i]] - np.array(min_list[i])) / (np.array(max_list[i]) - np.array(min_list[i]+self.min_value))

    def generator_(self):
        '''
        :return: yield the data of every time,
        shape:input_series:[time_size,field_size]
        label:[predict_size]
        '''
        para=self.para
        # Training covers [one day in, divide point); testing the remainder.
        if self.is_training:
            low,high=24*6*para.site_num, int(self.data.shape[0]//para.site_num * self.data_divide)*para.site_num
        else:
            low,high=int(self.data.shape[0]//para.site_num * self.data_divide) *para.site_num, self.data.shape[0]
        while low+para.site_num*(para.input_length + para.output_length)<= high:
            # Labels: column -2 over the prediction window, regrouped to
            # [site_num, prediction_size].
            label=self.data.values[low + self.time_size * para.site_num: low + (self.time_size + self.prediction_size) * para.site_num,-2:-1]
            label=np.concatenate([label[i * para.site_num:(i + 1) * para.site_num, :] for i in range(self.prediction_size)], axis=1)
            yield (self.data.values[low:low+self.time_size*para.site_num, 6:7],
                   self.data.values[low:low+(self.time_size+self.prediction_size)*para.site_num, 4],
                   self.data.values[low:low + (self.time_size+self.prediction_size)* para.site_num, 5],
                   label)
            # Training slides by window_step windows; testing advances one
            # whole prediction horizon at a time.
            if self.is_training:
                low += self.window_step*para.site_num
            else:
                low+=self.prediction_size*para.site_num
        return

    def next_batch(self,batch_size,epochs, is_training=True):
        '''
        :return the iterator!!! (TF1-style one-shot iterator get_next op)
        :param batch_size:
        :param epochs:
        :return:
        '''
        self.is_training=is_training
        dataset=tf.data.Dataset.from_generator(self.generator_,output_types=(tf.float32,tf.int32, tf.int32, tf.float32))
        if self.is_training:
            dataset=dataset.shuffle(buffer_size=int(self.data.values.shape[0]//self.para.site_num * self.data_divide-self.time_size-self.prediction_size)//self.window_step)
        dataset=dataset.repeat(count=epochs)
        dataset=dataset.batch(batch_size=batch_size)
        iterator=dataset.make_one_shot_iterator()
        return iterator.get_next()
# #
if __name__=='__main__':
    # Smoke test: build the iterator and pull a few batches in test mode.
    para = parameter(argparse.ArgumentParser())
    para = para.get_para()
    # NOTE(review): `iter` and `next` shadow Python builtins here.
    iter=DataIterator(site_id=0,normalize=False,hp=para, time_size=6, prediction_size=3)
    print(iter.data.keys())
    # print(iter.data.loc[iter.data['ZoneID']==0])
    next=iter.next_batch(1,1, False)
    with tf.Session() as sess:
        for _ in range(4):
            # NOTE(review): next_batch returns FOUR tensors but only two names
            # are unpacked — confirm this runs as intended.
            x,y=sess.run(next)
            print(x.shape)
            print(y.shape)
            print(x[0])
print(y[0]) |
#!/usr/bin/env python
# coding=utf-8
from unittest import TestCase
from onepiece.example import hello_world
class HelloWordTestCase(TestCase):
    """Smoke test for the example module's greeting."""

    def test_hello_world(self):
        greeting = hello_world()
        self.assertEqual('One Piece', greeting)
|
from fbs.fbs import FBSType
class FBSJavaType(FBSType):
    """Maps flatbuffers schema types to the Java type names used by the
    generator.

    Java has no unsigned primitives, so unsigned widths collapse onto the
    signed type of the same size; composite types (struct/table/union/
    vector/enum) are emitted as interfaces.
    NOTE(review): BYTE/UBYTE map to "char" (Java's 16-bit unsigned char) —
    confirm this is intended rather than Java's signed 8-bit `byte`.
    """
    _VALUES_TO_JAVA_TYPES = {
        FBSType.BOOL: "boolean",
        FBSType.BYTE: "char",
        FBSType.UBYTE: "char",
        FBSType.SHORT: "short",
        FBSType.USHORT: "short",
        FBSType.INT: "int",
        FBSType.UINT: "int",
        FBSType.FLOAT: "float",
        FBSType.LONG: "long",
        FBSType.ULONG: "long",
        FBSType.DOUBLE: "double",
        FBSType.STRING: "String",
        FBSType.STRUCT: "interface",
        FBSType.TABLE: "interface",
        FBSType.UNION: "interface",
        FBSType.VECTOR: "interface",
        FBSType.ENUM: "interface",
    }
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
#encoding=utf-8
import urllib2
import os
import re
from bs4 import BeautifulSoup
import time
# Download a file and return its raw bytes.
def downloadBook(bookLink):
    req = urllib2.Request(bookLink)
    # Spoof a desktop-browser User-Agent so the server serves the file.
    req.add_header('User-Agent','Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36')
    response = urllib2.urlopen(req)
    return response.read()
# Parse the download links (all offered formats) from a book's page.
def parseBookDownloadLink(bookPageLink):
    html = urllib2.urlopen(bookPageLink).read()
    soup = BeautifulSoup(html,"html.parser")
    downloadTag = soup.findAll('a',attrs={"class":"download-link"})
    downloadlinks = []
    for item in downloadTag:
        downloadlinks.append(item.get('href'))
    # return [epubDownloadLink,mobiDownloadLink]
    return downloadlinks
# Parse the book title from its page.
def parseBookName(bookPageLink):
    html = urllib2.urlopen(bookPageLink).read()
    soup = BeautifulSoup(html,"html.parser")
    titleTag = soup.findAll('div',attrs={"class":"h1-wrapper"})[0]
    title = titleTag.text
    print "Book Name:"+title
    return title
# Save every downloadable format of one book into its own folder.
def saveFile(bookPageLink, bookFolder):
    """Create a folder named after the book under bookFolder and download
    every available format into it.

    Fix: the redundant f.close() inside the with-block was removed; the
    context manager already closes the file.
    """
    bookName = parseBookName(bookPageLink)
    # Folder names must not contain spaces, so join the title with underscores.
    detailFolder = bookFolder + "_".join(bookName.split())
    os.chdir(bookFolder)
    isExists = os.path.exists(detailFolder)
    if not isExists:
        os.mkdir(detailFolder)
    os.chdir(detailFolder)
    # Resolve the download links for every offered format.
    downloadlinks = parseBookDownloadLink(bookPageLink)
    for link in downloadlinks:
        # Build the target file name from the title plus the link's extension.
        filetype = link.split('.')[-1]
        filename = bookName + "." + filetype
        downloadFile = downloadBook(link)
        with open(filename, 'wb') as f:
            f.write(downloadFile)
# Parse the individual book-page links out of a listing page.
def parseBookPageLink(bookListPageLink):
    html = urllib2.urlopen(bookListPageLink).read()
    soup = BeautifulSoup(html,"html.parser")
    bookTag = soup.findAll('div',attrs={"class":"thumb-holder"})
    bookLinks=[]
    for item in bookTag:
        bookLinkTag = item.a
        bookpageLink = bookLinkTag.get('href')
        bookLinks.append(bookpageLink)
    return bookLinks
# Download the books from zi5.me. (Translated note: readers who get this far,
# please don't run it too often — it puts load on their server.)
def downloadZi5Book():
    filesavepath = '/WytheData/Book/'
    basePageListLink = "http://book.zi5.me/page/"
    for page in range(1,51):
        listPageLink = basePageListLink + str(page)
        print listPageLink
        bookLinks = parseBookPageLink(listPageLink)
        for book in bookLinks:
            print "Page " + str(page)
            saveFile(book, filesavepath)
            print "Wait a while"
            # Pause between downloads to reduce server load and avoid being
            # banned (translated from Chinese).
            time.sleep(5)
    print "Download all finish"
if __name__ == '__main__':
downloadZi5Book() |
"""
-------------------------------------------------------------------------------
Name: dataPreprocessing
Purpose: Pre-processing of the local test data
Author: Christian Buchholz, Marcus Vogt
Created: 01.12.2021
Copyright: Chair of Sustainable Manufacturing and Life Cycle Engineering, Institute of Machine Tools and Production Technology, Technische Universität Braunschweig, Langer Kamp 19b, 38106 Braunschweig, Germany
Licence: MIT (see License)
-------------------------------------------------------------------------------
"""
import numpy as np
def humanheat(activity):
    """Sensible heat flow emitted by one person, per VDI 2078 p. 27.

    :param activity: int: activity level, one of 1, 2, 3, 4
    :return: int: heat flow per person [W]
    :raises Exception: if activity is not in {1, 2, 3, 4}
    """
    # Activity level -> heat flow in W (VDI 2078 p. 27); dispatch table
    # replaces the if/elif chain.
    heat_by_activity = {1: 100, 2: 125, 3: 170, 4: 210}
    try:
        return heat_by_activity[activity]
    except KeyError:
        raise Exception('wrong input for activity in modul "helpers.dataPreprocessing", function "humanheat". Allowed input arguments are: 1, 2, 3, 4')
def humanhumidity(T_Ges, activity):
    """Moisture emission of one person in [kg/s], per VDI 2078 p. 26.

    :param T_Ges: float: room temperature in [°C] driving the linear emission model
    :param activity: int: activity level, one of 1, 2, 3, 4
    :return: float: emitted moisture [kg/s]
    :raises Exception: if activity is not in {1, 2, 3, 4}
    """
    # Activity level -> (offset, slope) of the g/h emission line in T_Ges;
    # dispatch table replaces the if/elif chain.
    coefficients = {
        1: (-86, 5.4),  # Activity I  ; at 20°C 22 g/h
        2: (-58, 5.4),  # Activity II ; at 20°C 50 g/h
        3: (-18, 5.8),  # Activity III; at 20°C 98 g/h // e.g -> 0.0000272 kg/s
        4: (-75, 9.4),  # Activity IV ; at 20°C 113 g/h
    }
    try:
        offset, slope = coefficients[activity]
    except KeyError:
        raise Exception('wrong input for activity in modul "helpers.dataPreprocessing", function "humanhumidity". Allowed input arguments are: 1, 2, 3, 4')
    # Convert g/h to kg/s: divide by 3600 s/h and 1000 g/kg.
    X_activity = (offset + slope * T_Ges) / (3600 * 1000)
    return X_activity
def short_new(data, defined_length):
    """
    Shorten the series `data` to `defined_length` points by block-averaging:
    each output point is the mean of q = len(data) / defined_length consecutive
    inputs. Important: len(data) must be an integer multiple of defined_length.
    """
    q = int(len(data) / defined_length)
    data_new = np.zeros(defined_length)
    for j in range(defined_length):
        # Slicing clips at the end of `data`, mirroring the original bounds check.
        block = data[q * j: q * (j + 1)]
        data_new[j] = sum(block) / q
    return data_new
def long_new(data, defined_length):
    """
    Stretch the series `data` to `defined_length` points by repeating each
    input value q = defined_length / len(data) times. Important:
    defined_length must be an integer multiple of len(data).
    """
    q = int(defined_length / len(data))
    data_new = np.zeros(defined_length)
    for j, value in enumerate(data):
        # Slice assignment clips at the array end, mirroring the original bounds check.
        data_new[q * j: q * (j + 1)] = value
    return data_new
def rescale_data(data, defined_length):
    """
    Bring `data` to exactly `defined_length` points: stretch with long_new()
    when too short, block-average with short_new() when too long, and return
    the series unchanged when the length already matches.
    """
    if len(data) == defined_length:
        return data
    if len(data) < defined_length:
        return long_new(data, defined_length)
    return short_new(data, defined_length)
def room_model(x, t, m_in, T_in, X_in, m_X_del, beta_CO2_in, beta_CO2_prod, m_prod, Q_gain, T_amb_room, k, A, C_sub, specificHeatCapacityDryAir, specificHeatCapacityWaterVapor, delta_h_water, m_air_room):
    """
    This function implements a differential equation system that describes the dynamic spatial behaviour.
    :param x: float: initial states
    :param t: float: time series for integration
    :param m_in: float: value of the air mass flow entering the room [kg/s]
    :param T_in: float: temperature of the air mass flow entering the room [°C]
    :param X_in: float: humidity of the air mass flow entering the room [kg water / kg air]
    :param m_X_del: float: moisture load [kg/s]
    :param beta_CO2_in: float: CO2-concentration of the air mass flow entering the room [ppm]
    :param beta_CO2_prod: float: CO2 concentration of the air emitted by humans [ppm]
    :param m_prod: float: air mass flow emitted by humans [kg/s]
    :param Q_gain: float: heat input into the room [W]
    :param T_amb_room: float: temperature in the building outside the room [°C]
    :param k: float: heat transfer coefficient [J/(m^2*K)]
    :param A: float: wall surface [m^2]
    :param C_sub: float: heat capacity [J]
    :param specificHeatCapacityDryAir: float: specific heat capacity of dry air [J/kg]
    :param specificHeatCapacityWaterVapor: float: specific heat capacity of water vapor [J/kg]
    :param delta_h_water: float: Specific evaporation enthalpy of water [J/kg]
    :param m_air_room: float: Mass of the air inside the room [kg]
    :return dxdt: float: solution of ODE
    """
    # summarising some expressions into coefficients in order to subsequently shorten the model equations
    c1 = (k * A) / C_sub  # wall heat exchange normalised by heat capacity
    c2 = specificHeatCapacityDryAir / C_sub
    c3 = specificHeatCapacityWaterVapor / C_sub
    c4 = delta_h_water / C_sub
    c5 = 1 / C_sub
    c6 = 1 / m_air_room
    # Unpack the state vector: temperature, absolute humidity, CO2 concentration.
    T_room = x[0]
    X_room = x[1]
    beta_CO2_room = x[2]
    # Energy balance: wall losses, enthalpy carried by the supply/exhaust air
    # (dry air + water vapor + latent heat), and internal heat gains.
    dT_roomdt = -c1 * T_room - c2 * m_in * T_room - c3 * m_in * X_room * T_room + c2 * m_in * T_in + c4 * m_in * X_in + c3 * m_in * X_in * T_in - c4 * m_in * X_room + c5 * Q_gain + c1 * T_amb_room
    # Moisture balance: ventilation exchange plus the internal moisture load.
    dX_roomdt = -c6 * m_in * X_room + c6 * m_in * X_in + c6 * m_X_del
    # CO2 balance: ventilation exchange plus human CO2 production.
    dbeta_CO2_roomdt = c6 * (beta_CO2_in * m_in - beta_CO2_room * m_in + beta_CO2_prod * m_prod)
    dxdt = [dT_roomdt, dX_roomdt, dbeta_CO2_roomdt]
    return dxdt
|
# 取消协程
import asyncio
from asyncio import Task
def method_one():
    """Demo: cancel a running task directly via Task.cancel()."""

    async def cancellable(delay=10):
        # Sleeps for *delay* seconds and reports how long it actually ran
        # when it gets cancelled.
        loop = asyncio.get_running_loop()
        now = loop.time()
        try:
            print(f'sleeping from {now} for {delay} seconds...')
            await asyncio.sleep(delay)
            print(f'sleep {delay} seconds ...')
        except asyncio.CancelledError:
            print(f'Cancelled at {now} after {loop.time() - now} seconds')

    async def main():
        # Schedule the sleeper, give it 3 seconds, then cancel it.
        sleeper = asyncio.create_task(cancellable())
        await asyncio.sleep(3)
        sleeper.cancel()

    asyncio.run(main())
def method_two():
    """Demo: cancel a task from a plain callback via call_soon_threadsafe."""

    async def cancellable(delay=10):
        loop = asyncio.get_running_loop()
        now = loop.time()
        try:
            print(f'sleep from {now} for {delay} seconds ...')
            await asyncio.sleep(delay)
            print(f'slept for {delay} seconds without disturbance ...')
        except asyncio.CancelledError:
            print(f'Cancelled at {now} after {loop.time() - now} seconds')

    async def main():
        sleeper = asyncio.create_task(cancellable())  # does not block
        await asyncio.sleep(3)

        def canceller(_task: Task, _fut):
            # Runs on the loop thread: cancel the task, then unblock main().
            _task.cancel()
            _fut.set_result(None)

        loop = asyncio.get_running_loop()
        fut = loop.create_future()
        loop.call_soon_threadsafe(canceller, sleeper, fut)
        await fut

    asyncio.run(main())
# Run the threadsafe-callback variant when executed as a script.
if __name__ == '__main__':
    method_two()
|
from __future__ import print_function, absolute_import
from six import iteritems, itervalues
from six.moves import range
from ._file_reader import FileReader
from ._table_data import PunchTableData
def _default_callback(table_data):
print(table_data.header)
class OP2Reader(object):
    """Skeleton reader for OP2 result files (work in progress)."""

    def __init__(self, filename):
        # Fortran-format reader; the file handle is attached elsewhere.
        self.file = None
        self._done_reading = False
        self._callback = _default_callback

    def register_callback(self, callback):
        """Replace the per-block callback; *callback* must be callable."""
        assert callable(callback)
        self._callback = callback

    def close(self):
        """Close the underlying file object."""
        self.file.close()

    def read(self):
        """Iterate over data blocks, handing each one to the callback."""
        self._done_reading = True  # remove this when done writing code
        while not self._done_reading:
            # _read_data_block() should produce OP2Table/OP2Matrix-style
            # objects (planned subclasses of ResultTableData) whose .data is
            # a numpy array, ideally a numpy.memmap for large files.
            # Block layout is determined by ident tables; the ident data
            # feeds a lookup (definable in a comma-delimited file) that
            # returns the appropriate dtype.  Result data should eventually
            # be sorted by subcase (SORT1) — tricky here because element
            # tables stream straight to the h5 database, so the sort likely
            # has to happen in pytables after the whole op2 is read (write
            # the correct domains for each element's subcase).
            # TODO: if geometry tables are ever read, subclassing from
            # ResultTableData no longer makes sense; consider renaming to
            # TableData or similar.
            data = self._read_data_block()  # reads a table, matrix, whatever
            self._callback(data)

    def _read_header(self):
        pass

    def _read_data_block(self):
        # if self.file.tell() == 0:
        #     return self._read_header()
        # otherwise read some data block
        pass
|
def find_max(l):
    """Return the maximum element of the non-empty list *l*.

    Classic divide-and-conquer: split the list in half, find the max of each
    half recursively, and keep the larger of the two.

    :param l: non-empty list of mutually comparable elements
    :return: the largest element of *l*
    :raises ValueError: if *l* is empty
    """
    if not l:
        raise ValueError("find_max() arg is an empty sequence")
    if len(l) == 1:  # base case
        return l[0]
    # BUG FIX: the original compared only the boundary elements of the two
    # halves (l_end vs r_start), which is only valid for sorted/unimodal
    # input — e.g. find_max([2, 1, 3]) returned 2.  A correct divide and
    # conquer must take the max of BOTH halves.
    mid = len(l) // 2
    return max(find_max(l[:mid]), find_max(l[mid:]))
# Quick sanity check: prints 2.
sample = [1, 2]
print(find_max(sample))
import cv2
import numpy as np
from utils.opencv_util import OpenCVUtil
# def test_match():
# OpenCVUtil.init_load()
#
# img1 = cv2.imread("../data/small/aaweapon_type_1.png", cv2.COLOR_BGR2GRAY)
# img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
# img2 = cv2.imread("../data/test.png", cv2.COLOR_BGR2GRAY)
# img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
# akaze = cv2.AKAZE_create()
#
# kp1, des1 = akaze.detectAndCompute(img1_gray, None)
# out = cv2.drawKeypoints(img1_gray, kp1, None)
# cv2.imshow("mutch_image_src", out)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# height = img2_gray.shape[0]
# width = img2_gray.shape[1]
# img2_gray_big = cv2.resize(img2_gray, (int(width * 5), int(height * 3)))
# kp2, des2 = akaze.detectAndCompute(img2_gray_big, None)
# out2 = cv2.drawKeypoints(img2_gray_big, kp2, None)
# cv2.imshow("mutch_image_src2", out2)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
#
# bf = cv2.BFMatcher()
# matches = bf.knnMatch(des1, des2, k=2)
# good = []
# for m, n in matches:
# if m.distance < 0.4 * n.distance:
# good.append([m])
#
# img_kaze = cv2.drawMatchesKnn(out, kp1, out2, kp2, good, None, flags=2)
#
# # 結果の表示
# cv2.namedWindow("mutch_image_src", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
# cv2.imshow("mutch_image_src", img_kaze)
#
# cv2.waitKey(0)
# cv2.destroyAllWindows()
def test_image_mask():
    """Locate the masked (mid-grey) region of a screenshot and crop it.

    Builds a BGR range mask, scans top-left -> bottom-right for the first
    matching pixel and bottom-right -> top-left (skipping the bottom 10%) for
    the last one, crops the screenshot to that bounding box plus a fixed inner
    rectangle, then saves and displays the result.

    :raises ValueError: if no pixel falls inside the mask range.
    """
    img1 = cv2.imread("z://Screenshot_20191222-185148.jpg")
    # Colour extraction in BGR
    # black = [0, 0, 0]
    red = [0, 0, 255]
    # white = [255, 255, 255]
    bgrLower = np.array([110, 110, 110])  # lower bound of the colour to extract (BGR)
    bgrUpper = np.array([200, 200, 200])  # upper bound of the colour to extract (BGR)
    img_mask = cv2.inRange(img1, bgrLower, bgrUpper)  # build a mask from the BGR range
    # result = img1[np.where((img1 == img_mask).all(axis=2))] = red
    result = cv2.bitwise_and(img1, img1, mask=img_mask)  # combine source image with the mask
    height = result.shape[0]
    width = result.shape[1]
    # BUG FIX: 'left'/'right' were previously unbound when no pixel matched
    # the mask, causing a NameError at the crop below.
    left = None
    right = None
    # First non-zero pixel, scanning top-left to bottom-right.
    for h in range(0, height - 1):
        for w in range(0, width - 1):
            b = result[h, w][0]
            g = result[h, w][1]
            r = result[h, w][2]
            if b != 0 and g != 0 and r != 0:
                print("-->[" + str(w) + "," + str(h) + "] " + str(b) + ", " + str(g) + ", " + str(r))
                left = w, h
                break
        if left is not None:
            break
    # Last non-zero pixel, scanning bottom-right to top-left
    # (the bottom 10% of the image is skipped).
    for h in range(height - int(height / 10), 0, -1):
        for w in range(width - 1, 0, -1):
            b = result[h, w][0]
            g = result[h, w][1]
            r = result[h, w][2]
            if b != 0 and g != 0 and r != 0:
                print("->[" + str(w) + "," + str(h) + "] " + str(b) + ", " + str(g) + ", " + str(r))
                right = w, h
                break
        if right is not None:
            break
    if left is None or right is None:
        raise ValueError("no pixel matched the BGR mask range")
    rect_img = img1[left[1]:right[1], left[0]:right[0]]
    rect_img = rect_img[245:285, 177:270]
    cv2.imwrite("z://rect.jpg", rect_img)
    # print(result[0, 0])
    cv2.namedWindow("mutch_image_src", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
    cv2.imshow("mutch_image_src", rect_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
"""
Given A Series Of N Positive Integers a1,a2,a3........an. , Find The Minimum And Maximum Values That Can Be Calculated By Summing Exactly N-1 Of The N Integers. Then Print The Respective Minimum And Maximum Values As A Single Line Of Two Space-Separated Long Integers.
Input Format
First Line Take Input Value Of N
Second Line Take Input N Space Separated Integer Value
Output Format
Two Space Separated Value ( One Maximum Sum And One Minimum Sum )
Constraints
0 < N < 100001
0 <= ai < 1013
Sample Input: Sample Output:
5 10 14
1 2 3 4 5
Explanation
Our initial numbers are 1,2,3,4 and 5. We can calculate the following sums using four of the five integers:
If we sum everything except 1, our sum is 2+3+4+5=14 .
If we sum everything except 2, our sum is 1+3+4+5=13 .
If we sum everything except 3, our sum is 1+2+4+5=12 .
If we sum everything except 4, our sum is 1+3+4+5=11 .
If we sum everything except 5, our sum is 1+2+3+4=10 .
As you can see, the minimal sum is 1+2+3+4=10 and the maximal sum is 2+3+4+5=14. Thus, we print these minimal and maximal sums as two space-separated integers on a new line."""
# Python 2 script: mini-max sum of N-1 out of N integers.
# Read N (only consumed, not otherwise used) and the N integers, sorted ascending.
N = raw_input()
array = raw_input().split()
array = map(int, array)
array.sort()
total = total1 = 0
# Sum of the smallest N-1 values -> minimum possible sum.
for i in range(len(array)-1):
    total = total + int(array[i])
# Sum of the largest N-1 values -> maximum possible sum.
for j in range(1,len(array)):
    total1 = total1 + int(array[j])
# Print "min max" separated by a space (Python 2 print statement).
print total,total1
|
#!/usr/bin/env python3
#
import logging
import time
import requests
from telnetlib import Telnet
from camdoris.camdoris_cfg import *
from izquierbot.izquierbot import sendMessageToAdmin
class Camdoris(object):
    """Telnet/HTTP supervisor for a Doris network camera.

    Periodically checks site availability, alerts admins on up/down
    transitions, and re-applies the expected configuration whenever the
    camera drifts from it.
    """

    def __init__(self, host, user, password, port, timeout, check_interval):
        self.host = host
        self.user = user
        self.password = password
        self.port = port
        self.timeout = timeout  # seconds; shared by telnet and HTTP calls
        self.check_interval = check_interval  # seconds between monitor passes
        # True while a "site down" alert has been sent and not yet cleared.
        self.site_down_alarm_triggered = False

    def sendTelnetCommand(self, conn, cmd, prompt=CAMDORIS_PROMPT_REGEX):
        """Send *cmd* over *conn*, wait for *prompt*, return the raw output.

        :raises TimeoutError: if the prompt is not seen within self.timeout.
        """
        conn.write(f"{cmd}\n".encode("ascii"))
        # Telnet.expect() returns (index, match_object, data); index is -1
        # when the timeout elapsed without a match.
        match, _, data = conn.expect([prompt.encode("ascii")], timeout=self.timeout)
        if match == -1:
            logging.error(
                f"Reached timeout expecting prompt ('{prompt}'). Received from server: '{data.decode()}'"
            )
            raise TimeoutError()
        return data

    def fetchConfig(self):
        """Log in over telnet and return the relevant configuration as text."""
        output = b""
        with Telnet(host=self.host, port=self.port, timeout=self.timeout) as conn:
            conn.read_until("Username> ".encode("ascii"))
            output += self.sendTelnetCommand(conn, "login")
            output += self.sendTelnetCommand(conn, "show ftp")
            output += self.sendTelnetCommand(conn, "show image mask")
        splitted = output.decode().split("\r\n")
        # Drop the echoed command line and the trailing prompt line.
        _ = splitted.pop(0)
        _ = splitted.pop(-1)
        return "\n".join(splitted)

    def isSiteUp(self):
        """Return True if the camera's web page answers HTTP 200."""
        try:
            sc = requests.head(
                f"http://{self.host}/appletvid.html", timeout=self.timeout
            ).status_code
            return sc == 200
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate.
            return False

    def isCompliant(self):
        """Return True if every expected config line is present on the device."""
        try:
            current_config = self.fetchConfig()
        except Exception:
            logging.warning("could not check compliance. aborting...")
            return False
        return all(c in current_config for c in CAMDORIS_COMPLIANT_CONFIG_LIST)

    def configure(self):
        """Push the expected configuration and trigger the test command."""
        with Telnet(host=self.host, port=self.port, timeout=self.timeout) as conn:
            conn.read_until("Username> ".encode("ascii"))
            self.sendTelnetCommand(conn, self.user)
            self.sendTelnetCommand(conn, "set privilege over", "Password> ")
            self.sendTelnetCommand(conn, self.password)
            for cmd in CAMDORIS_CONFIGURATION_CMD_LIST:
                self.sendTelnetCommand(conn, cmd)
            self.sendTelnetCommand(conn, CAMDORIS_TEST_TRIGGER_CMD)

    def monitor(self):
        """Run the availability/compliance loop forever."""
        while True:
            logging.info("checking site...")
            if not self.isSiteUp():
                if not self.site_down_alarm_triggered:
                    # The site just went down: alert the admins once.
                    logging.warning("site is down - alerting admins...")
                    try:
                        sendMessageToAdmin("camdoris: hey! site is down.")
                        self.site_down_alarm_triggered = True
                    except Exception:
                        logging.warning("could not send msg.")
                    logging.info("done.")
                else:
                    # The site was already down and has been alerted.
                    logging.warning("site is down - already alerted.")
            else:
                # The site recovered: clear the alarm and notify.
                if self.site_down_alarm_triggered:
                    logging.info("site is up - alerting admins...")
                    try:
                        sendMessageToAdmin("camdoris: site is back online.")
                        self.site_down_alarm_triggered = False
                    except Exception:
                        logging.warning("could not send msg.")
                    logging.info("done.")
                else:
                    logging.info("site is up.")
            logging.info("checking compliance...")
            if not self.isCompliant():
                logging.info("camera is not compliant - configuring now...")
                try:
                    self.configure()
                    # NOTE: skips the sleep so compliance is re-checked
                    # immediately after a successful reconfiguration.
                    continue
                except Exception:
                    pass
            else:
                logging.info("camera is compliant.")
            time.sleep(self.check_interval)
|
# https://edabit.com/challenge/q4bBcq5NET4CH5Rcb
# Jay and Silent Bob have been given a fraction of an ounce but they only understand grams.
# Convert a fraction passed as a string to grams with up to two decimal places. An ounce weighs 28 grams.
def jay_and_bob(weight: str) -> str:
    """Translate an ounce-fraction name into its weight in grams.

    Known fractions map to their gram value (an ounce is 28 g); anything
    else gets a confused reply, and unhashable input is reported as an
    error string.
    """
    try:
        grams_by_name = {"ounce": 28, "half": 14, "quarter": 7, "eighth": 3.5, "tenth": 2.8}
        grams = grams_by_name.get(weight)
    except TypeError as err:
        return f"Error: {err}"
    if grams is None:
        return "Yo, what?"
    return f"{grams} grams"
# Exercise every known fraction plus one unknown input.
for fraction in ("ounce", "half", "quarter", "eighth", "tenth", "pound"):
    print(jay_and_bob(fraction))
|
import numpy as np
import tensorflow as tf
from baselines.common.runners import AbstractEnvRunner
import copy
class Runner(AbstractEnvRunner):
    """
    We use this object to make a mini batch of experiences
    __init__:
    - Initialize the runner
    run():
    - Make a mini batch
    """
    def __init__(self, *, env, model, nsteps, gamma, lam):
        super().__init__(env=env, model=model, nsteps=nsteps)
        # Lambda used in GAE (General Advantage Estimation)
        self.lam = lam
        # Discount rate
        self.gamma = gamma

    def run(self, adap_tinfo=False, training=True):
        """Collect ``nsteps`` environment transitions and return a rollout.

        :param adap_tinfo: when True, removes already-collected pixels
            (current_pic) from the target information before acting.
        :param training: forwarded to model.step (sampling vs greedy mode —
            TODO confirm against the model implementation).
        :return: flattened rollout arrays (observations, GAE returns,
            actions, masks, per-class groupings, ...) — see the return
            statement for the exact ordering.
        """
        # Here, we init the lists that will contain the mb of experiences
        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
        epinfos = []
        mb_node_feature, mb_adjacency, mb_pivot, mb_node_mask, mb_target_information, mb_edge_feature = [],[],[],[],[],[]
        mb_next_node_feature, mb_next_adjacency = [],[]
        mb_ep_rewards = []
        mb_pivot_neglogpacs = []
        num_envs = self.obs['node_attributes'].shape[0]
        # Per-target-class accumulators (10 classes).
        class_rewards, class_nodes, class_masks = [[] for i in range(10)], [[] for i in range(10)], [[] for i in range(10)]
        class_idx = [[] for i in range(10)]
        class_targets = [[] for i in range(10)]
        mb_pivot_mask, mb_offset_mask = [],[]
        # For n in range number of steps
        for _ in range(self.nsteps):
            # Given observations, get action value and neglopacs
            # We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init
            obs = copy.deepcopy(self.obs)
            node_feature, adjacency, node_mask, target_information, edge_feature, target_class, current_pic = \
                tuple([obs.get(key) for key in ['node_attributes', 'adjacency', 'node_mask', 'target_information', 'edge_attributes', 'target_class', 'current_pic']])
            if adap_tinfo:
                # Drop targets that are already present in the current picture.
                target_information = target_information - np.logical_and(target_information, current_pic)
                obs['target_information'] = target_information
            pivot_mask, offset_mask = self.model.predict_mask(obs, inference=True)
            # NOTE: the logits are computed but unused here; kept because the
            # extra forward pass may be intentional — confirm before removing.
            pivot_mask_logits, offset_mask_logits = self.model.predict_mask(obs, inference=False)
            actions, values, self.states, neglogpacs, pivot_neglogpacs, pivot = self.model.step(obs, pivot_mask, offset_mask, training)
            actions = actions._numpy()  # project tensors expose _numpy()
            mb_obs.append(copy.deepcopy(self.obs))
            mb_actions.append(np.squeeze(actions))
            mb_values.append(values._numpy())
            mb_neglogpacs.append(neglogpacs._numpy())
            mb_dones.append(np.squeeze(self.dones))
            pivot = pivot._numpy()
            mb_pivot_neglogpacs.append(pivot_neglogpacs._numpy())
            mb_pivot_mask.append(pivot_mask._numpy())
            mb_offset_mask.append(offset_mask._numpy())
            __ = list(map(lambda x, y : x.append(y), (mb_node_feature, mb_adjacency, mb_pivot, mb_node_mask, mb_target_information, mb_edge_feature),
                          (node_feature, adjacency, pivot, node_mask, target_information, edge_feature)))
            """pivot : (num_envs, 1), actions : (num_envs, 1)"""
            one_flag = np.ones((num_envs, 1)).astype(np.int32)
            self.obs, rewards, self.dones, ep_rewards = self.env.step(np.concatenate([pivot, actions, one_flag], axis=-1))
            mb_rewards.append(rewards)
            mb_ep_rewards.append(ep_rewards)
            next_obs = copy.deepcopy(self.obs)
            next_node_feature, next_adjacency = tuple([next_obs.get(key) for key in ['node_attributes', 'adjacency']])
            mb_next_node_feature.append(next_node_feature)
            mb_next_adjacency.append(next_adjacency)
            # Group this step's data into the per-class accumulators keyed by
            # each environment's target class.
            ___ = list(map(lambda target_class, idx : class_rewards[target_class].append(np.expand_dims(np.array(ep_rewards[idx]), axis=-1)[None,...]),
                           target_class[:, 0], np.arange(target_class.shape[0])))
            ___ = list(map(lambda target_class, idx : class_nodes[target_class].append(node_feature[idx][None,...]),
                           target_class[:, 0], np.arange(target_class.shape[0])))
            ___ = list(map(lambda target_class, idx : class_masks[target_class].append(node_mask[idx][None,...]),
                           target_class[:, 0], np.arange(target_class.shape[0])))
            ___ = list(map(lambda target_class, idx : class_targets[target_class].append(target_information[idx][None,...]),
                           target_class[:, 0], np.arange(target_class.shape[0])))
            ___ = list(map(lambda tc, idx : class_idx[tc].append(target_class[idx][None,...]),
                           target_class[:, 0], np.arange(target_class.shape[0])))
        #batch of steps to batch of rollouts
        mb_obs = np.asarray(mb_obs)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_actions = np.asarray(mb_actions)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        # FIX: np.bool was a deprecated alias removed in NumPy 1.24; use the
        # builtin bool (identical semantics).
        mb_dones = np.asarray(mb_dones, dtype=bool)
        last_values = self.model.value(self.obs)._numpy()
        mb_node_feature, mb_adjacency, mb_pivot, mb_node_mask, \
        mb_target_information, mb_edge_feature = list(map(lambda x : np.asarray(x), (mb_node_feature, mb_adjacency, mb_pivot, mb_node_mask,
                                                                                     mb_target_information, mb_edge_feature)))
        mb_next_node_feature = np.asarray(mb_next_node_feature)
        mb_next_adjacency = np.asarray(mb_next_adjacency)
        mb_ep_rewards = np.asarray(mb_ep_rewards, dtype=np.float32)
        mb_pivot_neglogpacs = np.asarray(mb_pivot_neglogpacs, dtype=np.float32)
        # Keep only the classes that actually appeared in this rollout.
        mb_class_rewards = tuple(map(lambda x : sf01(np.asarray(x)), [sub_list for sub_list in class_rewards if sub_list != []]))
        mb_class_nodes = tuple(map(lambda x : sf01(np.asarray(x)), [sub_list for sub_list in class_nodes if sub_list != []]))
        mb_class_masks = tuple(map(lambda x : sf01(np.asarray(x)), [sub_list for sub_list in class_masks if sub_list != []]))
        mb_class_targets = tuple(map(lambda x : sf01(np.asarray(x)), [sub_list for sub_list in class_targets if sub_list != []]))
        mb_class_idx = tuple(map(lambda x : sf01(np.asarray(x)), [sub_list for sub_list in class_idx if sub_list != []]))
        mb_pivot_mask = np.asarray(mb_pivot_mask, dtype=np.float32)
        mb_offset_mask = np.asarray(mb_offset_mask, dtype=np.float32)
        # discount/bootstrap off value fn (GAE backward pass)
        mb_returns = np.zeros_like(mb_rewards)
        mb_advs = np.zeros_like(mb_rewards)
        lastgaelam = 0
        for t in reversed(range(self.nsteps)):
            if t == self.nsteps - 1:
                nextnonterminal = 1.0 - self.dones
                nextvalues = last_values
            else:
                nextnonterminal = 1.0 - mb_dones[t+1]
                nextvalues = mb_values[t+1]
            delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
            mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
        mb_returns = mb_advs + mb_values
        # print(mb_returns.shape, mb_dones.shape, mb_actions.shape, mb_values.shape, mb_neglogpacs.shape)
        return (*map(sf01, (mb_node_feature, mb_adjacency, mb_pivot, mb_node_mask, mb_target_information, mb_edge_feature,
                            mb_next_node_feature, mb_next_adjacency)),
                *map(lambda x : np.expand_dims(sf01(x), axis=-1), (mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_pivot_neglogpacs,
                                                                   mb_rewards, mb_ep_rewards)),
                mb_class_rewards, mb_class_nodes, mb_class_masks, mb_class_targets, mb_class_idx, sf01(mb_pivot_mask), sf01(mb_offset_mask))
class Runner_Mask_Train(AbstractEnvRunner):
    """
    We use this object to make a mini batch of experiences
    __init__:
    - Initialize the runner
    run():
    - Make a mini batch
    """
    def __init__(self, *, env, model, nsteps, gamma, lam):
        super().__init__(env=env, model=model, nsteps=nsteps)
        # Lambda used in GAE (General Advantage Estimation)
        self.lam = lam
        # Discount rate
        self.gamma = gamma

    def run(self, adap_tinfo=False, training=True):
        """Step the env with no-op actions and collect graph observations
        plus the available-action masks the env reports back."""
        node_features, adjacencies, node_masks, edge_features = [], [], [], []
        avail_batches = []
        env_count = self.obs['node_attributes'].shape[0]
        for _ in range(self.nsteps):
            # self.obs exists because the Runner superclass resets the env
            # on init; snapshot it so later env.step calls cannot mutate it.
            snapshot = copy.deepcopy(self.obs)
            node_features.append(snapshot.get('node_attributes'))
            adjacencies.append(snapshot.get('adjacency'))
            node_masks.append(snapshot.get('node_mask'))
            edge_features.append(snapshot.get('edge_attributes'))
            # Send an all-zero (pivot, action, flag) triple for every env.
            zero_flag = np.zeros((env_count, 1)).astype(np.int32)
            self.obs, rewards, self.dones, available_actions = self.env.step(
                np.concatenate([zero_flag, zero_flag, zero_flag], axis=-1))
            avail_batches.append(np.stack(available_actions))
        # Batch of steps -> batch of rollouts, flattened via sf01.
        stacked = [np.asarray(seq) for seq in (node_features, adjacencies, node_masks, edge_features)]
        avail_arr = np.asarray(avail_batches, dtype=np.float32)
        return (*map(sf01, stacked), sf01(avail_arr))
def sf01(arr):
    """Swap axes 0 and 1 of *arr*, then collapse them into a single leading axis."""
    first, second, *rest = arr.shape
    return arr.swapaxes(0, 1).reshape(first * second, *rest)
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Reference
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
from pydantic import Field
from . import element, fhirtypes
class Reference(element.Element):
    """Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; it is used to enable the Extensibility
    feature for FHIR Primitive Data Types.

    A reference from one resource to another.
    """

    resource_type = Field("Reference", const=True)

    # Human-readable label for the referenced resource.
    display: fhirtypes.String = Field(
        None,
        alias="display",
        title="Text alternative for the resource",
        description=(
            "Plain text narrative that identifies the resource in addition to the "
            "resource reference."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    display__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_display", title="Extension field for ``display``."
    )

    # Business identifier used when a direct literal reference is unavailable.
    identifier: fhirtypes.IdentifierType = Field(
        None,
        alias="identifier",
        title="Logical reference, when literal reference is not known",
        description=(
            "An identifier for the other resource. This is used when there is no "
            "way to reference the other resource directly, either because the "
            "entity is not available through a FHIR server, or because there is no "
            "way for the author of the resource to convert a known identifier to an"
            " actual location. There is no requirement that a Reference.identifier "
            "point to something that is actually exposed as a FHIR instance, but it"
            " SHALL point to a business concept that would be expected to be "
            "exposed as a FHIR instance, and that instance would need to be of a "
            "FHIR resource type allowed by the reference."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    # Relative or absolute URL of the referenced resource ('#...' = contained).
    reference: fhirtypes.String = Field(
        None,
        alias="reference",
        title="Literal reference, Relative, internal or absolute URL",
        description=(
            "A reference to a location at which the other resource is found. The "
            "reference may be a relative reference, in which case it is relative to"
            " the service base URL, or an absolute URL that resolves to the "
            "location where the resource is found. The reference may be version "
            "specific or not. If the reference is not to a FHIR RESTful server, "
            "then it should be assumed to be version specific. Internal fragment "
            "references (start with '#') refer to contained resources."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    reference__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_reference", title="Extension field for ``reference``."
    )
|
import cv2
import cvzone
from cvzone.HandTrackingModule import HandDetector
import pyautogui
import numpy as np
import time
# Virtual-mouse script: drives the cursor with the index finger tracked by a
# webcam (cvzone HandDetector); index+middle fingers pinched together clicks.
wCam, hCam = 640, 480
frameR = 100  # Frame reduction (active area inset from the webcam frame edges)
smoothening = 10  # higher = smoother but laggier cursor motion

# cvzone landmark indices for the finger tips
INDEX_FINGER_TIP = 8
MIDDLE_FINGER_TIP = 12
# positions of each finger in the fingersUp() result list
INDEX_FINGER = 1
MIDDLE_FINGER = 2

pTime = 0            # previous frame timestamp (for FPS display)
plocX, plocY = 0,0   # previous (smoothed) cursor location
clocX, clocY = 0,0   # current (smoothed) cursor location

cap = cv2.VideoCapture(0)
cap.set(3, wCam)  # set width of capture window
cap.set(4, hCam)  # set height of capture window
detector = HandDetector(maxHands=1)
wScr, hScr = pyautogui.size()  # target screen resolution

while True:
    # Find the hand landmarks
    success, img = cap.read()
    if not success:
        # FIX: previously the success flag was ignored; a failed grab
        # (camera busy/unplugged) crashed findHands with img=None.
        continue
    hands, img = detector.findHands(img, draw=True)
    if hands:
        hand = hands[0]
        lmList = hand["lmList"]  # list of 21 landmark points
        bbox = hand["bbox"]  # bounding box info x, y, w, h
        # Get the tip of the index and middle fingers
        if len(lmList) != 0:
            # [:2] tolerates cvzone versions whose landmarks carry a z value
            x1, y1 = lmList[INDEX_FINGER_TIP][:2]
            x2, y2 = lmList[MIDDLE_FINGER_TIP][:2]
            # Check which fingers are up
            fingers = detector.fingersUp(hand)
            cv2.rectangle(img, (frameR, frameR), (wCam - frameR, hCam - frameR), (255, 0, 255), 2)
            # If only index finger which means in Mouse Moving Mode
            if fingers[INDEX_FINGER] == 1 and fingers[MIDDLE_FINGER] == 0:
                # Convert coordinates from webcam coords to screen coords for correct position
                scrCoordX = np.interp(x1, (frameR, wCam-frameR), (0, wScr))
                scrCoordY = np.interp(y1, (frameR, hCam-frameR), (0, hScr))
                # Smoothen the values
                clocX = plocX + (scrCoordX - plocX) / smoothening
                clocY = plocY + (scrCoordY - plocY) / smoothening
                # Move the Mouse (x mirrored so motion matches the mirror image)
                pyautogui.moveTo(wScr - clocX, clocY)
                cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
                plocX, plocY = clocX, clocY
            # If both index and middle fingers are up, it is Mouse Clicking Mode
            if fingers[INDEX_FINGER] == 1 and fingers[MIDDLE_FINGER] == 1:
                # Find distance between the fingers
                length, lineInfo, img = detector.findDistance(lmList[INDEX_FINGER_TIP], lmList[MIDDLE_FINGER_TIP], img)
                # Click mouse if distance is short
                if length < 40:
                    cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 255, 0), cv2.FILLED)
                    pyautogui.click()
    # Frame Rate
    cTime = time.time()
    # FIX: guard against a zero time delta (possible at coarse clock
    # resolution), which raised ZeroDivisionError.
    fps = 1/(cTime - pTime) if cTime > pTime else 0.0
    pTime = cTime
    cv2.putText(img, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
    # Display
    cv2.imshow("Image", img)
    cv2.waitKey(1)
|
import json
import sys
import os
from tqdm import tqdm
from mdf_refinery.parsers.tab_parser import parse_tab
from mdf_refinery.validator import Validator
# VERSION 0.3.0
# This is the converter for: Dataset for "Canopy uptake dominates nighttime carbonyl sulfide fluxes in a boreal forest"
# Arguments:
# input_path (string): The file or directory where the data resides.
# NOTE: Do not hard-code the path to the data in the converter (the filename can be hard-coded, though). The converter should be portable.
# metadata (string or dict): The path to the JSON dataset metadata file, a dict or json.dumps string containing the dataset metadata, or None to specify the metadata here. Default None.
# verbose (bool): Should the script print status messages to standard output? Default False.
# NOTE: The converter should have NO output if verbose is False, unless there is an error.
def convert(input_path, metadata=None, verbose=False):
    """Convert the carbonyl-sulfide-fluxes dataset into MDF feedstock.

    :param input_path: Directory containing the raw data file
        ``Kooijmans_et_al_2017_ACPD_20170516.txt``.
    :param metadata: Dataset metadata as a JSON string, a path to a JSON
        file, or a dict; ``None`` uses the built-in metadata below.
    :param verbose: Print status messages when True.
    """
    if verbose:
        print("Begin converting")

    # Collect the metadata
    # NOTE: For fields that represent people (e.g. mdf-data_contact), other IDs can be added (ex. "github": "jgaff").
    #       It is recommended that all people listed in mdf-data_contributor have a github username listed.
    #
    # If there are other useful fields not covered here, another block (dictionary at the same level as "mdf") can be created for those fields.
    # The block must be called the same thing as the source_name for the dataset.
    if not metadata:
        ## Metadata:dataset
        dataset_metadata = {
            "mdf": {
                "title": "Dataset for \"Canopy uptake dominates nighttime carbonyl sulfide fluxes in a boreal forest\"",
                "acl": ["public"],
                "source_name": "carbonyl_sulfide_fluxes",
                "data_contact": {
                    "given_name": "Huilin",
                    "family_name": "Chen",
                    "email": "Huilin.Chen@rug.nl",
                    "institution": "University of Groningen, University of Colorado"
                },
                "data_contributor": [{
                    "given_name": "Evan",
                    "family_name": "Pike",
                    "email": "dep78@uchicago.edu",
                    "institution": "The University of Chicago",
                    "github": "dep78",
                }],
                "citation": ["Linda M.J. Kooijmans, Kadmiel Maseyk, Ulli Seibt, Wu Sun, Timo Vesala, Ivan Mammarella, … Huilin Chen. (2017). Dataset for \"Canopy uptake dominates nighttime carbonyl sulfide fluxes in a boreal forest\" [Data set]. Zenodo. http://doi.org/10.5281/zenodo.580303"],
                "author": [{
                    "given_name": "Linda M.J.",
                    "family_name": "Kooijmans",
                    "institution": "University of Groningen",
                },
                {
                    "given_name": "Kadmiel",
                    "family_name": "Maseyk",
                    "institution": "The Open University",
                },
                {
                    "given_name": "Ulli",
                    "family_name": "Seibt",
                    "institution": "University of California",
                },
                {
                    "given_name": "Wu",
                    "family_name": "Sun",
                    "institution": "University of California",
                },
                {
                    "given_name": "Timo",
                    "family_name": "Vesala",
                    "institution": "University of Helsinki",
                },
                {
                    "given_name": "Ivan",
                    "family_name": "Mammarella",
                    "institution": "University of Helsinki",
                },
                {
                    "given_name": "Pasi",
                    "family_name": "Kolari",
                    "institution": "University of Helsinki",
                },
                {
                    "given_name": "Juho",
                    "family_name": "Aalto",
                    "institution": "University of Helsinki",
                },
                {
                    "given_name": "Alessandro",
                    "family_name": "Franchin",
                    "institution": "University of Helsinki, University of Colorado",
                },
                {
                    "given_name": "Roberta",
                    "family_name": "Vecchi",
                    "institution": "University of Milan",
                },
                {
                    "given_name": "Gianluigi",
                    "family_name": "Valli",
                    "institution": "University of Milan",
                },
                {
                    "given_name": "Huilin",
                    "family_name": "Chen",
                    "email": "Huilin.Chen@rug.nl",
                    "institution": "University of Groningen, University of Colorado",
                }],
                "license": "https://creativecommons.org/licenses/by/4.0/",
                "collection": "Carbonyl Sulfide Fluxes",
                #"tags": [""],
                "description": "Nighttime averaged ecosystem fluxes of COS and CO2 obtained through the radon-tracer and eddy-covariance method as presented in \"Canopy uptake dominates nighttime carbonyl sulfide fluxes in a boreal forest\" submitted to Atmospheric Chemistry and Physics.",
                "year": 2017,
                "links": {
                    "landing_page": "https://doi.org/10.5281/zenodo.580303",
                    "publication": ["https://www.atmos-chem-phys-discuss.net/acp-2017-407/"],
                    #"data_doi": "",
                    #"related_id": "",
                    "txt": {
                        #"globus_endpoint": ,
                        "http_host": "https://zenodo.org",
                        "path": "/record/580303/files/Kooijmans_et_al_2017_ACPD_20170516.txt",
                    },
                },
            },
            #"mrr": {
            #},
            #"dc": {
            #},
        }
        ## End metadata
    elif type(metadata) is str:
        # Accept either a JSON-encoded string or a path to a JSON file.
        try:
            dataset_metadata = json.loads(metadata)
        except Exception:
            try:
                with open(metadata, 'r') as metadata_file:
                    dataset_metadata = json.load(metadata_file)
            except Exception as e:
                sys.exit("Error: Unable to read metadata: " + repr(e))
    elif type(metadata) is dict:
        dataset_metadata = metadata
    else:
        sys.exit("Error: Invalid metadata parameter")

    # Make a Validator to help write the feedstock
    # You must pass the metadata to the constructor
    # Each Validator instance can only be used for a single dataset
    # If the metadata is incorrect, the constructor will throw an exception and the program will exit
    dataset_validator = Validator(dataset_metadata)

    # Get the data
    # Each record should be exactly one dictionary
    # You must write your records using the Validator one at a time
    # It is recommended that you use a parser to help with this process if one is available for your datatype
    # Each record also needs its own metadata
    with open(os.path.join(input_path, "Kooijmans_et_al_2017_ACPD_20170516.txt"), "r") as raw_in:
        data = raw_in.read()
    # The second blank-line-delimited section of the file is the prose description.
    description = "".join(data.split("\n\n")[1:2])
    # The tabular data begins after this separator line.
    start = "##########################################\n"
    for line in tqdm(parse_tab(data.split(start)[-1], sep=","), desc="Processing Data", disable=not verbose):
        ## Metadata:record
        record_metadata = {
            "mdf": {
                "title": "Carbonyl Sulfide Fluxes doy: " + line["doy"],
                "acl": ["public"],
                #"composition": ,
                #"tags": ,
                "description": description,
                "raw": json.dumps(line),
                "links": {
                    #"landing_page": ,
                    #"publication": ,
                    #"data_doi": ,
                    #"related_id": ,
                    "txt": {
                        "globus_endpoint": "82f1b5c6-6e9b-11e5-ba47-22000b92c6ec",
                        "http_host": "https://data.materialsdatafacility.org",
                        "path": "/collections/carbonyl_sulfide_fluxes/Kooijmans_et_al_2017_ACPD_20170516.txt",
                    },
                },
                #"citation": ,
                #"data_contact": {
                    #"given_name": ,
                    #"family_name": ,
                    #"email": ,
                    #"institution": ,
                #},
                #"author": [{
                    #"given_name": ,
                    #"family_name": ,
                    #"email": ,
                    #"institution": ,
                #}],
                #"year": ,
            },
            #"dc": {
            #},
        }
        ## End metadata
        # Pass each individual record to the Validator
        result = dataset_validator.write_record(record_metadata)
        # Check if the Validator accepted the record, and stop processing if it didn't
        # If the Validator returns "success" == True, the record was written successfully
        if not result["success"]:
            if not dataset_validator.cancel_validation()["success"]:
                print("Error cancelling validation. The partial feedstock may not be removed.")
            raise ValueError(result["message"] + "\n" + result.get("details", ""))

    # You're done!
    if verbose:
        print("Finished converting")
|
import logging
import logging.config
def init_logging(file=None, file_level=logging.DEBUG, stdout=True):
    """Configure root logging via ``logging.config.dictConfig``.

    Console output uses a terse ``LEVEL message`` format; the optional log
    file additionally records timestamps and source locations.

    :param file: path of a log file, or None to skip file logging
    :param file_level: threshold for the file handler (default: DEBUG)
    :param stdout: whether to also log to standard output
    """
    active_handlers = []
    handlers = {
        'consoleHandler': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'consoleFormatter',
            'stream': 'ext://sys.stdout',
        },
    }
    if file is not None:
        handlers['fileHandler'] = {
            'level': file_level,
            'class': 'logging.FileHandler',
            'formatter': 'fileFormatter',
            'filename': file,
        }
        active_handlers.append('fileHandler')
    if stdout:
        active_handlers.append('consoleHandler')
    logging.config.dictConfig({
        'version': 1,
        'formatters': {
            'consoleFormatter': {
                'format': '%(levelname)s %(message)s'
            },
            'fileFormatter': {
                'format': '%(asctime)s [%(filename)s:%(lineno)d] %(levelname)s %(message)s'
            },
        },
        'handlers': handlers,
        'loggers': {
            # configure the root logger; only the selected handlers attach
            '': {
                'handlers': active_handlers,
                'level': 'DEBUG',
            }
        }
    })
|
import pandas as pd
from tqdm.notebook import tqdm
from .get_patient_protein_to_mutations_dict import get_patient_protein_to_mutations_dict
from .is_core import is_core
from .is_in_elaspic import is_in_elaspic
from collections import defaultdict
def counts_baseline_vs_our_method_personalized(
        proteins: list, patients: list,
        snv_data: pd.DataFrame,
        elaspic_core_data: pd.DataFrame,
        elaspic_interface_data: pd.DataFrame, prediction_data: pd.DataFrame,
        add_core_flag_1_case_dict: dict
):
    """
    Generates personalized protein counts for BASELINE and OUR_METHOD.

    Parameters
    ----------
    proteins : <list>
        List of proteins.
    patients : <list>
        A list of TCGA patients.
    snv_data : <DataFrame>
        An SNV dataframe, we use the processed version of SNV.
    elaspic_core_data : <DataFrame>
        The ELASPIC results file that contains only the `core` type entries.
    elaspic_interface_data : <DataFrame>
        The ELASPIC results file that contains only the `interface` type entries. It will be used to
        check if a specific (protein, mutation) pair is an interface via `is_interface` function.
    prediction_data : <DataFrame>
        The dataframe which contains prediction column, along with protein, mutation, interactor columns.
    add_core_flag_1_case_dict : <None> or <dict>
        Controls whether to add `ELASPIC degree` or to add +0 (i.e. ignoring `core_flag=1` case).
        If `None`, `core_flag=1` case adds +0.
        Otherwise `core_flag=1` case adds ELASPIC degree.

    Returns
    -------
    personalized_proteins_to_counts_baseline_dict : <dict>
        Maps each patient to a {protein: count} dictionary for BASELINE.
    personalized_proteins_to_counts_our_method_dict : <dict>
        Maps each patient to a {protein: count} dictionary for OUR_METHOD.
    """
    if add_core_flag_1_case_dict:
        print('Adding ELASPIC Degree when `core_flag=1`')  # TODO
    else:
        # A falsy argument (None/empty) is replaced by a defaultdict whose
        # lookups all yield 0, so the `core_flag=1` case contributes nothing.
        print('Adding +0 when `core_flag=1`')
        add_core_flag_1_case_dict = defaultdict(int)
    # MAIN: PERSONALIZED
    personalized_proteins_to_counts_baseline_dict = dict()
    personalized_proteins_to_counts_our_method_dict = dict()
    for patient in tqdm(patients):
        # Setting the counts 0.
        proteins_to_counts_baseline_dict = dict.fromkeys(proteins, 0)
        proteins_to_counts_our_method_dict = dict.fromkeys(proteins, 0)
        # Filter SNV data for current patient.
        patient_snv_data = snv_data[snv_data["Tumor_Sample_Barcode"] == patient]
        for protein, mutations in get_patient_protein_to_mutations_dict(patient_snv_data).items():
            # core_flag semantics: 'N/A' = no mutation of this protein is in
            # ELASPIC; 1 = at least one core mutation (core dominates, hence
            # the break below); 0 = interface mutations only.
            core_flag = 'N/A'
            # print(protein, mutations)
            for mutation in mutations:
                # Check if (protein.mutation) is in ELASPIC.
                if is_in_elaspic(protein, mutation, elaspic_core_data, elaspic_interface_data):
                    # print(f'{protein}.{mutation} IS IN ELASPIC.')
                    if is_core(protein, mutation, elaspic_core_data):
                        # print(' → core found!')
                        core_flag = 1
                        break
                    else:
                        # print(' → interface found!')
                        core_flag = 0
                else:
                    # print(f'{protein}.{mutation} IS NOT IN ELASPIC.')
                    # print(f'CORE_FLAG = {core_flag}')
                    continue
            if core_flag == 1:
                # print(f'CORE_FLAG = {core_flag}')
                # print('+ Adding counts.. ', brca_proteins_to_elaspic_degree[protein])
                # increase baseline counts by elaspic degree
                # Increase our method counts by elaspic degree
                proteins_to_counts_baseline_dict[protein] += add_core_flag_1_case_dict[protein]
                proteins_to_counts_our_method_dict[protein] += add_core_flag_1_case_dict[protein]
            elif core_flag == 0:
                # For our model: Contains Disruptive interactions only.
                disruptive_interactions = set()
                for mutation in mutations:
                    # Check if (protein.mutation) is in ELASPIC.
                    if is_in_elaspic(protein, mutation, elaspic_core_data, elaspic_interface_data):
                        # Prediction == 0 marks a disruptive interaction
                        # -- TODO confirm the label encoding against prediction_data.
                        prediction_search = prediction_data[
                            (prediction_data['UniProt_ID'] == protein) &
                            (prediction_data['Mutation'] == mutation) &
                            (prediction_data['Prediction'] == 0)].copy()
                        # add interactor proteins to disruptive_interactions set.
                        interactor_list = prediction_search['Interactor_UniProt_ID'].to_list()
                        disruptive_interactions.update(interactor_list)
                # For baseline model: Contains Increasing+NoEff interactions and Disruptive interactions
                interactions = set()
                for mutation in mutations:
                    # Check if (protein.mutation) is in ELASPIC.
                    if is_in_elaspic(protein, mutation, elaspic_core_data, elaspic_interface_data):
                        prediction_search = prediction_data[
                            (prediction_data['UniProt_ID'] == protein) &
                            (prediction_data['Mutation'] == mutation)].copy()
                        # add interactor proteins to interactions set.
                        interactor_list = prediction_search['Interactor_UniProt_ID'].to_list()
                        interactions.update(interactor_list)
                # print(f'CORE_FLAG = {core_flag}')
                # print('+ Adding counts.. ')
                # increase baseline counts by elaspic degree
                proteins_to_counts_baseline_dict[protein] += len(interactions)
                # Increase our method counts by depending our predictions
                proteins_to_counts_our_method_dict[protein] += len(disruptive_interactions)
        personalized_proteins_to_counts_baseline_dict[patient] = proteins_to_counts_baseline_dict
        personalized_proteins_to_counts_our_method_dict[patient] = proteins_to_counts_our_method_dict
    return personalized_proteins_to_counts_baseline_dict, personalized_proteins_to_counts_our_method_dict
|
import shlex
from subprocess import PIPE, STDOUT, Popen
def get_simple_cmd_output(cmd, stderr=STDOUT):
    """Run *cmd* (a shell-style command string) and return its decoded stdout.

    :param cmd: command line; split with shlex, so no shell is involved
    :param stderr: where to send stderr (default: merged into stdout)
    :return: the command's standard output, decoded as UTF-8
    """
    args = shlex.split(cmd)
    # Use the process as a context manager so its pipes are closed even if
    # communicate() raises (the original leaked the pipe in that case).
    with Popen(args, stdout=PIPE, stderr=stderr) as proc:
        out, _ = proc.communicate()
    return out.decode("utf8")
def get_simple_cmd_output_lines(cmd, stderr=STDOUT):
    """Run *cmd* and return its decoded stdout as a list of lines."""
    output = get_simple_cmd_output(cmd, stderr)
    return output.splitlines()
|
from uwsgiconf.config import Section
def test_applications_basics(assert_lines):
    """Smoke-test the basic application options of a uwsgiconf Section."""
    section_exit = Section().applications.set_basic_params(exit_if_none=True)
    assert_lines(['need-app = true'], section_exit)

    section_mount = Section().applications.mount('/articles', 'app.py')
    assert_lines(['mount = /articles=app.py'], section_mount)

    section_lazy = Section().applications.switch_into_lazy_mode()
    assert_lines(['lazy-apps = true'], section_lazy)
|
from abc import ABC, abstractmethod
from data_generators.basic_generator import *
from data_generators.standard_generator import StandardDataGenerator
class Environment(ABC):
    """Abstract base class for simulation environments.

    The constructor loads all the basic data from the given json source in
    the given mode, delegating to a data generator chosen by name.
    """

    def __init__(self, mode='all', bid=None, src='src/basic003.json', generator='basic'):
        # Select the data generator implementation by name.
        if generator == 'basic':
            self.data_gen = BasicDataGenerator(src)
        elif generator == 'standard':
            self.data_gen = StandardDataGenerator(src)
        else:
            raise NotImplementedError
        gen = self.data_gen
        self.bids = gen.get_bids()
        self.prices = gen.get_prices()
        self.margins = gen.get_margins()
        self.n_clicks = gen.get_daily_clicks(mode=mode)
        # Bid-dependent quantities are only available when a bid is fixed.
        if bid is not None:
            self.cpc = gen.get_costs_per_click(mode=mode, bid=bid)
            self.conv_rates = gen.get_conversion_rates(mode=mode, bid=bid)
            self.tau = gen.get_future_purchases(mode=mode, bid=bid)
        self.features = gen.get_features()
        self.customer_classes = gen.get_classes()

    @abstractmethod
    def round(self, pulled_arm):
        """Play a single round of the environment"""
|
import random
import re
import numpy as np
from utils import get_segments, get_ids, create_padding_mask
def preprocess(file, BATCH_SIZE, max_length, tokenizer):
    """Build BERT-style pre-training batches (NSP + MLM) from a text file.

    The corpus is split into sentence-like units; pairs of units form
    next-sentence-prediction examples, and tokens are randomly masked for
    the masked-language-model objective.

    Parameters
    ----------
    file : str
        Path to the input text file.
    BATCH_SIZE : int
        Number of examples per batch.
    max_length : int
        Padded sequence length.
    tokenizer : object
        BERT-style tokenizer exposing ``vocab`` and ``tokenize`` -- assumed
        to use ids 101/102/103 for [CLS]/[SEP]/[MASK] -- TODO confirm.

    Returns
    -------
    list
        Batches of [input_ids, segments, padding_masks, is_next, is_masked].
    """
    train_dataset = []
    input_vocab_size = len(tokenizer.vocab)
    # `with` guarantees the handle is closed (the original left it open)
    with open(file, 'r') as f:
        words = f.read()
    words = words.replace('\n\n', '.')
    words = words.replace('\n', ' ')
    words = re.split('[;:.!?]', words)
    i = 0
    for _ in range(len(words)//BATCH_SIZE + 1):
        if i + 1 >= len(words):
            break
        input_ids_list = []
        segment_list = []
        is_masked_list = []
        is_next_list = []
        for j in range(BATCH_SIZE):
            if i + 1 >= len(words):
                break
            now = int(random.random() > 0.5)  # decide if the 2nd sentence has to be next sentence or not
            if now == 1:
                res = ["[CLS]"] + tokenizer.tokenize(words[i]) + ["[SEP]"] + tokenizer.tokenize(words[i+1]) + ["[SEP]"]
            else:
                res = ["[CLS]"] + tokenizer.tokenize(words[i]) + ["[SEP]"] + tokenizer.tokenize(words[random.randint(0, len(words) - 1)]) + ["[SEP]"]
            input_ids = get_ids(res, tokenizer, max_length)
            segment_list.append(get_segments(res, max_length))
            is_next_list.append(now)
            is_masked = [0]*max_length
            for ind in range(max_length):
                if input_ids[ind] == 0:  # if the padding token appears, then break
                    break
                if input_ids[ind] == 101 or input_ids[ind] == 102:  # don't mask [CLS] and [SEP] tokens
                    continue
                if random.random() < 0.15:  # mask 15% of tokens
                    is_masked[ind] = input_ids[ind]
                    if random.random() < 0.8:  # out of 15%, mask 80%
                        input_ids[ind] = 103
                    elif random.random() < 0.5:  # replace 10% with random token
                        # randint's upper bound is inclusive: use
                        # input_vocab_size - 1 so ids stay in-vocab (the
                        # original could emit the out-of-range id len(vocab))
                        input_ids[ind] = random.randint(1000, input_vocab_size - 1)
                    # in the remaining tokens, keep the same token
            input_ids_list.append(input_ids)
            is_masked_list.append(is_masked)
            # a "true next" pair consumes both sentences; a random pair only
            # consumes the first
            if now == 1:
                i += 2
            else:
                i += 1
        input_ids_list = np.array(input_ids_list)
        is_masked_list = np.array(is_masked_list)
        masks = create_padding_mask(input_ids_list)
        segment_list = np.array(segment_list)
        is_next_list = np.array(is_next_list)
        is_next_list = np.reshape(is_next_list, (len(is_next_list), 1))
        train_dataset.append([input_ids_list, segment_list, masks, is_next_list, is_masked_list])
    return train_dataset
|
from nginxauthdaemon import app
|
# App 8 file searcher
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import six.moves.urllib.request
import six.moves.urllib.parse
import six.moves.urllib.error
import six.moves.urllib.parse
from optparse import OptionParser
import json
from six.moves.configparser import ConfigParser, NoOptionError
import webbrowser
import oauth2 as oauth
from io import open
from six.moves import input
def main():
    """Copy every wiki page from one Allura instance to another via the REST API."""
    op = OptionParser(usage='usage: %prog [options]',
                      description='Reads the wiki pages from one Allura wiki instance and uploads them to another Allura wiki instance.')
    op.add_option('-f', '--from-wiki', action='store', dest='from_wiki',
                  help='URL of wiki API to copy from like http://fromserver.com/rest/p/test/wiki/')
    op.add_option('-t', '--to-wiki', action='store', dest='to_wiki',
                  help='URL of wiki API to copy to like http://toserver.com/rest/p/test/wiki/')
    op.add_option('-D', '--debug', action='store_true',
                  dest='debug', default=False)
    (options, args) = op.parse_args(sys.argv[1:])
    base_url = options.to_wiki.split('/rest/')[0]
    # only the destination needs OAuth; the source wiki is read anonymously
    oauth_client = make_oauth_client(base_url)
    wiki_data = six.moves.urllib.request.urlopen(options.from_wiki).read()
    wiki_json = json.loads(wiki_data)['pages']
    for p in wiki_json:
        from_url = options.from_wiki + six.moves.urllib.parse.quote(p)
        to_url = options.to_wiki + six.moves.urllib.parse.quote(p)
        try:
            page_data = six.moves.urllib.request.urlopen(from_url).read()
            page_json = json.loads(page_data)
            if options.debug:
                # debug mode: dump the first page's text and stop
                print(page_json['text'])
                break
            resp = oauth_client.request(
                to_url, 'POST', body=six.moves.urllib.parse.urlencode(dict(text=page_json['text'].encode('utf-8'))))
            if resp[0]['status'] == '200':
                print("Posted {0} to {1}".format(page_json['title'], to_url))
            else:
                print("Error posting {0} to {1}: {2} (project may not exist)".format(page_json['title'], to_url, resp[0]['status']))
                # NOTE(review): this break aborts the whole copy after the
                # first failed post -- confirm that is intended
                break
        except Exception:
            print("Error processing " + p)
            raise
def make_oauth_client(base_url):
    """
    Build an oauth.Client with which callers can query Allura.

    Consumer key/secret and access tokens are read from (and persisted to)
    ~/.allurarc, keyed by base_url. On first use, an interactive
    three-legged OAuth1 flow (browser + PIN) obtains the access token.
    """
    config_file = os.path.join(os.environ['HOME'], '.allurarc')
    cp = ConfigParser()
    cp.read(config_file)
    REQUEST_TOKEN_URL = base_url + '/rest/oauth/request_token'
    AUTHORIZE_URL = base_url + '/rest/oauth/authorize'
    ACCESS_TOKEN_URL = base_url + '/rest/oauth/access_token'
    # option() prompts interactively for anything missing from .allurarc
    oauth_key = option(cp, base_url, 'oauth_key',
                       'Forge API OAuth Key (%s/auth/oauth/): ' % base_url)
    oauth_secret = option(cp, base_url, 'oauth_secret',
                          'Forge API Oauth Secret: ')
    consumer = oauth.Consumer(oauth_key, oauth_secret)
    try:
        oauth_token = cp.get(base_url, 'oauth_token')
        oauth_token_secret = cp.get(base_url, 'oauth_token_secret')
    except NoOptionError:
        # No cached access token: run the interactive OAuth1 dance.
        client = oauth.Client(consumer)
        resp, content = client.request(REQUEST_TOKEN_URL, 'GET')
        assert resp['status'] == '200', resp
        request_token = dict(six.moves.urllib.parse.parse_qsl(content))
        pin_url = "%s?oauth_token=%s" % (
            AUTHORIZE_URL, request_token['oauth_token'])
        if getattr(webbrowser.get(), 'name', '') == 'links':
            # sandboxes
            print(("Go to %s" % pin_url))
        else:
            webbrowser.open(pin_url)
        oauth_verifier = input('What is the PIN? ')
        token = oauth.Token(
            request_token['oauth_token'], request_token['oauth_token_secret'])
        token.set_verifier(oauth_verifier)
        client = oauth.Client(consumer, token)
        resp, content = client.request(ACCESS_TOKEN_URL, "GET")
        access_token = dict(six.moves.urllib.parse.parse_qsl(content))
        oauth_token = access_token['oauth_token']
        oauth_token_secret = access_token['oauth_token_secret']
        cp.set(base_url, 'oauth_token', oauth_token)
        cp.set(base_url, 'oauth_token_secret', oauth_token_secret)
        # save oauth token for later use
        cp.write(open(config_file, 'w'))
        print('Saving oauth tokens in {} for later re-use'.format(config_file))
        print()
    access_token = oauth.Token(oauth_token, oauth_token_secret)
    oauth_client = oauth.Client(consumer, access_token)
    return oauth_client
def option(cp, section, key, prompt=None):
    """Return *key* from *section* of config *cp*.

    Missing sections are created; a missing value is prompted for on stdin
    and stored back into the parser before being returned.
    """
    if not cp.has_section(section):
        cp.add_section(section)
    if cp.has_option(section, key):
        return cp.get(section, key)
    value = input(prompt or ('%s: ' % key))
    cp.set(section, key, value)
    return value
if __name__ == '__main__':
    # run only when executed as a script, not when imported
    main()
|
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
import pickle
def averdd(x):
    """Accumulate one CSV weather record into the global hourly averages.

    *x* is one CSV line: timestamp, month, day, hour, minute, temperature,
    pressure, humidity, wind speed, wind direction, clouds, weather code.
    The running averages are pickled to disk on every record.
    """
    # NOTE(review): `dict` is the module-level accumulator (it shadows the
    # builtin); `counts` is published by f() from the stream's record count.
    global dict,counts,i
    i += 1
    print(i)
    ts, mo, da, ho, mi, te, pr, hu, ws, wd, cl, wc = x.split(',')
    if mi=='0':
        # minute 0: a new hour starts, reset the accumulators
        init_dict()
    dict['hour'] = ho
    # divide each sample by the batch size so the sums become means;
    # assumes counts != 0 -- TODO confirm f() runs before the first record
    dict['temp'] += float(te)/counts
    dict['pressure'] += float(pr)/counts
    dict['humidity'] += float(hu)/counts
    dict['wind_sp'] += float(ws)/counts
    dict['wind_de'] += float(wd)/counts
    dict['clouds'] += float(cl)/counts
    dict['weather_code'] += float(wc)/counts
    filename = 'test.json'
    # NOTE(review): despite the .json name, this writes binary pickle data
    with open(filename,'wb') as file:
        pickle.dump(dict,file)
def init_dict():
    """Reset every accumulator field of the global weather dict to zero."""
    global dict
    for field in ('hour', 'temp', 'pressure', 'humidity',
                  'wind_de', 'wind_sp', 'clouds', 'weather_code'):
        dict[field] = 0
def f(x):
    """Publish the single count value of RDD *x* into the global ``counts``."""
    global counts
    taken = x.take(1)
    counts = taken
    # take(1) yields at most one element; unwrap it when present
    for element in taken:
        counts = element
if __name__ == '__main__':
    i = 0       # processed-record counter (printed by averdd)
    dict = {}   # global accumulator; shadows the builtin `dict`
    init_dict()
    sc = SparkContext('local[2]','weather')
    ssc = StreamingContext(sc,10)  # 10-second micro-batches
    #/large_data_streaming/project/streaming/bb
    # NOTE(review): hard-coded local input directory -- adjust per machine
    local = '/Users/michael/OneDrive/Documents/large_data_streaming/project/streaming/datasets_streaming'
    lines = ssc.textFileStream(local)
    count = lines.count()
    # publish each batch's record count into the global `counts` before
    # averdd divides by it
    count.foreachRDD(f)
    lines.foreachRDD(lambda rdd: rdd.foreach(averdd))
    ssc.start()
    ssc.awaitTermination()
#!/usr/bin/env python
# pylint:disable=E0401, E0611, C0103, W0621, W0612
# pylint:disable=R0912, R0914, R0915
'''Margarita - An Flask Application for managing
Reposado Catalogs'''
# This Flask application will only work with
# Python 3 - Tested with Python 3.6.8 on CentOS 8
# Original Project by jessepeterson
# https://github.com/jessepterson/margarita
# Joshua D. Miller - josh@psu.edu
# The Pennsylvania State University
# Last Updated February 5, 2020
# Imports Needed
from __future__ import print_function
from urllib.parse import urlparse
from distutils.version import LooseVersion
from operator import itemgetter
import getopt
import os
import sys
from flask import (Flask, jsonify, request, render_template,
redirect, session, Response, make_response)
from onelogin.saml2.auth import OneLogin_Saml2_Auth
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from secret_key import SECRET_KEY
# Reposado library which you will need to make
# a symbolic link to in your margarita directory
from reposadolib import reposadocommon
try:
import json
except ImportError:
# couldn't find json, try simplejson library
import simplejson as json
# Configure the Application
app = Flask(__name__)
app.config['SECRET_KEY'] = SECRET_KEY
# SAML settings (settings.json etc.) live in ./saml next to this file
app.config['SAML_PATH'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'saml')
# Maps each Apple software-update catalog filename (the last path component
# of the catalog URL) to the macOS version that uses it.
apple_catalog_version_map = {
    # Catalina
    'index-10.15-10.14-10.13-10.12-10.11-10.10-10.9'
    '-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.15',
    # Mojave
    'index-10.14-10.13-10.12-10.11-10.10-10.9'
    '-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.14',
    # High Sierra
    'index-10.13-10.12-10.11-10.10-10.9-mountainlion'
    '-lion-snowleopard-leopard.merged-1.sucatalog': '10.13',
    # Sierra
    'index-10.12-10.11-10.10-10.9-mountainlion'
    '-lion-snowleopard-leopard.merged-1.sucatalog': '10.12',
    # El Capitan
    'index-10.11-10.10-10.9-mountainlion-lion-'
    'snowleopard-leopard.merged-1.sucatalog': '10.11',
    # Yosemite
    'index-10.10-10.9-mountainlion-lion-snowleopard'
    '-leopard.merged-1.sucatalog': '10.10',
    # Mavericks
    'index-10.9-mountainlion-lion-snowleopard'
    '-leopard.merged-1.sucatalog': '10.9',
    # Mountain Lion
    'index-mountainlion-lion-snowleopard-'
    'leopard.merged-1.sucatalog': '10.8',
    # Lion
    'index-lion-snowleopard-leopard.merged-1.sucatalog': '10.7',
    # Snow Leopard
    'index-leopard-snowleopard.merged-1.sucatalog': '10.6',
    # Leopard
    'index-leopard.merged-1.sucatalog': '10.5',
    # Tiger
    'index-1.sucatalog': '10.4',
    'index.sucatalog': '10.4',
}
# cache the keys of the catalog version map dict
# (a live dict view: O(1) membership tests, stays in sync with the map)
apple_catalog_suffixes = apple_catalog_version_map.keys()
def init_saml_auth(req):
    '''Build a SAML auth object from the app's configured SAML_PATH settings'''
    return OneLogin_Saml2_Auth(req, custom_base_path=app.config['SAML_PATH'])
def prepare_flask_request(request):
    '''Translate a Flask request into the dict that python-saml expects.
    Reference https://github.com/onelogin/python-saml
    /tree/master/demo-flask/templates'''
    # If server is behind proxys or balancers use the HTTP_X_FORWARDED fields
    parsed = urlparse(request.url)
    result = {
        'https': 'on' if request.scheme == 'https' else 'off',
        'http_host': request.host,
        'server_port': parsed.port,
        'script_name': request.path,
        'get_data': request.args.copy(),
        'post_data': request.form.copy(),
        # Uncomment if using ADFS as IdP, https://github.com/onelogin/python-saml/pull/144
        # 'lowercase_urlencoding': True,
        'query_string': request.query_string
    }
    return result
def versions_from_catalogs(cats):
    '''Given an iterable of catalogs return the corresponding OS X versions'''
    # the last portion of each catalog URL path identifies the OS version
    short_names = (cat.split('/')[-1] for cat in cats)
    return {apple_catalog_version_map[name]
            for name in short_names
            if name in apple_catalog_suffixes}
def json_response(r):
    '''Glue for wrapping raw JSON responses'''
    body = json.dumps(r)
    return Response(body, status=200, mimetype='application/json')
@app.route('/', methods=['GET', 'POST'])
def index():
    '''Single entry point implementing the SAML SSO/SLO handshake and
    rendering the Margarita login page.

    Dispatches on query-string flags:
      sso / sso2 -- start a login (sso2 returns to /attrs/ afterwards)
      slo        -- start a logout at the IdP
      acs        -- assertion consumer service: process the IdP login response
      sls        -- single logout service: process the IdP logout response
    '''
    req = prepare_flask_request(request)
    auth = init_saml_auth(req)
    # defaults for the template when no SAML action is requested
    errors = []
    error_reason = None
    not_auth_warn = False
    success_slo = False
    attributes = False
    paint_logout = False
    if 'sso' in request.args:
        print('Authenticating....')
        return redirect(auth.login())
        # If AuthNRequest ID need to be stored in order to later validate it, do instead
        # sso_built_url = auth.login()
        # request.session['AuthNRequestID'] = auth.get_last_request_id()
        # return redirect(sso_built_url)
    elif 'sso2' in request.args:
        # login variant that returns to the /attrs/ page afterwards
        return_to = '%sattrs/' % request.host_url
        return redirect(auth.login(return_to))
    elif 'slo' in request.args:
        # start a logout, passing along whatever identity details we have
        name_id = session_index = name_id_format = name_id_nq = name_id_spnq = None
        if 'samlNameId' in session:
            name_id = session['samlNameId']
        if 'samlSessionIndex' in session:
            session_index = session['samlSessionIndex']
        if 'samlNameIdFormat' in session:
            name_id_format = session['samlNameIdFormat']
        if 'samlNameIdNameQualifier' in session:
            name_id_nq = session['samlNameIdNameQualifier']
        if 'samlNameIdSPNameQualifier' in session:
            name_id_spnq = session['samlNameIdSPNameQualifier']
        return redirect(auth.logout(
            name_id=name_id, session_index=session_index, nq=name_id_nq,
            name_id_format=name_id_format, spnq=name_id_spnq))
        # If LogoutRequest ID need to be stored in order to later validate it, do instead
        # slo_built_url = auth.logout(name_id=name_id, session_index=session_index)
        # session['LogoutRequestID'] = auth.get_last_request_id()
        # return redirect(slo_built_url)
    elif 'acs' in request.args:
        # assertion consumer service: the IdP posts the login result here
        request_id = None
        if 'AuthNRequestID' in session:
            request_id = session['AuthNRequestID']
        auth.process_response(request_id=request_id)
        errors = auth.get_errors()
        not_auth_warn = not auth.is_authenticated()
        if len(errors) == 0:
            # login succeeded: cache the SAML identity in the session
            # NOTE(review): 'samlNameId' is read by the slo branch above but
            # never written here -- confirm whether it should be stored too
            if 'AuthNRequestID' in session:
                del session['AuthNRequestID']
            session['samlUserdata'] = auth.get_attributes()
            session['samlNameIdFormat'] = auth.get_nameid_format()
            session['samlNameIdNameQualifier'] = auth.get_nameid_nq()
            session['samlNameIdSPNameQualifier'] = auth.get_nameid_spnq()
            session['samlSessionIndex'] = auth.get_session_index()
            self_url = OneLogin_Saml2_Utils.get_self_url(req)
            # follow RelayState only when it points somewhere other than
            # this very URL (avoids a redirect loop)
            if 'RelayState' in request.form and self_url != request.form['RelayState']:
                return redirect(auth.redirect_to(request.form['RelayState']))
        elif auth.get_settings().is_debug_active():
            error_reason = auth.get_last_error_reason()
    elif 'sls' in request.args:
        # single logout service: the IdP posts the logout result here
        request_id = None
        if 'LogoutRequestID' in session:
            request_id = session['LogoutRequestID']
        dscb = lambda: session.clear()  # callback wiping the local session
        url = auth.process_slo(request_id=request_id, delete_session_cb=dscb)
        errors = auth.get_errors()
        if len(errors) == 0:
            if url is not None:
                return redirect(url)
            else:
                success_slo = True
        elif auth.get_settings().is_debug_active():
            error_reason = auth.get_last_error_reason()
    if 'samlUserdata' in session:
        # a logged-in user: show the logout control and any attributes
        paint_logout = True
        if len(session['samlUserdata']) > 0:
            attributes = session['samlUserdata'].items()
    return render_template(
        'index.html',
        errors=errors,
        error_reason=error_reason,
        not_auth_warn=not_auth_warn,
        success_slo=success_slo,
        attributes=attributes,
        paint_logout=paint_logout
    )
@app.route('/metadata/')
def metadata():
    '''Serve the SP metadata XML derived from settings.json and
    advanced_settings.json in the saml directory, once configured.'''
    req = prepare_flask_request(request)
    auth = init_saml_auth(req)
    settings = auth.get_settings()
    sp_metadata = settings.get_sp_metadata()
    errors = settings.validate_metadata(sp_metadata)
    if errors:
        return make_response(', '.join(errors), 500)
    resp = make_response(sp_metadata, 200)
    resp.headers['Content-Type'] = 'text/xml'
    return resp
@app.route('/branches', methods=['GET'])
def list_branches():
    '''Returns catalog branch names and associated updates'''
    branches = reposadocommon.get_catalog_branches()
    return json_response(branches.keys())
def get_description_content(html):
    '''Extract the useful description text from an HTML fragment.

    Returns the span from the first <p> tag through its last closing tag
    (tags included), or the inner content of a <body> element when no <p>
    exists. Returns None for empty input or when neither tag is found.
    '''
    if len(html) == 0:
        return None
    # in the interest of (attempted) speed, try to avoid regexps
    lwrhtml = html.lower()
    celem = 'p'
    startloc = lwrhtml.find('<' + celem + '>')
    if startloc == -1:
        startloc = lwrhtml.find('<' + celem + ' ')
    if startloc == -1:
        celem = 'body'
        startloc = lwrhtml.find('<' + celem)
        if startloc != -1:
            # skip past the whole opening tag, attributes included
            # (the old fixed "+6" miscomputed the offset for <body attr=...>)
            gt = lwrhtml.find('>', startloc)
            startloc = gt + 1 if gt != -1 else -1
    if startloc == -1:
        # no <p> nor <body> tags. bail.
        return None
    endloc = lwrhtml.rfind('</' + celem + '>')
    if endloc == -1:
        endloc = len(html)
    elif celem != 'body':
        # if the element is a body tag, then don't include it.
        # DOM parsing will just ignore it anyway
        endloc += len(celem) + 3
    return html[startloc:endloc]
def product_urls(cat_entry):
    '''Retrieve package URLs for a given reposado product CatalogEntry.
    Will rewrite URLs to be served from local reposado repo if necessary.'''
    return [
        {'url': reposadocommon.rewrite_one_url(package['URL']),
         'size': package['Size']}
        for package in cat_entry.get('Packages', [])
    ]
@app.route('/products', methods=['GET'])
def products():
    '''Get all products available currently whether
    listed or unlisted from reposado'''
    products = reposadocommon.get_product_info()
    catalog_branches = reposadocommon.get_catalog_branches()
    prodlist = []
    for prodid in products.keys():
        # only products with complete metadata are reported
        if ('title' in products[prodid] and 'version' in products[prodid]
                and 'PostDate' in products[prodid]):
            prod = {
                'title': products[prodid]['title'],
                'version': products[prodid]['version'],
                'PostDate': products[prodid]['PostDate'].strftime(
                    '%Y-%m-%d'),
                'description': get_description_content(
                    products[prodid]['description']),
                'id': prodid,
                # deprecated when Apple no longer lists it in any catalog
                'depr': len(
                    products[prodid].get(
                        'AppleCatalogs', [])) < 1,
                'branches': [],
                # macOS versions this update originally appeared for,
                # newest version first
                'oscatalogs': sorted(versions_from_catalogs(
                    products[prodid].get(
                        'OriginalAppleCatalogs')),
                    key=LooseVersion,
                    reverse=True),
                'packages': product_urls(
                    products[prodid]['CatalogEntry']),
            }
            for branch in catalog_branches.keys():
                if prodid in catalog_branches[branch]:
                    prod['branches'].append(branch)
            prodlist.append(prod)
        else:
            print('Invalid update!')
    # newest first
    sprodlist = sorted(prodlist, key=itemgetter('PostDate'), reverse=True)
    return json_response({'products': sprodlist, 'branches': list(catalog_branches.keys())})
@app.route('/new_branch/<branchname>', methods=['POST'])
def new_branch(branchname):
    '''Create a new, empty branch in reposado.

    Responds 401 if the branch already exists.'''
    # abort is not in this module's top-level flask imports; import it
    # locally so the duplicate-branch path raises HTTP 401 instead of a
    # NameError.
    from flask import abort
    catalog_branches = reposadocommon.get_catalog_branches()
    if branchname in catalog_branches:
        reposadocommon.print_stderr('Branch %s already exists!', branchname)
        abort(401)
    catalog_branches[branchname] = []
    reposadocommon.write_catalog_branches(catalog_branches)
    return jsonify(result='success')
@app.route('/delete_branch/<branchname>', methods=['POST'])
def delete_branch(branchname):
    '''Delete a branch in reposado, removing its generated catalog files.

    Returns {"result": false} when the branch does not exist.'''
    catalog_branches = reposadocommon.get_catalog_branches()
    if not branchname in catalog_branches:
        reposadocommon.print_stderr('Branch %s does not exist!', branchname)
        # A bare `return` (None) from a Flask view causes a 500; report
        # failure the same way dup()/dup_apple() do.
        return jsonify(result=False)
    del catalog_branches[branchname]
    # this is not in the common library, so we have to duplicate code
    # from repoutil
    for catalog_URL in reposadocommon.pref('AppleCatalogURLs'):
        localcatalogpath = reposadocommon.get_local_pathname_from_url(catalog_URL)
        # now strip the '.sucatalog' bit from the name
        if localcatalogpath.endswith('.sucatalog'):
            localcatalogpath = localcatalogpath[0:-10]
        branchcatalogpath = localcatalogpath + '_' + branchname + '.sucatalog'
        if os.path.exists(branchcatalogpath):
            reposadocommon.print_stdout(
                'Removing %s', os.path.basename(branchcatalogpath))
            os.remove(branchcatalogpath)
    reposadocommon.write_catalog_branches(catalog_branches)
    return jsonify(result=True)
@app.route('/add_all/<branchname>', methods=['POST'])
def add_all(branchname):
    '''Add all products to branch in reposado'''
    products = reposadocommon.get_product_info()
    catalog_branches = reposadocommon.get_catalog_branches()
    # materialize the keys: a dict_keys view aliases `products` and does
    # not serialize when the branches are written out
    catalog_branches[branchname] = list(products.keys())
    reposadocommon.write_catalog_branches(catalog_branches)
    reposadocommon.write_all_branch_catalogs()
    return jsonify(result=True)
@app.route('/process_queue', methods=['POST'])
def process_queue():
    '''Apply queued listing changes: toggle each product in its branch.'''
    catalog_branches = reposadocommon.get_catalog_branches()
    for change in request.json:
        prodId = change['productId']
        branch = change['branch']
        if branch not in catalog_branches.keys():
            print('No such catalog')
            continue
        branch_products = catalog_branches[branch]
        was_listed = change['listed']
        if was_listed and prodId in branch_products:
            # it /was/ listed, so unlist it
            print('Removing product %s from branch %s' % (prodId, branch, ))
            branch_products.remove(prodId)
        elif not was_listed and prodId not in branch_products:
            # it /was not/ listed, so list it
            print('Adding product %s to branch %s' % (prodId, branch, ))
            branch_products.append(prodId)
    print('Writing catalogs')
    reposadocommon.write_catalog_branches(catalog_branches)
    reposadocommon.write_all_branch_catalogs()
    return jsonify(result=True)
@app.route('/dup_apple/<branchname>', methods=['POST'])
def dup_apple(branchname):
    '''Duplicate apple branch in reposado to one of your branches'''
    catalog_branches = reposadocommon.get_catalog_branches()
    if branchname not in catalog_branches.keys():
        print('No branch ' + branchname)
        return jsonify(result=False)
    # keep every update that still appears in at least one Apple catalog
    # (i.e. is not deprecated)
    products = reposadocommon.get_product_info()
    catalog_branches[branchname] = [
        prodid for prodid in products.keys()
        if len(products[prodid].get('AppleCatalogs', [])) >= 1
    ]
    print('Writing catalogs')
    reposadocommon.write_catalog_branches(catalog_branches)
    reposadocommon.write_all_branch_catalogs()
    return jsonify(result=True)
@app.route('/dup/<frombranch>/<tobranch>', methods=['POST'])
def dup(frombranch, tobranch):
    '''Duplicate one of your branches to another'''
    catalog_branches = reposadocommon.get_catalog_branches()
    if frombranch not in catalog_branches.keys() or tobranch not in catalog_branches.keys():
        # was: print('No branch ' + branchname) -- NameError: no such
        # variable exists in this view (params are frombranch/tobranch)
        print('No branch ' + frombranch + ' or ' + tobranch)
        return jsonify(result=False)
    # copy the list: assigning it directly would alias both branches to the
    # same list object, so later edits to one would silently change the other
    catalog_branches[tobranch] = list(catalog_branches[frombranch])
    print('Writing catalogs')
    reposadocommon.write_catalog_branches(catalog_branches)
    reposadocommon.write_all_branch_catalogs()
    return jsonify(result=True)
@app.route('/config_data', methods=['POST'])
def config_data():
    '''Get current configuration from reposado'''
    # catalog_branches = reposadocommon.getCatalogBranches()
    check_prods = request.json
    if len(check_prods) > 0:
        cd_prods = reposadocommon.check_or_remove_config_data_attr(
            check_prods, suppress_output=True)
    else:
        cd_prods = []
    # map each requested product id to whether it carries config data
    response_prods = {prod_id: prod_id in cd_prods for prod_id in check_prods}
    print(response_prods)
    return json_response(response_prods)
@app.route('/remove_config_data/<product>', methods=['POST'])
def remove_config_data(product):
    '''Remove the config-data attribute from a single product.

    The product id comes from the URL; the request body is unused
    (the dead `check_prods = request.json` local was removed).'''
    products = reposadocommon.check_or_remove_config_data_attr(
        [product, ], remove_attr=True, suppress_output=True)
    return json_response(products)
def main():
    '''main function that runs Margarita as a WSGI
    Web Application.

    Options: -d debug mode, -b bind address, -p port.'''
    optlist, args = getopt.getopt(sys.argv[1:], 'db:p:')
    flaskargs = {
        'host': '0.0.0.0',
        'port': 8089,
        'threaded': True,
    }
    for opt, val in optlist:
        if opt == '-d':
            flaskargs['debug'] = True
        elif opt == '-b':
            flaskargs['host'] = val
        elif opt == '-p':
            flaskargs['port'] = int(val)
    app.run(**flaskargs)
if __name__ == '__main__':
    # run only when executed as a script, not when imported as a WSGI module
    main()
|
# Generated by Django 2.2.2 on 2019-06-23 15:23
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``AudioRecording.is_noise`` to ``hide`` (auto-generated)."""

    dependencies = [
        ('tracemap', '0006_species_mdd_id'),
    ]
    operations = [
        migrations.RenameField(
            model_name='audiorecording',
            old_name='is_noise',
            new_name='hide',
        ),
    ]
|
# python3 asteroids.py
# by Etienne Pitout
import math
import time
import random
import turtle
import engine
WIDTH = 640            # playfield width in pixels
HEIGHT = 480           # playfield height in pixels
MAXY = HEIGHT // 2     # turtle coordinates are centred on (0, 0)
MINY = -HEIGHT // 2
MAXX = WIDTH // 2
MINX = -WIDTH // 2
MYX = 0                # ship start position
MYY = 0
MYDELTA = 0            # ship start speed
MYCOLOR = 'white'
MYSHAPE = 'triangle'
THETA = 10             # degrees turned per key press
BGCOLOR = 'black'
TEXTCOLOR = 'white'
MYSHOTSPEED = 4        # shot speed
NUMSHOT = 0
MAXAS = 5 #maximum asteroids
CURAS = 0              # current asteroid count
MAXSHOTS = 5           # max shots alive at once
DECEL = 0.03           # per-frame deceleration factor (see Me.move)
SHOTLIFE = 100
# state singleton class
class S:
    """Mutable game state shared across the module (single instance)."""
    def __init__(self):
        self.playing = False # game state
        self.me = None       # the player's ship -- presumably a Me instance; confirm against caller
        self.shots = 0       # shots currently alive
        self.curas = 0       # asteroids currently alive
        self.score = 0
s = None  # the S() instance -- created outside this view, TODO confirm
# Control-flow exception: raised to break out of the engine loop and
# start a brand-new game.
class Replay(BaseException):
    """Raised by the input handler to request a replay."""
# game objects
class Me(engine.GameObject):
    """The player's ship."""

    def __init__(self):
        # Unit vector of the direction the ship faces (starts along +x).
        self.dirx = 1.0
        self.diry = 0.0
        # Facing angle in degrees; kept in sync with dirx/diry.
        self.theta = 0
        super().__init__(MYX, MYY, 0, -MYDELTA, MYSHAPE, MYCOLOR)
        turtle.tiltangle(0)

    def heading(self):
        # Turtle heading from the ship toward the point it is facing.
        dx = self.dirx
        dy = self.diry
        return turtle.towards(self.x + dx, self.y + dy)

    def delete(self):
        # this catches OOB cases as well as collisions
        super().delete()

    def moveu(self):
        #move forward
        # Accelerate along the current facing direction.
        self.deltax += round(self.dirx, 3)
        self.deltay += round(self.diry, 3)

    def movel(self):
        #turn left
        self.theta += THETA
        newdirx = math.cos(math.radians(self.theta))
        newdiry = math.sin(math.radians(self.theta))
        self.dirx = newdirx
        self.diry = newdiry
        #self.moveu() #aligns the ship

    def mover(self):
        #turn right
        self.theta -= THETA
        newdirx = math.cos(math.radians(self.theta))
        newdiry = math.sin(math.radians(self.theta))
        self.dirx = newdirx
        self.diry = newdiry
        #self.moveu()

    def move(self):
        #deceleration
        # NOTE(review): the >0 and <0 branches apply the identical
        # formula, so this is effectively an unconditional
        # self.delta* *= (1 - DECEL); kept as-is here.
        if self.deltax > 0:
            self.deltax -= self.deltax * DECEL
        if self.deltay > 0:
            self.deltay -= self.deltay * DECEL
        if self.deltax < 0:
            self.deltax -= self.deltax * DECEL
        if self.deltay < 0:
            self.deltay -= self.deltay * DECEL
        # NOTE(review): newdx/newdy are assigned but never used.
        newdx = self.deltax
        newdy = self.deltay
        #screen wraparound
        if self.x >= WIDTH / 2:
            self.x = -WIDTH / 2
        elif self.x <= -WIDTH / 2:
            self.x = WIDTH / 2 - 2
        elif self.y >= HEIGHT / 2:
            self.y = -HEIGHT / 2
        elif self.y <= -HEIGHT / 2:
            self.y = HEIGHT / 2
        super().move()

    def update(self):
        turtle.shapesize(1,1.4)
        #screen wraparound
        # Here the position is clamped to the edges (move() above does
        # the actual wraparound).
        if self.x > WIDTH / 2:
            self.x = WIDTH / 2
        elif self.x < -WIDTH / 2:
            self.x = -WIDTH / 2
        elif self.y > HEIGHT / 2:
            self.y = HEIGHT / 2
        elif self.y < -HEIGHT / 2:
            self.y = -HEIGHT / 2
        super().update()

    def get_bc(self):
        # bounding circle, for circle-based collision detection
        return self.x, self.y, 10

    # Simple accessors used by the input callback when firing shots.
    def getx(self): return self.x
    def gety(self): return self.y
    def getdirx(self): return self.dirx
    def getdiry(self): return self.diry
    def getdelx(self): return self.deltax
    def getdely(self): return self.deltay
class MyShot(engine.GameObject):
    """A projectile fired by the player's ship."""

    def __init__(self, x, y, deltax, deltay):
        super().__init__(x, y, deltax, deltay,
                         'circle', 'white')

    def delete(self):
        # this catches OOB cases as well as collisions
        super().delete()

    def update(self):
        turtle.shapesize(0.2,0.2)
        turtle.settiltangle(0)
        #screen wraparound
        if self.x >= WIDTH / 2:
            self.x = -WIDTH / 2
        elif self.x <= -WIDTH / 2:
            self.x = WIDTH / 2 - 2
        elif self.y >= HEIGHT / 2:
            self.y = -HEIGHT / 2
        elif self.y <= -HEIGHT / 2:
            self.y = HEIGHT / 2
        super().update()
        # Expire old shots so the player can fire again.
        if self.age >= SHOTLIFE:
            engine.del_obj(self)
            s.shots -= 1

    def get_bc(self):
        # bounding circle, for circle-based collision detection
        return self.x, self.y, 10
class Asteroid(engine.GameObject):
    """A drifting asteroid; size is stored so hits can split it smaller."""

    def __init__(self, x, y, deltax, deltay, sizx, sizy):
        super().__init__(x, y, deltax, deltay,
                         'turtle', 'white')
        # Shape-size factors; also used to decide whether a hit splits
        # this asteroid into smaller ones.
        self.sizex = sizx
        self.sizey = sizy

    def delete(self):
        # this catches OOB cases as well as collisions
        super().delete()

    def update(self):
        turtle.shapesize(self.sizex, self.sizey)
        turtle.settiltangle(0)
        #screen wraparound
        if self.x >= WIDTH / 2:
            self.x = -WIDTH / 2
        elif self.x <= -WIDTH / 2:
            self.x = WIDTH / 2 - 2
        elif self.y >= HEIGHT / 2:
            self.y = -HEIGHT / 2
        elif self.y <= -HEIGHT / 2:
            self.y = HEIGHT / 2
        super().update()
        # NOTE(review): shrinks the turtle shape again after the base
        # update — presumably to reset state for the next object drawn;
        # confirm against engine.GameObject.update().
        turtle.shapesize(0.1, 0.1)

    def get_bc(self):
        # bounding circle, for circle-based collision detection
        return self.x, self.y, 10
# collision handling
def iscoll_circle(obj1, obj2):
    """Return True if the bounding circles of obj1 and obj2 intersect.

    Each object must provide get_bc() -> (x, y, radius).
    """
    x1, y1, r1 = obj1.get_bc()
    x2, y2, r2 = obj2.get_bc()
    # from http://devmag.org.za/2009/04/13/basic-collision-detection-in-2d-part-1/
    # Circles intersect when the distance between centers is less than
    # the sum of the radii.  Comparing squared values gives an identical
    # result for non-negative radii and avoids the sqrt() call.
    return (x2 - x1) ** 2 + (y2 - y1) ** 2 < (r1 + r2) ** 2
def col_asteroid2(obj2, obj1):
    # Same handler with the argument order flipped, so it can be
    # registered for (MyShot, Asteroid) as well as (Asteroid, MyShot).
    return col_asteroid(obj1, obj2)
def col_asteroid(obj1, obj2):
    """Handle an asteroid (obj1) being hit by a shot (obj2).

    On intersection: destroys both objects, updates counters and the
    score display, and — unless the asteroid is already minimal —
    splits it into two smaller child asteroids.
    """
    if not iscoll_circle(obj1, obj2):
        return
    engine.del_obj(obj1)
    engine.del_obj(obj2)
    s.shots -= 1
    s.curas -= 1
    s.score += 1
    draw_score()
    # create 2 new asteroids (the original duplicated this code inline;
    # it is now factored into _spawn_child_asteroid)
    if obj1.sizex > 2 or obj1.sizey > 2:
        for _ in range(2):
            _spawn_child_asteroid(obj1)


def _spawn_child_asteroid(parent):
    """Spawn one child asteroid at the parent's position, with a random
    size strictly smaller than the parent's and a random slow drift."""
    if parent.sizex == 1:
        xsize = 1
    else:
        xsize = random.randint(1, parent.sizex - 1)
    if parent.sizey == 1:
        ysize = 1
    else:
        ysize = random.randint(1, parent.sizey - 1)
    deltx = random.randint(-1, 1)
    delty = random.randint(-1, 1)
    engine.add_obj(Asteroid(parent.x, parent.y, deltx, delty, xsize, ysize))
def col_gameover(obj1, obj2):
    """End the game when the ship and an asteroid collide."""
    if iscoll_circle(obj1, obj2):
        # Remove both objects, then show the game-over banner.
        # (The unused get_bc() unpacks from the original were dropped.)
        engine.del_obj(obj1)
        engine.del_obj(obj2)
        lose()
# callbacks
def spawn_asteroid_cb():
    """Random-event callback: spawn one asteroid, up to MAXAS alive."""
    if s.curas >= MAXAS:
        return
    pos_x = random.randint(MINX, MAXX)
    pos_y = random.randint(MINY, MAXY)
    size_x = random.randint(1, 5)
    size_y = random.randint(1, 5)
    drift_x = random.randint(-5, 5)
    drift_y = random.randint(-5, 5)
    engine.add_obj(Asteroid(pos_x, pos_y, drift_x, drift_y, size_x, size_y))
    s.curas += 1
def input_cb(key):
    """Keyboard handler: quit (q/Q), fire or replay (space), steer."""
    if key == 'q' or key == 'Q':
        exit()
    if key == 'space':
        if not s.playing:
            # replay
            raise Replay()
        else:
            # Strict < : the original "<=" was off by one and allowed
            # MAXSHOTS + 1 simultaneous shots.
            if s.shots < MAXSHOTS:
                # Shot inherits the ship's velocity plus muzzle speed
                # along the facing direction.
                engine.add_obj(MyShot(
                    s.me.getx(), s.me.gety(),
                    MYSHOTSPEED * s.me.getdirx() + s.me.getdelx(),
                    MYSHOTSPEED * s.me.getdiry() + s.me.getdely()))
                s.shots += 1
    if key == 'Up':
        s.me.moveu()
    elif key == 'Right':
        s.me.mover()
    elif key == 'Left':
        s.me.movel()
# high-level routines: initialization, title screen, gameplay
def init():
    # Create the game window once at startup and paint the background.
    engine.init_screen(WIDTH, HEIGHT)
    turtle.bgcolor(BGCOLOR)
def banner(s, color=TEXTCOLOR):
    # Display centered text for 3 seconds, then erase it.
    # NOTE(review): the parameter name `s` shadows the module-level game
    # state singleton `s`; harmless inside this function, but worth
    # renaming at the next interface-breaking change.
    turtle.home()
    turtle.color(color)
    turtle.write(s, True, align='center', font=('Arial', 48, 'italic'))
    time.sleep(3)
    turtle.undo()
def title_screen():
    # Show the game title briefly before play starts.
    banner('TURTLEOIDS')
def lose():
    """Stop the current game and show the final score + replay prompt."""
    s.playing = False
    turtle.goto(0, 0)
    turtle.color(TEXTCOLOR)
    mesg = 'Score %d - press space to play again' % s.score
    turtle.write(mesg, True, align='center', font=('Arial', 24, 'italic'))
def draw_score():
    # Redraw the score near the top edge: blank the previous value with
    # a background-colored dot, then write the new value.
    turtle.goto(0, MAXY-25)
    turtle.dot(50, 'black')
    turtle.color('white')
    turtle.write(s.score, align='center', font=('Arial', 14, 'normal'))
def play():
    # Run one complete game: build fresh state, register the ship,
    # callbacks and collision handlers, then enter the engine loop
    # (which only returns when the game ends or Replay is raised).
    global s
    s = S()
    engine.init_engine()
    engine.set_keyboard_handler(input_cb)
    s.me = Me()
    engine.add_obj(s.me)
    # 0.01 is the random-event rate parameter; exact semantics are
    # defined by engine.add_random_event.
    engine.add_random_event(0.01, spawn_asteroid_cb)
    # Register both argument orders, since the engine dispatches on the
    # (type, type) pair of the colliding objects.
    engine.register_collision(Asteroid, MyShot, col_asteroid)
    engine.register_collision(MyShot, Asteroid, col_asteroid2)
    engine.register_collision(Me, Asteroid, col_gameover)
    engine.register_collision(Asteroid, Me, col_gameover)
    draw_score()
    s.playing = True
    engine.engine()
# main routine
if __name__ == '__main__':
    init()
    title_screen()
    # play() raises Replay (via input_cb) when the player asks for a new
    # game after losing; the loop simply starts over.
    while True:
        try:
            play()
        except Replay:
            pass
|
from greenlet import greenlet
def consumer():
    """Greenlet body: repeatedly yield to the producer and print
    whatever value it sends back over the switch."""
    last = ''
    while True:
        # Switch to the producer; execution resumes here with the value
        # the producer passes to con.switch(...).
        receival = pro.switch(last)
        if receival is not None:
            # Parenthesized print works under both Python 2 and 3
            # (the original used the Python-2-only print statement).
            print('Consume %s' % receival)
            last = receival
def producer(n):
    """Greenlet body: produce the integers 1..n, handing each one to the
    consumer greenlet as it is produced."""
    con.switch()
    x = 0
    while x < n:
        x += 1
        # Parenthesized print works under both Python 2 and 3.
        print('Produce %s' % x)
        # Hand the value over; the consumer's reply was previously bound
        # to an unused local (`last`), which has been removed.
        con.switch(x)
# Wire the two coroutines together and kick off production of 5 items.
pro = greenlet(producer)
con = greenlet(consumer)
pro.switch(5)
|
from keystone.manage2 import base
from keystone.manage2 import common
from keystone.manage2 import mixins
from keystone.backends import models
@common.arg('--user-id',
    required=True,
    help='identifies the user who can authenticate with this credential')
@common.arg('--tenant-id',
    required=False,
    # typo fixed in user-facing help text: "crednetial" -> "credential"
    help='identifies the tenant upon which the credential is valid')
@common.arg('--type',
    required=True,
    help="credential type (e.g. 'EC2')")
@common.arg('--key',
    required=True)
@common.arg('--secret',
    required=True)
class Command(base.BaseBackendCommand, mixins.DateTimeMixin):
    """Creates a new credential."""

    # pylint: disable=E1101,R0913
    def create_credential(self, user_id, credential_type, key, secret,
            tenant_id=None):
        """Validate the user and tenant, then persist a new credential.

        Returns the created credential object from the backend manager.
        """
        # Both getters validate existence of the referenced objects.
        self.get_user(user_id)
        self.get_tenant(tenant_id)
        obj = models.Credentials()
        obj.user_id = user_id
        obj.tenant_id = tenant_id
        obj.type = credential_type
        obj.key = key
        obj.secret = secret
        return self.credential_manager.create(obj)

    def run(self, args):
        """Process argparse args, and print results to stdout"""
        credential = self.create_credential(user_id=args.user_id,
            tenant_id=args.tenant_id, credential_type=args.type,
            key=args.key, secret=args.secret)
        # Parenthesized print works under both Python 2 and 3
        # (the original used the Python-2-only print statement).
        print(credential.id)
|
#!/usr/bin/env python3
import sys
import numpy as np
from example import AmiciExample
class ExampleDirac(AmiciExample):
    # Example configuration for the AMICI "dirac" model: 2 state
    # variables, 4 parameters, no fixed parameters.
    def __init__(self):
        AmiciExample.__init__( self )
        self.numX = 2
        self.numP = 4
        self.numK = 0
        # log10-transformed parameters; pscale = 2 selects log10 scaling
        self.modelOptions['theta'] = np.log10([1, 0.5, 2, 3])
        self.modelOptions['ts'] = np.linspace(0, 3, 1001)
        self.modelOptions['pscale'] = 2
        self.solverOptions['atol'] = 1e-16
        self.solverOptions['maxsteps'] = 1e4
        self.solverOptions['nmaxevent'] = 10
        self.solverOptions['rtol'] = 1e-8
        # Sensitivities disabled by default; writeSensiForward() below
        # overrides sens_ind/sensi before writing.
        self.solverOptions['sens_ind'] = []
        self.solverOptions['sensi'] = 0
        self.solverOptions['sensi_meth'] = 1
def writeNoSensi(filename):
    """Write the dirac example without sensitivities to *filename*."""
    example = ExampleDirac()
    example.writeToFile(filename, '/model_dirac/nosensi/')
def writeSensiForward(filename):
    """Write the dirac example with forward sensitivities enabled."""
    example = ExampleDirac()
    example.solverOptions['sens_ind'] = np.arange(0, example.numP)
    example.solverOptions['sensi'] = 1
    example.writeToFile(filename, '/model_dirac/sensiforward/')
def main():
    """Command-line entry point: write both example variants to argv[1]."""
    if len(sys.argv) < 2:
        print("Error: Must provide output file as first and only argument.")
        sys.exit(1)
    target = sys.argv[1]
    writeNoSensi(target)
    writeSensiForward(target)


if __name__ == "__main__":
    main()
|
# coding: utf-8
"""
qTest Manager API Version 8.6 - 9.1
qTest Manager API Version 8.6 - 9.1
OpenAPI spec version: 8.6 - 9.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ManualTestLogResource(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, links=None, id=None, test_case_version_id=None, exe_start_date=None, exe_end_date=None, note=None, attachments=None, name=None, planned_exe_time=None, actual_exe_time=None, build_number=None, build_url=None, properties=None, status=None, result_number=None, test_step_logs=None, defects=None):
        """
        ManualTestLogResource - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # attribute name -> swagger type; consumed by to_dict() below
        self.swagger_types = {
            'links': 'list[Link]',
            'id': 'int',
            'test_case_version_id': 'int',
            'exe_start_date': 'datetime',
            'exe_end_date': 'datetime',
            'note': 'str',
            'attachments': 'list[AttachmentResource]',
            'name': 'str',
            'planned_exe_time': 'int',
            'actual_exe_time': 'int',
            'build_number': 'str',
            'build_url': 'str',
            'properties': 'list[PropertyResource]',
            'status': 'StatusResource',
            'result_number': 'int',
            'test_step_logs': 'list[TestStepLogResource]',
            'defects': 'list[LinkedDefectResource]'
        }
        # attribute name -> JSON key used in the API payload
        self.attribute_map = {
            'links': 'links',
            'id': 'id',
            'test_case_version_id': 'test_case_version_id',
            'exe_start_date': 'exe_start_date',
            'exe_end_date': 'exe_end_date',
            'note': 'note',
            'attachments': 'attachments',
            'name': 'name',
            'planned_exe_time': 'planned_exe_time',
            'actual_exe_time': 'actual_exe_time',
            'build_number': 'build_number',
            'build_url': 'build_url',
            'properties': 'properties',
            'status': 'status',
            'result_number': 'result_number',
            'test_step_logs': 'test_step_logs',
            'defects': 'defects'
        }
        # NOTE: constructor assigns the private fields directly, so the
        # setter validations below are not applied to constructor args.
        self._links = links
        self._id = id
        self._test_case_version_id = test_case_version_id
        self._exe_start_date = exe_start_date
        self._exe_end_date = exe_end_date
        self._note = note
        self._attachments = attachments
        self._name = name
        self._planned_exe_time = planned_exe_time
        self._actual_exe_time = actual_exe_time
        self._build_number = build_number
        self._build_url = build_url
        self._properties = properties
        self._status = status
        self._result_number = result_number
        self._test_step_logs = test_step_logs
        self._defects = defects
    @property
    def links(self):
        """
        Gets the links of this ManualTestLogResource.
        :return: The links of this ManualTestLogResource.
        :rtype: list[Link]
        """
        return self._links
    @links.setter
    def links(self, links):
        """
        Sets the links of this ManualTestLogResource.
        :param links: The links of this ManualTestLogResource.
        :type: list[Link]
        """
        self._links = links
    @property
    def id(self):
        """
        Gets the id of this ManualTestLogResource.
        :return: The id of this ManualTestLogResource.
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """
        Sets the id of this ManualTestLogResource.
        :param id: The id of this ManualTestLogResource.
        :type: int
        """
        self._id = id
    @property
    def test_case_version_id(self):
        """
        Gets the test_case_version_id of this ManualTestLogResource.
        ID of the Test Case Version
        :return: The test_case_version_id of this ManualTestLogResource.
        :rtype: int
        """
        return self._test_case_version_id
    @test_case_version_id.setter
    def test_case_version_id(self, test_case_version_id):
        """
        Sets the test_case_version_id of this ManualTestLogResource.
        ID of the Test Case Version
        :param test_case_version_id: The test_case_version_id of this ManualTestLogResource.
        :type: int
        """
        self._test_case_version_id = test_case_version_id
    @property
    def exe_start_date(self):
        """
        Gets the exe_start_date of this ManualTestLogResource.
        Execution start date
        :return: The exe_start_date of this ManualTestLogResource.
        :rtype: datetime
        """
        return self._exe_start_date
    @exe_start_date.setter
    def exe_start_date(self, exe_start_date):
        """
        Sets the exe_start_date of this ManualTestLogResource.
        Execution start date
        :param exe_start_date: The exe_start_date of this ManualTestLogResource.
        :type: datetime
        """
        # required field: None is rejected
        if exe_start_date is None:
            raise ValueError("Invalid value for `exe_start_date`, must not be `None`")
        self._exe_start_date = exe_start_date
    @property
    def exe_end_date(self):
        """
        Gets the exe_end_date of this ManualTestLogResource.
        Execution end date
        :return: The exe_end_date of this ManualTestLogResource.
        :rtype: datetime
        """
        return self._exe_end_date
    @exe_end_date.setter
    def exe_end_date(self, exe_end_date):
        """
        Sets the exe_end_date of this ManualTestLogResource.
        Execution end date
        :param exe_end_date: The exe_end_date of this ManualTestLogResource.
        :type: datetime
        """
        # required field: None is rejected
        if exe_end_date is None:
            raise ValueError("Invalid value for `exe_end_date`, must not be `None`")
        self._exe_end_date = exe_end_date
    @property
    def note(self):
        """
        Gets the note of this ManualTestLogResource.
        Note
        :return: The note of this ManualTestLogResource.
        :rtype: str
        """
        return self._note
    @note.setter
    def note(self, note):
        """
        Sets the note of this ManualTestLogResource.
        Note
        :param note: The note of this ManualTestLogResource.
        :type: str
        """
        self._note = note
    @property
    def attachments(self):
        """
        Gets the attachments of this ManualTestLogResource.
        Test Log attachments
        :return: The attachments of this ManualTestLogResource.
        :rtype: list[AttachmentResource]
        """
        return self._attachments
    @attachments.setter
    def attachments(self, attachments):
        """
        Sets the attachments of this ManualTestLogResource.
        Test Log attachments
        :param attachments: The attachments of this ManualTestLogResource.
        :type: list[AttachmentResource]
        """
        self._attachments = attachments
    @property
    def name(self):
        """
        Gets the name of this ManualTestLogResource.
        Test Run's name
        :return: The name of this ManualTestLogResource.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this ManualTestLogResource.
        Test Run's name
        :param name: The name of this ManualTestLogResource.
        :type: str
        """
        self._name = name
    @property
    def planned_exe_time(self):
        """
        Gets the planned_exe_time of this ManualTestLogResource.
        :return: The planned_exe_time of this ManualTestLogResource.
        :rtype: int
        """
        return self._planned_exe_time
    @planned_exe_time.setter
    def planned_exe_time(self, planned_exe_time):
        """
        Sets the planned_exe_time of this ManualTestLogResource.
        :param planned_exe_time: The planned_exe_time of this ManualTestLogResource.
        :type: int
        """
        # value is constrained to the range [0, 9999999]
        if planned_exe_time is not None and planned_exe_time > 9999999:
            raise ValueError("Invalid value for `planned_exe_time`, must be a value less than or equal to `9999999`")
        if planned_exe_time is not None and planned_exe_time < 0:
            raise ValueError("Invalid value for `planned_exe_time`, must be a value greater than or equal to `0`")
        self._planned_exe_time = planned_exe_time
    @property
    def actual_exe_time(self):
        """
        Gets the actual_exe_time of this ManualTestLogResource.
        :return: The actual_exe_time of this ManualTestLogResource.
        :rtype: int
        """
        return self._actual_exe_time
    @actual_exe_time.setter
    def actual_exe_time(self, actual_exe_time):
        """
        Sets the actual_exe_time of this ManualTestLogResource.
        :param actual_exe_time: The actual_exe_time of this ManualTestLogResource.
        :type: int
        """
        self._actual_exe_time = actual_exe_time
    @property
    def build_number(self):
        """
        Gets the build_number of this ManualTestLogResource.
        Jenkins jobs build number
        :return: The build_number of this ManualTestLogResource.
        :rtype: str
        """
        return self._build_number
    @build_number.setter
    def build_number(self, build_number):
        """
        Sets the build_number of this ManualTestLogResource.
        Jenkins jobs build number
        :param build_number: The build_number of this ManualTestLogResource.
        :type: str
        """
        self._build_number = build_number
    @property
    def build_url(self):
        """
        Gets the build_url of this ManualTestLogResource.
        Jenkins jobs build URL
        :return: The build_url of this ManualTestLogResource.
        :rtype: str
        """
        return self._build_url
    @build_url.setter
    def build_url(self, build_url):
        """
        Sets the build_url of this ManualTestLogResource.
        Jenkins jobs build URL
        :param build_url: The build_url of this ManualTestLogResource.
        :type: str
        """
        self._build_url = build_url
    @property
    def properties(self):
        """
        Gets the properties of this ManualTestLogResource.
        :return: The properties of this ManualTestLogResource.
        :rtype: list[PropertyResource]
        """
        return self._properties
    @properties.setter
    def properties(self, properties):
        """
        Sets the properties of this ManualTestLogResource.
        :param properties: The properties of this ManualTestLogResource.
        :type: list[PropertyResource]
        """
        self._properties = properties
    @property
    def status(self):
        """
        Gets the status of this ManualTestLogResource.
        Test Log status
        :return: The status of this ManualTestLogResource.
        :rtype: StatusResource
        """
        return self._status
    @status.setter
    def status(self, status):
        """
        Sets the status of this ManualTestLogResource.
        Test Log status
        :param status: The status of this ManualTestLogResource.
        :type: StatusResource
        """
        # required field: None is rejected
        if status is None:
            raise ValueError("Invalid value for `status`, must not be `None`")
        self._status = status
    @property
    def result_number(self):
        """
        Gets the result_number of this ManualTestLogResource.
        :return: The result_number of this ManualTestLogResource.
        :rtype: int
        """
        return self._result_number
    @result_number.setter
    def result_number(self, result_number):
        """
        Sets the result_number of this ManualTestLogResource.
        :param result_number: The result_number of this ManualTestLogResource.
        :type: int
        """
        self._result_number = result_number
    @property
    def test_step_logs(self):
        """
        Gets the test_step_logs of this ManualTestLogResource.
        Arrays of Test Step Log, With called test steps, the \"called_test_case_id\" and \"parent_test_step_id\" must be included in request body.
        :return: The test_step_logs of this ManualTestLogResource.
        :rtype: list[TestStepLogResource]
        """
        return self._test_step_logs
    @test_step_logs.setter
    def test_step_logs(self, test_step_logs):
        """
        Sets the test_step_logs of this ManualTestLogResource.
        Arrays of Test Step Log, With called test steps, the \"called_test_case_id\" and \"parent_test_step_id\" must be included in request body.
        :param test_step_logs: The test_step_logs of this ManualTestLogResource.
        :type: list[TestStepLogResource]
        """
        self._test_step_logs = test_step_logs
    @property
    def defects(self):
        """
        Gets the defects of this ManualTestLogResource.
        Array of Defect
        :return: The defects of this ManualTestLogResource.
        :rtype: list[LinkedDefectResource]
        """
        return self._defects
    @defects.setter
    def defects(self, defects):
        """
        Sets the defects of this ManualTestLogResource.
        Array of Defect
        :param defects: The defects of this ManualTestLogResource.
        :type: list[LinkedDefectResource]
        """
        self._defects = defects
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # recursively convert nested models, lists and dicts via their
        # own to_dict() where available
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # equality is attribute-wise via the instance __dict__
        if not isinstance(other, ManualTestLogResource):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
# This module contains the shaders used to render snap point coordinates.
# The shaders for snapping to a vertex.
# Vertex stage: transforms vertices to world space only; the geometry
# stage applies the view-projection transform.
VERT_SHADER_V = """
#version 330
uniform mat4 p3d_ModelMatrix;
in vec4 p3d_Vertex;
void main() {
gl_Position = p3d_ModelMatrix * p3d_Vertex;
}
"""
# Geometry stage: for each triangle facing the camera (or any triangle
# when two_sided is set), emits its three corners as point primitives
# carrying their world-space position in `snap_coords`.
GEOM_SHADER_V = """
#version 330
layout(triangles) in;
// Three points will be generated: 3 vertices
layout(points, max_vertices=3) out;
uniform mat4 p3d_ViewProjectionMatrix;
uniform mat4 p3d_ViewMatrixInverse;
uniform mat4 p3d_ProjectionMatrix;
uniform bool inverted;
uniform bool two_sided;
out vec3 snap_coords;
void main()
{
vec3 P0 = gl_in[0].gl_Position.xyz;
vec3 P1 = gl_in[1].gl_Position.xyz;
vec3 P2 = gl_in[2].gl_Position.xyz;
vec3 positions[3] = vec3[] (P0, P1, P2);
vec3 V0 = P1 - P0;
vec3 V1 = P2 - P1;
vec3 face_normal = inverted ? cross(V1, V0) : cross(V0, V1);
vec3 vec;
if (p3d_ProjectionMatrix[3].w == 1.)
// orthographic lens;
// use inverted camera direction vector
vec = p3d_ViewMatrixInverse[2].xyz;
else
// perspective lens;
// compute vector pointing from any point of triangle to camera origin
vec = p3d_ViewMatrixInverse[3].xyz - P0;
if (two_sided || dot(vec, face_normal) >= 0.) {
// generate points
for (int i = 0; i < 3; ++i)
{
gl_Position = p3d_ViewProjectionMatrix * vec4(positions[i], 1.0);
snap_coords = positions[i];
EmitVertex();
EndPrimitive();
}
}
}
"""
# The shaders for snapping to an edge midpoint.
# Vertex stage: in addition to the world-space transform, passes the
# per-vertex `sides` bitmask through to the geometry stage.
VERT_SHADER_E = """
#version 330
uniform mat4 p3d_ModelMatrix;
in vec4 p3d_Vertex;
in int sides;
out Vertex
{
int side_gen;
} vertex;
void main() {
gl_Position = p3d_ModelMatrix * p3d_Vertex;
vertex.side_gen = sides;
}
"""
# Geometry stage: decodes a 3-bit mask selecting which triangle sides
# are real polygon edges, and emits those sides as line primitives with
# `snap_coords` set to the edge midpoint.
GEOM_SHADER_E = """
#version 330
layout(triangles) in;
// Three lines will be generated: 6 vertices
layout(line_strip, max_vertices=6) out;
uniform mat4 p3d_ViewProjectionMatrix;
uniform mat4 p3d_ViewMatrixInverse;
uniform mat4 p3d_ProjectionMatrix;
uniform bool inverted;
uniform bool two_sided;
in Vertex
{
int side_gen;
} vertex[];
out vec3 snap_coords;
void main()
{
int side_generation, S0, S1, S2;
int sides[3];
// determine which sides should be generated (1) or not (0)
side_generation = vertex[0].side_gen;
S0 = side_generation >> 2;
S1 = (side_generation ^ (S0 << 2)) >> 1;
S2 = side_generation ^ (S0 << 2) ^ (S1 << 1);
sides = int[] (S0, S1, S2);
vec3 P0 = gl_in[0].gl_Position.xyz;
vec3 P1 = gl_in[1].gl_Position.xyz;
vec3 P2 = gl_in[2].gl_Position.xyz;
vec3 positions[4] = vec3[] (P0, P1, P2, P0);
vec3 V0 = P1 - P0;
vec3 V1 = P2 - P1;
vec3 face_normal = inverted ? cross(V1, V0) : cross(V0, V1);
vec3 vec;
if (p3d_ProjectionMatrix[3].w == 1.)
// orthographic lens;
// use inverted camera direction vector
vec = p3d_ViewMatrixInverse[2].xyz;
else
// perspective lens;
// compute vector pointing from any point of triangle to camera origin
vec = p3d_ViewMatrixInverse[3].xyz - P0;
if (two_sided || dot(vec, face_normal) >= 0.) {
// generate sides
for (int i = 0; i < 3; ++i)
{
for (int j = 0; j < sides[i]; ++j)
{
snap_coords = (positions[i] + positions[i + 1]) * .5;
gl_Position = p3d_ViewProjectionMatrix * vec4(positions[i], 1.0);
EmitVertex();
gl_Position = p3d_ViewProjectionMatrix * vec4(positions[i + 1], 1.0);
EmitVertex();
EndPrimitive();
}
}
}
}
"""
# The shaders for snapping to a polygon center.
# Vertex stage: transforms both the vertex and its associated snap
# position (the polygon center) to world space.
VERT_SHADER_P = """
#version 330
uniform mat4 p3d_ModelMatrix;
in vec4 p3d_Vertex;
in vec3 snap_pos;
out Vertex
{
vec3 snap_pos;
} vertex;
void main() {
gl_Position = p3d_ModelMatrix * p3d_Vertex;
vertex.snap_pos = (p3d_ModelMatrix * vec4(snap_pos, 1.)).xyz;
}
"""
# Geometry stage: re-emits each camera-facing triangle (winding fixed up
# when `inverted`), with `snap_coords` set to the polygon center.
GEOM_SHADER_P = """
#version 330
layout(triangles) in;
// One triangles will be generated: 3 vertices
layout(triangle_strip, max_vertices=3) out;
uniform mat4 p3d_ViewProjectionMatrix;
uniform mat4 p3d_ViewMatrixInverse;
uniform mat4 p3d_ProjectionMatrix;
uniform bool inverted;
uniform bool two_sided;
in Vertex
{
vec3 snap_pos;
} vertex[];
out vec3 snap_coords;
void main()
{
vec3 P0, P1, P2;
snap_coords = vertex[0].snap_pos;
if (inverted) {
P0 = gl_in[2].gl_Position.xyz;
P1 = gl_in[1].gl_Position.xyz;
P2 = gl_in[0].gl_Position.xyz;
}
else {
P0 = gl_in[0].gl_Position.xyz;
P1 = gl_in[1].gl_Position.xyz;
P2 = gl_in[2].gl_Position.xyz;
}
vec3 positions[3] = vec3[] (P0, P1, P2);
vec3 V0 = P1 - P0;
vec3 V1 = P2 - P1;
vec3 face_normal = cross(V0, V1);
vec3 vec;
if (p3d_ProjectionMatrix[3].w == 1.)
// orthographic lens;
// use inverted camera direction vector
vec = p3d_ViewMatrixInverse[2].xyz;
else
// perspective lens;
// compute vector pointing from any point of triangle to camera origin
vec = p3d_ViewMatrixInverse[3].xyz - P0;
if (two_sided || dot(vec, face_normal) >= 0.) {
for (int i = 0; i < 3; ++i)
{
gl_Position = p3d_ViewProjectionMatrix * vec4(positions[i], 1.0);
EmitVertex();
}
EndPrimitive();
}
}
"""
# Fragment stage: encodes the interpolated snap position (plus a type
# tag in the alpha channel) into the color output for CPU readback.
FRAG_SHADER = """
#version 330
uniform float snap_type_id;
in vec3 snap_coords;
layout(location = 0) out vec4 out_color;
void main() {
// output the snap point coordinates as a color value
out_color = vec4(snap_coords, snap_type_id);
}
"""
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
The MIT License (MIT)
Copyright (c) 2012-2013 Karsten Jeschkies <jeskar@web.de>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
'''
Created on 06.09.2012
@author: karsten jeschkies <jeskar@web.de>
'''
from datetime import datetime
import logging
from models.mongodb_models import *
from mongoengine import *
import time
import unittest
logger = logging.getLogger("unittesting")
#Connect to test database
# NOTE: these are integration tests — they require a MongoDB instance
# listening on localhost:20545.
connect("nyan_test", port = 20545)
class UserFetchCase(unittest.TestCase):
    """Integration tests: saving and fetching User documents."""

    def setUp(self):
        logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                            level=logging.DEBUG)
        #Fill Database
        #If adding works is checked in tests
        #add user
        karsten = User(name = "Karsten Jeschkies", email = "jeskar2@web.de",
                       password= "1234")
        karsten.save()

    def tearDown(self):
        #remove user
        karsten = User.objects(name = "Karsten Jeschkies")
        karsten.delete(safe=True)

    def test_fetch_user(self):
        # fetch karsten from the database (translated from German)
        karsten = User.objects(name = "Karsten Jeschkies").first()
        self.assertIsNotNone(karsten)

    def test_fail_find(self):
        # querying a non-existent name must yield no document
        no_user = User.objects(name="not found").first()
        self.assertIsNone(no_user)
class VendorFetchCase(unittest.TestCase):
    """Integration tests: saving and fetching Vendor documents."""

    def setUp(self):
        logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                            level=logging.DEBUG)
        #Fill Database
        #If adding works is checked in tests
        #add vendor
        vendor = Vendor(name="techcrunch", config="vendor config")
        vendor.save()

    def tearDown(self):
        vendor = Vendor.objects(name = "techcrunch")
        vendor.delete(safe=True)

    def test_fetch_vendor(self):
        techcrunch = Vendor.objects(name = "techcrunch").first()
        self.assertIsNotNone(techcrunch)

    def test_fail_find(self):
        no_vendor = Vendor.objects(name = "not found").first()
        self.assertIsNone(no_vendor)
class ArticleFetchCase(unittest.TestCase):
    """Integration tests: Article documents with embedded Features."""

    def setUp(self):
        #add vendor
        vendor = Vendor(name="techcrunch", config="vendor config")
        vendor.save()
        #create features
        features = Features(version = '1.0')
        features.data = [(1, 0.5), (3, 0.6)]
        #add article
        article = Article(vendor = vendor, url ="http://www.techcrunch.com",
                          author ="MG Siegler", clean_content = "Apple rocks!",
                          date = datetime.now())
        article.features = features
        article.save()
        # remember the id so the fetch tests can query by primary key
        self._id = article.id

    def tearDown(self):
        Vendor.objects(name="techcrunch").delete()
        Article.objects(author="MG Siegler").delete()

    def test_fetch_article(self):
        article = Article.objects(id = self._id).first()
        self.assertIsNotNone(article)
        self.assertIsNotNone(article.features)
        self.assertEqual(article.features.version, '1.0')

    def test_fetch_by_date(self):
        # NOTE(review): real 3-second sleep (slow test) — it exists so
        # that datetime.now() is strictly later than the article's date
        time.sleep(3)
        articles = Article.objects(date__lt=datetime.now())
        self.assertGreaterEqual(len(articles), 1)

    def test_features_data(self):
        article = Article.objects(id = self._id).first()
        #Tuples are converted to lists by mongodb
        self.assertEqual([[1, 0.5], [3, 0.6]], article.features.data)
class SubscriptionsTestCase(unittest.TestCase):
    """Integration tests: a user's vendor subscriptions and the articles
    reachable through them."""

    def setUp(self):
        #add vendor
        vendor = Vendor(name="techcrunch", config="vendor config")
        vendor.save()
        #create features
        features = Features(version = '1.0')
        features.data = [(1, 0.5), (3, 0.6)]
        #add article
        article = Article(vendor = vendor, url ="http://www.techcrunch.com",
                          author ="MG Siegler", clean_content = "Apple rocks!")
        article.features = features
        article.save()
        #add user
        karsten = User(name = "Karsten Jeschkies", email = "jeskar@web.de",
                       password= "1234")
        karsten.save()
        #add subscription
        karsten.subscriptions.append(vendor)
        karsten.save()

    def tearDown(self):
        Vendor.objects().delete()
        User.objects().delete()

    def test_fetch_subscriptions(self):
        user = User.objects(name="Karsten Jeschkies").first()
        vendor = Vendor.objects(name="techcrunch").first()
        self.assertIsNotNone(user)
        self.assertEqual(len(user.subscriptions), 1)
        self.assertEqual(vendor.id, user.subscriptions[0].id)

    def test_add_and_remove_subscription(self):
        vendor = Vendor.objects(name="techcrunch").first()
        new_vendor = Vendor(name="mashable")
        new_vendor.save()
        # atomic update on the server side via add_to_set
        User.objects(name="Karsten Jeschkies").update_one(add_to_set__subscriptions=new_vendor)
        #retrieve user from db to see if new_vendor was saved
        user = User.objects(name="Karsten Jeschkies").first()
        self.assertIn(new_vendor, user.subscriptions)
        #remove new_vendor
        User.objects(name="Karsten Jeschkies").update_one(pull__subscriptions=new_vendor)
        user.reload()
        self.assertNotIn(new_vendor, user.subscriptions)
        self.assertIn(vendor, user.subscriptions)

    def test_get_article_for_subscription(self):
        user = User.objects(name="Karsten Jeschkies").first()
        articles = Article.objects(vendor__in=user.subscriptions)
        self.assertEqual(len(articles), 1)
        self.assertEqual(articles[0].author, "MG Siegler")
class FeedbackTestCase(unittest.TestCase):
    """Tests saving and querying ReadArticleFeedback documents."""

    def setUp(self):
        # One article, one user, and one feedback entry linking them.
        #add article
        article = Article(url ="http://www.techcrunch.com",
                          author ="MG Siegler", clean_content = "Apple rocks!")
        article.save()

        #add user
        user = User(name = "Karsten Jeschkies", password="1234",
                    email="jeskar@web.de")
        user.save()

        #add feedback
        feedback = ReadArticleFeedback(user_id = user.id,
                                       article=article, score = 1.0)
        feedback.save()

    def tearDown(self):
        Article.objects().delete()
        User.objects().delete()
        Feedback.objects().delete()

    def test_get_feedback(self):
        user = User.objects(name="Karsten Jeschkies").first()

        # Querying via the base Feedback subclass by user id.
        feedback = ReadArticleFeedback.objects(user_id = user.id)

        self.assertEqual(feedback[0].score, 1.0)
class RenkedArticlesTestCase(unittest.TestCase):
    """Tests querying RankedArticle documents by user and rating.

    NOTE(review): the class name looks like a typo for
    'RankedArticlesTestCase'; renaming is out of scope for a doc pass.
    """

    def setUp(self):
        user = User(name="Karsten Jeschkies", password="1234",
                    email="jeskar@web.de")
        user.save()

        #ranked article 1
        ranked_article_1 = RankedArticle(user_id = user.id, rating=0.6)
        ranked_article_1.save()

        #ranked article 2
        ranked_article_2 = RankedArticle(user_id = user.id, rating=0.4)
        ranked_article_2.save()

    def tearDown(self):
        User.objects().delete()
        RankedArticle.objects().delete()

    def test_get_top_ranked_articles(self):
        user = User.objects(name="Karsten Jeschkies").first()

        # Generator of ratings above the 0.5 threshold (single-use).
        top_articles = (a.rating for a in RankedArticle.objects(user_id = user.id) if a.rating > 0.5)

        self.assertIn(0.6, top_articles)
class UserTestCase(unittest.TestCase):
    """Tests the link between a User and their learned UserModel profile."""

    def setUp(self):
        user = User(name="Karsten Jeschkies", email="jeskar@web.de",
                    password ="1234")
        user.save()

        learned_profile = UserModel()
        learned_profile.data = [(1, 0.5), (3, 0.6)]
        learned_profile.version='1.0'
        learned_profile.user_id = user.id
        learned_profile.save()

    def tearDown(self):
        User.objects().delete()
        # Bug fix: the UserModel saved in setUp was never cleaned up and
        # leaked into subsequent test cases.
        UserModel.objects().delete()

    def test_get_learned_profile(self):
        user = User.objects(name="Karsten Jeschkies").first()
        learned_profile = UserModel.objects(user_id = user.id).first()

        # Tuples are stored back as lists by MongoDB.
        self.assertIn([1, 0.5], learned_profile.data)
if __name__ == "__main__":
    # Run every TestCase defined in this module.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
|
from apps.terreno.models import Medida
from django import forms
class MedidaForm(forms.ModelForm):
    """ModelForm for Medida exposing the length, width and total-surface
    fields, rendered with Bootstrap's ``form-control`` class."""

    class Meta:
        model = Medida
        fields = ['largomedida',
                  'anchomedida',
                  'superficietotal']
        # Bug fix: ``labels`` must be keyed by model field name. The
        # original keys ('Largo', 'Ancho', 'Superficie') matched no field,
        # so Django silently ignored every label.
        labels = {'largomedida': "Ingrese el Largo",
                  'anchomedida': "Ingrese el Ancho",
                  'superficietotal': "Medicion Superficie"
                  }
        widgets = {
            'largomedida': forms.TextInput(),
            'anchomedida': forms.TextInput(),
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply Bootstrap styling to every rendered widget.
        for field in self.fields:
            self.fields[field].widget.attrs.update({
                'class': 'form-control'
            })
from modeflip.utils.config import get_configuration
from modeflip.utils.mongo import MongoManager
from modeflip.models.statistics import *
# Seed a default Statistics document for each known display id into the
# production configuration database.
local_config = get_configuration('production')
get_database = MongoManager(local_config, force_load=True)
config_db = get_database('mf_config')
sc = StatisticsConfig(config_db)

dids = [1, 2, 3]
# Plain loop: the original used a list comprehension purely for its side
# effects, which builds and discards a throwaway list.
for did in dids:
    sc.set(Statistics(did=did))
#!/usr/bin/env python3
"""Quickly identify repos that are active and inactive from a user's personal...
repos, starred and watching
Note that this is the REST version and is less optimised than the Graph version
This has been kept so a comparison can be made
"""
from __future__ import annotations
from typing import Any
from metprint import LogType
from lib import github_rest
from lib.utils import getUsernameAndLifespan, printf
def forEachRepo(sourceRepo: dict[Any, Any]):
    """Report whether ``sourceRepo`` is alive and list its alive, newer forks.

    Relies on the module-level ``death`` lifespan set in the script body.
    """
    printStr = ["dead", LogType.ERROR]
    if github_rest.sourceAlive(sourceRepo, death):
        printStr = ["alive", LogType.SUCCESS]
    printf.logPrint(f"Source repo is {printStr[0]}! Head to {sourceRepo['html_url']}", printStr[1])

    # Get list of forked repos that are alive and newer than the source repo.
    # Bug fix: use the sourceRepo parameter instead of the global loop
    # variable 'repo', which only matched by accident.
    aliveRepos, forkedRepos = github_rest.getListOfAliveForks(sourceRepo, death)
    printf.logPrint(
        f"{len(aliveRepos)} out of {len(forkedRepos)} Forked repos are alive and newer than the source!",
        LogType.BOLD,
    )
    for aliveRepo in aliveRepos:
        github_rest.printRepo(aliveRepo)
# Ask which repo collection to scan, then run the liveness report on each.
username, death = getUsernameAndLifespan()
choice = input("User repos, watched or starred (R/w/s)>")

# Map the (case-insensitive) answer to the matching GitHub REST endpoint;
# anything other than 's' or 'w' falls back to the user's own repos.
if choice.lower() == "s":
    endpoint = "starred"
elif choice.lower() == "w":
    endpoint = "subscriptions"
else:
    endpoint = "repos"

for repo in github_rest.getListOfUserRepos(username, endpoint):
    forEachRepo(repo)
|
'''
Created on Feb 5, 2013
@author: Brad
'''
from solver import Solver, has_count, has_size, SolvedSet
class JellyFishSolver(Solver):
    """Jellyfish pattern solver.

    A jellyfish exists when a candidate is confined to the same four
    columns across exactly four rows (type 1 "Row") or the same four rows
    across four columns (type 2 "Col"); the candidate can then be removed
    from every other cell of those columns (rows).
    """

    NAME = "JellyFish"
    TYPES = {1:"Row",2:"Col"}

    def find(self, board, do_all = False):
        """Return SolvedSets for jellyfish eliminations on ``board``.

        Stops at the first productive pattern unless ``do_all`` is True.
        """
        solved_sets = []

        # Row-based jellyfish: per row, candidates appearing 2-4 times.
        possible = [has_count(has_size(board.get_row(i),2,None), 2, 4) for i in range(board.N) ]
        # Four distinct rows (l > k > j > i) sharing one candidate whose
        # occurrences span exactly four columns.
        jellyfish = [(k1,v1+v2+v3+v4) for i,p1 in enumerate(possible) for j,p2 in enumerate(possible) for k,p3 in enumerate(possible) for l,p4 in enumerate(possible) \
                     for k1,v1 in p1.items() for k2,v2 in p2.items() for k3,v3 in p3.items() for k4,v4 in p4.items() if p1 and p2 and p3 and p4 and l > k > j > i \
                     and k1 == k2 == k3 == k4 and len(set([c1.j for c1 in v1] + [c2.j for c2 in v2] + [c3.j for c3 in v3] + [c4.j for c4 in v4])) == 4]
        for candidate, jellyfish_cells in jellyfish:
            removed = []
            for col in set([cell.j for cell in jellyfish_cells]):
                removed += [cell for cell in board.get_col(col) if cell not in jellyfish_cells and cell.check_remove(set([candidate]))]
            if removed:
                solved_sets += [SolvedSet(JellyFishSolver(0,1), jellyfish_cells, set([candidate]), removed)]
                if not do_all:
                    return solved_sets

        # Column-based jellyfish: the transposed search.
        possible = [has_count(has_size(board.get_col(i),2,None), 2, 4) for i in range(board.N) ]
        jellyfish = [(k1,v1+v2+v3+v4) for i,p1 in enumerate(possible) for j,p2 in enumerate(possible) for k,p3 in enumerate(possible) for l,p4 in enumerate(possible) \
                     for k1,v1 in p1.items() for k2,v2 in p2.items() for k3,v3 in p3.items() for k4,v4 in p4.items() if p1 and p2 and p3 and p4 and l > k > j > i \
                     and k1 == k2 == k3 == k4 and len(set([c1.i for c1 in v1] + [c2.i for c2 in v2] + [c3.i for c3 in v3] + [c4.i for c4 in v4])) == 4]
        for candidate, jellyfish_cells in jellyfish:
            removed = []
            for row in set([cell.i for cell in jellyfish_cells]):
                # Bug fix: accumulate with '+=' — the original reassigned
                # 'removed' each iteration, silently dropping eliminations
                # from every row except the last one inspected.
                removed += [cell for cell in board.get_row(row) if cell not in jellyfish_cells and cell.check_remove(set([candidate]))]
            if removed:
                solved_sets += [SolvedSet(JellyFishSolver(0,2), jellyfish_cells, set([candidate]), removed)]
                if not do_all:
                    return solved_sets

        return solved_sets
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Sergey Bogatyrets
# See LICENSE.md for details.
import functools
from copy import deepcopy
from OPi.constants import BOARD, BCM, SUNXI, CUSTOM
class _sunXi(object):
def __getitem__(self, value):
offset = ord(value[1]) - 65
pin = int(value[2:])
assert value[0] == "P"
assert 0 <= offset <= 25
assert 0 <= pin <= 31
return (offset * 32) + pin
# Lookup tables from each supported numbering mode to the SoC GPIO number.
# BOARD/BCM are plain dicts; SUNXI computes the number from the pin label.
_pin_map = {
    # Physical pin to actual GPIO pin
    BOARD: {
        3: 12,
        5: 11,
        7: 6,
        8: 13,
        10: 14,
        11: 1,
        12: 110,
        13: 0,
        15: 3,
        16: 68,
        18: 71,
        19: 64,
        21: 65,
        22: 2,
        23: 66,
        24: 67,
        26: 21,
        27: 19,
        28: 18,
        29: 7,
        31: 8,
        32: 200,
        33: 9,
        35: 10,
        36: 201,
        37: 20,
        38: 198,
        40: 199
    },
    # BCM pin to actual GPIO pin
    BCM: {
        2: 12,
        3: 11,
        4: 6,
        6: 1,
        7: 0,
        8: 3,
        10: 64,
        11: 65,
        12: 66,
        14: 19,
        15: 7,
        18: 10,
        19: 20,
        24: 13,
        25: 14,
        26: 110,
        28: 68,
        29: 71,
        31: 2,
        32: 67,
        33: 21,
        34: 18,
        36: 200,
        38: 201,
        39: 198,
        40: 199
    },
    # Computed from labels like "PA3" (see _sunXi.__getitem__)
    SUNXI: _sunXi(),
    # User defined, initialized as empty
    CUSTOM: {}
}
def set_custom_pin_mappings(mappings):
    """Install a user-supplied channel -> GPIO table for CUSTOM mode.

    A deep copy is stored so later mutation of the caller's mapping cannot
    change the registered table.
    """
    _pin_map[CUSTOM] = deepcopy(mappings)


def get_gpio_pin(mode, channel):
    """Translate ``channel`` in numbering ``mode`` to the SoC GPIO number."""
    assert mode in (BOARD, BCM, SUNXI, CUSTOM)
    return _pin_map[mode][channel]


# Convenience translators with the numbering mode pre-bound.
bcm = functools.partial(get_gpio_pin, BCM)
board = functools.partial(get_gpio_pin, BOARD)
sunxi = functools.partial(get_gpio_pin, SUNXI)
custom = functools.partial(get_gpio_pin, CUSTOM)
|
import requests
import json
import xmltodict
import datetime
import calendar
import os.path
import os
from pathlib import Path
import pandas as pd
from openpyxl import Workbook
class Dolar(object):
    """Fetches daily USD/BRL quotes from the Banco Central do Brasil
    currency-converter REST endpoint."""

    def __init__(self):
        # Converts 1 unit of currency 790 (BRL) into 220 (USD) on a date.
        self._url = 'https://www3.bcb.gov.br/bc_moeda/rest/converter/1/1/790/220/'

    def get_value(self, year, month, day):
        """Return ('dd-mm-yyyy', rate-as-string) for one date.

        Returns ('', '--') when the market had no closing quote that day
        (the endpoint answers with a non-200 status).
        """
        header = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'}
        page = requests.get(f'{self._url}{year}-{month}-{day}', headers=header)
        if page.status_code == 200:
            valor = json.loads(json.dumps(xmltodict.parse(page.content)))
            print(valor['valor-convertido'])
            # The API converts BRL->USD; invert to get USD priced in BRL.
            return f'{day}-{month}-{year}' , "%.4f" %(1 / float(valor['valor-convertido']))
        return '','--' # dia sem fechamento de cotação

    def get_range_datas(self):
        """Return (year, month, last_day) for the previous calendar month.

        Bug fix: the original computed ``today().month - 1``, which yields
        0 in January and made calendar.monthrange() raise. Stepping back
        from the first day of the current month handles the year boundary.
        """
        first_of_month = datetime.date.today().replace(day=1)
        last_of_previous = first_of_month - datetime.timedelta(days=1)
        # last_of_previous is the final day of the previous month, so its
        # .day is exactly that month's length.
        return last_of_previous.year, last_of_previous.month, last_of_previous.day

    def get_dict_data(self, year, month, last_day):
        """Return {'dd-mm-yyyy': rate} for every trading day of the month."""
        valores = dict()
        for day in range(1, last_day + 1):
            data, valor = self.get_value(year, month, day)
            if valor != "--":
                valores[data] = valor
            print('.')  # progress indicator
        return valores
class Arquivo(object):
    """Writes the run log (Log.txt) and quote spreadsheet (Valores.xlsx)
    under C:/CotacaoDolar, creating the directory on first use."""

    def __init__(self):
        self._local = Path('C:/CotacaoDolar')
        self._path_txt = Path('C:/CotacaoDolar/Log.txt')
        self._path_xlsx = Path('C:/CotacaoDolar/Valores.xlsx')
        if not os.path.exists(self._local):
            os.makedirs(self._local)
            print('criou o arquivo')
        self.arquivo_txt = open(self._path_txt, 'w')
        self.arquivo_txt.write('Início da execução \n')

    def set_valores_log(self, dict):
        """Append max/min/mean statistics for the period to the log.

        ``dict`` maps date strings to rate strings. (The parameter name
        shadows the builtin; kept for backward compatibility.)
        """
        if dict:
            # min()/max()/sum() builtins replace the original manual scan
            # with magic sentinels (9999.99 / -1.99) that also shadowed the
            # builtin 'sum'.
            values = [float(value) for value in dict.values()]
            higher = max(values)
            lower = min(values)
            self.arquivo_txt.write('Variação de valores no período:\n')
            self.arquivo_txt.write(f'Máxima - {higher}\n')
            self.arquivo_txt.write(f'Mínima - {lower}\n')
            self.arquivo_txt.write(f'Média - {"%.4f" % (sum(values) / len(dict))} \n')

    def salva_txt(self):
        """Write the closing line and release the log file handle."""
        self.arquivo_txt.write(f'Fim da execução - {datetime.datetime.now()}')
        self.arquivo_txt.close()
        print('Arquivo gerado em C:/CotacaoDolar')

    def build_xlsx(self, dict):
        """Dump {date: rate} pairs into Valores.xlsx, one row per day."""
        wb = Workbook()
        dest_filename = self._path_xlsx
        ws3 = wb.active
        ws3.title = "Cotação Dólar"
        index = 0
        for data, valor in dict.items():
            index += 1  # openpyxl rows are 1-based
            _ = ws3.cell(column=1, row=index, value=data)
            _ = ws3.cell(column=2, row=index, value=valor)
        wb.save(filename = dest_filename)

    def load_from_xlsx(self):
        """Read Valores.xlsx back into a {date: rate} dict of strings."""
        dfs = pd.read_excel(self._path_xlsx, sheet_name=None, header=None)
        novo = dfs['Cotação Dólar']
        dc = {}
        # Column 0 holds dates, column 1 the rates.
        for i in range(len(novo[0])):
            dc[str(novo[0][i])] = str(novo[1][i])
        return dc
from django import template
register = template.Library()
@register.filter
def richtext_isempty(value):
    """
    Returns True if a value is None, an empty string, or empty paragraph tags
    Working around known issue https://github.com/wagtail/wagtail/issues/3111
    with modification of workaround suggested in
    https://github.com/wagtail/wagtail/issues/4549#issuecomment-500568445.
    Said workaround only worked for RichTextFields. RichTextBlock values
    (wagtail.core.rich_text.RichText) passed in require accessing the `source`
    attribute for the comparison.

    Bug fix: the original stripped only space characters, so paragraphs
    containing other whitespace (newlines, tabs) were not detected as
    empty even though the docstring promised whitespace handling.
    ``"".join(x.split())`` removes all whitespace before the comparison.
    """
    blank_values = [None, '', '<p></p>']
    # RichTextBlock values carry their markup on .source; RichTextFields
    # pass the string directly.
    text = value.source if hasattr(value, 'source') else value
    return (
        text is None
        or "".join(text.split()) in blank_values
    )
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
__author__ = 'lycheng'
__email__ = "lycheng997@gmail.com"
from public import ListNode, TreeNode
from public import list_to_linked, linked_to_list
import unittest
class Solution(unittest.TestCase):
def search(self, A, target):
l, r = 0, len(A) - 1
while l <= r:
mid = (l + r) / 2
if target == A[mid]:
return True
if A[mid] > A[l]:
if A[l] <= target and target < A[mid]:
r = mid - 1
else:
l = mid + 1
elif A[mid] < A[l]:
if A[mid] > target or target >= A[l]:
r = mid - 1
else:
l = mid + 1
else:
l += 1
return False
def test_search(self):
src = [5, 1, 3]
self.assertEqual(self.search(src, 3), True)
def insertionSortList(self, head):
if not head or not head.next:
return head
pre_head = ListNode(-99999999999)
pre_head.next = head
it = head
while it.next:
if it.next.val < it.val:
pre = pre_head
while pre.next.val < it.next.val:
pre = pre.next
tmp = it.next
it.next = tmp.next
tmp.next = pre.next
pre.next = tmp
else:
it = it.next
return pre_head.next
def test_insertionSortList(self):
li = [5, 4, 3, 2, 1]
rv = self.insertionSortList(list_to_linked(li))
self.assertEqual(linked_to_list(rv), [1, 2, 3, 4, 5])
def grayCode(self, n):
if not n:
return [0]
rv = []
limit = 1 << n
for i in xrange(limit):
rv.append(i ^ (i >> 1))
return rv
def test_grayCode(self):
rv = sorted(self.grayCode(0))
self.assertEqual([0], rv)
rv = sorted(self.grayCode(1))
self.assertEqual([0, 1], rv)
rv = sorted(self.grayCode(2))
self.assertEqual([0, 1, 2, 3], rv)
rv = sorted(self.grayCode(3))
self.assertEqual(sorted([0,1,3,2,6,7,5,4]), rv)
def singleNumber(self, A):
num_len = 32
bits = [0 for i in range(num_len)]
for x in xrange(num_len):
count = 0
for num in A:
count += ((num >> x) & 1)
bits[num_len - 1 - x] = count % 3
neg = False
if bits[0]:
neg = True
if neg:
bits = "".join([str(1 - bit) for bit in bits])
result = -(int(bits, 2) + 1)
else:
bits = "".join([str(bit) for bit in bits])
result = int(bits, 2)
return result
def test_singleNumber(self):
src = [1, 1, 1, 2, 2, 2, 3]
self.assertEqual(3, self.singleNumber(src))
src = [-2,-2,1,1,-3,1,-3,-3,-4,-2]
self.assertEqual(-4, self.singleNumber(src))
def maxArea(self, height):
beg = 0
end = len(height) - 1
result = min(height[beg], height[end]) * (end - beg)
while beg < end:
cur = min(height[beg], height[end]) * (end - beg)
if cur > result:
result = cur
if height[beg] < height[end]:
beg += 1
else:
end -= 1
return result
def test_maxArea(self):
h = [1, 2]
self.assertEqual(1, self.maxArea(h))
h = [1, 2, 3, 4, 5, 6, 7, 8]
self.assertEqual(16, self.maxArea(h))
def divide(self, dividend, divisor):
if not dividend:
return 0
sign = 1 if (dividend > 0 and divisor > 0) or (dividend < 0 and divisor < 0) else -1
dividend = abs(dividend)
divisor = abs(divisor)
k = 0
tmp = divisor
while dividend > tmp:
tmp <<= 1
k += 1
if tmp == dividend:
return (1 << k) * sign
k -= 1
tmp >>= 1
rv = 0
while dividend >= divisor:
if dividend >= tmp:
rv += 1 << k
dividend -= tmp
tmp >>= 1
k -= 1
return rv * sign
def test_divide(self):
self.assertEqual(2, self.divide(4, 2))
self.assertEqual(7, self.divide(14, 2))
self.assertEqual(7, self.divide(15, 2))
self.assertEqual(-1, self.divide(-1, 1))
self.assertEqual(0, self.divide(1, 2))
self.assertEqual(2147483647, self.divide(2147483647, 1))
self.assertEqual(715827882, self.divide(2147483647, 3))
self.assertEqual(6, self.divide(19, 3))
def setZeroes(self, matrix):
clear_first_row = 0 in matrix[0]
clear_first_column = 0 in [row[0] for row in matrix]
for r_idx, _v in enumerate(matrix):
for c_idx, _v in enumerate(matrix[r_idx]):
if matrix[r_idx][c_idx]:
continue
matrix[r_idx][0] = 0
matrix[0][c_idx] = 0
for idx, val in enumerate(matrix[0]):
if val or not idx:
continue
for j in range(len(matrix)):
matrix[j][idx] = 0
for idx, val in enumerate([row[0] for row in matrix]):
if val or not idx:
continue
for j in range(len(matrix[0])):
matrix[idx][j] = 0
if clear_first_row:
for idx, val in enumerate(matrix[0]):
matrix[0][idx] = 0
if clear_first_column:
for idx, val in enumerate([row[0] for row in matrix]):
matrix[idx][0] = 0
return matrix
def test_setZeroes(self):
matrix = [[0]]
self.assertEqual([[0]], self.setZeroes(matrix))
matrix = [
[0, 1, 2],
[1, 0, 2],
[3, 1, 2],
]
result = [
[0, 0, 0],
[0, 0, 0],
[0, 0, 2],
]
self.assertEqual(result, self.setZeroes(matrix))
matrix = [
[1, 1, 2],
[1, 0, 2],
[3, 1, 2],
]
result = [
[1, 0, 2],
[0, 0, 0],
[3, 0, 2],
]
self.assertEqual(result, self.setZeroes(matrix))
def longestPalindrome(self, s):
# len_s = len(s)
# if len_s <= 1:
# return s
# m = [[False] * len_s for i in s]
# max_len = 0
# beg = 0
# end = 1
# for j in range(1, len_s):
# for i in range(j):
# m[i][j] = (s[i] == s[j]) and (j - i < 2 or m[i+1][j-1])
# if m[i][j] and j - i + 1 > max_len:
# beg = i
# end = j
# max_len = j - i + 1
# # use for j - i == 2
# m[j][j] = True
# return s[beg:end+1]
# the code above time Exceeded
def get_longest_palindrome(self, l, r):
while l >= 0 and r < len(s) and s[l] == s[r]:
l -= 1
r += 1
return s[l+1: r]
rv = ''
for i in range(len(s)):
s1 = get_longest_palindrome(s, i, i)
if len(s1) > len(rv):
rv = s1
s2 = get_longest_palindrome(s, i, i + 1)
if len(s2) > len(rv):
rv = s2
return rv
def test_longestPalindrome(self):
s = '123321'
self.assertEqual(s, self.longestPalindrome(s))
s = '12321'
self.assertEqual(s, self.longestPalindrome(s))
s = 'aaaabaaa'
self.assertEqual('aaabaaa', self.longestPalindrome(s))
s = 'abb'
self.assertEqual('bb', self.longestPalindrome(s))
def combine(self, n, k):
if n <= 0 or n < k:
return []
rv = []
def get_combine(idx, t):
if len(t) == k:
rv.append(t[:])
return
for i in range(idx, n+1):
t.append(i)
get_combine(i+1, t)
t.pop()
get_combine(1, [])
return rv
def connect(self, root):
if not root:
return
next_node = root.next
while next_node:
if next_node.left:
next_node = next_node.left
break
if next_node.right:
next_node = next_node.right
break
next_node = next_node.next
if root.right:
root.right.next = next_node
if root.left:
root.left.next = root.right if root.right else next_node
self.connect(root.right)
self.connect(root.left)
def test_connects(self):
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.left = TreeNode(4)
root.right.right = TreeNode(5)
self.connect(root)
self.assertEqual(root.left.left.next.val, 5)
if __name__ == "__main__":
    # Run every test_* method on the Solution class.
    unittest.main()
|
import pytest
from dbks.util import *
@pytest.mark.parametrize(
    "src_dict, tgt_dict, result",
    [
        # Flat dicts: equal values match; a changed value, an extra source
        # key, or a renamed key does not. Extra keys in the target are OK.
        ({"a": "A", "num": 0}, {"a": "A", "num": 0}, True),
        ({"a": "B", "num": 0}, {"a": "A", "num": 0}, False),
        ({"a": "A", "b": "B", "num": 0}, {"a": "A", "num": 0}, False),
        ({"a": "A", "num": 0}, {"a": "A", "b": "B", "num": 0}, True),
        ({"a": "A", "num": 0}, {"b": "B", "num": 0}, False),
        # Nested dict values are compared deeply.
        (
            {"a": "A", "num": 0, "c": {"d": 1}},
            {"a": "A", "num": 0, "c": {"d": 1}},
            True,
        ),
        (
            {"a": "A", "num": 0, "c": {"d": 1}},
            {"a": "A", "num": 0, "c": {"d": 2}},
            False,
        ),
        (
            {"a": "A", "num": 0, "c": {"d": 1}},
            {"b": "B", "num": 0, "c": {"d": 1}},
            False,
        ),
        (
            {"a": "A", "num": 0, "c": {"d": 1}},
            {"b": "B", "num": 0, "c": {"d": 2}},
            False,
        ),
        # Lists inside nested dicts must match element for element.
        (
            {"a": "A", "num": 0, "c": {"d": ["e"]}},
            {"a": "A", "num": 0, "c": {"d": ["e"]}},
            True,
        ),
        (
            {"a": "A", "num": 0, "c": {"d": ["e"]}},
            {"a": "A", "num": 0, "c": {"d": ["e", "f"]}},
            False,
        ),
        (
            {"a": "A", "num": 0, "c": {"d": ["e"]}},
            {"b": "B", "num": 0, "c": {"d": ["e"]}},
            False,
        ),
        (
            {"a": "A", "num": 0, "c": {"d": ["e"]}},
            {"b": "B", "num": 0, "c": {"d": ["e", "f"]}},
            False,
        ),
        # Two levels of dict nesting.
        (
            {"a": "A", "num": 0, "c": {"d": {"e": 1}}},
            {"a": "A", "num": 0, "c": {"d": {"e": 1}}},
            True,
        ),
        (
            {"a": "A", "num": 0, "c": {"d": {"e": 1}}},
            {"a": "A", "num": 0, "c": {"d": {"e": 2}}},
            False,
        ),
        (
            {"a": "A", "num": 0, "c": {"d": {"e": 1}}},
            {"b": "B", "num": 0, "c": {"d": {"e": 1}}},
            False,
        ),
        (
            {"a": "A", "num": 0, "c": {"d": {"e": 1}}},
            {"b": "B", "num": 0, "c": {"d": {"e": 2}}},
            False,
        ),
        # Lists nested two dict levels deep.
        (
            {"a": "A", "num": 0, "c": {"d": {"e": ["f"]}}},
            {"a": "A", "num": 0, "c": {"d": {"e": ["f"]}}},
            True,
        ),
        (
            {"a": "A", "num": 0, "c": {"d": {"e": ["f"]}}},
            {"a": "A", "num": 0, "c": {"d": {"e": ["f", "g"]}}},
            False,
        ),
        (
            {"a": "A", "num": 0, "c": {"d": {"e": ["f"]}}},
            {"b": "B", "num": 0, "c": {"d": {"e": ["f"]}}},
            False,
        ),
        (
            {"a": "A", "num": 0, "c": {"d": {"e": ["f"]}}},
            {"b": "B", "num": 0, "c": {"d": {"e": ["f", "g"]}}},
            False,
        ),
    ],
)
def test_util_same_as_target(src_dict, tgt_dict, result):
    """same_as_target(): a source matches iff every source key exists in
    the target with a deeply-equal value; extra target keys are ignored."""
    assert same_as_target(src_dict, tgt_dict) == result
|
"""
Create climate stripes version 1
Reference : https://matplotlib.org/matplotblog/posts/warming-stripes/
Author : Zachary M. Labe
Date : 18 January 2021
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection
from matplotlib.colors import ListedColormap
import cmocean
import pandas as pd
import wget
directorydata = '/Users/zlabe/Data/BEST/States/'
directoryfigure = '/Users/zlabe/Documents/Research/Visualizations/Figures/Stripes/'

### Lower-case hyphenated names exactly as used in the BEST file names.
states = np.array(['alabama','alaska','arizona','arkansas','california','colorado','connecticut',
'delaware','florida','georgia','hawaii','idaho','illinois','indiana','iowa','kansas','kentucky',
'louisiana','maine','maryland','massachusetts','michigan','minnesota','mississippi','missouri',
'montana','nebraska','nevada','new-hampshire','new-jersey','new-mexico','new-york','north-carolina',
'north-dakota','ohio','oklahoma','oregon','pennsylvania','rhode-island','south-carolina',
'south-dakota','tennessee','texas','utah','vermont','virginia','washington','west-virginia',
'wisconsin','wyoming'])

###########################################################################
###########################################################################
# ### Collect data from BEST (one-time download, kept for reference)
# for i in range(states.shape[0]):
#     url = 'http://berkeleyearth.lbl.gov/auto/Regional/TAVG/Text/%s-TAVG-Trend.txt' % states[i]
#     filename = wget.download(url,out=directorydata)
###########################################################################
###########################################################################
### Read in data: last `timeq` years of monthly anomalies per state
timeq = 100 # years
year = np.empty((states.shape[0],timeq*12))
mon = np.empty((states.shape[0],timeq*12))
temp = np.empty((states.shape[0],timeq*12))
for i in range(states.shape[0]):
    filename = '%s-TAVG-Trend.txt' % states[i]
    yearq,monthq,anomq = np.genfromtxt(directorydata + filename,skip_header=70,usecols=[0,1,2],
                                       unpack=True)
    year[i,:] = yearq[-timeq*12:]
    mon[i,:] = monthq[-timeq*12:]
    temp[i,:] = anomq[-timeq*12:]

### Annual means from the monthly anomalies
tt = np.reshape(temp,(states.shape[0],timeq,12))
mean = np.nanmean(tt,axis=2)

### Set parameters
yrmin = int(year.min())
yrmax = int(year.max())
rangemin = -2   # colorbar limits in degrees of anomaly
rangemax = 2
years = np.unique(year)

###########################################################################
###########################################################################
###########################################################################
### Plot climate stripes: one 1-rectangle-per-year strip per state
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})

for i in range(states.shape[0]):
    fig = plt.figure(figsize=(10, 1))
    cmap = cmocean.cm.balance
    ax = fig.add_axes([0, 0, 1, 1])
    ax.set_axis_off()
    col = PatchCollection([
        Rectangle((y, 0), 1, 1)
        for y in range(yrmin,yrmax+1)])
    ### Data
    col.set_array(mean[i,:])
    ### Range
    col.set_clim(rangemin,rangemax)
    col.set_cmap(cmap)
    ax.add_collection(col)
    ax.set_ylim(0, 1)
    ax.set_xlim(yrmin,yrmax+1)
    plt.savefig(directoryfigure + '%s_stripes.png' % states[i],dpi=300)
    # Bug fix: release each figure after saving; creating 50 figures
    # without closing leaks memory and triggers matplotlib's
    # too-many-open-figures warning.
    plt.close(fig)

### Save data
np.savetxt(directorydata + 'States_AnnualMean_BEST_%s-%s.txt' % (yrmin,yrmax),
           mean)
from forecastiowrap.api import ForecastioWrapper
|
#!/usr/bin/env python
# Gant-chart style build time visualization tool for bam event logs. Usefull to find things preventing
# good parallelization. Not very done, not very nice.
#
# build:
# bam --debug-eventlog evlog.txt
# Run:
# vistime.py evlog.txt
#
# Written by Markus Alind (markus.alind at gmail.com)
import sys
import os
import random
import copy
import math
import Tkinter
import tkMessageBox
def TimeToStr( val ):
    """Format a duration in seconds as s / ms / us with two decimals."""
    if val >= 1.0:
        fmt, scaled = "% 3.2fs ", val
    elif val >= 0.001:
        fmt, scaled = "% 3.2fms", val * 1000
    else:
        fmt, scaled = "% 3.2fus", val * 1000**2
    return fmt % scaled
class Job:
    """One build job: its name, start/end timestamps and owning thread."""

    def __init__( self, name, start, thread ):
        self.name = name
        self.start = start
        self.end = None  # filled in by SetEnd() when the job finishes
        self.thread = thread

    def SetEnd( self, end ) :
        """Record the completion timestamp."""
        self.end = end

    def GetRunTime( self ):
        """Duration in seconds, or None if the job never finished."""
        return self.end - self.start if self.end else None
class JobThread:
    """Per-thread bookkeeping used while parsing the event log."""

    def __init__ ( self ):
        self.cur = None  # the job currently in flight on this thread
        self.all = []    # every job that started here, in file order
class Jobs:
    """Parses a 'bam --debug-eventlog' file into per-thread job timelines."""

    def __init__ ( self ):
        self.map = {}
        self.allJobs = []   # every Job, in file order
        self.threads = {}   # thread id -> JobThread

    def ParseLine( self, line ):
        """Parse one '<thread> <time> <begin|end> <name>' event line."""
        line = line.strip().split( None , 3 )
        thread = int( line[ 0 ] )
        time = float( line[ 1 ] )
        action = line[ 2 ]
        name = line[ 3 ]
        if name == "build:":
            return
        # Bug fix: dict.has_key() was removed in Python 3; the 'in'
        # operator behaves identically on Python 2.
        if thread not in self.threads:
            self.threads[ thread ] = JobThread()
        th = self.threads[ thread ]
        if ( action == "begin" ):
            assert ( not th.cur )  # a thread runs one job at a time
            th.cur = Job( name, time, thread )
            th.all.append( th.cur )
            self.allJobs.append( th.cur )
        elif ( action == "end" ) :
            assert( th.cur )
            th.cur.SetEnd( time )
            th.cur = None
        else:
            assert( False )

    def Parse( self, lines ):
        """Parse all non-blank lines of the event log."""
        for line in lines:
            if line.strip():
                self.ParseLine( line )

    def GetThreads( self ):
        """Thread ids in ascending order.

        Bug fix: '.keys().sort()' only works on Python 2 list views;
        sorted() is equivalent there and also correct on Python 3.
        """
        return sorted( self.threads )
class TkGui:
def __init__( self, jobs = None ):
self.axisGraphics = []
self.CreateWindow( ( 800, 800 ) )
self.canvasSize = ( 800, 800 )
self.CalcSizes( 32 )
self.InitCanvas( jobs )
def WindowLoop( self ):
self.win.mainloop()
def CalcSizes( self, xScale ):
#xSize = 2000
self.yRectSize = 20
self.ySpacing = self.yRectSize * 1.25
self.xScale = xScale
self.xMargin = 30
self.xStart = self.xMargin
self.yMargin = self.ySpacing * 2
self.timeOffset = 0
self.yStart = self.yMargin
#rects = []
#minTime = 0
#maxTime = 0
#~ for t in jobs.GetThreads():
#~ th = jobs.threads[ t ]
#~ minTime = min( minTime, th.all[ 0 ].start )
#~ maxTime = max( maxTime, th.all[ -1 ].end )
#~ numThreads = len( jobs.threads )
#~ ySize = ( numThreads ) * ySpacing + yMargin*2
#~ yStart = yMargin
#~ totTime = maxTime - minTime
#~ xScale = ( xSize - xMargin*2 ) / totTime
#~ xStart = xMargin
def CreateWindow( self, canvasSize ):
self.win = Tkinter.Tk( )
self.statusbar = Tkinter.Label( self.win, text="", bd=1, relief=Tkinter.SUNKEN, anchor=Tkinter.W )
self.statusbar.pack( side=Tkinter.BOTTOM, fill=Tkinter.X )
self.scrollbarx = Tkinter.Scrollbar( self.win, orient=Tkinter.HORIZONTAL, width=24 )
#self.scrollbarx.grid(column=0, row=1, sticky=(Tkinter.W,Tkinter.E))
self.scrollbarx.pack( side = Tkinter.BOTTOM, fill=Tkinter.X )
self.canvas = Tkinter.Canvas( self.win, bg="lightgray",
width = canvasSize[ 0 ], height = canvasSize[ 1 ],
scrollregion=( 0, 0, canvasSize[ 0 ], canvasSize[ 1 ] ),
xscrollcommand=self.scrollbarx.set
)
#self.canvas.grid( column=0, row=0, sticky=( Tkinter.N, Tkinter.W, Tkinter.E, Tkinter.S ) )
self.scrollbarx.config( command = self.canvas.xview )
#self.canvas.config(scrollregion=canvas.bbox(Tkinter.ALL))
self.canvas.pack( side = Tkinter.TOP, fill=Tkinter.X )
self.canvas.tag_bind( "jobrect", "<Enter>", self.Ev_RectEnter )
self.canvas.tag_bind( "jobrect", "<Leave>", self.Ev_RectLeave )
self.canvas.bind( '<Button-5>', lambda e: self.Ev_Zoom( e, -1 ) )
self.canvas.bind( '<Button-4>', lambda e: self.Ev_Zoom( e, 1 ) )
self.canvas.bind( '<Button-3>', self.Ev_MButtonRightDown )
self.canvas.bind( '<B3-Motion>', self.Ev_MButtonRightMove )
self.win.bind( '<MouseWheel>', self.Ev_ZoomWheel )
def UpdateCanvasSize( self, newSize ):
maxX = newSize[ 0 ] + self.xMargin
maxY = newSize[ 1 ] + self.yMargin
if not maxX == self.canvasSize[ 0 ] or not maxY == self.canvasSize[ 1 ]:
self.canvasSize = ( maxX, maxY )
self.canvas.config( scrollregion=( 0, 0, maxX, maxY ), )
#self.canvas.config( width=maxX, height=maxY, scrollregion=( 0, 0, maxX, maxY ), )
def UpdateAxis( self ):
# we just wipe everything
for obj in self.axisGraphics:
self.canvas.delete( obj )
self.axisGraphics = []
tbY = self.yMargin - self.ySpacing
tTickY = tbY - 2
tMajorY = 8
tMinorY = 5
tMinorPerMajor = 4
tMajorXTarget = 100
tbStartX = self.xMargin
tbStopX = self.canvasSize[ 0 ] - self.xMargin
baseLine = self.canvas.create_line( tbStartX, tbY, tbStopX, tbY )
self.axisGraphics.append( baseLine )
#print ( "self.timeMax,", self.timeMax, "tbStopX - tbStartX,", tbStopX - tbStartX )
secPerMajor = self.timeMax / ( ( tbStopX - tbStartX ) / tMajorXTarget )
#print ( "secPerMajor", secPerMajor )
#if secPerMajor < 1 :
# secPerMajor = 1
if secPerMajor < 30 :
if self.timeMax / secPerMajor > 1000 :
secPerMajor = self.timeMax / 1000
tenPot = math.floor( math.log10( secPerMajor ) )
norm = secPerMajor / ( 10**tenPot )
#print norm
for i in [ 5, 2, 1 ]:
if norm > i :
norm = i
break
secPerMajor = norm * ( 10**tenPot )
elif secPerMajor < 60 :
secPerMajor = 30
else:
minPerMajor = secPerMajor / 60
tenPot = math.floor( math.log10( minPerMajor ) )
norm = minPerMajor / ( 10**tenPot )
for i in [ 5, 2, 1 ]:
if norm > i :
norm = i
break
minPerMajor = norm * ( 10**tenPot )
secPerMajor = minPerMajor * 60
#print ( "secPerMajor", secPerMajor )
numMajor = int( math.floor( self.timeMax / secPerMajor ) )
#print ( "numMajor", numMajor )
for m in range( numMajor + 1 ):
majorPos = tbStartX + m * secPerMajor * self.xScale
self.axisGraphics.append( self.canvas.create_line( majorPos, tTickY, majorPos, tTickY + tMajorY ) )
time = m*secPerMajor
self.axisGraphics.append( self.canvas.create_text( majorPos, tTickY, anchor=Tkinter.S, text=TimeToStr(time) ) )
for minor in range(1, tMinorPerMajor ) :
minorPos = majorPos + minor * ( secPerMajor / tMinorPerMajor ) * self.xScale
if ( minorPos <= tbStopX ):
self.axisGraphics.append( self.canvas.create_line( minorPos, tTickY, minorPos, tTickY + tMinorY ) )
#print len( self.axisGraphics )
class JobRect:
def __init__( self, job ):
self.job = job
self.rect = None
def CalcRect(self, job ):
x0 = ( job.start - self.timeOffset ) * self.xScale +self.xStart
x1 = ( job.end - self.timeOffset ) * self.xScale + self.xStart
y0 = ( job.thread*self.ySpacing - self.yRectSize*0.5) + self.yStart
y1 = ( job.thread*self.ySpacing + self.yRectSize*0.5) + self.yStart
return ( x0, y0, x1, y1 )
def InitCanvas( self, jobs ):
self.guiIdToJob = {}
self.jobToGuiId = {}
self.jobRects = []
maxX = 0
maxY = 0
timeMax = 0
for t in jobs.GetThreads():
th = jobs.threads[ t ]
for j in th.all:
jr = self.JobRect( j )
( x0, y0, x1, y1 ) = self.CalcRect( jr.job )
maxX = max( maxX, x1 )
maxY = max( maxY, y1 )
timeMax = max( timeMax, jr.job.end )
#~ jr.rect = self.canvas.create_polygon(
#~ x0, y0,
#~ x0, y1,
#~ x1, y1,
#~ x1, y0,
#~ fill="lightgreen", outline="black", activefill="green", activeoutline="black", tag="jobrect" )
col = ( "lightgreen", "green" )
if not "job:" in j.name and ( "cache load" in j.name or "script parse" in j.name or "prepare" in j.name ):
col = ( "lightcyan", "cyan" )
elif not ("c " in j.name and "c++" in j.name ) and "link" in j.name:
col = ( "lightblue", "blue" )
elif "job:" in j.name and "precomp" in j.name :
col = ( "lightyellow", "yellow" )
elif "job:" in j.name and ".dll" in j.name :
col = ( "lightblue", "blue" )
jr.rect = self.canvas.create_rectangle(
( x0, y0, x1, y1 ),
fill=col[0], outline="black", activefill=col[1], activeoutline="black", tag="jobrect" )
self.guiIdToJob[ jr.rect ] = jr
self.jobToGuiId[ j.name ] = jr
self.jobRects.append( jr )
#canvas.tag_bind( rect, "<Enter>", test )
self.timeMax = timeMax
self.UpdateCanvasSize( ( maxX, maxY ) )
self.UpdateAxis()
def UpdateCanvas( self ):
maxX = 0
maxY = 0
for jr in self.jobRects:
( x0, y0, x1, y1 ) = self.CalcRect( jr.job )
maxX = max( maxX, x1 )
maxY = max( maxY, y1 )
self.canvas.coords( jr.rect, ( x0, y0, x1, y1 ) )
self.UpdateCanvasSize( ( maxX, maxY ) )
self.UpdateAxis()
def SetScale( self, xScale ):
self.CalcSizes( xScale )
self.UpdateCanvas()
def CenterAtTime( self, time ):
winWidth = self.canvas.winfo_width()
xNewCenter = ( time*self.xScale ) / ( self.canvasSize[ 0 ] - self.xMargin*2 ) - ( winWidth / self.canvasSize[ 0 ] ) / 2
self.canvas.xview_moveto( xNewCenter )
def Ev_RectEnter( self, event ):
curId = self.canvas.find_withtag( Tkinter.CURRENT )
jr = self.guiIdToJob[ curId[ 0 ] ]
text = "%s %s" % ( TimeToStr( jr.job.GetRunTime() ), jr.job.name )
self.statusbar.config( text=text )
#print guiIdToJob[ Tkinter.CURRENT ]
#print event
#print dir( event )
#print event.num
#canvas.itemconfigure( Tkinter.CURRENT, fill="red" )
def Ev_RectLeave( self, event ):
    """Mouse left a job rectangle: clear the status bar."""
    self.statusbar.config( text="" )
    #print event
    #print dir( event )
    #print event.num
    #canvas.itemconfigure( Tkinter.CURRENT, fill="lightgreen" )
def Ev_Zoom( self, event, val ):
    """Zoom horizontally by `val` steps while keeping the time that is
    under the cursor at the same on-screen position."""
    # zoom but keep the spot under the cursor under the cursor
    xCurWindow = event.x +0.5
    xCurCanvas = self.canvas.canvasx( xCurWindow )
    # Convert the cursor's canvas x back into a time value at the old scale
    xCenterTime = ( xCurCanvas - self.xStart ) / self.xScale
    # One step scales by 1/0.75; negative `val` zooms out
    scale = self.xScale*0.75**(-val)
    self.SetScale( scale )
    # NOTE(review): the code below re-reads self.xScale, assuming
    # SetScale/CalcSizes updated it to `scale` — confirm in CalcSizes
    winWidth = self.canvas.winfo_width()
    # Pixel position of the same time at the new scale, shifted so the
    # cursor keeps its window-relative offset
    xCenterPixel = xCenterTime*self.xScale + self.xStart - (xCurWindow - winWidth/2 )
    xCenterFrac = xCenterPixel / ( self.canvasSize[ 0 ] ) - ( winWidth / self.canvasSize[ 0 ] ) / 2
    self.canvas.xview_moveto( xCenterFrac )
def Ev_ZoomWheel( self, event ) :
    """Mouse-wheel handler: convert the wheel delta into zoom steps."""
    # Wheel deltas arrive in multiples of 120 on Windows
    self.Ev_Zoom( event, event.delta/120 )
def Ev_MButtonRightDown( self, event ):
    """Right-button press: set the anchor for horizontal drag-panning."""
    self.canvas.scan_mark( event.x, 0 )
def Ev_MButtonRightMove( self, event ):
    """Right-button drag: pan the canvas horizontally (1:1 gain)."""
    self.canvas.scan_dragto( event.x, 0, 1 )
def callback(event):
    """Debug helper: print the canvas item closest to a click (Python 2)."""
    canvas = event.widget
    # Translate window coordinates into canvas coordinates (accounts for
    # scrolling)
    x = canvas.canvasx(event.x)
    y = canvas.canvasy(event.y)
    print canvas.find_closest(x, y)
def main( argv ):
    """Parse a build event log and run the Tk GUI.

    :param argv: sys.argv-style list; argv[1] must be the log file path
    """
    #bam --debug-eventlog log.txt
    logPath = argv[ 1 ]
    jobs = Jobs( )
    # Use a context manager so the log file handle is closed deterministically
    # (previously `open(...).readlines()` leaked the handle)
    with open( logPath ) as logFile:
        jobs.Parse( logFile.readlines() )
    w = TkGui( jobs )
    w.WindowLoop()
# Script entry point: pass the full argument vector to main()
if __name__ == "__main__":
    main( sys.argv )
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_zeros(target_shape)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(valid_mask, as_tuple=True)
if inds[0].numel() > 0:
if labels.dim() == 3:
bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1
else:
bin_labels[inds[0], labels[valid_mask]] = 1
valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.unsqueeze(1).expand(target_shape)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights
def bi_cross_entropy(pred,
                     label,
                     weight=None,
                     reduction='mean',
                     avg_factor=None,
                     ignore_index=255):
    """Calculate the binary CrossEntropy loss.

    Note: ``pred`` must hold probabilities in [0, 1] (e.g. after a sigmoid),
    since ``F.binary_cross_entropy`` is used rather than the logits variant.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C) or (N, C, H, W).
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Element-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum". Default: "mean".
        avg_factor (int, optional): Average factor used to average the loss
            when ``reduction == 'mean'``. Defaults to None.
        ignore_index (int | None): The label index to be ignored. Default: 255

    Returns:
        torch.Tensor: The calculated loss
    """
    if pred.dim() != label.dim():
        assert (pred.dim() == 2 and label.dim() == 1) or (
            pred.dim() == 4 and label.dim() == 3), \
            'Only pred shape [N, C], label shape [N] or pred shape [N, C, ' \
            'H, W], label shape [N, H, W] are supported'
        label, weight = _expand_onehot_labels(label, weight, pred.shape,
                                              ignore_index)

    # Compute the element-wise loss first so `weight`, `reduction` and
    # `avg_factor` are honoured (previously they were silently ignored and
    # the loss was always an unweighted mean, so ignore_index had no effect)
    loss = F.binary_cross_entropy(pred, label.float(), reduction='none')
    if weight is not None:
        loss = loss * weight.float()
    if reduction == 'mean':
        if avg_factor is not None:
            loss = loss.sum() / avg_factor
        else:
            loss = loss.mean()
    elif reduction == 'sum':
        loss = loss.sum()
    elif reduction != 'none':
        raise ValueError('Invalid reduction: {}'.format(reduction))
    return loss
@LOSSES.register_module()
class BceCrossEntropyLoss(nn.Module):
    """Binary cross-entropy loss module wrapping :func:`bi_cross_entropy`.

    Args:
        reduction (str, optional): Reduction applied to the loss.
            Options are "none", "mean" and "sum". Defaults to 'mean'.
        loss_weight (float, optional): Scalar multiplier applied to the
            computed loss. Defaults to 1.0.
    """

    def __init__(self,
                 reduction='mean',
                 loss_weight=1.0):
        super(BceCrossEntropyLoss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight
        # The criterion is stored as an attribute so subclasses could swap it
        self.cls_criterion = bi_cross_entropy

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function."""
        assert reduction_override in (None, 'none', 'mean', 'sum')
        if reduction_override:
            reduction = reduction_override
        else:
            reduction = self.reduction
        return self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            weight,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
|
"""
Type and packaing and unpacking of data values
"""
# pylint: disable=redefined-builtin,too-many-return-statements,too-many-branches,no-member
import base64
import json
from io import BytesIO
from collections import OrderedDict
import inspect
import glob
import re
import sphinxcontrib.napoleon
import sphinxcontrib.napoleon.docstring
import matplotlib
import numpy
import pandas
import six
def type(value):
    """
    Get the type code for a value

    :param value: A Python value
    :returns: Type code for value (e.g. 'integer', 'string', 'table')
    """
    # This function shadows the builtin `type`, so look the real one up.
    # `__builtins__` is a dict inside imported modules but a module object
    # when this file runs as `__main__`; the previous dict-only subscript
    # raised a TypeError in the latter case.
    if isinstance(__builtins__, dict):
        builtin_type = __builtins__['type']
    else:
        builtin_type = __builtins__.type
    type_ = builtin_type(value).__name__
    if value is None:
        return 'null'
    elif type_ == 'bool':
        return 'boolean'
    elif type_ == 'int':
        return 'integer'
    elif type_ == 'float':
        return 'number'
    elif type_ == 'str' or type_ == 'unicode':
        return 'string'
    elif (
        isinstance(value, (matplotlib.figure.Figure, matplotlib.artist.Artist)) or
        (type_ == 'list' and len(value) == 1 and isinstance(value[0], matplotlib.artist.Artist))
    ):
        # Use the special 'matplotlib' type to identify plot values that need
        # to be converted to the standard 'image' type during `pack()`
        return 'matplotlib'
    elif type_ in ('tuple', 'list'):
        return 'array'
    elif type_ == 'dict':
        # A dict carrying a string 'type' entry is treated as an
        # already-typed value package
        type_ = value.get('type')
        if type_ and isinstance(type_, str):
            return type_
        return 'object'
    elif isinstance(value, pandas.DataFrame):
        return 'table'
    elif type_ == 'module':
        return 'module'
    elif callable(value):
        return 'function'
    else:
        raise RuntimeError('Unhandled Python type: ' + type_)
def pack(value):
    """
    Pack an object into a value package

    :param value: A Python value
    :returns: A value package: a dict with `type`, `format` and `data`
        fields (or `type`/`src` for images)
    """
    type_ = type(value)
    format_ = 'json'

    if value is None:
        data = None
    elif type_ == 'boolean':
        data = value
    elif type_ in ('integer', 'number'):
        data = value
    elif type_ == 'string':
        data = value
    elif type_ in ('array', 'object'):
        data = value
    elif type_ == 'function':
        return pack_function(value)
    elif type_ == 'module':
        return pack_module(value)
    elif type_ == 'table':
        # It is necessary to remove NANs before serialising
        # as JSON. See https://stackoverflow.com/a/34467382
        # for why we need to do this and why we used this approach
        value = value.where(pandas.notnull(value), None)
        columns = OrderedDict()
        for column in value.columns:
            col = value[column]
            # See the list of numpy data types at
            # https://docs.scipy.org/doc/numpy/reference/arrays.scalars.html#arrays-scalars-built-in
            values = list(col)
            # `numpy.bool8` and `numpy.object` aliases were removed in
            # numpy 1.24; `numpy.bool_` and plain `object` compare equal
            # to the same dtypes
            if col.dtype == numpy.bool_:
                column_type = 'boolean'
                values = [bool(row) for row in values]
            elif col.dtype in (numpy.int8, numpy.int16, numpy.int32, numpy.int64):
                column_type = 'integer'
                values = [int(row) for row in values]
            elif col.dtype in (numpy.float16, numpy.float32, numpy.float64):
                column_type = 'number'
                values = [float(row) for row in values]
            elif col.dtype == numpy.str_:
                column_type = 'string'
            elif col.dtype == object and values:
                # Get the type from the type of the first value; this
                # function shadows the builtin `type`, so look it up
                # (`__builtins__` is a dict in imported modules, a module
                # in `__main__`)
                if isinstance(__builtins__, dict):
                    builtin_type = __builtins__['type']
                else:
                    builtin_type = __builtins__.type
                column_type = {
                    str: 'string'
                }.get(builtin_type(values[0]))
            else:
                column_type = col.dtype.name
            # NOTE(review): `column_type` is computed but never stored;
            # presumably it was meant to accompany `values` — confirm
            # against the consumers of the 'table' package format
            columns[column] = values
        data = OrderedDict([('type', 'table'), ('data', columns)])
    elif type_ == 'matplotlib':
        image = BytesIO()
        matplotlib.pyplot.savefig(image, format='png')
        type_ = 'image'
        # `base64.encodestring` was removed in Python 3.9; prefer
        # `encodebytes` and only fall back (lazily) for Python 2
        encode = getattr(base64, 'encodebytes', None) or base64.encodestring
        src = 'data:image/png;base64,' + encode(image.getvalue()).decode()
        return {'type': type_, 'src': src}
    else:
        raise RuntimeError('Unable to pack object\n type: ' + type_)
    return {'type': type_, 'format': format_, 'data': data}
def pack_function(func=None, file=None, dir=None):
    """
    Pack a function object

    Parses the source of the function (either a string or a file
    path) to extract its ``description``, ``param``, ``return`` etc
    properties.

    Parameters
    ----------
    func : dict or string or callable
        A function, or function source. If a string is supplied then it is
        executed and the last object defined in it is packed.
    file : str
        Path of a source file to read and pack (used when ``func`` is None).
    dir : str
        Directory of ``*.py`` files to pack; returns the number packed.

    Returns
    -------
    dict
        A function value package with ``type``, ``format`` and ``data``
        fields (or an int count when ``dir`` is given).
    """
    messages = []  # NOTE(review): warnings are collected but never returned — confirm intended

    if func is None:
        if file:
            # Pack a function from the contents of a source file
            with open(file) as file_obj:
                func = pack_function(file_obj.read())
            return func
        elif dir:
            # Pack every Python file in the directory; return how many
            count = 0
            for file in glob.glob(dir + '/*.py'):
                pack_function(file=file)
                count += 1
            return count
        else:
            raise RuntimeError('No function provided to compile!')
    elif callable(func):
        func_obj = func
        func = {
            'type': 'function'
        }
        func_name = func_obj.__code__.co_name
    elif isinstance(func, (str, bytes)):
        # Parse function source and extract properties from the Function object
        source = func
        scope = {}
        six.exec_(source, scope)
        # Get name of function: the last non-dunder name defined in the scope
        names = [key for key in scope.keys() if not key.startswith('__')]
        if len(names) > 1:
            messages.append({
                'type': 'warning',
                'message': 'More than one function or object defining in function source: %s' % names
            })
        func_name = names[-1]
        func_obj = scope[func_name]
        func = {
            'type': 'function'
        }
    else:
        raise RuntimeError('Unhandled type')

    # Extract parameter specifications.
    # `inspect.getargspec` was removed in Python 3.11; prefer
    # `getfullargspec` and fall back on Python 2 (where the kwargs
    # attribute is named `keywords` instead of `varkw`).
    try:
        func_spec = inspect.getfullargspec(func_obj)
        varkw = func_spec.varkw
    except AttributeError:  # Python 2
        func_spec = inspect.getargspec(func_obj)
        varkw = func_spec.keywords
    args = list(func_spec.args)
    num_positional = len(args)
    if func_spec.varargs:
        args.append(func_spec.varargs)
    if varkw:
        args.append(varkw)
    params = []
    for index, name in enumerate(args):
        param = {
            'name': name
        }
        if name == func_spec.varargs:
            param['repeat'] = True
        elif name == varkw:
            param['extend'] = True
        if func_spec.defaults and index < num_positional:
            # Defaults apply to the *trailing* positional parameters.
            # (The original computed `len(args) - len(defaults) + index`,
            # which indexed past the end of `defaults`.)
            defaults_index = index - (num_positional - len(func_spec.defaults))
            if defaults_index >= 0:
                default = func_spec.defaults[defaults_index]
                param['default'] = {
                    'type': type(default),
                    'data': default
                }
        params.append(param)

    # Get docstring and parse it for extra parameter specs
    docstring = func_obj.__doc__
    docstring_params = {}
    docstring_returns = {}
    if docstring:
        docstring = trim_docstring(docstring)
        # Normalise NumPy- and Google-style sections into reST fields
        config = sphinxcontrib.napoleon.Config(napoleon_use_param=True, napoleon_use_rtype=True)
        docstring = sphinxcontrib.napoleon.docstring.NumpyDocstring(docstring, config, what='function').lines()
        docstring = sphinxcontrib.napoleon.docstring.GoogleDocstring(docstring, config, what='function').lines()
        summary = docstring[0]
        description = ''
        pattern = re.compile(r'^:(param|returns|type|rtype)(\s+(\w+))?:(.*)$')
        for line in docstring[1:]:
            match = pattern.match(line)
            if match:
                type_ = match.group(1)
                name = match.group(3)
                desc = match.group(4).strip()
                if type_ == 'param':
                    param = docstring_params.get(name, {})
                    param['description'] = desc
                    docstring_params[name] = param
                elif type_ == 'type':
                    param = docstring_params.get(name, {})
                    param['type'] = desc
                    docstring_params[name] = param
                elif type_ == 'returns':
                    docstring_returns['description'] = desc
                elif type_ == 'rtype':
                    docstring_returns['type'] = desc
            else:
                description += line + '\n'
        description = description.strip()
        if len(summary):
            func.update({'summary': summary})
        if len(description):
            func.update({'description': description})
        # Merge docstring-derived specs into the inspected parameters
        for name, spec in docstring_params.items():
            for index, param in enumerate(params):
                if param['name'] == name:
                    params[index].update(spec)
                    break
        if len(docstring_returns):
            func.update({'returns': docstring_returns})

    # Create methods dict
    # FIXME: should use signature not func_name
    methods = {}
    methods[func_name] = {
        'params': params
    }

    func = {
        'name': func_name,
        'id': str(id(func_obj)),
        'methods': methods
    }

    return {
        'type': 'function',
        'format': 'json',
        'data': func
    }
def trim_docstring(docstring):
    """Strip common leading indentation and surrounding blank lines from a
    docstring (algorithm from https://www.python.org/dev/peps/pep-0257/)."""
    if not docstring:
        return ''
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = docstring.expandtabs().splitlines()
    # Determine minimum indentation (first line doesn't count).
    # (The original used `sys.maxsize` as a sentinel, but this module never
    # imports `sys`, so any multi-line docstring raised a NameError.)
    indents = [
        len(line) - len(line.lstrip())
        for line in lines[1:] if line.strip()
    ]
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indents:
        indent = min(indents)
        for line in lines[1:]:
            trimmed.append(line[indent:].rstrip())
    # Strip off trailing and leading blank lines:
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    # Return a single string:
    return '\n'.join(trimmed)
def pack_module(value):
    """Pack a module object into a value package that records its id."""
    package = {
        'type': 'module',
        'data': {
            'id': str(id(value))
        }
    }
    return package
def unpack(pkg):
    """
    Unpack a value package into a Python value

    :param pkg: The value package (a dict, or its JSON serialisation)
    :returns: A Python value
    """
    if isinstance(pkg, str):
        pkg = json.loads(pkg)

    if not isinstance(pkg, dict):
        raise RuntimeError('Package should be an `Object`')

    if not ('type' in pkg and 'data' in pkg):
        raise RuntimeError('Package should have fields `type`, `data`')

    type_ = pkg['type']
    format = pkg.get('format', 'json')
    data = pkg['data']

    if type_ == 'null':
        return None
    elif type_ == 'boolean':
        # `data` may be a real bool (as produced by `pack()`) or the string
        # 'true'/'false'; previously an actual `True` compared unequal to
        # 'true' and unpacked to False
        if isinstance(data, bool):
            return data
        return data == 'true'
    elif type_ == 'integer':
        return int(data)
    elif type_ == 'number':
        return float(data)
    elif type_ == 'string':
        return data
    elif type_ == 'object' or type_ == 'array':
        # `pack()` stores arrays/objects as live values; only parse when the
        # data arrives as a JSON string (previously `json.loads` was called
        # unconditionally and raised TypeError on a list/dict)
        if isinstance(data, str):
            return json.loads(data)
        return data
    elif type_ == 'table':
        if format == 'json':
            dataframe = pandas.DataFrame()
            for name, column in data['data'].items():
                dataframe[name] = column
            return dataframe
        elif format in ('csv', 'tsv'):
            sep = ',' if format == 'csv' else '\t'
            return pandas.read_csv(BytesIO(data.encode()), sep=sep)
        else:
            raise RuntimeError('Unable to unpack\n type: ' + type_ + '\n format: ' + format)
    else:
        raise RuntimeError('Unable to unpack\n type: ' + type_ + '\n format: ' + format)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.