import datetime
def print_header():
print('--------------------')
print(' Due Date APP')
print('--------------------')
print()
def get_lmp_from_patient():
print("When was the patient's last normal menstrual cycle? ")
date_str = input('Format: [dd/mm/yyyy]? ')
    # Desired format: '26/05/2018' (dd/mm/yyyy)
parts = date_str.split('/')
# argument check
if len(parts) !=3:
print('Bad date found', date_str)
return get_lmp_from_patient()
year = int(parts[2])
month = int(parts[1])
day = int(parts[0])
lmp = datetime.date(year, month, day)
#print (lmp)
return lmp
def compute_days_between_dates(original_date, target_date):
    # Gestational age in completed weeks: days elapsed from the LMP to today,
    # floored to whole weeks (e.g. 10 weeks 3 days stays 10 weeks).
    dt = target_date - original_date
    gest_age = dt.days // 7
    return gest_age
def print_gestational_age(gest_age):
    print("Your current gestational age is {} weeks.".format(gest_age))
def print_date_date_information(min_due_date, max_due_date, expected_due_date):
print('Your expected due date is', expected_due_date.strftime('%a %b %d %Y'))
print('But it may be as early as', min_due_date.strftime('%m/%d/%Y'))
print('Or as late as', max_due_date.strftime('%m/%d/%Y'))
def main():
print_header()
lmp_day = get_lmp_from_patient()
today = datetime.date.today()
gest_length = datetime.timedelta(days = 281)
gest_std = datetime.timedelta(days = 13)
expected_due_date = lmp_day + gest_length
min_due_date = expected_due_date - gest_std
max_due_date = expected_due_date + gest_std
print_date_date_information(min_due_date, max_due_date, expected_due_date)
age = compute_days_between_dates(lmp_day, today)
print_gestational_age(age)
main()
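# Worked example (illustrative): an LMP of 01/01/2024 gives an expected due date of
# 01/01/2024 + 281 days = Tue Oct 08 2024, which may be as early as 09/25/2024
# or as late as 10/21/2024 (13 days either side).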
|
from torchvision import models
class Model:
def __init__(self, arch):
        # Look up the torchvision constructor by name (e.g. 'vgg16') rather than using eval.
        self.network = getattr(models, arch)(pretrained=True)
        # Assumes an architecture whose classifier is a Sequential of Linear layers (e.g. VGG, AlexNet).
        self.input_size = self.network.classifier[0].in_features
# Freeze parameters so we don't backprop through them
for param in self.network.parameters():
param.requires_grad = False
def classifier(self, classifier):
self.network.classifier = classifier.network
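# A minimal usage sketch (not part of the original module): it wraps torchvision's vgg16
# and attaches a small classifier head. SimpleClassifier and the 102-class output size are
# illustrative assumptions, not values taken from the original code.
if __name__ == '__main__':
    import torch.nn as nn

    class SimpleClassifier:
        def __init__(self, in_features, n_classes):
            # Any object exposing a `.network` attribute works with Model.classifier().
            self.network = nn.Sequential(
                nn.Linear(in_features, 512),
                nn.ReLU(),
                nn.Linear(512, n_classes),
            )

    model = Model('vgg16')
    model.classifier(SimpleClassifier(model.input_size, 102))
    print(model.network.classifier)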
|
import argparse
import nltk
def argparser():
p = argparse.ArgumentParser()
p.add_argument('-input', required=True)
p.add_argument('-output', required=True)
p.add_argument('-word_num',
type=int,
required=True,
help='how many words to use. the words are sorted by decreasing frequency.')
config = p.parse_args()
return config
if __name__ == "__main__":
config = argparser()
    vocabulary = []
    with open(config.input, 'r') as f:
        for line in f:
            line = line.replace('\n', '').strip()
            if line != '':
                vocabulary += line.split()
vocabulary = nltk.Text(vocabulary)
print('build_vocab.py: number of tokens = {}'.format(len(vocabulary.tokens)))
print('build_vocab.py: number of unique tokens = {}'.format(len(set(vocabulary.tokens))))
print('build_vocab.py: frequency of vocabulary(top 10)\n{}'.format(vocabulary.vocab().most_common(10)))
    with open(config.output, 'w') as f_out:
        for idx, (word, _) in enumerate(vocabulary.vocab().most_common(config.word_num)):
            f_out.write(word + ' ' + str(idx) + '\n')
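# Example invocation (file names are illustrative):
#   python build_vocab.py -input corpus.tokenized.txt -output vocab.txt -word_num 30000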
|
# -*- coding: utf-8 -*-
from config.basic import BasicSettings
class Settings(BasicSettings):
ES_HOSTS: list = ["localhost", ]
# ES_HOSTS: list = ["es_dev", ]
INFLUX_HOST = "localhost"
INFLUX_PORT = 8086
INFLUX_DB = "my_db"
|
"""
This is converted from tensorflow simple audio recognition tutorial: https://www.tensorflow.org/tutorials/audio/simple_audio
"""
import os
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras import layers
from tensorflow.keras import models
from IPython import display
AUTOTUNE = tf.data.AUTOTUNE
class SpeechCommandModel():
def preprocess_dataset(self, files):
files_ds = tf.data.Dataset.from_tensor_slices(files)
output_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE)
# show_wav_ds(output_ds)
# show_spectrogram(output_ds)
output_ds = output_ds.map(
lambda audio, label: get_spectrogram_and_label_id(audio, label, self.commands), num_parallel_calls=AUTOTUNE)
# show_all_spectrogram(spectrogram_ds, commands)
def spectrogram_preprocess(spectrogram, label_id):
spectrogram = preprocessing.Resizing(32, 32, interpolation='area')(spectrogram)
            spectrogram = preprocessing.Normalization(mean=0.0, variance=1.0)(spectrogram)  # pass-through with these fixed statistics
# spectrogram = preprocessing.Rescaling(0.5, offset=1.0)(spectrogram) # [0,1] conceptually
return spectrogram, label_id
output_ds = output_ds.map(spectrogram_preprocess)
return output_ds
def load_mini_speech_commands_dataset(self, data_dir):
data_dir = pathlib.Path(data_dir)
# Set seed for experiment reproducibility
seed = 42
tf.random.set_seed(seed)
np.random.seed(seed)
self.commands = np.array(tf.io.gfile.listdir(str(data_dir)))
self.commands = self.commands[self.commands != 'README.md']
print('Commands:', self.commands)
filenames = tf.io.gfile.glob(str(data_dir) + '/*/*')
filenames = tf.random.shuffle(filenames)
num_samples = len(filenames)
print('Number of total examples:', num_samples)
print('Number of examples per label:',
len(tf.io.gfile.listdir(str(data_dir / self.commands[0]))))
print('Example file tensor:', filenames[0])
train_files = filenames[:6400]
val_files = filenames[6400: 6400 + 800]
test_files = filenames[-800:]
print('Training set size', len(train_files))
print('Validation set size', len(val_files))
print('Test set size', len(test_files))
self.train_ds = self.preprocess_dataset(train_files)
self.val_ds = self.preprocess_dataset(val_files)
self.test_ds = self.preprocess_dataset(test_files)
def build(self, input_shape, num_labels):
self.model = models.Sequential([
layers.Input(shape=input_shape),
layers.Conv2D(32, 3, activation='relu'),
layers.Conv2D(64, 3, activation='relu'),
layers.MaxPooling2D(),
layers.Dropout(0.25),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dropout(0.5),
layers.Dense(num_labels),
])
self.model.summary()
return self.model
def train(self, batch_size=64, EPOCHS=10):
self.model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'],
)
self.train_ds = self.train_ds.batch(batch_size)
self.val_ds = self.val_ds.batch(batch_size)
self.train_ds = self.train_ds.cache().prefetch(AUTOTUNE)
self.val_ds = self.val_ds.cache().prefetch(AUTOTUNE)
history = self.model.fit(
self.train_ds,
validation_data=self.val_ds,
epochs=EPOCHS,
            callbacks=[tf.keras.callbacks.EarlyStopping(verbose=1, patience=2)],
)
return history
def test(self):
test_audio = []
test_labels = []
for audio, label in self.test_ds:
test_audio.append(audio.numpy())
test_labels.append(label.numpy())
test_audio = np.array(test_audio)
test_labels = np.array(test_labels)
y_pred = np.argmax(self.model.predict(test_audio), axis=1)
y_true = test_labels
test_acc = sum(y_pred == y_true) / len(y_true)
print(f'Test set accuracy: {test_acc:.0%}')
def save(self,fp):
self.model.save(fp)
def load_model(self, fp):
self.model = models.load_model(filepath=fp)
return self.model
def show_all_spectrogram(spectrogram_ds, commands):
rows = 3
cols = 3
n = rows * cols
fig, axes = plt.subplots(rows, cols, figsize=(10, 10))
for i, (spectrogram, label_id) in enumerate(spectrogram_ds.take(n)):
r = i // cols
c = i % cols
ax = axes[r][c]
plot_spectrogram(np.squeeze(spectrogram.numpy()), ax)
ax.set_title(commands[label_id.numpy()])
ax.axis('off')
plt.show()
def get_spectrogram_and_label_id(audio, label, commands):
spectrogram = get_spectrogram(audio)
spectrogram = tf.expand_dims(spectrogram, -1)
label_id = tf.argmax(label == commands)
return spectrogram, label_id
def show_wav_ds(waveform_ds):
rows = 3
cols = 3
n = rows * cols
fig, axes = plt.subplots(rows, cols, figsize=(10, 12))
for i, (audio, label) in enumerate(waveform_ds.take(n)):
r = i // cols
c = i % cols
ax = axes[r][c]
ax.plot(audio.numpy())
ax.set_yticks(np.arange(-1.2, 1.2, 0.2))
label = label.numpy().decode('utf-8')
ax.set_title(label)
plt.show()
def decode_audio(audio_binary):
audio, _ = tf.audio.decode_wav(audio_binary)
return tf.squeeze(audio, axis=-1)
def get_label(file_path):
parts = tf.strings.split(file_path, os.path.sep)
# Note: You'll use indexing here instead of tuple unpacking to enable this
# to work in a TensorFlow graph.
return parts[-2]
def get_waveform_and_label(file_path):
label = get_label(file_path)
audio_binary = tf.io.read_file(file_path)
waveform = decode_audio(audio_binary)
return waveform, label
def get_spectrogram(waveform):
# Padding for files with less than 16000 samples
zero_padding = tf.zeros([16000] - tf.shape(waveform), dtype=tf.float32)
# Concatenate audio with padding so that all audio clips will be of the
# same length
waveform = tf.cast(waveform, tf.float32)
equal_length = tf.concat([waveform, zero_padding], 0)
spectrogram = tf.signal.stft(
equal_length, frame_length=255, frame_step=128)
spectrogram = tf.abs(spectrogram)
return spectrogram
def plot_spectrogram(spectrogram, ax):
# Convert to frequencies to log scale and transpose so that the time is
# represented in the x-axis (columns). An epsilon is added to avoid log of zero.
log_spec = np.log(spectrogram.T + np.finfo(float).eps)
height = log_spec.shape[0]
width = log_spec.shape[1]
X = np.linspace(0, np.size(spectrogram), num=width, dtype=int)
Y = range(height)
ax.pcolormesh(X, Y, log_spec)
def show_spectrogram(waveform_ds):
for waveform, label in waveform_ds.take(1):
label = label.numpy().decode('utf-8')
spectrogram = get_spectrogram(waveform)
fig, axes = plt.subplots(2, figsize=(12, 8))
timescale = np.arange(waveform.shape[0])
axes[0].plot(timescale, waveform.numpy())
axes[0].set_title('Waveform')
axes[0].set_xlim([0, 16000])
plot_spectrogram(spectrogram.numpy(), axes[1])
axes[1].set_title('Spectrogram')
plt.show()
if __name__ == "__main__":
data_dir = "data/0_data/mini_speech_commands"
mModel = SpeechCommandModel()
mModel.load_mini_speech_commands_dataset(data_dir)
for spectrogram, _ in mModel.train_ds.take(1):
input_shape = spectrogram.shape
print('Input shape:', input_shape)
num_labels = len(mModel.commands)
print('Output Class:', num_labels)
mModel.build(input_shape=input_shape,num_labels=num_labels)
mModel.train()
mModel.test()
model_output_dir = "model/SavedModel/SpeechCommandModel/"
mModel.save(model_output_dir)
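    # Reloading the saved model later (sketch; paths mirror model_output_dir above):
    #   m = SpeechCommandModel()
    #   m.load_mini_speech_commands_dataset(data_dir)
    #   m.load_model(model_output_dir)
    #   m.test()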
|
import unittest
def validBraces(string):
stack = []
braces = {"(": ")", "[": "]", "{": "}"}
for c in string:
if c in braces.keys():
stack.append(c)
else:
if not stack or braces[stack.pop()] != c:
return False
return not len(stack)
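# Worked example: for "[(])" the stack receives '[' then '('; the first ']' pops '(' whose
# expected partner is ')', so the mismatch returns False (matching the test case below).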
class TestExercise(unittest.TestCase):
def test_validBraces(self):
self.assertEqual(True, validBraces("()"))
self.assertEqual(False, validBraces("[(])"))
self.assertEqual(True, validBraces("(){}[]"))
self.assertEqual(False, validBraces("(}"))
self.assertEqual(False, validBraces("[(])"))
self.assertEqual(True, validBraces("([{}])"))
if __name__ == '__main__':
unittest.main()
|
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
import unittest
import nose
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Panel,
isnull, notnull,date_range)
from pandas.core.index import Index, MultiIndex
from pandas.tseries.index import Timestamp, DatetimeIndex
import pandas.core.common as com
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_almost_equal,
ensure_clean)
import pandas.util.testing as tm
#------------------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape,int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name,None)
else:
arr = np.empty(shape)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
                new_shape = fshape // arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr,new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr,**kwargs)
def _compare(self, result, expected):
self._comparator(result,expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = { axis : list('ABCD') }
obj = self._construct(4,**kwargs)
# no values passed
#self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{ axis : str.lower })
expected = obj.copy()
setattr(expected,axis,list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = { }
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n,**kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n,value='empty',**kwargs)
self._compare(result,expected)
# get the bool data
arr = np.array([True,True,False,True])
o = self._construct(n,value=arr,**kwargs)
result = o._get_numeric_data()
self._compare(result, o)
        # _get_numeric_data includes _get_bool_data, so we can't test for non-inclusion
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda : bool(obj == 0))
self.assertRaises(ValueError, lambda : bool(obj == 1))
self.assertRaises(ValueError, lambda : bool(obj))
obj = self._construct(shape=4,value=1)
self.assertRaises(ValueError, lambda : bool(obj == 0))
self.assertRaises(ValueError, lambda : bool(obj == 1))
self.assertRaises(ValueError, lambda : bool(obj))
obj = self._construct(shape=4,value=np.nan)
self.assertRaises(ValueError, lambda : bool(obj == 0))
self.assertRaises(ValueError, lambda : bool(obj == 1))
self.assertRaises(ValueError, lambda : bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda : bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4,value=1)
obj2 = self._construct(shape=4,value=1)
def f():
if obj1:
print("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda : obj1 and obj2)
self.assertRaises(ValueError, lambda : obj1 or obj2)
self.assertRaises(ValueError, lambda : not obj1)
class TestSeries(unittest.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x,y)
def test_rename_mi(self):
s = Series([11,21,31],
index=MultiIndex.from_tuples([("A",x) for x in ["a","B","c"]]))
result = s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1,2,3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1,'2',3.])
result = o._get_numeric_data()
expected = Series([],dtype=object)
self._compare(result, expected)
o = Series([True,False,True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True,False,True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101',periods=3))
result = o._get_numeric_data()
expected = Series([],dtype='M8[ns]')
self._compare(result, expected)
def test_nonzero_single_element(self):
s = Series([True])
self.assertRaises(ValueError, lambda : bool(s))
s = Series([False])
self.assertRaises(ValueError, lambda : bool(s))
class TestDataFrame(unittest.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x,y)
def test_rename_mi(self):
df = DataFrame([11,21,31],
index=MultiIndex.from_tuples([("A",x) for x in ["a","B","c"]]))
result = df.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A' : [1,'2',3.] })
result = o._get_numeric_data()
expected = DataFrame(index=[0,1,2],dtype=object)
self._compare(result, expected)
class TestPanel(unittest.TestCase, Generic):
_typ = Panel
_comparator = lambda self, x, y: assert_panel_equal(x,y)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
pi = 3.14
r = 1.1
area = pi * (r ** 2)
print("The area of the circle is:", area)
|
from app.models.book import Book
from app.schemas.book import Book as SchemaBook
from fastapi_sqlalchemy import db
from app.utils.pagination import paginate
def add_book(book: SchemaBook):
model_book = Book(title=book.title,
description=book.description,
author_id=book.author_id
)
db.session.add(model_book)
db.session.commit()
return model_book
def get_books(page: int, size: int):
books = db.session.query(Book).all()
paginated_books = paginate(books, page, size)
return paginated_books
def get_book(book_id: int):
book = db.session.get(Book, {"id": book_id})
return book
def update_book(book_id: int, book: SchemaBook):
    book_db = db.session.get(Book, {"id": book_id})
    if book_db:
        book_db.title = book.title
        book_db.description = book.description
        book_db.author_id = book.author_id
        db.session.commit()
    return book_db
def delete_book(book_id: int):
book_db = db.session.get(Book, {"id": book_id})
if book_db:
db.session.delete(book_db)
db.session.commit()
return book_db
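# Illustrative wiring (hypothetical router module, not part of this file): these helpers are
# typically called from FastAPI path operations, e.g.
#   from fastapi import APIRouter
#   router = APIRouter()
#
#   @router.post("/books")
#   def create_book(book: SchemaBook):
#       return add_book(book)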
|
from setuptools import setup, find_packages
setup(
name='various_utilities',
version='0.1',
license='MIT', # TODO: add license
author="Cristian Desivo",
author_email='cdesivo92@gmail.com',
packages=find_packages('src'),
package_dir={'': 'src'},
url='', # TODO: add url
keywords='utilities',
install_requires=[
],
)
|
"""Tests."""
from typing import List
import pytest
from bs4 import element
ROOTS = ("buttons-on-top/default", "buttons-on-top/sphinx-conf")
@pytest.mark.parametrize("testroot", [pytest.param(r, marks=pytest.mark.sphinx("html", testroot=r)) for r in ROOTS])
def test(carousels: List[element.Tag], testroot: str):
"""Test."""
indicators = carousels[0].find_all("div", ["scbs-carousel-indicators"])[0]
control_prev = carousels[0].find_all("button", ["scbs-carousel-control-prev"])[0]
control_next = carousels[0].find_all("button", ["scbs-carousel-control-next"])[0]
assert indicators["class"] == ["scbs-carousel-indicators", "scbs-my-4", "scc-top-indicator"]
assert control_prev["class"] == ["scbs-carousel-control-prev", "scbs-my-4", "scc-top-control"]
assert control_next["class"] == ["scbs-carousel-control-next", "scbs-my-4", "scc-top-control"]
indicators = carousels[1].find_all("div", ["scbs-carousel-indicators"])[0]
control_prev = carousels[1].find_all("button", ["scbs-carousel-control-prev"])[0]
control_next = carousels[1].find_all("button", ["scbs-carousel-control-next"])[0]
assert indicators["class"] == ["scbs-carousel-indicators"]
assert control_prev["class"] == ["scbs-carousel-control-prev"]
assert control_next["class"] == ["scbs-carousel-control-next"]
indicators = carousels[2].find_all("div", ["scbs-carousel-indicators"])[0]
control_prev = carousels[2].find_all("button", ["scbs-carousel-control-prev"])[0]
control_next = carousels[2].find_all("button", ["scbs-carousel-control-next"])[0]
if testroot.endswith("conf"):
assert indicators["class"] == ["scbs-carousel-indicators", "scbs-my-4", "scc-top-indicator"]
assert control_prev["class"] == ["scbs-carousel-control-prev", "scbs-my-4", "scc-top-control"]
assert control_next["class"] == ["scbs-carousel-control-next", "scbs-my-4", "scc-top-control"]
else:
assert indicators["class"] == ["scbs-carousel-indicators"]
assert control_prev["class"] == ["scbs-carousel-control-prev"]
assert control_next["class"] == ["scbs-carousel-control-next"]
indicators = carousels[3].find_all("div", ["scbs-carousel-indicators"])
control_prev = carousels[3].find_all("button", ["scbs-carousel-control-prev"])
control_next = carousels[3].find_all("button", ["scbs-carousel-control-next"])
assert not indicators
assert not control_prev
assert not control_next
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for owners_finder.py."""
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support import filesystem_mock
import owners_finder
import owners
ben = 'ben@example.com'
brett = 'brett@example.com'
darin = 'darin@example.com'
jochen = 'jochen@example.com'
john = 'john@example.com'
ken = 'ken@example.com'
peter = 'peter@example.com'
tom = 'tom@example.com'
nonowner = 'nonowner@example.com'
def owners_file(*email_addresses, **kwargs):
s = ''
if kwargs.get('comment'):
s += '# %s\n' % kwargs.get('comment')
if kwargs.get('noparent'):
s += 'set noparent\n'
return s + '\n'.join(email_addresses) + '\n'
def test_repo():
return filesystem_mock.MockFileSystem(files={
'/DEPS': '',
'/OWNERS': owners_file(ken, peter, tom,
comment='OWNERS_STATUS = build/OWNERS.status'),
'/build/OWNERS.status': '%s: bar' % jochen,
'/base/vlog.h': '',
'/chrome/OWNERS': owners_file(ben, brett),
'/chrome/browser/OWNERS': owners_file(brett),
'/chrome/browser/defaults.h': '',
'/chrome/gpu/OWNERS': owners_file(ken),
'/chrome/gpu/gpu_channel.h': '',
'/chrome/renderer/OWNERS': owners_file(peter),
'/chrome/renderer/gpu/gpu_channel_host.h': '',
'/chrome/renderer/safe_browsing/scorer.h': '',
'/content/OWNERS': owners_file(john, darin, comment='foo', noparent=True),
'/content/content.gyp': '',
'/content/bar/foo.cc': '',
'/content/baz/OWNERS': owners_file(brett),
'/content/baz/froboz.h': '',
'/content/baz/ugly.cc': '',
'/content/baz/ugly.h': '',
'/content/common/OWNERS': owners_file(jochen),
'/content/common/common.cc': '',
'/content/foo/OWNERS': owners_file(jochen, comment='foo'),
'/content/foo/foo.cc': '',
'/content/views/OWNERS': owners_file(ben, john, owners.EVERYONE,
noparent=True),
'/content/views/pie.h': '',
})
class OutputInterceptedOwnersFinder(owners_finder.OwnersFinder):
def __init__(self, files, local_root, author, reviewers,
fopen, os_path, disable_color=False):
super(OutputInterceptedOwnersFinder, self).__init__(
files, local_root, author, reviewers, fopen, os_path,
disable_color=disable_color)
self.output = []
self.indentation_stack = []
def resetText(self):
self.output = []
self.indentation_stack = []
def indent(self):
self.indentation_stack.append(self.output)
self.output = []
def unindent(self):
block = self.output
self.output = self.indentation_stack.pop()
self.output.append(block)
def writeln(self, text=''):
self.output.append(text)
class _BaseTestCase(unittest.TestCase):
default_files = [
'base/vlog.h',
'chrome/browser/defaults.h',
'chrome/gpu/gpu_channel.h',
'chrome/renderer/gpu/gpu_channel_host.h',
'chrome/renderer/safe_browsing/scorer.h',
'content/content.gyp',
'content/bar/foo.cc',
'content/baz/ugly.cc',
'content/baz/ugly.h',
'content/views/pie.h'
]
def setUp(self):
self.repo = test_repo()
self.root = '/'
self.fopen = self.repo.open_for_reading
def ownersFinder(self, files, author=nonowner, reviewers=None):
reviewers = reviewers or []
finder = OutputInterceptedOwnersFinder(files,
self.root,
author,
reviewers,
fopen=self.fopen,
os_path=self.repo,
disable_color=True)
return finder
def defaultFinder(self):
return self.ownersFinder(self.default_files)
class OwnersFinderTests(_BaseTestCase):
def test_constructor(self):
    self.assertNotEqual(self.defaultFinder(), None)
def test_skip_files_owned_by_reviewers(self):
files = [
'chrome/browser/defaults.h', # owned by brett
'content/bar/foo.cc', # not owned by brett
]
finder = self.ownersFinder(files, reviewers=[brett])
self.assertEqual(finder.unreviewed_files, {'content/bar/foo.cc'})
def test_skip_files_owned_by_author(self):
files = [
'chrome/browser/defaults.h', # owned by brett
'content/bar/foo.cc', # not owned by brett
]
finder = self.ownersFinder(files, author=brett)
self.assertEqual(finder.unreviewed_files, {'content/bar/foo.cc'})
def test_reset(self):
finder = self.defaultFinder()
i = 0
while i < 2:
i += 1
self.assertEqual(finder.owners_queue,
[brett, john, darin, peter, ken, ben, tom])
self.assertEqual(finder.unreviewed_files, {
'base/vlog.h',
'chrome/browser/defaults.h',
'chrome/gpu/gpu_channel.h',
'chrome/renderer/gpu/gpu_channel_host.h',
'chrome/renderer/safe_browsing/scorer.h',
'content/content.gyp',
'content/bar/foo.cc',
'content/baz/ugly.cc',
'content/baz/ugly.h'
})
self.assertEqual(finder.selected_owners, set())
self.assertEqual(finder.deselected_owners, set())
self.assertEqual(finder.reviewed_by, {})
self.assertEqual(finder.output, [])
finder.select_owner(john)
finder.reset()
finder.resetText()
def test_select(self):
finder = self.defaultFinder()
finder.select_owner(john)
self.assertEqual(finder.owners_queue, [brett, peter, ken, ben, tom])
self.assertEqual(finder.selected_owners, {john})
self.assertEqual(finder.deselected_owners, {darin})
self.assertEqual(finder.reviewed_by, {'content/bar/foo.cc': john,
'content/baz/ugly.cc': john,
'content/baz/ugly.h': john,
'content/content.gyp': john})
self.assertEqual(finder.output,
['Selected: ' + john, 'Deselected: ' + darin])
finder = self.defaultFinder()
finder.select_owner(darin)
self.assertEqual(finder.owners_queue, [brett, peter, ken, ben, tom])
self.assertEqual(finder.selected_owners, {darin})
self.assertEqual(finder.deselected_owners, {john})
self.assertEqual(finder.reviewed_by, {'content/bar/foo.cc': darin,
'content/baz/ugly.cc': darin,
'content/baz/ugly.h': darin,
'content/content.gyp': darin})
self.assertEqual(finder.output,
['Selected: ' + darin, 'Deselected: ' + john])
finder = self.defaultFinder()
finder.select_owner(brett)
self.assertEqual(finder.owners_queue, [john, darin, peter, ken, tom])
self.assertEqual(finder.selected_owners, {brett})
self.assertEqual(finder.deselected_owners, {ben})
self.assertEqual(finder.reviewed_by,
{'chrome/browser/defaults.h': brett,
'chrome/gpu/gpu_channel.h': brett,
'chrome/renderer/gpu/gpu_channel_host.h': brett,
'chrome/renderer/safe_browsing/scorer.h': brett,
'content/baz/ugly.cc': brett,
'content/baz/ugly.h': brett})
self.assertEqual(finder.output,
['Selected: ' + brett, 'Deselected: ' + ben])
def test_deselect(self):
finder = self.defaultFinder()
finder.deselect_owner(john)
self.assertEqual(finder.owners_queue, [brett, peter, ken, ben, tom])
self.assertEqual(finder.selected_owners, {darin})
self.assertEqual(finder.deselected_owners, {john})
self.assertEqual(finder.reviewed_by, {'content/bar/foo.cc': darin,
'content/baz/ugly.cc': darin,
'content/baz/ugly.h': darin,
'content/content.gyp': darin})
self.assertEqual(finder.output,
['Deselected: ' + john, 'Selected: ' + darin])
def test_print_file_info(self):
finder = self.defaultFinder()
finder.print_file_info('chrome/browser/defaults.h')
self.assertEqual(finder.output, ['chrome/browser/defaults.h [5]'])
finder.resetText()
finder.print_file_info('chrome/renderer/gpu/gpu_channel_host.h')
self.assertEqual(finder.output,
['chrome/renderer/gpu/gpu_channel_host.h [5]'])
def test_print_file_info_detailed(self):
finder = self.defaultFinder()
finder.print_file_info_detailed('chrome/browser/defaults.h')
self.assertEqual(finder.output,
['chrome/browser/defaults.h',
[ben, brett, ken, peter, tom]])
finder.resetText()
finder.print_file_info_detailed('chrome/renderer/gpu/gpu_channel_host.h')
self.assertEqual(finder.output,
['chrome/renderer/gpu/gpu_channel_host.h',
[ben, brett, ken, peter, tom]])
def test_print_comments(self):
finder = self.defaultFinder()
finder.print_comments(darin)
self.assertEqual(finder.output,
[darin + ' is commented as:', ['foo (at content)']])
def test_print_global_comments(self):
finder = self.ownersFinder(['content/common/common.cc'])
finder.print_comments(jochen)
self.assertEqual(finder.output,
[jochen + ' is commented as:', ['bar (global status)']])
finder = self.ownersFinder(['content/foo/foo.cc'])
finder.print_comments(jochen)
self.assertEqual(finder.output,
[jochen + ' is commented as:', ['bar (global status)',
'foo (at content/foo)']])
if __name__ == '__main__':
unittest.main()
|
text_file = "test.txt"
def read_file(text_file):
try:
with open(text_file, "r") as handle:
data = handle.read()
return data
except FileNotFoundError:
return None
def single_word(text_file):
    with open(text_file, "r") as handle:
        data = handle.read()
    counter = 0
    for word in data.split():
        if word == 'Python':
            counter += 1
    return counter
def line_number(text_file):
with open(text_file, "r") as handle:
data = handle.readlines()
return data
def longest_number(text_file):
    with open(text_file, "r") as handle:
        data = handle.read()
    results = []
    for word in data.split():
        # collect the 16-character words (the original length criterion)
        if len(word) == 16:
            results.append(word)
    return results
|
import random
import os
import math
import numpy as np
from osgeo import gdal
import scipy
from scipy import stats
from scipy import ndimage as ndi
from pyproj import Proj, CRS
# %% DEM LOADING AND CLIPPING FUNCTIONS
def convert_wgs_to_utm(lon: float, lat: float):
"""Based on lat and lng, return best utm epsg-code
https://stackoverflow.com/a/40140326/4556479"""
utm_band = str((math.floor((lon + 180) / 6 ) % 60) + 1)
if len(utm_band) == 1:
utm_band = '0'+utm_band
if lat >= 0:
epsg_code = '326' + utm_band
return epsg_code
epsg_code = '327' + utm_band
return epsg_code
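# Worked example: lon=-70, lat=-24 falls in UTM zone 19 south, so the function returns '32719'.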
def getDEMtiles(dem, tile_size_km):
"""
Loads a full DEM and produces a list of tile coordinates for generating
square tiles of a desired size (in km).
Parameters
----------
dem : string
Name, including relative file path to DEM file.
tile_size_km : int
Size of tiles to produce in kilometers.
Returns
-------
xys : list
List of tuples with (column, row) coordinates of upper left pixels.
psx : float
Approximate pixel size in longitude in meters.
psy : float
Approximate pixel size in latitude in meters.
step_meters : float
Approximate pixel size in meters.
tile_size_px : int
Size of tiles converted from kilometers to pixels based on pixel size.
"""
ds = gdal.Open(dem)
band = ds.GetRasterBand(1)
gt = ds.GetGeoTransform()
nan = band.GetNoDataValue()
# read as array and set NaN
el = band.ReadAsArray().astype(float)
el[el == nan] = np.nan
print('getting {}-km tiles from {}\nwith original shape {}'\
.format(tile_size_km, os.path.split(dem)[1], el.shape))
# get pixel size
cols = el.shape[1]
rows = el.shape[0]
minx, maxy = gt[0], gt[3]
maxx, miny = gt[0] + gt[1] * cols, gt[3] + gt[5] * rows
# read crs
crs = CRS.from_wkt(ds.GetProjection()).to_epsg()
# get step in m if geographic projection
if crs == 4326:
epsg_code = convert_wgs_to_utm(minx, miny)
pp = Proj('EPSG:{}'.format(epsg_code))
proj = CRS.from_epsg(epsg_code).to_wkt()
minx, miny = pp(minx, miny)
maxx, maxy = pp(maxx, maxy)
psx = (maxx - minx) / cols
psy = (maxy - miny) / rows
step_meters = np.round((psx + psy) / 2, 0)
# close dataset
ds = None
# get potential tiles (large then small)
tile_size_px = int(np.round(tile_size_km * 1000 / step_meters, 0))
xys = []
for xx in range(0, cols-tile_size_px, tile_size_px):
for yy in range(0, rows-tile_size_px, tile_size_px):
xys.append((xx, yy))
random.shuffle(xys)
print('made {} tiles'.format(len(xys)))
return xys, psx, psy, step_meters, tile_size_px
def loadDEMclip(dem, xx, yy, tile_size_px):
"""
Takes a DEM file and row and column coordinates and clips a square tile from
the DEM, returning the clip as a numpy array in memory. Nothing is written to
disk.
Parameters
----------
dem : string
Name, including relative file path to DEM file.
xx : int
Column coordinate of upper left pixel.
yy : int
Row coordinate of upper left pixel.
tile_size_px : int
Size of square tile in pixels.
Returns
-------
el : numpy array
Array clipped from DEM file.
"""
kwargs = {'format' : 'VRT',
'srcWin' : [xx, yy, tile_size_px, tile_size_px]}
ds = gdal.Translate('', dem, **kwargs)
band = ds.GetRasterBand(1)
gt = ds.GetGeoTransform()
nan = band.GetNoDataValue()
# print(nan)
# read as array and set NaN
el = band.ReadAsArray().astype(float)
el[el == nan] = np.nan
# close dataset
ds = None
return el
# %% SLOPE, ASPECT, HILLSHADE, and HPHS FUNCTIONS
def np_slope(z, d):
"""
https://github.com/UP-RS-ESP/TopoMetricUncertainty/blob/master/uncertainty.py
Provides slope in degrees.
"""
dy, dx = np.gradient(z, d)
slope = np.arctan(np.sqrt(dx*dx+dy*dy))*180/np.pi
return slope
def np_aspect(z, d):
"""
Outputs terrain aspect in degrees with North = 0; East = 90; South = 180; West = 270
See:
https://github.com/UP-RS-ESP/TopoMetricUncertainty/blob/master/uncertainty.py
and
https://github.com/USDA-ARS-NWRC/topocalc/blob/main/topocalc/gradient.py
and
https://github.com/LSDtopotools/LSD_Resolution/blob/a3ff6af7dc3fc865c838ce6eb968866431b80352/LSDRaster.cpp
"""
dy, dx = np.gradient(z, d)
a = 180 * np.arctan2(dy, -dx) / np.pi
aspect = 90 - a
aspect[a < 0] = 90 - a[a < 0]
aspect[a > 90] = 360 - a[a > 90] + 90
idx = (dy==0) & (dx==0)
aspect[idx] = 180
return aspect
def hillshade(array, spacing, azimuth=315, angle_altitude=20):
"""
This function is used to generate a hillshade of the topography. It produces
identical outputs to 'gdaldem hillshade -alg ZevenbergenThorne' (<--this was tested)
From here: https://github.com/LSDtopotools/LSDTopoTools_CRNBasinwide/blob/master/LSDRaster.cpp
"""
slope = np_slope(array, spacing)*np.pi/180
aspect = np_aspect(array, spacing)*np.pi/180
# This bit isn't necessary with above np_aspect output (0 North; 90 East)
# azimuth_math = 360 - azimuth + 90
# if azimuth_math >= 360.0:
# azimuth_math = azimuth_math - 360
azimuth_math = azimuth
azimuthrad = azimuth_math * np.pi /180.0
zenith_rad = (90 - angle_altitude) * np.pi / 180.0
shaded = (np.cos(zenith_rad) * np.cos(slope)) + (np.sin(zenith_rad) * np.sin(slope) * np.cos((azimuthrad) - aspect))
shaded = 255*(shaded + 1)/2
return shaded.astype(int)
def HPHS(el, step, kernel, azimuths, angles):
"""
Calculate HPHS metric
Parameters
----------
el : numpy array
Elevation values in array.
step : float
Average pixel spacing in meters.
kernel : numpy array
High pass filtering kernel.
azimuths : list
Sun azimuths.
angles : list
Sun elevation angles.
Returns
-------
hphs : numpy array
High-pass hillshade metric.
hs : numpy array
Hillshade image.
"""
highpasses = np.zeros((el.shape[0], el.shape[1], len(azimuths), len(angles)))
for ang_num, ang in enumerate(angles):
for az_num, az in enumerate(azimuths):
# HS
hs = hillshade(el, step, azimuth=az, angle_altitude=ang)
# edge filter
hp = abs(ndi.convolve(hs, kernel))
highpasses[:,:,az_num,ang_num] = hp[:]
# take maximum value from rotated stack
hphs = np.nanmax(highpasses, axis=2)[:,:, 0].astype(int)
return hphs, hs
def np_slope_diff_spacing(z, xspace, yspace):
"""
https://github.com/UP-RS-ESP/TopoMetricUncertainty/blob/master/uncertainty.py
Provides slope in degrees.
"""
dy, dx = np.gradient(z, xspace, yspace)
return np.arctan(np.sqrt(dx*dx+dy*dy))*180/np.pi
def np_aspect_diff_spacing(z, xspace, yspace):
"""
Outputs terrain aspect in degrees with North = 0; East = 90; South = 180; West = 270
See:
https://github.com/UP-RS-ESP/TopoMetricUncertainty/blob/master/uncertainty.py
and
https://github.com/USDA-ARS-NWRC/topocalc/blob/main/topocalc/gradient.py
and
https://github.com/LSDtopotools/LSD_Resolution/blob/a3ff6af7dc3fc865c838ce6eb968866431b80352/LSDRaster.cpp
"""
dy, dx = np.gradient(z, xspace, yspace)
a = 180 * np.arctan2(dy, -dx) / np.pi
aspect = 90 - a
aspect[a < 0] = 90 - a[a < 0]
aspect[a > 90] = 360 - a[a > 90] + 90
idx = (dy==0) & (dx==0)
aspect[idx] = 180
return aspect
def hillshade_diff_spacing(array, xspace, yspace, azimuth=315, angle_altitude=20):
"""
This function is used to generate a hillshade of the topography. It produces
identical outputs to 'gdaldem hillshade -alg ZevenbergenThorne' (<--this was tested)
From here: https://github.com/LSDtopotools/LSDTopoTools_CRNBasinwide/blob/master/LSDRaster.cpp
"""
slope = np_slope_diff_spacing(array, xspace, yspace)*np.pi/180
aspect = np_aspect_diff_spacing(array, xspace, yspace)*np.pi/180
# This bit isn't necessary with above np_aspect output (0 North; 90 East)
# azimuth_math = 360 - azimuth + 90
# if azimuth_math >= 360.0:
# azimuth_math = azimuth_math - 360
azimuth_math = azimuth
azimuthrad = azimuth_math * np.pi /180.0
zenith_rad = (90 - angle_altitude) * np.pi / 180.0
shaded = (np.cos(zenith_rad) * np.cos(slope)) + (np.sin(zenith_rad) * np.sin(slope) * np.cos((azimuthrad) - aspect))
shaded = 255*(shaded + 1)/2
return shaded.astype(int)
def HPHS_diff_spacing(el, xspace, yspace, kernel, azimuths, angles):
"""
Calculate HPHS metric
Parameters
----------
el : numpy array
Elevation values in array.
xspace : float
Longitudinal pixel spacing in meters.
yspace : float
Latitudinal pixel spacing in meters.
kernel : numpy array
High pass filtering kernel.
azimuths : list
Sun azimuths.
angles : list
Sun elevation angles.
Returns
-------
hphs : numpy array
High-pass hillshade metric.
hs : numpy array
Hillshade image.
"""
highpasses = np.zeros((el.shape[0], el.shape[1], len(azimuths), len(angles)))
for ang_num, ang in enumerate(angles):
for az_num, az in enumerate(azimuths):
# HS
hs = hillshade_diff_spacing(el, xspace, yspace, azimuth=az, angle_altitude=ang)
# edge filter
hp = abs(ndi.convolve(hs, kernel))
highpasses[:,:,az_num,ang_num] = hp[:]
# take maximum value from rotated stack
hphs = np.nanmax(highpasses, axis=2)[:,:, 0].astype(int)
# This normalization should not be done for inter DEM comparisons
# hphs = hphs / hphs.max()
return hphs, hs
# %% POLYNOMIAL FITTING FUNCTIONS
def plane_fit_RMSE(points):
"""
Simple function returns RMSE of fit plane to window. For higher order fitting
functions see curvFit_lstsq_polynom function below
Parameters
----------
points : array
(X, Y, Z) array of coordinates. Each row is an (X, Y, Z) triple.
Returns
-------
p1_rmse : float
Root mean square error of plane fit.
"""
ctr = points.mean(axis=0)
points = points - ctr
A = np.c_[points[:,0], points[:,1], np.ones(points.shape[0])]
p1_C,_,_,_ = scipy.linalg.lstsq(A, points[:,2], lapack_driver='gelsy') # coefficients
Z_pts = p1_C[0]*points[:,0] + p1_C[1]*points[:,1] + p1_C[2]
p1_dz = points[:,2] - Z_pts
mse = (np.square(p1_dz)).mean(axis=0)
p1_rmse = np.sqrt(mse)
return p1_rmse
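# Sanity check (illustrative): a window of points sampled exactly from a plane, e.g. z = 2x + 3y + 1,
# gives a plane_fit_RMSE of ~0, while rougher surfaces give larger values.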
def curvFit_lstsq_polynom(points, order=2):
"""
Surface fitting to 3D pointcloud. Order=1 assumes a linear plane and uses
a least squared approach. Higher order surfaces uses quadratic curve
fitting approaches: Fitting a second order polynom to a point cloud and
deriving the curvature in a simplified form. We follow:
Evans, I. S. (1980), An integrated system of terrain analysis and slope
mapping, Z. Geomorphol., 36, 274–295.
More details: https://gis.stackexchange.com/questions/37066/how-to-calculate-terrain-curvature
Original functions by B. Bookhagen: https://github.com/UP-RS-ESP/PC_geomorph_roughness/blob/master/pc_geomorph_roughness.py#L97
Parameters
----------
points : array
(X, Y, Z) array of coordinates. Each row is an (X, Y, Z) triple.
order : int, optional
DESCRIPTION. The default is 2.
Returns
-------
Coefficients (_C), residuals (_dz), root mean square errors (_rmse),
local slope (_slope), local aspect (_aspect), total curvature (_Curvature),
contour curvature (_curv_contour), tangential curvature (_curv_tan), and
profile curvature (_curv_profc), for each of the selected polynomial fitting
orders (p1, p2, and/or p4).
"""
#commented this out, because it will slow down processing.
#points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trialing dimensions
#try:
# assert points.shape[0] <= points.shape[1], "There are only {} points in {} dimensions.".format(points.shape[1], points.shape[0])
#except AssertionError:
# return np.nan, np.nan, np.nan
#points = points.T
#nr_of_points = len(points)
ctr = points.mean(axis=0)
points = points - ctr
# if points.shape[0] > 8:
if order==1:
#1st order
A = np.c_[points[:,0], points[:,1], np.ones(points.shape[0])]
p1_C,_,_,_ = scipy.linalg.lstsq(A, points[:,2], lapack_driver='gelsy') # coefficients
Z_pts = p1_C[0]*points[:,0] + p1_C[1]*points[:,1] + p1_C[2]
p1_dz = points[:,2] - Z_pts
mse = (np.square(p1_dz)).mean(axis=0)
p1_rmse = np.sqrt(mse)
p1_slope = np.sqrt( p1_C[0]**2 + p1_C[1]**2 )
#Calculate unit normal vector of fitted plane:
#N = C / np.sqrt(sum([x*x for x in p1_C]))
p1_aspect = np.rad2deg(np.arctan2(p1_C[1], -p1_C[0]))
if p1_aspect < 0:
p1_aspect = 90.0 - p1_aspect
elif p1_aspect > 90.0:
p1_aspect = 360.0 - p1_aspect + 90.0
else:
p1_aspect = 90.0 - p1_aspect
# return p1_C, p1_dz, p1_rmse,p1_slope, p1_aspect
else:
p1_C, p1_dz, p1_rmse,p1_slope, p1_aspect = np.nan, np.nan, np.nan, np.nan, np.nan,
# if points.shape[0] > 8:
if order==2:
#2nd order
# best-fit quadratic curve
#Z = Dx² + Ey² + Fxy + Gx + Hy + I
#z = r*x**2 + t * y**2 + s*x*y + p*x + q*y + u
A = np.c_[points[:,0]**2., \
points[:,1]**2., \
points[:,0]*points[:,1], \
points[:,0], points[:,1], np.ones(points.shape[0])]
p2_C,_,_,_ = scipy.linalg.lstsq(A, points[:,2], lapack_driver='gelsy') # coefficients
Z_pts = p2_C[0]*points[:,0]**2. + p2_C[1]*points[:,1]**2. + p2_C[2]*points[:,0]*points[:,1] + p2_C[3]*points[:,0] + p2_C[4]*points[:,1] + p2_C[5]
p2_dz = points[:,2] - Z_pts
mse = (np.square(p2_dz)).mean(axis=0)
p2_rmse = np.sqrt(mse)
#dZ_residuals = np.linalg.norm(errors)
fxx=p2_C[0]
fyy=p2_C[1]
fxy=p2_C[2]
fx=p2_C[3]
fy=p2_C[4]
#mean curvature (arithmetic average)
c_m = - ( (1 + (fy**2))*fxx - 2*fxy*fx*fy+ (1 + (fx**2))*fyy ) / (2*( (fx**2) + (fy**2) + 1)**(3/2) )
#tangential (normal to gradient) curvature
c_t = - ( ( fxx*(fy**2) - 2*fxy * fx * fy + fyy * (fx**2) ) / ( ( (fx**2) + (fy**2) ) * ((fx**2) + (fy**2) + 1)**(1/2) ) )
#difference (range of profile and tangential)
c_d = c_m - c_t
#profile (vertical or gradient direction) curvature
c_p = c_m + c_d
#contour (horizontal or contour direction)
        c_c = - ( ( fxx * (fy**2) - 2 * fxy * fx * fy + fyy * (fx**2) ) / ( ( (fx**2) + (fy**2) )**(3/2) ) )
#Curvature = 2*fxx + 2*fyy
p2_Curvature = c_m
#curv_contour = Curvature
p2_curv_contour = c_c
p2_curv_tan = c_t
p2_curv_profc = c_p
p2_slope = np.sqrt( fx**2 + fy**2 )
#N = p2_C[3::] / np.sqrt(sum([x*x for x in p2_C[3::]]))
#azimuth = np.degrees(np.arctan2(N[1], N[0])) + 180
p2_aspect = np.rad2deg(np.arctan2(fy, -fx))
if p2_aspect < 0:
p2_aspect = 90.0 - p2_aspect
elif p2_aspect > 90.0:
p2_aspect = 360.0 - p2_aspect + 90.0
else:
p2_aspect = 90.0 - p2_aspect
# return p2_C, p2_dz, p2_rmse, p2_slope, p2_aspect, p2_Curvature, p2_curv_contour, p2_curv_tan, p2_curv_profc
else:
p2_C, p2_dz, p2_rmse, p2_slope, p2_aspect, p2_Curvature, p2_curv_contour, p2_curv_tan, p2_curv_profc = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
# if points.shape[0] > 8:
if order==4:
#4th order
# best-fit fourth-order polynomial
#Z = Ax²y² + Bx²y + Cxy² + Dx² + Ey² + Fxy + Gx + Hy + I
#A = [(Z1 + Z3 + Z7 + Z9) / 4 - (Z2 + Z4 + Z6 + Z8) / 2 + Z5] / L4
#B = [(Z1 + Z3 - Z7 - Z9) /4 - (Z2 - Z8) /2] / L3
#C = [(-Z1 + Z3 - Z7 + Z9) /4 + (Z4 - Z6)] /2] / L3
#D = [(Z4 + Z6) /2 - Z5] / L2
#E = [(Z2 + Z8) /2 - Z5] / L2
#F = (-Z1 + Z3 + Z7 - Z9) / 4L2
#G = (-Z4 + Z6) / 2L
#H = (Z2 - Z8) / 2L
#I = Z5
A = np.c_[points[:,0]**2. * points[:,1]**2., \
points[:,0]**2. * points[:,1], \
points[:,0] * points[:,1]**2., \
points[:,0]**2., \
points[:,1]**2., \
points[:,0]*points[:,1], \
points[:,0], points[:,1], \
np.ones(points.shape[0]) ]
p4_C,_,_,_ = scipy.linalg.lstsq(A, points[:,2], lapack_driver='gelsy') # coefficients
Z_pts = p4_C[0]*(points[:,0]**2.) * (points[:,1]**2.) \
+ p4_C[1]*(points[:,0]**2.) * points[:,1] \
+ p4_C[2]*points[:,0] * (points[:,1]**2.) \
+ p4_C[3]*(points[:,0]**2.) + p4_C[4]*points[:,1]**2. \
+ p4_C[5]*points[:,0] * points[:,1] \
+ p4_C[6]*points[:,0] + p4_C[7]*points[:,1] + p4_C[8]
p4_dz = points[:,2] - Z_pts
mse = (np.square(p4_dz)).mean(axis=0)
p4_rmse = np.sqrt(mse)
#dZ_residuals = np.linalg.norm(errors)
fx=p4_C[6]
fy=p4_C[7]
fxx=p4_C[3]
fxy=p4_C[5]
fyy=p4_C[4]
#mean curvature (arithmetic average)
c_m = - ( (1 + (fy**2))*fxx - 2*fxy*fx*fy+ (1 + (fx**2))*fyy ) / (2*( (fx**2) + (fy**2) + 1)**(3/2) )
#tangential (normal to gradient) curvature
c_t = - ( ( fxx*(fy**2) - 2*fxy * fx * fy + fyy * (fx**2) ) / ( ( (fx**2) + (fy**2) ) * ((fx**2) + (fy**2) + 1)**(1/2) ) )
#difference (range of profile and tangential)
c_d = c_m - c_t
#profile (vertical or gradient direction) curvature
c_p = c_m + c_d
#contour (horizontal or contour direction)
        c_c = - ( ( fxx * (fy**2) - 2 * fxy * fx * fy + fyy * (fx**2) ) / ( np.sqrt( ( (fx**2) + (fy**2) )**(2) ) ) )
# p = fx
# q = fy
# r = fxx
# s = fxy
# t = fyy
# curv_k_h = - ( ( (q**2) * r - 2*p*q*s + (p**2) * t) / ( ((p**2) + (q**2)) * np.sqrt(1 + (p**2) + (q**2)) ) )
# curv_k_v = - ( ( (p**2) * r + 2*p*q*s + (q**2) * t) / ( ((p**2) + (q**2)) * np.sqrt( (1 + (p**2) + (q**2))**3 ) ) )
#Curvature = 2*fxx + 2*fyy
p4_Curvature = c_m
#curv_contour = Curvature
p4_curv_contour = c_c
p4_curv_tan = c_t
p4_curv_profc = c_p
p4_slope = np.sqrt( fx**2 + fy**2 )
#N = p4_C[6::] / np.sqrt(sum([x*x for x in p4_C[6::]]))
p4_aspect = np.rad2deg(np.arctan2(fy, -fx))
if p4_aspect < 0:
p4_aspect = 90.0 - p4_aspect
elif p4_aspect > 90.0:
p4_aspect = 360.0 - p4_aspect + 90.0
else:
p4_aspect = 90.0 - p4_aspect
# return p4_C, p4_dz, p4_rmse, p4_slope, p4_aspect, p4_Curvature, p4_curv_contour, p4_curv_tan, p4_curv_profc
else:
p4_C, p4_dz, p4_rmse, p4_slope, p4_aspect, p4_Curvature, p4_curv_contour, p4_curv_tan, p4_curv_profc = np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
return p1_C, p1_dz, p1_rmse,p1_slope, p1_aspect, \
p2_C, p2_dz, p2_rmse, p2_slope, p2_aspect, p2_Curvature, p2_curv_contour, p2_curv_tan, p2_curv_profc, \
p4_C, p4_dz, p4_rmse, p4_slope, p4_aspect, p4_Curvature, p4_curv_contour, p4_curv_tan, p4_curv_profc
# %% DFT FUNCTIONS
def doDFT(arr, step):
"""
This is the main DFT function, it was originally inspired from and translated
from the Matlab version by Taylor Perron here: http://web.mit.edu/perron/www/downloads.html
References:
Perron, J. T., Kirchner, J. W., and Dietrich, W. E. (2008). Spectral signatures
of characteristic spatial scales and nonfractal structure in landscapes.
Journal of Geophysical Research 113. doi:10.1029/2007JF000866
Purinton, B. and Bookhagen, B. (2017). Validation of digital elevation models
(dems) and comparison of geomorphic metrics on the southern central andean plateau.
Earth Surface Dynamics 5, 211–237. doi:10.5194/esurf-5-211-2017
Parameters
----------
arr : numpy array
Input array, could be HPHS grid, elevation grid, or anything else.
step : float
Pixel size of input.
Returns
-------
f1d : numpy array
Radial frequency in 1D.
f2d : numpy array
Radial frequency in 2D.
p1d : numpy array
Power in 1D.
p2d : numpy array
Power in 2D.
F_ang1d : numpy array
Orientation in 1D.
F_ang2d : numpy array
Orientation in 2D.
"""
ny, nx = arr.shape
# fit plane and remove trend
x, y = np.meshgrid(range(nx), range(ny))
A = np.vstack([x.ravel(), y.ravel(), np.ones(len(x.ravel()))]).T
fit = np.linalg.lstsq(A, arr.ravel(), rcond=None)[0]
arr_fft = arr - (fit[0]*x + fit[1]*y + fit[2])
# apply hanning windowing to reduce spectral leakage on edges
hann_y = np.hanning(ny)
hann_x = np.hanning(nx)
hann_2d = np.sqrt(np.outer(hann_y, hann_x))
hann_weight = np.sum(hann_2d ** 2)
arr_fft = arr_fft * hann_2d
    # This next step is done to optimize the Cooley and Tukey (1965)
    # Discrete Fourier Transform (DFT) method used by numpy, which operates
    # most efficiently when the length scales are powers of 2 and the grid
    # is square
Lx = int(2**(np.ceil(np.log(np.max((nx, ny)))/np.log(2))))
Ly = Lx
# Lx, Ly = nx, ny
# frequency increments
# dfx = 1/(step*Lx)
# dfy = 1/(step*Ly)
# run the fft
fft = np.fft.fftn(arr_fft, (Ly, Lx))
# shift zero frequency to center
fft_shift = np.fft.fftshift(fft)
# # index of zero frequency (DC component)
xc, yc = (Lx//2, Ly//2)
# # zero out the DC component
fft_shift[yc, xc] = 0
# get the DFT periodogram with units of m^2 for topography
# include weights of hann to correct for windowing
p2d = np.abs(fft_shift)**2 / (Lx * Ly * hann_weight)
# The periodogram is a measure of how much of the
# original elevation field's variance falls within a given frequency range.
# You can check that the sum of the periodogram is roughly equal to the
# variance in Z. (The variance will be somewhat less due to the zero padding.)
# calculate radial frequencies
# xc, yc = (Lx//2, Ly//2) # (Lx//2 + 1, Ly//2 - 1) # center coordinate
x, y = np.meshgrid(range(Lx), range(Ly))#[::-1])
# wavenumbers
kx = x - xc
ky = y - yc
# kx_, ky_ = np.meshgrid(range(-Lx//2, Lx//2 - 1), range(Ly//2, -Ly//2+1, -1))
# radial frequencies
fx = kx / (Lx * step)
fy = ky / (Ly * step)
f2d = np.sqrt(fx**2 + fy**2)
# w2d = np.sqrt((1/fx)**2 + (1/fy)**2)
# f2d = 1/w2d
# fourier angles
F_ang2d = np.rad2deg(np.arctan2(ky*step, kx*step))
# Create sorted, non-redundant vectors of frequency and power
p1d = p2d[:, 0:xc+1].copy() # only half the power (reflected across the center)
f1d = f2d[:, 0:xc+1].copy() # same for the frequency
F_ang1d = F_ang2d[:, 0:xc+1].copy() # same for angle
    # set redundant columns to negative for clipping below
f1d[yc:Ly, xc] = -1
# concatenate frequency and power and sort by frequency
f1d = np.c_[f1d.ravel(), p1d.ravel(), F_ang1d.ravel()]
I = np.argsort(f1d[:, 0])
f1d = f1d[I, :]
# remove negative values
f1d = f1d[f1d[:, 0] > 0, :]
# extract power, angle, and frequency (factor of 2 corrects for taking half the spectrum)
p1d = 2 * f1d[:, 1] # the sum of the p2d and p1d should now be approximately equal
F_ang1d = f1d[:, 2]
f1d = f1d[:, 0]
return f1d, f2d, p1d, p2d, F_ang1d, F_ang2d
def fftNorm(f1d, f2d, p1d, p2d, bins=20):
"""
This is 1D power-law fit DFT normalization function.
References:
Purinton, B. and Bookhagen, B. (2017). Validation of digital elevation models
(dems) and comparison of geomorphic metrics on the southern central andean plateau.
Earth Surface Dynamics 5, 211–237. doi:10.5194/esurf-5-211-2017
Parameters
----------
f1d : numpy array
Radial frequency in 1D.
f2d : numpy array
Radial frequency in 2D.
p1d : numpy array
Power in 1D.
p2d : numpy array
Power in 2D.
bins : int, optional
Number of bins for power-law fit. The default is 20.
Returns
-------
bin_center : numpy array
Bin centers for power-law fit.
bin_med : numpy array
Bin medians for power-law fit.
pl_fit : numpy array
Resulting power-law fit.
fit : numpy array
Coefficients of power-law fit.
p1d_norm : numpy array
Normalized 1D power.
p2d_norm : numpy array
Normalized 2D power.
"""
# bin the data using log bins
f_bins = np.logspace(np.log10(f1d.min()), np.log10(f1d.max()), bins * 2 - 1)
bin_med, edges, _ = stats.binned_statistic(f1d, p1d, statistic=np.nanmedian,
bins=f_bins[::2])
# bin_center = edges[:-1] + np.diff(edges)/2
bin_center = f_bins[1::2]
# sometimes NaN values remain in some bins, throw those bins out
bin_center = bin_center[np.isfinite(bin_med)]
bin_med = bin_med[np.isfinite(bin_med)]
# apply a power-law fit to the bins
A = np.vstack([np.log10(bin_center), np.ones(len(bin_center))]).T
fit = np.linalg.lstsq(A, np.log10(bin_med), rcond=None)[0]
pl_fit = (10**fit[1]) * (bin_center**fit[0])
with np.errstate(divide='ignore', invalid='ignore'):
# use the power-law fit to normalize the 1D spectrum
p1d_norm = p1d / ((10**fit[1]) * (f1d**fit[0]))
# use the power-law fit to normalize the 2D spectrum
p2d_norm = p2d / ((10**fit[1]) * (f2d**fit[0]))
return bin_center, bin_med, pl_fit, fit, p1d_norm, p2d_norm
# %% OTHER FUNCTIONS
def CalculateChannelSlope(pts_array, slope_window_size=5):
"""
Clubb 2019 - JGR:ES
Modified from: https://github.com/UP-RS-ESP/river-clusters/blob/master/clustering.py
"""
grad = np.empty(len(pts_array))
    slicer = (slope_window_size - 1) // 2
    for index in range(len(pts_array)):
start_idx = index-slicer
if start_idx < 0:
start_idx=0
end_idx = index+slicer+1
if end_idx > len(pts_array):
end_idx = len(pts_array)
# find the rows above and below relating to the window size. We use whatever nodes
# are available to not waste the data.
this_slice = pts_array[int(start_idx):int(end_idx)]
# now regress this slice
x = this_slice[:,0]
y = this_slice[:,1]
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
grad[index] = abs(slope)
return grad
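# A minimal end-to-end sketch (illustrative): tile a DEM, compute the HPHS metric on one tile,
# and take its normalized DFT. The DEM path, tile size, kernel, and sun geometry below are
# assumptions for demonstration, not values from the original workflow.
if __name__ == '__main__':
    example_dem = 'data/example_dem.tif'  # hypothetical path
    if os.path.exists(example_dem):
        xys, psx, psy, step, tile_px = getDEMtiles(example_dem, tile_size_km=10)
        xx, yy = xys[0]
        el = loadDEMclip(example_dem, xx, yy, tile_px)
        kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])  # simple high-pass kernel
        hphs, hs = HPHS(el, step, kernel, azimuths=[315, 45], angles=[20])
        f1d, f2d, p1d, p2d, ang1d, ang2d = doDFT(hphs, step)
        bin_center, bin_med, pl_fit, fit, p1d_norm, p2d_norm = fftNorm(f1d, f2d, p1d, p2d, bins=20)
        print('power-law fit exponent:', fit[0])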
|
USAGE_CONTENT = """Document and Blog Entry Manager
USAGE:
<command> [OPTIONS]
OPTIONS:
  -i, -init                            initialize the docs directory (existing files and dirs are not deleted).
  -n, -new [<OPTS>]                    create a new document set under the "work" dir (dir, md file, and category file).
    OPTS (the following can be specified together):
    -t, -title <DocTitle>              specify the document title (default: "Document").
    -c, -category <CategoryName>       specify the category (default: empty value).
  -s, -search <OPTS>                   search document entries (shows entry id, title, group, and category).
    OPTS:
    -g, -group <Group Name>            search by group name.
    -c, -category <Category Name>      search by category name.
    -t, -title <Keyword>               search by title keyword (partial match).
  -p, -push [<OPTS>] <DirName>         push a document set from the "work" dir to the "docs" dir.
    OPTS: -a, -all                     in addition to the above, post to your blog.
  -r, -retrieve [<OPTS>] <DocEntryID>  retrieve a document set from the "docs" dir to the "work" dir (with backup).
    OPTS: -c, -cancel                  cancel the retrieve (move the backup back to the "docs" dir).
  -b, -blog <OPTS>                     operate on your blog.
    OPTS (the following cannot be specified together):
    -c, -collect                       collect all blog entries from your blog.
    -p, -push <DocEntryID>             post the specified document to your blog.
  -h, -help                            show usage.
"""
def print_usage():
print(USAGE_CONTENT)
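# Illustrative invocations (<command> stands for whatever entry point wraps this usage text):
#   <command> -init
#   <command> -new -title "My Doc" -category diary
#   <command> -search -group blog
#   <command> -push -all MyDocDir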
|
# newplots.py
"""Volume 1A: Data Visualization. Plotting file."""
from __future__ import print_function
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
# Decorator ===================================================================
from matplotlib import colors, pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from functools import wraps
from sys import stdout
import os
def _save(filename):
"""Decorator for saving, clearing, and closing figures automatically."""
try:
name, extension = filename.split(".")
except (ValueError, TypeError) as e:
raise ValueError("Invalid file name '{}'".format(filename))
if extension not in {"pdf", "png"}:
raise ValueError("Invalid file extension '{}'".format(extension))
if not os.path.isdir("figures"):
os.mkdir("figures")
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
print("{:.<40}".format(filename), end='')
stdout.flush()
plt.clf()
out = func(*args, **kwargs)
plt.savefig("figures/"+filename, format=extension)
print("done.")
return out
except Exception as e:
print("\n\t", e, sep='')
finally:
plt.clf()
plt.close('all')
return wrapper
return decorator
# Plots =======================================================================
import numpy as np
# Problem 1 (Anscombe's Quartet) ----------------------------------------------
def anscombe_data(save=False):
data = np.array([[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[ 8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[ 9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[ 6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[ 4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.50],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[ 7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[ 5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]])
if save:
np.save("anscombe.npy", data)
return data
# Problem 2 (Line Plots / Small Multiples) ------------------------------------
@_save("chebyshev_bad.pdf")
def line_bad():
x = np.linspace(-1, 1, 200)
for n in range(9):
plt.plot(x, np.polynomial.Chebyshev.basis(n)(x), lw=1,
label= "n = {}".format(n))
plt.axis([-1.1, 1.1, -1.1, 1.1])
plt.legend()
@_save("chebyshev_good.pdf")
def line_good():
x = np.linspace(-1, 1, 200)
for n in range(9):
plt.subplot(3,3,n+1)
plt.plot(x, np.polynomial.Chebyshev.basis(n)(x))
plt.axis([-1.1, 1.1, -1.1, 1.1])
# Turn off extra tick marks and axis labels.
plt.tick_params(which="both", top="off", right="off")
if n < 6:
plt.tick_params(labelbottom="off")
if n % 3:
plt.tick_params(labelleft="off")
plt.title("n = {}".format(n))
def prob2():
line_bad()
line_good()
# Problem 3 (Scatter Plots) ---------------------------------------------------
@_save("scatter_1.pdf")
def scatter_1():
length, width, height = np.random.randint(1, 20, (3,50))
plt.scatter(length, width, s=100)
plt.grid()
plt.xlabel("Length (inches)", fontsize=18, color="white")
plt.ylabel("Width (inches)", fontsize=18)
plt.tick_params(labelbottom="off")
return length, width, height
@_save("scatter_2.pdf")
def scatter_2(length, width, height):
plt.scatter(length, width, c=height, s=100)
plt.grid()
plt.xlabel("Length (inches)", fontsize=18, color="white")
plt.ylabel("Width (inches)", fontsize=18, color="white")
plt.tick_params(labelbottom="off", labelleft="off")
cbar = plt.colorbar()
cbar.set_label("Height (inches)", fontsize=18)
@_save("scatter_3.pdf")
def scatter_3(length, width, height):
plt.scatter(length, width, s=length*width*height/2., alpha=.7)
plt.grid()
plt.xlabel("Length (inches)", fontsize=18)
plt.ylabel("Width (inches)", fontsize=18)
@_save("scatter_4.pdf")
def scatter_4(length, width, height):
plt.scatter(length, width, c=height, s=length*width*height/2., alpha=.7)
plt.grid()
plt.xlabel("Length (inches)", fontsize=18)
plt.ylabel("Width (inches)", fontsize=18, color="white")
plt.tick_params(labelleft="off")
cbar = plt.colorbar()
cbar.set_label("Height (inches)", fontsize=18)
def prob3():
l,w,h = scatter_1()
scatter_2(l,w,h)
scatter_3(l,w,h)
scatter_4(l,w,h)
# Problem 4 (Histograms) ------------------------------------------------------
@_save("histogram_1_bad.pdf")
def histogram_1_bad(N):
data = np.random.normal(size=N)
plt.plot(data)
return data
@_save("histogram_1_good.pdf")
def histogram_1_good(data):
plt.hist(data, bins=30)
@_save("histogram_2.pdf")
def histogram_2(N):
data = np.random.beta(a=5, b=2, size=N)
plt.hist(data, bins=30)
return data
@_save("histogram_3.pdf")
def histogram_3(data):
plt.hist(data, bins=30, lw=0, histtype="stepfilled")
plt.tick_params(axis="y", labelcolor='white')
plt.tick_params(left="off", top="off", right="off")
@_save("histogram_4.pdf")
def histogram_4(data):
freq, bin_edges = np.histogram(data, bins=30)
bin_centers = (bin_edges[:-1] + bin_edges[1:])/2.
plt.plot(bin_centers, freq, 'k-', lw=4)
plt.tick_params(axis="y", labelcolor="white")
plt.tick_params(left="off", top="off", right="off")
# plt.tick_params(left="off", top="off", right="off", labelleft="off")
@_save("earthquake.pdf")
def earthquake():
years, magnitudes, longitude, latitude = np.load("earthquakes.npy").T
plt.plot(years, magnitudes, '.')
plt.xlabel("Year")
plt.ylabel("Magnitude")
def prob4():
histogram_1_good(histogram_1_bad(1000))
data = histogram_2(10000)
histogram_3(data)
histogram_4(data)
earthquake()
# Problem 5 -------------------------------------------------------------------
@_save("heatmap_1.png")
def heatmap_1(N):
x = np.linspace(-1.5, 1.5, N)
X, Y = np.meshgrid(x, x.copy())
Z = Y**2 - X**3 + X**2
plt.pcolormesh(X, Y, Z, cmap="viridis")
plt.colorbar()
return X, Y, Z
@_save("heatmap_2.png")
def heatmap_2(X, Y, Z):
plt.contour(X, Y, Z, [-1, -.25, 0, .25, 1, 4], colors="white")
plt.pcolormesh(X, Y, Z, cmap="viridis")
plt.colorbar()
@_save("contour_1.pdf")
def contour_1(X, Y, Z):
plt.contour(X, Y, Z, 6, cmap="viridis")
plt.colorbar()
@_save("contour_2.pdf")
def contour_2(X, Y, Z):
plt.contourf(X, Y, Z, 12, cmap="viridis")
plt.colorbar()
@_save("heatmap_3.png")
def heatmap_3(N):
x = np.linspace(-6, 6, N)
X, Y = np.meshgrid(x, x.copy())
Z = np.abs(Y**2 - X**3 + X**2)
plt.pcolormesh(X, Y, Z, cmap="plasma")
plt.colorbar()
return X, Y, Z
@_save("contour_3.pdf")
def contour_3(X, Y, Z):
plt.contourf(X, Y, Z, 6, cmap="plasma", norm=colors.LogNorm())
plt.colorbar()
def prob5():
x,y,z = heatmap_1(200)
heatmap_2(x,y,z)
contour_1(x,y,z)
contour_2(x,y,z)
x,y,z = heatmap_3(200)
contour_3(x,y,z)
# Problem 6 -------------------------------------------------------------------
@_save("bar_1.pdf")
def bar_1():
labels = ["Lobster Thermador", "Baked Beans", "Crispy Bacon",
"Smoked Sausage", "Hannibal Ham", "Eggs", "Spam"]
values = [10, 11, 18, 19, 20, 21, 22]
positions = np.arange(len(labels))
plt.bar(positions, values, align="center")
plt.xticks(positions, labels)
return labels, values, positions
@_save("bar_2.pdf")
def bar_2(labels, values, positions):
plt.barh(positions, values, align="center")
plt.yticks(positions, labels)
plt.gcf().subplots_adjust(left=0.2)
@_save("pie.pdf")
def pie(labels, values, positions):
explode = np.zeros(len(values))
explode[np.random.randint(0,explode.size)] = .2
plt.pie(values, explode, labels, shadow=True,
            startangle=np.random.randint(0, 360))
plt.gca().set_aspect("equal")
@_save("dishonest_1.pdf")
def dishonest_1(N):
x = np.linspace(5, 10, N) + np.random.normal(size=N)/3.
y = .5*x + 4 + np.random.normal(size=N)/2.
plt.plot(x, y, 'o', ms=10)
return x, y
@_save("dishonest_2.pdf")
def dishonest_2(x, y):
plt.plot(x, y, 'o', ms=10)
plt.xlim(-5,20)
@_save("dishonest_3.pdf")
def dishonest_3(x, y):
plt.semilogy(x, y, 'o', ms=10)
@_save("honest.pdf")
def honest(x, y):
plt.plot(x, y, 'o', ms=10)
plt.xlim([0, x.max()+.2])
plt.ylim([0, x.max()+.2])
def country_data(save=True):
data = np.array([
[ 8.742, 374.056, 179.2, 167.6],
[ 10.985, 33.197, 160.0, 142.2],
[ 206.553, 1774.725, 173.1, 158.8],
[1378.36, 10866.444, 167 , 158.6],
[ 5.495, 229.810, 178.9, 165.3],
[ 81.771, 3355.772, 178 , 165 ],
[ 9.823, 120.687, 176 , 164 ],
[1330.09, 2073.543, 164.7, 161.2],
[ 127.00, 4123.258, 172.5, 158 ],
[ 24.214, 17.396, 165.6, 154.9],
[ 0.622, 4.588, 183.2, 168.4],
[ 5.237, 388.315, 182.4, 168 ],
[ 31.489, 192.084, 164 , 151 ],
[ 50.617, 1377.873, 170.8, 157.4],
[ 20.966, 82.316, 163.6, 151.4],
[ 8.342, 664.738, 175.4, 164 ],
[ 78.742, 718.221, 174 , 158.9],
[ 65.110, 2848.755, 177.8, 164.5],
[324.311, 17946.996, 176.1, 162.1],
[ 92.700, 193.599, 165.7, 155.2]
])
if save:
np.save("countries.npy", data)
return data
def prob6():
l,v,p = bar_1()
bar_2(l,v,p)
pie(l,v,p)
x,y = dishonest_1(20)
dishonest_2(x,y)
dishonest_3(x,y)
honest(x,y)
# =============================================================================
def save_all():
prob2()
prob3()
prob4()
prob5()
prob6()
if __name__ == '__main__':
save_all()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AgreementType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The agreement type.
"""
NOT_SPECIFIED = "NotSpecified"
AS2 = "AS2"
X12 = "X12"
EDIFACT = "Edifact"
class ApiDeploymentParameterVisibility(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The Api deployment parameter visibility.
"""
NOT_SPECIFIED = "NotSpecified"
DEFAULT = "Default"
INTERNAL = "Internal"
class ApiTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The Api tier.
"""
NOT_SPECIFIED = "NotSpecified"
ENTERPRISE = "Enterprise"
STANDARD = "Standard"
PREMIUM = "Premium"
class ApiType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
NOT_SPECIFIED = "NotSpecified"
REST = "Rest"
SOAP = "Soap"
class AzureAsyncOperationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The Azure async operation state.
"""
FAILED = "Failed"
SUCCEEDED = "Succeeded"
PENDING = "Pending"
CANCELED = "Canceled"
class DayOfWeek(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The day of the week.
"""
SUNDAY = "Sunday"
MONDAY = "Monday"
TUESDAY = "Tuesday"
WEDNESDAY = "Wednesday"
THURSDAY = "Thursday"
FRIDAY = "Friday"
SATURDAY = "Saturday"
class DaysOfWeek(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
SUNDAY = "Sunday"
MONDAY = "Monday"
TUESDAY = "Tuesday"
WEDNESDAY = "Wednesday"
THURSDAY = "Thursday"
FRIDAY = "Friday"
SATURDAY = "Saturday"
class EdifactCharacterSet(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The edifact character set.
"""
NOT_SPECIFIED = "NotSpecified"
UNOB = "UNOB"
UNOA = "UNOA"
UNOC = "UNOC"
UNOD = "UNOD"
UNOE = "UNOE"
UNOF = "UNOF"
UNOG = "UNOG"
UNOH = "UNOH"
UNOI = "UNOI"
UNOJ = "UNOJ"
UNOK = "UNOK"
UNOX = "UNOX"
UNOY = "UNOY"
KECA = "KECA"
class EdifactDecimalIndicator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The edifact decimal indicator.
"""
NOT_SPECIFIED = "NotSpecified"
COMMA = "Comma"
DECIMAL = "Decimal"
class EncryptionAlgorithm(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The encryption algorithm.
"""
NOT_SPECIFIED = "NotSpecified"
NONE = "None"
DES3 = "DES3"
RC2 = "RC2"
AES128 = "AES128"
AES192 = "AES192"
AES256 = "AES256"
class ErrorResponseCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The error response code.
"""
NOT_SPECIFIED = "NotSpecified"
INTEGRATION_SERVICE_ENVIRONMENT_NOT_FOUND = "IntegrationServiceEnvironmentNotFound"
INTERNAL_SERVER_ERROR = "InternalServerError"
INVALID_OPERATION_ID = "InvalidOperationId"
class EventLevel(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The event level.
"""
LOG_ALWAYS = "LogAlways"
CRITICAL = "Critical"
ERROR = "Error"
WARNING = "Warning"
INFORMATIONAL = "Informational"
VERBOSE = "Verbose"
class HashingAlgorithm(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The signing or hashing algorithm.
"""
NOT_SPECIFIED = "NotSpecified"
NONE = "None"
MD5 = "MD5"
SHA1 = "SHA1"
SHA2256 = "SHA2256"
SHA2384 = "SHA2384"
SHA2512 = "SHA2512"
class IntegrationAccountSkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The integration account sku name.
"""
NOT_SPECIFIED = "NotSpecified"
FREE = "Free"
BASIC = "Basic"
STANDARD = "Standard"
class IntegrationServiceEnvironmentAccessEndpointType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The integration service environment access endpoint type.
"""
NOT_SPECIFIED = "NotSpecified"
EXTERNAL = "External"
INTERNAL = "Internal"
class IntegrationServiceEnvironmentNetworkDependencyCategoryType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The integration service environment network dependency category type.
"""
NOT_SPECIFIED = "NotSpecified"
AZURE_STORAGE = "AzureStorage"
AZURE_MANAGEMENT = "AzureManagement"
AZURE_ACTIVE_DIRECTORY = "AzureActiveDirectory"
SSL_CERTIFICATE_VERIFICATION = "SSLCertificateVerification"
DIAGNOSTIC_LOGS_AND_METRICS = "DiagnosticLogsAndMetrics"
INTEGRATION_SERVICE_ENVIRONMENT_CONNECTORS = "IntegrationServiceEnvironmentConnectors"
REDIS_CACHE = "RedisCache"
ACCESS_ENDPOINTS = "AccessEndpoints"
RECOVERY_SERVICE = "RecoveryService"
SQL = "SQL"
REGIONAL_SERVICE = "RegionalService"
class IntegrationServiceEnvironmentNetworkDependencyHealthState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The integration service environment network dependency health state.
"""
NOT_SPECIFIED = "NotSpecified"
HEALTHY = "Healthy"
UNHEALTHY = "Unhealthy"
UNKNOWN = "Unknown"
class IntegrationServiceEnvironmentNetworkEndPointAccessibilityState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The integration service environment network endpoint accessibility state.
"""
NOT_SPECIFIED = "NotSpecified"
UNKNOWN = "Unknown"
AVAILABLE = "Available"
NOT_AVAILABLE = "NotAvailable"
class IntegrationServiceEnvironmentSkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The integration service environment sku name.
"""
NOT_SPECIFIED = "NotSpecified"
PREMIUM = "Premium"
DEVELOPER = "Developer"
class IntegrationServiceEnvironmentSkuScaleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The integration service environment sku scale type.
"""
MANUAL = "Manual"
AUTOMATIC = "Automatic"
NONE = "None"
class KeyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The key type.
"""
NOT_SPECIFIED = "NotSpecified"
PRIMARY = "Primary"
SECONDARY = "Secondary"
class MapType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The map type.
"""
NOT_SPECIFIED = "NotSpecified"
XSLT = "Xslt"
XSLT20 = "Xslt20"
XSLT30 = "Xslt30"
LIQUID = "Liquid"
class MessageFilterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The message filter type.
"""
NOT_SPECIFIED = "NotSpecified"
INCLUDE = "Include"
EXCLUDE = "Exclude"
class OpenAuthenticationProviderType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Open authentication policy provider type.
"""
AAD = "AAD"
class ParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The parameter type.
"""
NOT_SPECIFIED = "NotSpecified"
STRING = "String"
SECURE_STRING = "SecureString"
INT = "Int"
FLOAT = "Float"
BOOL = "Bool"
ARRAY = "Array"
OBJECT = "Object"
SECURE_OBJECT = "SecureObject"
class PartnerType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The partner type.
"""
NOT_SPECIFIED = "NotSpecified"
B2_B = "B2B"
class RecurrenceFrequency(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The recurrence frequency.
"""
NOT_SPECIFIED = "NotSpecified"
SECOND = "Second"
MINUTE = "Minute"
HOUR = "Hour"
DAY = "Day"
WEEK = "Week"
MONTH = "Month"
YEAR = "Year"
class SchemaType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The schema type.
"""
NOT_SPECIFIED = "NotSpecified"
XML = "Xml"
class SegmentTerminatorSuffix(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The segment terminator suffix.
"""
NOT_SPECIFIED = "NotSpecified"
NONE = "None"
CR = "CR"
LF = "LF"
CRLF = "CRLF"
class SigningAlgorithm(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The signing or hashing algorithm.
"""
NOT_SPECIFIED = "NotSpecified"
DEFAULT = "Default"
SHA1 = "SHA1"
SHA2256 = "SHA2256"
SHA2384 = "SHA2384"
SHA2512 = "SHA2512"
class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The sku name.
"""
NOT_SPECIFIED = "NotSpecified"
FREE = "Free"
SHARED = "Shared"
BASIC = "Basic"
STANDARD = "Standard"
PREMIUM = "Premium"
class StatusAnnotation(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The status annotation.
"""
NOT_SPECIFIED = "NotSpecified"
PREVIEW = "Preview"
PRODUCTION = "Production"
class SwaggerSchemaType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The swagger schema type.
"""
STRING = "String"
NUMBER = "Number"
INTEGER = "Integer"
BOOLEAN = "Boolean"
ARRAY = "Array"
FILE = "File"
OBJECT = "Object"
NULL = "Null"
class TrackEventsOperationOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The track events operation options.
"""
NONE = "None"
DISABLE_SOURCE_INFO_ENRICH = "DisableSourceInfoEnrich"
class TrackingRecordType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The tracking record type.
"""
NOT_SPECIFIED = "NotSpecified"
CUSTOM = "Custom"
AS2_MESSAGE = "AS2Message"
AS2_MDN = "AS2MDN"
X12_INTERCHANGE = "X12Interchange"
X12_FUNCTIONAL_GROUP = "X12FunctionalGroup"
X12_TRANSACTION_SET = "X12TransactionSet"
X12_INTERCHANGE_ACKNOWLEDGMENT = "X12InterchangeAcknowledgment"
X12_FUNCTIONAL_GROUP_ACKNOWLEDGMENT = "X12FunctionalGroupAcknowledgment"
X12_TRANSACTION_SET_ACKNOWLEDGMENT = "X12TransactionSetAcknowledgment"
EDIFACT_INTERCHANGE = "EdifactInterchange"
EDIFACT_FUNCTIONAL_GROUP = "EdifactFunctionalGroup"
EDIFACT_TRANSACTION_SET = "EdifactTransactionSet"
EDIFACT_INTERCHANGE_ACKNOWLEDGMENT = "EdifactInterchangeAcknowledgment"
EDIFACT_FUNCTIONAL_GROUP_ACKNOWLEDGMENT = "EdifactFunctionalGroupAcknowledgment"
EDIFACT_TRANSACTION_SET_ACKNOWLEDGMENT = "EdifactTransactionSetAcknowledgment"
class TrailingSeparatorPolicy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The trailing separator policy.
"""
NOT_SPECIFIED = "NotSpecified"
NOT_ALLOWED = "NotAllowed"
OPTIONAL = "Optional"
MANDATORY = "Mandatory"
class UsageIndicator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The usage indicator.
"""
NOT_SPECIFIED = "NotSpecified"
TEST = "Test"
INFORMATION = "Information"
PRODUCTION = "Production"
class WorkflowProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The workflow provisioning state.
"""
NOT_SPECIFIED = "NotSpecified"
ACCEPTED = "Accepted"
RUNNING = "Running"
READY = "Ready"
CREATING = "Creating"
CREATED = "Created"
DELETING = "Deleting"
DELETED = "Deleted"
CANCELED = "Canceled"
FAILED = "Failed"
SUCCEEDED = "Succeeded"
MOVING = "Moving"
UPDATING = "Updating"
REGISTERING = "Registering"
REGISTERED = "Registered"
UNREGISTERING = "Unregistering"
UNREGISTERED = "Unregistered"
COMPLETED = "Completed"
RENEWING = "Renewing"
PENDING = "Pending"
WAITING = "Waiting"
IN_PROGRESS = "InProgress"
class WorkflowState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The workflow state.
"""
NOT_SPECIFIED = "NotSpecified"
COMPLETED = "Completed"
ENABLED = "Enabled"
DISABLED = "Disabled"
DELETED = "Deleted"
SUSPENDED = "Suspended"
class WorkflowStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The workflow status.
"""
NOT_SPECIFIED = "NotSpecified"
PAUSED = "Paused"
RUNNING = "Running"
WAITING = "Waiting"
SUCCEEDED = "Succeeded"
SKIPPED = "Skipped"
SUSPENDED = "Suspended"
CANCELLED = "Cancelled"
FAILED = "Failed"
FAULTED = "Faulted"
TIMED_OUT = "TimedOut"
ABORTED = "Aborted"
IGNORED = "Ignored"
class WorkflowTriggerProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The workflow trigger provisioning state.
"""
NOT_SPECIFIED = "NotSpecified"
ACCEPTED = "Accepted"
RUNNING = "Running"
READY = "Ready"
CREATING = "Creating"
CREATED = "Created"
DELETING = "Deleting"
DELETED = "Deleted"
CANCELED = "Canceled"
FAILED = "Failed"
SUCCEEDED = "Succeeded"
MOVING = "Moving"
UPDATING = "Updating"
REGISTERING = "Registering"
REGISTERED = "Registered"
UNREGISTERING = "Unregistering"
UNREGISTERED = "Unregistered"
COMPLETED = "Completed"
class WsdlImportMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The WSDL import method.
"""
NOT_SPECIFIED = "NotSpecified"
SOAP_TO_REST = "SoapToRest"
SOAP_PASS_THROUGH = "SoapPassThrough"
class X12CharacterSet(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The X12 character set.
"""
NOT_SPECIFIED = "NotSpecified"
BASIC = "Basic"
EXTENDED = "Extended"
UTF8 = "UTF8"
class X12DateFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The x12 date format.
"""
NOT_SPECIFIED = "NotSpecified"
CCYYMMDD = "CCYYMMDD"
YYMMDD = "YYMMDD"
class X12TimeFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The x12 time format.
"""
NOT_SPECIFIED = "NotSpecified"
HHMM = "HHMM"
HHMMSS = "HHMMSS"
HHMMS_SDD = "HHMMSSdd"
HHMMS_SD = "HHMMSSd"
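# Hedged illustration (not part of the generated SDK code): the case-insensitive
# metaclass defined above lets enum members be looked up regardless of casing.
if __name__ == "__main__":
    assert AgreementType["as2"] is AgreementType.AS2
    assert getattr(WorkflowState, "enabled") is WorkflowState.ENABLED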
|
""" discovery/short_interest_api.py tests """
import unittest
# from gamestonk_terminal.sentiment.reddit_api import popular_tickers
class TestSentimentRedditApi(unittest.TestCase):
def test_popular_tickers(self):
# popular_tickers(["-s", "wallstreetbets"])
print("")
|
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
from multiobject import generate_multiobject_dataset
from sprites import generate_dsprites, generate_binary_mnist
from utils import get_date_str, show_img_grid
supported_sprites = ['dsprites', 'binary_mnist']
def main():
args = parse_args()
### SETTINGS #############################
n = 100000 # num images
frame_size = (64, 64)
patch_size = 18
# count_distrib = {1: 1}
count_distrib = {0: 1/3, 1: 1/3, 2: 1/3}
allow_overlap = True
##########################################
# Generate sprites and labels
print("generating sprites...")
if args.dataset_type == 'dsprites':
sprites, labels = generate_dsprites(patch_size)
elif args.dataset_type == 'binary_mnist':
sprites, labels = generate_binary_mnist(patch_size)
else:
raise NotImplementedError
# Show sprites
show_img_grid(8, sprites, random_selection=True,
fname='gen_{}_sprites.png'.format(get_date_str()))
# Create dataset
print("generating dataset...")
ch = sprites[0].shape[-1]
img_shape = (*frame_size, ch)
dataset, n_obj, labels = generate_multiobject_dataset(
n, img_shape, sprites, labels,
count_distrib=count_distrib,
allow_overlap=allow_overlap)
print("done")
print("shape:", dataset.shape)
# Number of objects is part of the labels
labels['n_obj'] = n_obj
# Save dataset
print("saving...")
root = os.path.join('generated', args.dataset_type)
os.makedirs(root, exist_ok=True)
file_str = get_date_str()
fname = 'multi_' + args.dataset_type + '_' + file_str
fname = os.path.join(root, fname)
np.savez_compressed(fname, x=dataset, labels=labels)
print('done')
# Show samples and print their attributes
print("\nAttributes of saved samples:")
show_img_grid(4, dataset, labels,
fname='gen_{}_images.png'.format(get_date_str()))
# Show distribution of number of objects per image
plt.figure()
plt.hist(n_obj, np.arange(min(n_obj) - 0.5, max(n_obj) + 0.5 + 1, 1))
plt.title("Distribution of num objects per image")
plt.xlabel("Number of objects")
plt.savefig('gen_{}_distribution.png'.format(get_date_str()))
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
allow_abbrev=False)
parser.add_argument('--type',
type=str,
default='dsprites',
metavar='NAME',
dest='dataset_type',
help="dataset type")
args = parser.parse_args()
if args.dataset_type not in supported_sprites:
raise NotImplementedError(
"unsupported dataset '{}'".format(args.dataset_type))
return args
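# Hedged sketch (not called anywhere above): how a dataset written by main() could be
# reloaded later. The real .npz filename contains a timestamp, so the path below is
# only a placeholder; allow_pickle=True is needed because `labels` was saved as a dict.
def load_generated(path='generated/dsprites/multi_dsprites_<timestamp>.npz'):
    data = np.load(path, allow_pickle=True)
    return data['x'], data['labels'].item()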
if __name__ == '__main__':
main()
|
'''
Tested on python3
About : Screen Shot Grabber
'''
import win32gui
import win32ui
import win32con
import win32api
#handle
h_desktop=win32gui.GetDesktopWindow()
#display size
width=win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
height=win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
left_side=win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
top_side=win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
#device context
context_desktop=win32gui.GetWindowDC(h_desktop)
context_image=win32ui.CreateDCFromHandle(context_desktop)
#memory based device context
context_memory=context_image.CreateCompatibleDC()
ss=win32ui.CreateBitmap() #screenshot
ss.CreateCompatibleBitmap(context_image,width,height)
context_memory.SelectObject(ss)
#copy the screen
context_memory.BitBlt((0,0),(width,height),context_image,(left_side,top_side),win32con.SRCCOPY)
#save
ss.SaveBitmapFile(context_memory,'G:\\Temp\\screenshot.bmp')
#free
context_memory.DeleteDC()
win32gui.DeleteObject(ss.GetHandle())
|
from debexpo.tests import TestController
from pylons import url
from tempfile import mkdtemp
from shutil import rmtree
import os
import os.path
import pylons.test
class TestIndexController(TestController):
def setUp(self):
self.tempdir = mkdtemp()
def tearDown(self):
rmtree(self.tempdir)
def _generate_temppage(self, filename, text):
temppage = os.path.join(self.tempdir, filename)
f = open(temppage, 'w')
f.write(text)
f.close()
return temppage
def test_index(self):
# test a normal index page
testurl = url(controller='index', action='index')
response = self.app.get(testurl)
self.assertEquals(response.status_int, 200)
testtext = '<h1>Welcome to debexpo</h1>'
pylons.test.pylonsapp.config['debexpo.html.frontpage'] = \
self._generate_temppage('front.html', testtext)
response = self.app.get(testurl)
self.assertEquals(response.status_int, 200)
self.assertTrue(testtext in response)
del pylons.test.pylonsapp.config['debexpo.html.frontpage']
def test_contact(self):
response = self.app.get(url(controller='index', action='contact'))
self.assertEquals(response.status_int, 200)
def test_intro_maintainers(self):
testurl = url('intro-maintainers')
response = self.app.get(testurl)
self.assertEquals(response.status_int, 200)
testtext = '<h1>Introduction for maintainers: How will my package get into Debian</h1>'
pylons.test.pylonsapp.config['debexpo.html.maintainer_intro'] = \
self._generate_temppage('maintainer_intro.html', testtext)
response = self.app.get(testurl)
self.assertEquals(response.status_int, 200)
self.assertTrue(testtext in response)
del pylons.test.pylonsapp.config['debexpo.html.maintainer_intro']
def test_intro_sponsors(self):
testurl = url('sponsors')
response = self.app.get(testurl)
self.assertEquals(response.status_int, 200)
testtext = '<h1>The sponsoring process</h1>'
pylons.test.pylonsapp.config['debexpo.html.sponsors_intro'] = \
self._generate_temppage('sponsor_intro.html', testtext)
response = self.app.get(testurl)
self.assertEquals(response.status_int, 200)
self.assertTrue(testtext in response)
del pylons.test.pylonsapp.config['debexpo.html.sponsors_intro']
|
from collections import defaultdict
class Solution:
def arrangeWords(self, text) -> str:
words = defaultdict(list)
lengths = []
text = text.split(" ")
for word in text:
word = word.lower()
words[len(word)].append(word)
lengths.append(len(word))
lengths.sort()
res = []
for length in lengths:
for word in words[length]:
res.append(word)
del words[length]
res[0] = res[0].capitalize()
return " ".join(w for w in res)
s = Solution()
print(s.arrangeWords("keep calm and code on"))
|
from OdsLib.PySql import PySql
from OdsLib.Address import Address
from OdsLib.Customer import Customer
from OdsLib.Cart import Cart
from OdsLib.DeliveryExecutive import DeliveryExecutive
from OdsLib.Orders import Orders
from OdsLib.Product import Product
print("Imported ODS module")
|
"""
==================
StripUnits Deriver
==================
"""
from typing import Dict, Any
from vivarium.core.process import Deriver
from vivarium.library.units import remove_units
class StripUnits(Deriver):
"""StripUnits Deriver
Reads values specified by the 'keys' parameter under the 'units' port,
    removes the units, and updates keys of the same name under the 'no_units'
port. Converts values before stripping them for all {key: unit_target}
pairs declared in the 'convert' parameter dictionary.
"""
name = 'strip_units'
defaults: Dict[str, Any] = {
'keys': [],
'convert': {}}
def __init__(self, parameters):
super().__init__(parameters)
self.convert = self.parameters['convert']
def ports_schema(self):
return {
'units': {key: {} for key in self.parameters['keys']},
'no_units': {key: {} for key in self.parameters['keys']}}
def next_update(self, timestep, states):
converted_units = {
state: value.to(self.convert[state])
if state in self.convert else value
for state, value in states['units'].items()}
return {
'no_units': {
key: {
'_value': remove_units(value),
'_updater': 'set'
} for key, value in converted_units.items()
}}
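# Hedged usage sketch (illustrative assumptions, not part of this module): it assumes
# vivarium.library.units exposes a pint-style registry as `units`, and the key name
# 'mass' is made up for the example.
if __name__ == '__main__':
    from vivarium.library.units import units
    deriver = StripUnits({'keys': ['mass'], 'convert': {'mass': 'femtogram'}})
    update = deriver.next_update(0, {'units': {'mass': 2.5 * units.fg}})
    print(update)  # expected: {'no_units': {'mass': {'_value': 2.5, '_updater': 'set'}}}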
|
__author__ = 'teemu kanstren'
import json
import argparse
import os
from influxdb import InfluxDBClient
from datetime import datetime
parser = argparse.ArgumentParser()
parser.add_argument("-db", "--database", help="Database name", default="_internal", nargs='?')
parser.add_argument("-ip", "--hostname", help="Database address (ip/url)", default="localhost", nargs='?')
parser.add_argument("-p", "--port", help="Database port", default="8086", nargs='?')
parser.add_argument("-u", "--username", help="DB user name", default="root", nargs='?')
parser.add_argument("-pw", "--password", help="DB password", default="root", nargs='?')
parser.add_argument("-tl", "--timelength", help="Length of time for dump", default="1h", nargs='?')
parser.add_argument("-et", "--endtime", help="End time for dump", default='now()', nargs='?')
parser.add_argument("-f", "--filter", help="List of columns to filter", default='', nargs='?')
args = parser.parse_args()
host = args.hostname
port = args.port
username = args.username
password = args.password
dbname = args.database
time_length = args.timelength
end_time = args.endtime
filtered_str = args.filter
filtered = [x.strip() for x in filtered_str.split(',')]
client = InfluxDBClient(host, port, username, password, dbname)
#first we get list of all measurements in the selected db to dump them
query = 'show measurements'
result = client.query(query)
for measurements in result:
for measure in measurements:
measure_name = measure['name']
filename = "report_json/"+measure_name+'.json'
os.makedirs(os.path.dirname(filename), exist_ok=True)
#request all data for given timeframe and dump as json
with open(filename, 'w') as file:
query = """select * from "{}" where time > '{}' - {} AND time < '{}' """.format(measure_name, end_time, time_length, end_time)
result = client.query(query, epoch='ms')
lines = []
for point in result:
for item in point:
for col in filtered:
if col in item:
del item[col]
ms = item['time'] / 1000
d = datetime.fromtimestamp(ms)
item['readable_time'] = d.isoformat('T')+'Z'
#first we build a list of dictionaries for each measurement value
lines.append(item)
#finally put the list into a dict and use built-in functionality of "json" module to dump them all at once
out = {'data': lines}
print(json.dumps(out), file=file)
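# Hedged sketch (not used above): reading one of the per-measurement dumps back in.
# The measurement name 'cpu' is only an example of what the loop above may have written.
def load_dump(measure_name='cpu'):
    with open("report_json/" + measure_name + ".json") as infile:
        return json.load(infile)['data']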
|
'''
Functions and classes for reading data
'''
import numpy as np
import tensorflow as tf
import pandas as pd
import collections
import os
from six.moves.urllib.request import urlretrieve
import zipfile
def load_zip_data(filename):
'''
return the zip data as unicode strings
:param: filename: name of file to open
:return:
'''
    # use a context manager so the archive is closed even with the early return
    with zipfile.ZipFile(filename, 'r') as f:
        for name in f.namelist():
            return tf.compat.as_str(f.read(name))
def load_csv_file(filename):
'''
return data from csv file
:param: filename: name of file with location
:return:
'''
with open(filename,'r') as f:
data = f.read()
f.close()
return data
def train_test_split(data, ratio =[0.6,0.2,0.2]):
'''
Based on ratio, splits the data into train, validation and testing steps
:param data: data on which the split needs to take place
:param ratio: a list containing ratio's for train, test and split
:return: train_data, valid_data and test_data
'''
[train_index, valid_index, test_index] = [int(x*len(data)) for x in ratio]
train_data = data[:train_index]
valid_data = data[train_index:train_index+valid_index]
    test_data = data[train_index+valid_index:]
return (train_data,valid_data,test_data)
class BatchGenerator(object):
'''
Given a source of data, generate batches with [batch_size, batch_len]
:param data: data given as a single string of characters. Can be train, test or validation
:param batch_size: data processed in parallel by the LSTM, stabilizes the variance of SGD
:param batch_len: the backprop limitation due to vanishing gradients
:return: a generator/yield function that returns the batches
'''
def __init__(self,data,batch_size,batch_len):
self.data = data
self.batch_size = batch_size
self.batch_len = batch_len
self.cursor = 0
self.segment_len = len(self.data)//self.batch_size
self.batch = np.zeros((self.batch_size,self.segment_len), dtype=np.int32)
self.epoch_size = (self.segment_len - 1) // self.batch_len
def preprocess(self):
counter = collections.Counter(self.data)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
self.chars, _ = zip(*count_pairs)
self.vocab_size = len(self.chars)
self.vocab = dict(zip(self.chars, range(self.vocab_size)))
tensor = np.array(list(map(self.vocab.get, self.data)))
return tensor
def create_batches(self):
# contiguous data should be part of a single batch_len
# the way to ensure that is to divide the data into segments based on batch_len
# with segments capturing continuous information
# and to move along the segments taking consequtive chunks
tensor = self.preprocess()
for i in range(self.batch_size):
self.batch[i] = tensor[self.segment_len*i:self.segment_len*(i+1)]
def next(self):
x = self.batch[:,self.cursor*self.batch_len:(self.cursor+1)*self.batch_len]
y = self.batch[:,self.cursor*self.batch_len+1:(self.cursor+1)*self.batch_len+1]
        self.cursor = (self.cursor + 1) % self.epoch_size  # wrap around each epoch
return (x,y)
if __name__ == "__main__":
    url = 'http://mattmahoney.net/dc/'
    filename = 'text8.zip'
    # fetch the corpus if it is not already on disk, then load it from the zip
    if not os.path.exists(filename):
        urlretrieve(url + filename, filename)
    data = load_zip_data(filename)
    print(data[:100])
    train, val, test = train_test_split(data)
    batch_train = BatchGenerator(train, 64, 20)
    batch_train.create_batches()
    print(batch_train.next())
|
import sys
__all__ = ['mock']
PY3 = sys.version_info[0] == 3
if PY3:
from unittest import mock
else:
import mock
def str2bytes(s):
if PY3:
return bytes(s, encoding='utf8')
else:
return s
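# Hedged usage note (illustrative only): str2bytes lets call sites stay version-agnostic.
if __name__ == '__main__':
    assert str2bytes('abc') == (b'abc' if PY3 else 'abc')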
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2021-01-16 14:47:23
# @Author : Joe Gao (jeusgao@163.com)
import os
import json
from utils import (
DIC_DataLoaders,
DIC_Resolvers,
)
from modules import(
DIC_Funcs,
DIC_Inits,
DIC_Losses,
DIC_Metrics,
DIC_Layers,
DIC_Bases,
DIC_Models,
DIC_Optimizers,
DIC_Tokenizers,
DIC_Generators_for_train,
DIC_Generators_for_pred,
)
def _get_dic(_dic):
_d = {}
for k, v in _dic.items():
_d[k] = {}
_d[k]['func'] = ''
if isinstance(v, dict) and v.get('params'):
_d[k]['params'] = v.get('params')
return _d
_dics = {
'DIC_Funcs': _get_dic(DIC_Funcs),
'DIC_Inits': _get_dic(DIC_Inits),
'DIC_Losses': _get_dic(DIC_Losses),
'DIC_Metrics': _get_dic(DIC_Metrics),
'DIC_Layers': _get_dic(DIC_Layers),
'DIC_Bases': _get_dic(DIC_Bases),
'DIC_Optimizers': _get_dic(DIC_Optimizers),
'DIC_Tokenizers': _get_dic(DIC_Tokenizers),
'DIC_DataLoaders': _get_dic(DIC_DataLoaders),
'DIC_Generators_for_train': _get_dic(DIC_Generators_for_train),
'DIC_Generators_for_pred': _get_dic(DIC_Generators_for_pred),
'DIC_Resolvers': _get_dic(DIC_Resolvers),
'DIC_Models': _get_dic(DIC_Models),
}
def env_init():
with open('params_templates.json', 'w') as f:
json.dump(_dics, f, ensure_ascii=False, indent=2)
if not os.path.exists('hub/bases'):
        os.makedirs('hub/bases')
if not os.path.exists('hub/models'):
os.makedirs('hub/models')
if not os.path.exists('data'):
os.mkdir('data')
if __name__ == '__main__':
env_init()
print('System initialized.')
|
from collections import deque
class Solution(object):
def shortestDistance(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
if not grid or not grid[0]:
return -1
row, col = len(grid), len(grid[0])
        dists = [[0 for _ in range(col)] for _ in range(row)]
        for i in range(row):
            for j in range(col):
if grid[i][j] == 1:
self.bfs(grid, dists, i, j)
res = float('inf')
        for i in range(row):
            for j in range(col):
if grid[i][j] == 0 and dists[i][j] < res:
res = dists[i][j]
return -1 if res == float('inf') else res
def bfs(self, grid, dists, i, j):
queue = deque()
row, col = len(grid), len(grid[0])
queue.append(i * col + j)
diffs = [(0, 1), (0, -1), (1, 0), (-1, 0)]
        visited = [[False for _ in range(col)] for _ in range(row)]
dist = 0
while queue:
sz = len(queue)
            for _ in range(sz):
cur = queue.popleft()
for diff in diffs:
                    r, c = cur // col + diff[0], cur % col + diff[1]
if 0 <= r < row and 0 <= c < col and grid[r][c] == 0 and not visited[r][c]:
dists[r][c] += dist + 1
visited[r][c] = True
queue.append(r * col + c)
dist += 1
# It is a must, to identify the reachable empty space.
        for i in range(row):
            for j in range(col):
if grid[i][j] == 0 and not visited[i][j]:
grid[i][j] = 2
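# Hedged usage sketch: 0 = empty land, 1 = building, 2 = obstacle. For this classic
# example grid the best empty cell is (1, 2), with a total travel distance of 7.
if __name__ == "__main__":
    grid = [[1, 0, 2, 0, 1],
            [0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0]]
    print(Solution().shortestDistance(grid))  # expected: 7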
|
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
from numpy.fft import fft, ifft
import NLS
import random as rand
# seed random number generator
subdirs = ['Aug1Data','Aug2Data','JulyData']
# Define something that will list directories that are not hidden
def listdirNH(path):
return glob.glob(os.path.join(path, '*'))
j = 0
for sd in subdirs:
files = listdirNH(sd+'/Rescaled')
n = 0
for f in files:
datavals = np.transpose(np.loadtxt(f).view(float))
N = len(datavals[1])
x = datavals[0] # Frequencies
ly = datavals[1] # Magnitudes
L = 3600*3
x = x*0.001 # Gets me to milliHertz (but the 2 pi version...)
scalevals = np.sqrt(ly/2)
randvals = np.zeros(len(ly))
randpn = np.zeros(len(ly))
pn = [-1,1]
for k in range(len(ly)):
rand.seed(k)
randpn[k] = rand.choice(pn)
rand.seed(k+1)
value = rand.random()*rand.choice(pn)
randvals[k] = value
ascale = randvals*scalevals
bscale = 1j*np.sqrt(ly-ascale**2)*randpn
fakevals = ascale+bscale
# Need to make a new k vector that actually has the frequencies we want
# Find the average spacing between the freqs
av = 0
for ind in range(1,len(x)):
diff = x[ind]-x[ind-1]
av = av+diff
aav = av/len(x)
xappend = np.linspace(0,x[0],int(x[0]/aav))
halfx = np.append(xappend,x)
fullx = np.append(halfx,-np.flip(halfx[1:-1],0))*1/L
yappend1 = np.zeros(len(xappend),dtype=complex)
yappend2 = np.zeros(len(halfx[1:-1]),dtype=complex)
fully = np.concatenate((yappend1,fakevals,yappend2))
if n == 0:
m = max(fully)
i = np.where(fully == m)
if len(i[0]) > 1:
newi = i[0][len(i[0])//2]
carriermode = np.array(newi)
carrierloc = fullx[carriermode]
else:
newi = i[0][0]
carriermode = np.array(newi)
carrierloc = fullx[carriermode]
print(f)
print(carriermode)
print(carrierloc)
n = n+1
# Rearrange data so the carrier wave is at mode zero
# First, find the carrier mode
#loc = np.where(fullx == carrierloc)
loc = np.where(np.logical_and(fullx>carrierloc*0.999, fullx<carrierloc*1.009))
print(carrierloc,loc,fullx[loc])
if len(loc[0])>1:
loc = loc[0][0]
else:
loc = loc[0][0]
#print(carrierloc,loc,fullx[loc],fully[loc])
yhata = fully[0:loc]
yhatb = fully[loc:]
yhatnew = np.append(yhatb,yhata) # Putting the carrier frequency in the 0 location
#plt.plot(fullx,np.abs(yhatnew),'.',markersize = 5)
# Define new t data (and a new number of data points)
NNN = 1024
tnew = np.linspace(0,10800-10800/NNN,NNN) # ADD TO ICTEMPORALDATA
# Find B
B=0 # ADD TO ICTEMPORALDATA
for v in range(N):
newvalue = yhatnew[v]*np.exp(2j*np.pi*fullx[v]*tnew) # Sum a new fourier series
B = B+newvalue
plt.title(f)
#plt.plot(tnew,np.real(B))
plt.plot(fullx,np.abs(yhatnew),'.',label = 'act')
plt.plot(1/L*NLS.kvec(NNN)*0.001,1/NNN*np.abs(fft(B)),'.',label = 'B')
plt.legend()
plt.show()
# plt.title(sd)
# plt.show()
|
import json
import pandas as pd
import numpy as np
import urllib.request
import gzip
import os
import csv
def download_json():
earliest_year = 2019
latest_year = 2020
file_paths = []
skipped_files = 0
for year in range(earliest_year, latest_year + 1):
file_name = "nvdcve-1.1-{}.json".format(year)
db_folder = "./data"
file_path = os.path.join(db_folder, file_name)
compressed_file_name = file_name + '.gz'
compressed_file_path = os.path.join(db_folder, compressed_file_name)
if not os.path.isdir(db_folder):
os.mkdir(db_folder)
# Only download if not already at rest
if not os.path.isfile(file_path):
# Download the vulnerabilty database (NVD)
url = "https://nvd.nist.gov/feeds/json/cve/1.1/{}.gz".format(file_name)
print('Downloading {}'.format(url))
response = urllib.request.urlretrieve(url, compressed_file_path)
print('Response: {}'.format(response))
# Extract the gzip
with gzip.GzipFile(compressed_file_path, 'rb') as infile:
s = infile.read()
# Save extracted file
with open(file_path, 'wb') as outfile:
outfile.write(s)
file_paths += [file_path]
else:
skipped_files += 1
print('{} files already on disk (download skipped)'.format(skipped_files))
return file_paths
def extract_table(file_path="./data/nvdcve-1.1-2020.json"):
# Open the .json file
with open(file_path, "r") as cve_json:
cve_dict = json.load(cve_json)
cve_dict.keys()
# We will write all items into a flat list, which will be upgraded to a pandas DataFrame later
vul_list = []
# We iterate over all CVE ids
nr_cve_items = len(cve_dict['CVE_Items'])
print("Total CVEs: \t{}".format(nr_cve_items))
for cve_index in range(nr_cve_items):
# Extract the CVE number of the vulnerability
cve_id = cve_dict['CVE_Items'][cve_index]['cve']['CVE_data_meta']['ID']
description = cve_dict['CVE_Items'][cve_index]['cve']['description']['description_data'][0]['value']
published_date = cve_dict['CVE_Items'][cve_index]['publishedDate']
# Extract the score if available
try:
cve_score = cve_dict['CVE_Items'][cve_index]['impact']['baseMetricV3']['cvssV3']['baseScore']
except:
cve_score = np.NaN
# Not every CVE id has a machine-readable config (cpe_match) entry.
# Supress exceptions with try.. except
i = 0
try:
entries = cve_dict['CVE_Items'][cve_index]['configurations']['nodes']
for config_index in range(len(entries)):
versions = set()
# Extract list of all vulnerable configurations (for 'OR' connector)
vulnerable_configs = []
if entries[config_index]['operator'] == 'OR':
                    for cpe in entries[config_index]['cpe_match']:
if cpe['vulnerable'] is True:
try:
max_vers = cpe['versionEndExcluding']
except:
max_vers = None
# aCPE string
cpe_string = cpe['cpe23Uri']
vulnerable_configs += [(cpe_string, max_vers)]
else:
for config in entries:
for cpe in config['children'][0]['cpe_match']:
if cpe['vulnerable'] is True:
try:
max_vers = cpe['versionEndExcluding']
except:
max_vers = None
# aCPE string
cpe_string = cpe['cpe23Uri']
vulnerable_configs += [(cpe_string, max_vers)]
# Remove duplicates
unique_vulnerable_configs = list(dict.fromkeys(vulnerable_configs))
for cpe, max_vers in unique_vulnerable_configs:
vendor = cpe.split(':')[3]
name = cpe.split(':')[4]
version = cpe.split(':')[5]
if version == '*':
if max_vers is not None:
version = max_vers
versions.add(version)
versions_string = '|'.join(versions)
vul_list += [[cve_id, vendor, name, versions_string,
description, cve_score, published_date]]
# Put in NaN in for name, version, vendor if unavailable in machine-readable cpe entries
except:
vendor = np.NaN
name = np.NaN
version = np.NaN
            vul_list += [[cve_id, vendor, name, version, description, cve_score, published_date]]
return vul_list
def print_stats(vul_list, nr_cve_items):
# Print some stats
print("Unique Vers: \t{} ({:1.0f}%)".format(len(vul_list), len(vul_list)/nr_cve_items*100))
missing_values = 0
incomplete_entries = 0
for entry in vul_list:
a = entry.count('*')
a += entry.count(np.NaN)
if a > 0:
incomplete_entries += 1
missing_values += a
print("Incomplete:\t{} ({:1.0f}%)".format(incomplete_entries, incomplete_entries/len(vul_list)*100))
def export_csv(vul_list, file_path):
csv_string = "cve_id, vendor, name, versions_string, description, cve_score, published_date\n"
    for line in vul_list:
        # quote every field and join without a trailing separator,
        # so each row has the same number of columns as the header
        line_string = ", ".join('"' + str(entry) + '"' for entry in line)
        csv_string += line_string + "\n"
with open(file_path, 'w') as outfile:
outfile.write(csv_string)
return csv_string
if __name__ == '__main__':
file_paths = download_json()
print(file_paths)
json_path="./data/nvdcve-1.1-2020.json"
vul_list = extract_table(file_path=json_path)
csv_path = json_path.replace("json", "csv")
export_csv(vul_list, file_path=csv_path)
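# Hedged sketch (not called above): loading the exported CSV back for analysis with
# pandas. The path mirrors the default json_path used in the __main__ block, and
# skipinitialspace handles the ", " separators written by export_csv.
def load_csv(path="./data/nvdcve-1.1-2020.csv"):
    return pd.read_csv(path, quotechar='"', skipinitialspace=True)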
|
expected_output = {
'vpn_id': {
1000: {
'vpn_id': 1000,
'encap': 'MPLS',
'esi': '0001.00ff.0102.0000.0011',
'eth_tag': 0,
'mp_resolved': True,
'mp_info': 'Remote all-active, ECMP Disable',
'pathlists': {
'ead_evi': {
'nexthop': {
'10.94.2.88': {
'label': 100010,
},
},
},
},
},
},
}
|
"""Interfaces for ClientModel and ServerModel."""
from abc import ABC, abstractmethod
import numpy as np
import random
from baseline_constants import ACCURACY_KEY, OptimLoggingKeys, AGGR_MEAN
from utils.model_utils import batch_data
class Model(ABC):
def __init__(self, lr, seed, max_batch_size, optimizer=None):
self.lr = lr
self.optimizer = optimizer
self.rng = random.Random(seed)
self.size = None
# largest batch size for which GPU will not run out of memory
self.max_batch_size = max_batch_size if max_batch_size is not None else 2 ** 14
print('***** using a max batch size of', self.max_batch_size)
self.flops = 0
def train(self, data, num_epochs=1, batch_size=10, lr=None):
"""
Trains the client model.
Args:
data: Dict of the form {'x': [list], 'y': [list]}.
num_epochs: Number of epochs to train.
batch_size: Size of training batches.
Return:
comp: Number of FLOPs computed while training given data
update: List of np.ndarray weights, with each weight array
corresponding to a variable in the resulting graph
averaged_loss: average of stochastic loss in the final epoch
"""
if lr is None:
lr = self.lr
averaged_loss = 0.0
batched_x, batched_y = batch_data(data, batch_size, rng=self.rng, shuffle=True)
if self.optimizer.w is None:
self.optimizer.initialize_w()
for epoch in range(num_epochs):
total_loss = 0.0
for i, raw_x_batch in enumerate(batched_x):
input_data = self.process_x(raw_x_batch)
raw_y_batch = batched_y[i]
target_data = self.process_y(raw_y_batch)
loss = self.optimizer.run_step(input_data, target_data)
total_loss += loss
averaged_loss = total_loss / len(batched_x)
# print('inner opt:', epoch, averaged_loss)
self.optimizer.end_local_updates() # required for pytorch models
update = np.copy(self.optimizer.w - self.optimizer.w_on_last_update)
self.optimizer.update_w()
comp = num_epochs * len(batched_y) * batch_size * self.flops
return comp, update, averaged_loss
def test(self, eval_data, train_data=None, split_by_user=True, train_users=True):
"""
Tests the current model on the given data.
Args:
eval_data: dict of the form {'x': [list], 'y': [list]}
train_data: None or same format as eval_data. If None, do not measure statistics on train_data.
Return:
dict of metrics that will be recorded by the simulation.
"""
if split_by_user:
output = {'eval': [-float('inf'), -float('inf')], 'train': [-float('inf'), -float('inf')]}
if self.optimizer.w is None:
self.optimizer.initialize_w()
total_loss, total_correct, count = 0.0, 0, 0
batched_x, batched_y = batch_data(eval_data, self.max_batch_size, shuffle=False, eval_mode=True)
for x, y in zip(batched_x, batched_y):
x_vecs = self.process_x(x)
labels = self.process_y(y)
loss = self.optimizer.loss(x_vecs, labels)
correct = self.optimizer.correct(x_vecs, labels)
total_loss += loss * len(y) # loss returns average over batch
total_correct += correct # eval_op returns sum over batch
count += len(y)
# counter_1 += 1
loss = total_loss / count
acc = total_correct / count
if train_users:
output['train'] = [loss, acc]
else:
output['eval'] = [loss, acc]
return {
ACCURACY_KEY: output['eval'][1],
OptimLoggingKeys.TRAIN_LOSS_KEY: output['train'][0],
OptimLoggingKeys.TRAIN_ACCURACY_KEY: output['train'][1],
OptimLoggingKeys.EVAL_LOSS_KEY: output['eval'][0],
OptimLoggingKeys.EVAL_ACCURACY_KEY: output['eval'][1]
}
else:
data_lst = [eval_data] if train_data is None else [eval_data, train_data]
output = {'eval': [-float('inf'), -float('inf')], 'train': [-float('inf'), -float('inf')]}
if self.optimizer.w is None:
self.optimizer.initialize_w()
# counter_0 = 0
for data, data_type in zip(data_lst, ['eval', 'train']):
# counter_1 = 0
total_loss, total_correct, count = 0.0, 0, 0
batched_x, batched_y = batch_data(data, self.max_batch_size, shuffle=False, eval_mode=True)
for x, y in zip(batched_x, batched_y):
x_vecs = self.process_x(x)
labels = self.process_y(y)
loss = self.optimizer.loss(x_vecs, labels)
correct = self.optimizer.correct(x_vecs, labels)
total_loss += loss * len(y) # loss returns average over batch
total_correct += correct # eval_op returns sum over batch
count += len(y)
# counter_1 += 1
loss = total_loss / count
acc = total_correct / count
output[data_type] = [loss, acc]
# counter_1 += 1
return {ACCURACY_KEY: output['eval'][1],
OptimLoggingKeys.TRAIN_LOSS_KEY: output['train'][0],
OptimLoggingKeys.TRAIN_ACCURACY_KEY: output['train'][1],
OptimLoggingKeys.EVAL_LOSS_KEY: output['eval'][0],
OptimLoggingKeys.EVAL_ACCURACY_KEY: output['eval'][1]
}
#def close(self):
# self.sess.close()
def process_x(self, raw_x_batch):
"""Pre-processes each batch of features before being fed to the model."""
return np.asarray(raw_x_batch)
# def process_y(self, raw_y_batch):
# """Pre-processes each batch of labels before being fed to the model."""
# res = []
# for i in range(len(raw_y_batch)):
# num = np.zeros(62) # Number of classes
# num[raw_y_batch[i]] = 1.0
# res.append(num)
# return np.asarray(res)
def process_y(self, raw_y_batch):
"""Pre-processes each batch of labels before being fed to the model."""
return np.asarray(raw_y_batch)
class ServerModel:
def __init__(self, model):
self.model = model
self.rng = model.rng
@property
def size(self):
return self.model.optimizer.size()
@property
def cur_model(self):
return self.model
def send_to(self, clients):
"""Copies server model variables to each of the given clients
Args:
clients: list of Client objects
"""
var_vals = {}
for c in clients:
c.model.optimizer.reset_w(self.model.optimizer.w)
c.model.size = self.model.optimizer.size()
@staticmethod
def weighted_average_oracle(points, weights):
"""Computes weighted average of atoms with specified weights
Args:
points: list, whose weighted average we wish to calculate
Each element is a list_of_np.ndarray
weights: list of weights of the same length as atoms
"""
tot_weights = np.sum(weights)
# Modif Here
# weighted_updates = [np.zeros_like(v) for v in points[0]]
weighted_updates = np.zeros_like(points[0])
for w, p in zip(weights, points):
weighted_updates += (w / tot_weights) * p
return weighted_updates
def update(self, updates, aggregation=AGGR_MEAN):
"""Updates server model using given client updates.
Args:
updates: list of (num_samples, update), where num_samples is the
number of training samples corresponding to the update, and update
is a list of variable weights
            aggregation: Algorithm used for aggregation. Allowed values:
                ['mean'], i.e. only weighted-mean aggregation is supported.
"""
if len(updates) == 0:
print('No updates obtained. Continuing without update')
return 1, False
def accept_update(u):
# norm = np.linalg.norm([np.linalg.norm(x) for x in u[1]])
norm = np.linalg.norm(u[1])
return not (np.isinf(norm) or np.isnan(norm))
all_updates = updates
updates = [u for u in updates if accept_update(u)]
if len(updates) < len(all_updates):
print('Rejected {} individual updates because of NaN or Inf'.format(len(all_updates) - len(updates)))
if len(updates) == 0:
print('All individual updates rejected. Continuing without update')
return 1, False
points = [u[1] for u in updates]
alphas = [u[0] for u in updates]
if aggregation == AGGR_MEAN:
weighted_updates = self.weighted_average_oracle(points, alphas)
num_comm_rounds = 1
else:
raise ValueError('Unknown aggregation strategy: {}'.format(aggregation))
# update_norm = np.linalg.norm([np.linalg.norm(v) for v in weighted_updates])
update_norm = np.linalg.norm(weighted_updates)
self.model.optimizer.w += np.array(weighted_updates)
self.model.optimizer.reset_w(self.model.optimizer.w) # update server model
updated = True
return num_comm_rounds, updated
class Optimizer(ABC):
def __init__(self, starting_w=None, loss=None, loss_prime=None):
self.w = starting_w
self.w_on_last_update = np.copy(starting_w)
self.optimizer_model = None
@abstractmethod
def loss(self, x, y):
return None
@abstractmethod
def gradient(self, x, y):
return None
@abstractmethod
def run_step(self, batched_x, batched_y): # should run a first order method step and return loss obtained
return None
@abstractmethod
def correct(self, x, y):
return None
def end_local_updates(self):
pass
def reset_w(self, w):
        self.w = np.copy(w)
self.w_on_last_update = np.copy(w)
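# Hedged illustration (standalone, not used above): weighted_average_oracle is simply a
# convex combination of client updates, weighted by their sample counts.
if __name__ == '__main__':
    points = [np.ones(3), 3 * np.ones(3)]
    avg = ServerModel.weighted_average_oracle(points, weights=[10, 30])
    print(avg)  # expected: [2.5 2.5 2.5]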
|
"""
This is a program that detects and removes unnecessary strings and characters.
"""
import re
import function.Rename_func as Rename_func
print("Files are about to be moved. If a file already exists at the destination, DELETE it? Answer yes or no\n")
ans = input()
if re.search("yes", ans) or re.search("y", ans):
delete = "1"
else:
delete = "0"
path1 = 'H:/TV(h.265)'
path2 = 'D:/TV(h.265)/映画'
path3 = 'D:/TV(h.265)/夏目友人帳シリーズ/夏目友人帳'
path4 = 'D:/TV(h.265)/夏目友人帳シリーズ/夏目友人帳 陸'
path5 = 'D:/TV(h.265)/夏目友人帳シリーズ/続 夏目友人帳'
path6 = 'D:/TV(h.265)/ANIMAX MUSIX'
Rename_func.rename(delete)
openPath = '/dataset/'
startYear, endYear = 1999, 2021
for i in range(endYear - startYear + 1):
with open(openPath+str(startYear+i)+'.txt', encoding="utf-8") as f:
AnimeList = [s.strip() for s in f.readlines()]
try:
Rename_func.searchfolder(
0, 150, str(startYear + i), path1, AnimeList)
        except: # a bare except like this is not really recommended, even officially..
pass
|
import glob, os
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
### Reading Data ###
path = "images/"
imlist = glob.glob(os.path.join(path, "*.jpg"))
def dataset(file_list, size=(180, 300), flattened = False):
data = []
for i, file in enumerate(file_list):
image = cv.imread(file)
image2 = cv.cvtColor(image, cv.COLOR_RGB2BGR)
image = cv.resize(image2, size)
if flattened:
image = image.flatten()
data.append(image)
labels = [1 if f.split("\\")[-1][0] == 'P' else 0 for f in file_list]
return np.array(data), np.array(labels)
print('Reading data...')
x, y = dataset(imlist, flattened=True)
### Split data ###
print('Splitting data...')
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33)
print(x_train.shape)
'''### Train SVCs and test ###
print('Training LinearSVC...')
clf = svm.LinearSVC()
clf.fit(x_train, y_train)
print('LinearSVC Score: ', clf.score(x_test, y_test))
print('Training SVC...')
clf = svm.SVC(gamma='scale')
clf.fit(x_train, y_train)
print('SVC Score: ', clf.score(x_test, y_test))'''
### Apply PCA ###
print('Applying PCA...')
pca = PCA()
pca.fit(x_train)
'''print(pca.components_.shape)
eigenvectors = pca.components_.reshape(478, 300, 180)[:64]
fig = plt.figure(figsize=(8, 8))
cols = 8
rows = 8
for i in range(1, 65):
fig.add_subplot(rows, cols, i)
plt.imshow(eigenvectors[i-1], cmap='Greys')
plt.show()'''
index = 0
sum_ = 0
for i in range(len(pca.explained_variance_ratio_)):
if sum_ > 0.90:
index = i
break
sum_+=pca.explained_variance_ratio_[i]
print("90 percent explained variance coverage component index: ", index)
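# Hedged note: the loop above can equivalently be written with numpy's cumulative sum;
# this recomputes the same cutoff (first component count whose running ratio sum exceeds 0.90).
index_vectorized = int(np.searchsorted(np.cumsum(pca.explained_variance_ratio_),
                                        0.90, side='right')) + 1
print("Vectorized equivalent of the cutoff index: ", index_vectorized)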
'''arr = np.arange(1, len(pca.explained_variance_ratio_)+1)
plt.plot(arr, pca.explained_variance_ratio_)
plt.show()'''
'''pca = PCA(index)
x_train = pca.fit_transform(x_train)
x_test = pca.transform(x_test)
### Train SVCs and test using transformed data ###
print('Training LinearSVC...')
clf = svm.LinearSVC()
clf.fit(x_train, y_train)
print('LinearSVC Score: ', clf.score(x_train, y_test))
print('Training SVC...')
clf = svm.SVC(gamma='scale')
clf.fit(x_train, y_train)
print('SVC Score: ', clf.score(x_test, y_test))'''
accuracies_svc = []
accuracies_lsvc = []
for i in range(1,479):
print('Applying PCA...')
pca = PCA(i)
x_tr = pca.fit_transform(x_train)
x_ts = pca.transform(x_test)
### Train SVCs and test using transformed data ###
print('Training LinearSVC...')
clf = svm.LinearSVC()
clf.fit(x_tr, y_train)
acc = clf.score(x_ts, y_test)
print('LinearSVC Score: ', acc)
accuracies_lsvc.append(acc)
print('Training SVC...')
clf = svm.SVC(gamma='scale')
clf.fit(x_tr, y_train)
acc = clf.score(x_ts, y_test)
print('SVC Score: ', acc)
accuracies_svc.append(acc)
arr = np.arange(1, 479)
plt.plot(arr, accuracies_lsvc)
plt.show()
plt.plot(arr, accuracies_svc)
plt.show()
|
# Copyright (C) 2018-2021
# Author: Cesar Roman
# Contact: cesar@thecesrom.dev
"""OPC - UA Functions.
The following functions allow you to interact directly with an OPC-UA
server.
"""
from __future__ import print_function
__all__ = ["callMethod"]
def callMethod(connectionName, objectId, methodId, inputs):
"""Calls a method in an OPC UA server. To make the most of this
function, you'll need to be familiar with methods in the OPC-UA
server.
Args:
connectionName (str): The name of the OPC-UA connection to the
server that the method resides in.
objectId (str): The NodeId of the Object Node the Method is a
member of.
methodId (str): The NodeId of the Method Node to call.
inputs (list[object]): A list of input values expected by the
method.
Returns:
tuple: A tuple containing the following:
0: Resulting StatusCode for the call
1: A list of StatusCode objects corresponding to each
input argument
2: A list of output values
"""
print(connectionName, objectId, methodId, inputs)
return None, None, None
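# Hedged usage sketch: the connection name and NodeIds below are placeholders, not
# values taken from any real OPC-UA server.
if __name__ == "__main__":
    status, input_statuses, outputs = callMethod(
        "OPC-UA Connection",
        "ns=2;s=Demo/Object",
        "ns=2;s=Demo/Object.Method",
        [42],
    )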
|
"""
Copyright (C) 2019 University of Massachusetts Amherst.
This file is part of "expLinkage"
http://github.com/iesl/expLinkage
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
from torch.autograd import Variable
import numpy as np
from utils.Config import Config
class LinearClassifier(torch.nn.Module):
"""docstring for Linear Classifier"""
def __init__(self, config):
super(LinearClassifier, self).__init__()
assert isinstance(config, Config)
self.config = config
self.inputDim = config.inputDim # Dimension of vector for each point
self.outputDim = 1
self.seqModel = torch.nn.Sequential(
torch.nn.Linear(self.inputDim,self.outputDim)
)
tempAlphaVal = np.random.normal(self.config.alphaInitMu, self.config.alphaInitSigma, 1)[0]
if self.config.useGPU:
self.linkAlpha = Variable(torch.cuda.FloatTensor([tempAlphaVal]), requires_grad=True)
else:
self.linkAlpha = Variable(torch.FloatTensor([tempAlphaVal]), requires_grad=True)
def __str__(self):
printStr = ""
printStr += "-----------------Linear Classifier Parameters----------------------" + "\n"
printStr += "linkAlpha:" + str(self.linkAlpha) + "\n"
printStr += "inputDim::" + str(self.inputDim) + "\n"
printStr += "output dissimilarity\t" + str(self.config.outDisSim) + "\n"
printStr += "Layers::" + str(self.seqModel) + "\n"
printStr += self.getWeightStr()
printStr += "-------------------------------------------------------------------"
return printStr
def getWeightStr(self):
weightStr = ""
weightStr += "Weight::{}".format(self.seqModel[0].weight) + "\n"
weightStr += "Bias::{}".format(self.seqModel[0].bias) + "\n"
return weightStr
def pairForward(self,pairFeature):
if self.config.useGPU:
pairFeature = Variable(torch.cuda.FloatTensor(pairFeature))
else:
pairFeature = Variable(torch.Tensor(pairFeature))
prediction = self.seqModel(pairFeature)
return prediction
def pairBatchForward(self,pairFeatureList):
if self.config.useGPU:
pairFeatureList = Variable(torch.cuda.FloatTensor(pairFeatureList))
else:
pairFeatureList = Variable(torch.Tensor(pairFeatureList))
prediction = self.seqModel(pairFeatureList)
return prediction
class AvgLinearClassifier(LinearClassifier):
def __init__(self, config):
super(AvgLinearClassifier, self).__init__(config)
biasPresent = self.seqModel[0].bias is not None
self.updateNum = 0
self.avgWeights = torch.nn.Linear(self.inputDim, self.outputDim, bias=biasPresent)
def __str__(self):
printStr = ""
printStr += "-----------------Average Linear Classifier Parameters-----------------------------" + "\n"
printStr += "linkAlpha::\t" + str(self.linkAlpha) + "\n"
printStr += "inputDim::\t" + str(self.inputDim) + "\n"
printStr += "output dissimilarity\t" + str(self.config.outDisSim) + "\n"
printStr += "updateNum" + str(self.updateNum) + "\n"
printStr += "Layers::" + str(self.seqModel) + "\n"
printStr += self.getWeightStr()
printStr += "-------------------------------------------------------------------"
return printStr
def getWeightStr(self):
weightStr = ""
weightStr += "Weight::{}".format(self.seqModel[0].weight) + "\n"
weightStr += "Bias::{}".format(self.seqModel[0].bias) + "\n"
weightStr += "Avg Weight::{}".format(self.avgWeights.weight.data) + "\n"
weightStr += "Avg Bias::{}".format(self.avgWeights.bias.data)+ "\n"
return weightStr
# Maintain a running average of the weights after each gradient update: avg_n = (n * avg_{n-1} + w_n) / (n + 1)
def updateAvgWeights(self):
self.avgWeights.weight.data = self.updateNum * self.avgWeights.weight.data + self.seqModel[0].weight.data
if self.avgWeights.bias is not None:
self.avgWeights.bias.data = self.updateNum * self.avgWeights.bias.data + self.seqModel[0].bias.data
self.updateNum += 1
self.avgWeights.weight.data = self.avgWeights.weight.data / self.updateNum
if self.avgWeights.bias is not None:
self.avgWeights.bias.data = self.avgWeights.bias.data / self.updateNum
def pairAvgBatchForward(self, pairFeatureList):
if self.config.useGPU:
pairFeatureList = Variable(torch.cuda.FloatTensor(pairFeatureList))
else:
pairFeatureList = Variable(torch.Tensor(pairFeatureList))
prediction = self.avgWeights(pairFeatureList)
return prediction
def pairAvgForward(self,pairFeature):
if self.config.useGPU:
pairFeature = Variable(torch.cuda.FloatTensor(pairFeature))
else:
pairFeature = Variable(torch.Tensor(pairFeature))
prediction = self.avgWeights(pairFeature)
return prediction
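# Rough usage sketch (assumes `cfg` is a utils.Config instance exposing inputDim, useGPU,
# alphaInitMu, alphaInitSigma and outDisSim; the variable names below are illustrative only):
#   model = AvgLinearClassifier(cfg)
#   scores = model.pairBatchForward(pair_features)          # current weights
#   avg_scores = model.pairAvgBatchForward(pair_features)   # running-average weights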
|
__all__ = ['Reply']
import time
from collections import OrderedDict
from tron import Misc, Parsing
"""
Reply is a slight misnomer --
- Reply to an existing command. Need to specify:
- flag, cmd, [src], KVs
- Generate non-command KVs. Need to specify:
- flag, actorCid, actorMid, src, KVs
"""
class Reply(Misc.Object):
def __init__(self, cmd, flag, KVs, bcast=True, **argv):
""" Create a parsed Reply.
Args:
cmd - the Command which we are a Reply to.
flag - the completion state flag.
KVs - parsed or unparsed keys. We accept OrderedDicts,
lists & tuples, and strings. The latter are parsed
into OrderedDicts.
"""
Misc.Object.__init__(self, **argv)
self.ctime = time.time()
self.cmd = cmd
self.flag = flag
self.bcast = bcast
if isinstance(KVs, OrderedDict):
self.KVs = KVs
else:
self.KVs = self.parseKVs(KVs)
self.src = argv.get('src', cmd.actorName)
def finishesCommand(self):
""" Return true if the given flag finishes a command. """
return self.flag in ':fF'
def __str__(self):
return 'Reply(cmd=%s flag=%s KVs=%s)' % (self.cmd, self.flag, self.KVs)
def parseKVs(self, kvl):
""" Convert some form of keys to an OrderedDict.
We are trying to be ridiculously flexible here. Take:
- a string, which we parse as it came from an ICC.
- a list, which we parse either as a list of key=value strings or of (key, value) pairs.
"""
if isinstance(kvl, str):
return Parsing.parseKVs(kvl)
od = OrderedDict()
if kvl is not None:
for i in kvl:
if isinstance(i, str):
k, v, junk = Parsing.parseKV(i)
od[k] = v
elif type(i) in (list, tuple) and len(i) == 2:
k, v, junk = Parsing.parseKV('%s=%s' % tuple(i))
od[k] = v
else:
Misc.log('Reply', 'kvl item is not a string: %r' % (i))
raise Exception('kvl == %r' % (i))
return od
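# For illustration (the exact string syntax depends on Parsing.parseKVs / Parsing.parseKV):
#   reply.parseKVs('key1=val1; key2=val2')              # parsed as it came from an ICC
#   reply.parseKVs(['key1=val1', ('key2', 'val2')])     # list of strings and/or (key, value) pairs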
|
from abc import ABC, abstractmethod
import collections.abc
import re
import struct
import util
# Defines classes for all tag types, with common logic contained in abstract base classes
# Abstract base classes and concrete classes are intertwined. They are grouped separately below
# for readability, but they cannot be moved into separate files because some base classes refer
# to concrete ones. If you can split them without making a mess, please do.
#-------------------------------------------- Functions --------------------------------------------
def from_snbt(snbt : str, pos : int = 0):
"""Create a TAG from SNBT when type is unknown"""
#print(f'Starting tests at {pos}')
for i in sorted(Base.subtypes, key = lambda i : i.snbtPriority):
try:
value, pos = i.from_snbt(snbt, pos)
except ValueError:
continue
else:
return value, pos
raise ValueError(f'Invalid snbt at {pos}')
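# Illustrative example (assuming well-formed SNBT input):
#   tag, end = from_snbt('{count:3b,name:"stone"}')
#   # -> tag is a Compound holding a Byte and a String; end is the index just past the closing '}'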
#-------------------------------------- Abstract Base Classes --------------------------------------
class Base(ABC):
"""Abstract Base Class of all tag types"""
ID = None
"""ID of this Tag"""
snbtPriority = None
"""Determines priority for from_snbt
Lowest goes first
"""
@property
def bit_length(self):
"""Returns the BIT length of this tag's value after encoding"""
return self.byte_length * 8
@property
def byte_length(self):
return len(self.to_bytes())
@classmethod
@abstractmethod
def decode(cls, iterable):
"""Decode a value from an iterable of byte NBT data"""
pass
@classmethod
@abstractmethod
def encode(cls, value):
"""Encode a value into byte NBT data"""
pass
@classmethod
def from_bytes(cls, iterable):
"""Create a tag from an iterable of NBT data bytes"""
return cls(cls.decode(iterable))
@classmethod
@abstractmethod
def from_snbt(cls, snbt):
"""Create a new TAG from SNBT
Return a (value, pos) tuple, where :
- <value> is a tag created from SNBT
- <pos> is the character index following this tag's snbt
"""
pass
def to_bytes(self):
"""Return NBT data bytearray from self"""
return self.encode(self.value)
@abstractmethod
def to_snbt(self):
"""Return a SNBT representation of this tag"""
pass
@classmethod
@property
def subtypes(cls):
return sorted(
[i for i in util.all_subclasses(cls) if i.ID is not None],
key = lambda i : i.ID
)
@abstractmethod
def valueType(value):
"""Convert value to the same type as this tag's .value"""
pass
@property
def value(self):
return self._value
@value.setter
def value(self, newValue):
newValue = self.valueType(newValue)
try:
self.encode(newValue) #Raises an exception if newValue is incompatible
except Exception as e:
raise type(e)(str(e) + f' (Invalid value {newValue} for {type(self)})')
self._value = newValue
def __eq__(self, other):
try:
return self.value.__eq__(self.valueType(other))
except ValueError:
return False
def __ge__(self, other):
try:
return self.value.__ge__(self.valueType(other))
except ValueError:
return False
def __gt__(self, other):
try:
return self.value.__gt__(self.valueType(other))
except ValueError:
return False
def __le__(self, other):
try:
return self.value.__le__(self.valueType(other))
except ValueError:
return False
def __lt__(self, other):
try:
return self.value.__lt__(self.valueType(other))
except ValueError:
return False
def __repr__(self):
return self.to_snbt()
class Value(Base):
"""Abstract Base Class for all simple value tag types"""
def __init__(self, value = None):
value = 0 if value is None else value
self.value = value
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
@classmethod
def from_snbt(cls, snbt : str, pos : int = 0):
match = re.compile(cls.regex).match(snbt[pos:])
try:
return cls(match['value']), pos + match.end()
except:
raise ValueError(f'Invalid snbt for {cls} at {pos}')
util.make_wrappers(Value, coercedMethods = ['__add__', '__mod__', '__rmod__', '__mul__', '__rmul__'])
class Number(Value):
"""Abstract Base Class for numerical tag types
Assignments to .value are automatically checked for compatibility
"""
fmt = None
"""Struct format string for packing and unpacking"""
suffixes = None
"""valid SNBT suffixes"""
@classmethod
def decode(cls, iterable):
byteValue = util.read_bytes(iterable, n = len(cls()))
return struct.unpack(cls.fmt, byteValue)[0]
@classmethod
def encode(cls, value : int = 0):
return struct.pack(cls.fmt, cls.valueType(value))
def to_snbt(self):
return f'{self.value}' + ('' if self.suffixes is None else f'{self.suffixes[0]}')
def __len__(self):
"""Returns the BYTE length of this tag's value after encoding"""
return self.byte_length
util.make_wrappers( Number,
coercedMethods = [
'conjugate',
'imag',
'real',
'__abs__',
'__ceil__',
'__floor__',
'__floordiv__',
'__neg__',
'__pos__',
'__pow__',
'__round__',
'__sub__',
'__truediv__',
'__trunc__'
],
nonCoercedMethods = [
'as_integer_ratio',
'__bool__',
'__divmod__',
'__radd__',
'__rdivmod__',
'__rfloordiv__',
'__rpow__',
'__rsub__',
'__rtruediv__'
]
)
class Integer(Number):
"""Abstract Base Class for integer numerical tag types"""
valueType = int
@classmethod
@property
def regex(cls):
return f'(?P<value>(?P<negative>-)?\\d+){"" if cls.suffixes is None else f"(?P<suffix>[{cls.suffixes}])"}'
@property
def unsigned(self):
"""The unsigned equivalent of this tag's value"""
return struct.unpack(self.fmt.upper(), self.to_bytes())[0]
@unsigned.setter
def unsigned(self, newValue):
newValue = struct.pack(self.fmt.upper(), self.valueType(newValue))
self._value = self.decode(newValue)
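# e.g. Byte(-1).unsigned == 255; conversely, setting .unsigned = 255 on a Byte stores a value of -1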
util.make_wrappers( Integer,
coercedMethods = [
'denominator',
'numerator',
'__and__',
'__invert__',
'__lshift__',
'__or__',
'__rshift__',
'__xor__'
],
nonCoercedMethods = [
'__rand__',
'__index__',
'__rlshift__',
'__ror__',
'__rrshift__',
'__rxor__'
]
)
class Decimal(Number):
"""Abstract Base Class for decimal numerical tag types"""
regex = r'(?P<value>(?P<negative>-)?(?P<integer>\d+)?(?P<dot>\.)?(?P<decimal>(?(integer)\d*|\d+)))'
valueType = float
@classmethod
def fromhex(cls, string):
return cls( float.fromhex(string) )
util.make_wrappers(Decimal, nonCoercedMethods=['hex','is_integer'])
class Sequence(Base, collections.abc.Sequence):
"""Abstract Base Class for sequence tag types"""
pass
util.make_wrappers(Sequence, nonCoercedMethods = ['__getitem__', '__iter__', '__len__'])
class MutableSequence(Sequence, collections.abc.MutableSequence):
"""Abstract Base Class for MutableSequence tag types"""
prefix = None
"""SNBT list Prefix"""
valueType = list
def __init__(self, value = None):
"""Checks that all elements are type compatible through self.append"""
value = [] if value is None else value
self.value = []
for i in value:
self.append(i)
def append(self, value):
self.value.append(self.elementType(value))
@classmethod
def decode(cls, iterable):
iterator = iter(iterable)
elementType = cls.elementType
if isinstance(elementType, property):
elementType = Base.subtypes[ Byte.decode(iterator) ]
length = Int.decode(iterator)
return [elementType.from_bytes(iterator) for _ in range(length)]
@staticmethod
def encode(value = None):
value = [] if value is None else value
byteValue = Int.encode(len(value))
for element in value:
byteValue += element.to_bytes()
if isinstance(element, Compound):
byteValue += End.encode()
return byteValue
@property
def elementID(self):
return self.elementType.ID
@classmethod
def from_snbt(cls, snbt : str, pos : int = 0):
match = re.compile(f'\\[{cls.prefix}').match(snbt, pos)
if match is None:
raise ValueError(f'Missing "[{cls.prefix}" at {pos} for {cls}')
pos = match.end()
value = []
if snbt[pos] != ']':
if isinstance(cls.elementType, property):
elementType = type(from_snbt(snbt, pos)[0])
else:
elementType = cls.elementType
while True:
itemValue, pos = elementType.from_snbt(snbt, pos)
value.append(itemValue)
if snbt[pos] == ',':
pos += 1
continue
elif snbt[pos] == ']':
break
else:
raise ValueError(f'Missing "," or "]" at {pos}')
return cls(value), pos+1
def insert(self, key, value):
self.value = self[:key] + [value] + self[key:]
def sort(self, *, key=None, reverse=False):
self.value.sort(key=key, reverse=reverse)
def to_snbt(self):
return f'[{self.prefix}{",".join( [i.to_snbt() for i in self.value] )}]'
def __add__(self, other):
return type(self)( self.value + [self.elementType(i) for i in other] )
def __delitem__(self, key):
del self.value[key]
def __setitem__(self, key, value):
"""Replace self[key] with value.
Value must be able to convert to self.elementType
"""
self.value[key] = self.elementType(value)
util.make_wrappers( MutableSequence,
coercedMethods = [
'copy',
'__mul__',
'__rmul__'
],
nonCoercedMethods = [
'__radd__'
]
)
class MutableMapping(Base, collections.abc.MutableMapping):
"""Abstract Base Class for MutableMapping type TAGs"""
valueType = dict
def __init__(self, value=None):
value = value or {}
self.value = {}
for i in value:
self[i] = value[i]
@staticmethod
def decode(iterable):
iterator = iter(iterable)
value = {}
while True:
try:
itemType = Base.subtypes[ Byte.decode(iterator) ]
except StopIteration:
break
if itemType == End:
break
itemName = String.decode(iterator)
itemValue = itemType.from_bytes(iterator)
value[itemName] = itemValue
return value
@classmethod
def from_snbt(cls, snbt : str, pos : int = 0):
try:
assert snbt[pos] == '{'
except (AssertionError, IndexError):
raise ValueError(f'Missing "{{" at {pos}!')
pos += 1
regex = r'(?P<openQuote>")?(?P<name>(?(openQuote)[^"]|[^":,}])*)(?(openQuote)(?P<endQuote>")):'
pattern = re.compile(regex)
value = {}
if snbt[pos] != '}':
while True:
match = pattern.match(snbt, pos)
if match is not None:
value[match['name']], pos = from_snbt(snbt, match.end())
if snbt[pos] == ',':
pos += 1
continue
elif snbt[pos] == '}':
break
else:
raise ValueError(f'Missing "," or "}}" at {pos}')
else:
raise ValueError(f'Invalid name at {pos}')
return cls(value), pos+1
@staticmethod
def encode(value = None):
value = {} if value is None else value
byteValue = bytearray()
for element in value:
byteValue += Byte.encode(value[element].ID)
byteValue += String.encode(element)
byteValue += value[element].to_bytes()
if isinstance(value[element], Compound):
byteValue += End.encode()
return byteValue
def to_snbt(self):
pairs = []
for key in self:
value = self[key].to_snbt()
if ':' in key or ',' in key or '}' in key:
key = f'"{key}"'
pairs.append(f'{key}:{value}')
return f'{{{",".join(pairs)}}}'
def __setitem__(self, key, value):
"""Replace self[key] with <value>
<value> must type-compatible with self[key] if it exists
"""
if key in self:
if isinstance(self[key], List) and len(self[key]) > 0:
value = [self[key].elementType(i) for i in value]
value = type(self[key])(value)
try:
assert isinstance(value, Base)
except AssertionError:
raise ValueError(f'{type(self)} can only contain other TAGs, not {type(value)}')
self.value[key] = value
util.make_wrappers( MutableMapping,
nonCoercedMethods = ['keys', '__delitem__', '__getitem__', '__iter__', '__len__']
)
#---------------------------------------- Concrete Classes -----------------------------------------
class End(Base):
"""You probably don't want to use this.
Ends a Compound, expect erratic behavior if inserted inside one.
"""
__slots__ = []
ID = 0
snbtPriority = 12
valueType = None
def __init__(self, value = None):
pass
@classmethod
def decode(cls, iterable):
return
@staticmethod
def encode(value = None):
return b'\x00'
@classmethod
def from_snbt(cls, snbt : str, pos : int = 0):
if snbt[pos:] == '':
return cls()
else:
raise ValueError(f'Invalid snbt for {cls} (expected empty string)')
def to_snbt(self):
return ''
class Byte(Integer):
"""Int8 tag (0 to 255)"""
__slots__ = ['_value']
ID = 1
fmt = '>b'
snbtPriority = 8
suffixes = 'bB'
class Short(Integer):
"""Int16 tag (-32,768 to 32,767)"""
__slots__ = ['_value']
ID = 2
fmt = '>h'
snbtPriority = 9
suffixes = 'sS'
class Int(Integer):
"""Int32 tag (-2,147,483,648 to 2,147,483,647)"""
__slots__ = ['_value']
ID = 3
fmt = '>i'
snbtPriority = 11
class Long(Integer):
"""Int64 tag (-9,223,372,036,854,775,808 to 9,223,372,036,854,775,807)"""
__slots__ = ['_value']
ID = 4
fmt = '>q'
snbtPriority = 10
suffixes = 'Ll'
class Float(Decimal):
"""Single precision float tag (32 bits)"""
__slots__ = ['_value']
ID = 5
fmt = '>f'
regex = f'{Decimal.regex}(?P<suffix>[fF])'
snbtPriority = 6
suffixes = 'fF'
class Double(Decimal):
"""Double precision float tag (64 bits)"""
__slots__ = ['_value']
ID = 6
fmt = '>d'
regex = f'{Decimal.regex}(?P<suffix>(?(dot)[dD]?|[dD]))'
snbtPriority = 7
suffixes = 'dD'
class Byte_Array(MutableSequence):
"""A Byte array
Contained tags have no name
"""
__slots__ = ['_value']
ID = 7
elementType = Byte
prefix = 'B;'
snbtPriority = 1
class String(Value, Sequence):
"""Unicode string tag
Payload : a Short for length, then a <length> bytes long UTF-8 string
"""
__slots__ = ['_value']
ID = 8
regex = r"""(?P<openQuote>['"])(?P<value>(?:(?!(?P=openQuote))[^\\]|\\.)*)(?P<endQuote>(?P=openQuote))"""
snbtPriority = 5
valueType = str
@classmethod
def decode(cls, iterable):
iterator = iter(iterable)
byteLength = Short.decode(iterator)
byteValue = util.read_bytes(iterator, n = byteLength)
return byteValue.decode(encoding='utf-8')
@staticmethod
def encode(value : str = ''):
byteValue = str.encode( str(value) )
return Short.encode(len(byteValue)) + byteValue
def isidentifier(self):
return False
def join(self, iterable):
iterable = [i if isinstance(i, str) else str(i) for i in iterable]
return self.__class__( self.value.join(iterable) )
def partition(self, sep):
"""Partition the String into three parts using the given separator.
This will search for the separator in the String. If the separator is found,
returns a 3-tuple containing the part before the separator, the separator
itself, and the part after it.
If the separator is not found, returns a 3-tuple containing the original String
and two empty TAG.String.
"""
return tuple( [self.__class__(i) for i in self.value.partition(sep)] )
def rpartition(self, sep):
return tuple( [self.__class__(i) for i in self.value.rpartition(sep)] )
def rsplit(self, sep=None, maxsplit=-1):
return [self.__class__(i) for i in self.value.rsplit(sep, maxsplit)]
def split(self, sep=None, maxsplit=-1):
return [self.__class__(i) for i in self.value.split(sep, maxsplit)]
def splitlines(self, keepends=False):
return [self.__class__(i) for i in self.value.splitlines(keepends)]
def to_snbt(self):
# f-string does not allow for backslashes inside the {}, hence the workaround
# I think this ban is stupid but I don't control python (yet ?)
return '"{}"'.format(''.join([char if char != '"' else r'\"' for char in self]))
def __str__(self):
return self.value
util.make_wrappers( String,
coercedMethods = [
'capitalize',
'casefold',
'center',
'expandtabs',
'format',
'format_map',
'lstrip',
'ljust',
'lower',
'replace',
'rjust',
'rstrip',
'strip',
'swapcase',
'title',
'translate',
'upper',
'zfill'
],
nonCoercedMethods = [
'endswith',
'find',
'isalnum',
'isalpha',
'isascii',
'isdecimal',
'isdigit',
'islower',
'isnumeric',
'isprintable',
'isspace',
'istitle',
'isupper',
'maketrans',
'rfind',
'rindex',
'startswith'
]
)
class List(MutableSequence):
"""A list of tags, all of type self.elementType
Type checks any additions unless it is empty
If empty, self.elementType will be End
"""
__slots__ = ['_value']
ID = 9
prefix = ''
snbtPriority = 4
def append(self, value):
"""Append to the list, perform type checking unless it is empty"""
if self.elementType == End and isinstance(value, Base):
self.value.append(value)
elif self.elementType != End:
super().append(value)
else:
raise ValueError('Can only append TAGs to empty List')
@property
def elementType(self):
if len(self) > 0:
return type(self[0])
else:
return End
@classmethod
def encode(cls, value = None):
value = [] if value is None else value
ID = value[0].ID if len(value) > 0 else 0
return Byte.encode(ID) + super().encode(value)
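# For illustration: List([Int(1), Int(2)]).to_snbt() == '[1,2]', whereas
# Byte_Array([1, 2]).to_snbt() == '[B;1b,2b]' (elements are coerced to Byte).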
class Compound(MutableMapping):
"""A Tag dictionary, containing other named tags of any type."""
__slots__ = ['_value']
ID = 10
snbtPriority = 0
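# For illustration: Compound({'name': String('stone'), 'count': Byte(3)}).to_snbt()
# should yield '{name:"stone",count:3b}' (keys containing ':', ',' or '}' get quoted).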
class Int_Array(MutableSequence):
"""A Int array
Contained tags have no name
"""
__slots__ = ['_value']
ID = 11
elementType = Int
prefix = 'I;'
snbtPriority = 2
class Long_Array(MutableSequence):
"""A Long array
Contained tags have no name
"""
__slots__ = ['_value']
ID = 12
elementType = Long
prefix = 'L;'
snbtPriority = 3
|
__author__ = 'yanivshalev'
from hydro.connectors.mysql import MySqlConnector
if __name__ == '__main__':
params = {
'source_type': 'mysql',
'connection_type': 'connection_string',
'connection_string': '127.0.0.1',
'db_name': 'test',
'db_user': 'xxx',
'db_password': 'yyy'
}
con = MySqlConnector(params)
print(con.execute('select 1 a'))
|
import pytest
import os
from xonsh.commands_cache import (CommandsCache, predict_shell,
SHELL_PREDICTOR_PARSER, predict_true, predict_false)
from tools import skip_if_on_windows
def test_commands_cache_lazy(xonsh_builtins):
cc = CommandsCache()
assert not cc.lazyin('xonsh')
assert 0 == len(list(cc.lazyiter()))
assert 0 == cc.lazylen()
TRUE_SHELL_ARGS = [
['-c', 'yo'],
['-c=yo'],
['file'],
['-i', '-l', 'file'],
['-i', '-c', 'yo'],
['-i', 'file'],
['-i', '-c', 'yo', 'file'],
]
@pytest.mark.parametrize('args', TRUE_SHELL_ARGS)
def test_predict_shell_parser(args):
ns, unknown = SHELL_PREDICTOR_PARSER.parse_known_args(args)
if ns.filename is not None:
assert not ns.filename.startswith('-')
@pytest.mark.parametrize('args', TRUE_SHELL_ARGS)
def test_predict_shell_true(args):
assert predict_shell(args)
FALSE_SHELL_ARGS = [
[],
['-c'],
['-i'],
['-i', '-l'],
]
@pytest.mark.parametrize('args', FALSE_SHELL_ARGS)
def test_predict_shell_false(args):
assert not predict_shell(args)
PATTERN_BIN_USING_TTY_OR_NOT = [
(False, {10: b'isnotatty'}),
(False, {12: b'isatty'}),
(False, {10: b'isatty', 100: b'tcgetattr', }),
(False, {10: b'isatty', 100: b'tcsetattr'}),
(True, {10: b'isatty', 100: b'tcsetattr', 1000: b'tcgetattr'}),
(True, {1000: b'libncurses'}),
(True, {2045: b'tcgetattr', 4095: b'tcgetattr', 6140: b'tcsetattr',
8190: b'isatty'}),
]
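# Each entry is (uses_tty, {byte_offset: pattern}). The test below writes these byte patterns
# at the given offsets into a dummy binary, then checks that the default predictor maps
# TTY-using binaries to predict_false and the rest to predict_true.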
@pytest.mark.parametrize('args', PATTERN_BIN_USING_TTY_OR_NOT)
@skip_if_on_windows
def test_commands_cache_predictor_default(args):
cc = CommandsCache()
use_tty, patterns = args
f = open('testfile', 'wb')
where = list(patterns.keys())
where.sort()
pos = 0
for w in where:
f.write(b'\x20' * (w - pos))
f.write(patterns[w])
pos = w + len(patterns[w])
f.write(b'\x20' * (pos // 2))
f.close()
result = cc.default_predictor_readbin('', os.getcwd() + os.sep + 'testfile',
timeout=1, failure=None)
expected = predict_false if use_tty else predict_true
assert result == expected
|
import _plotly_utils.basevalidators
class ShapeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name='shape', parent_name='scattercarpet.line', **kwargs
):
super(ShapeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='plot',
role='style',
values=['linear', 'spline'],
**kwargs
)
|
import numpy as np
with open('input.txt') as f:
input = f.readlines()
input = [i.strip() for i in input]
def scope(c):
if c in '()':
return 0
elif c in '[]':
return 1
elif c in '{}':
return 2
elif c in '<>':
return 3
else:
return -1 #error
s_paren=0
s_square=0
s_curly=0
s_angle=0
incomplete=[]
error=False
for [x,line] in enumerate(input):
s = scope(line[0])
old_s=[]
if line[0] in ']}>)':
print('bad opening')
error=False
for [i,character] in enumerate(line):
if character in ')]}>':
if scope(character)!=s:
if character==')':
s_paren+=1
elif character==']':
s_square+=1
elif character=='}':
s_curly +=1
elif character=='>':
s_angle +=1
error=True
break
else:
s=old_s[-1]
old_s.pop()
else:
old_s.append(s)
s=scope(character)
if error==False:
incomplete.append(input[x])
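# Part 2: for every incomplete line, rebuild the stack of expected closing characters,
# then score the completion string (score = score * 5 + 1/2/3/4 for ')', ']', '}', '>')
# and report the median score.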
scores=[]
score=0
for line in incomplete:
cache=[0]
for character in line:
if character == cache[-1]:
cache.pop()
elif character == '(':
cache.append(')')
elif character == '[':
cache.append(']')
elif character == '{':
cache.append('}')
elif character == '<':
cache.append('>')
cache.pop(0)
cache.reverse()
string=''
score=0
for s in cache:
score*=5
if s==')':
score+=1
elif s==']':
score+=2
elif s=='}':
score+=3
elif s=='>':
score+=4
print(line," =", score)
scores.append(score)
print("median score =", int(np.median(scores)))
|
#
# PySNMP MIB module BIANCA-BRICK-L2TP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BIANCA-BRICK-L2TP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:21:18 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
DisplayString, = mibBuilder.importSymbols("RFC1158-MIB", "DisplayString")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, Bits, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ObjectIdentity, MibIdentifier, Counter64, Integer32, TimeTicks, enterprises, Counter32, NotificationType, IpAddress, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "Bits", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ObjectIdentity", "MibIdentifier", "Counter64", "Integer32", "TimeTicks", "enterprises", "Counter32", "NotificationType", "IpAddress", "iso")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
bintec = MibIdentifier((1, 3, 6, 1, 4, 1, 272))
bibo = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4))
vpn = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 23))
l2tp = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 23, 8))
l2tpGlobals = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 10))
l2tpGlobUdpPort = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 10, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(1701)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpGlobUdpPort.setStatus('mandatory')
l2tpGlobPortUsage = MibScalar((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 10, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("single", 1), ("floating", 2))).clone('floating')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpGlobPortUsage.setStatus('mandatory')
l2tpTunnelProfileTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20), )
if mibBuilder.loadTexts: l2tpTunnelProfileTable.setStatus('mandatory')
l2tpTunnelProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10), ).setIndexNames((0, "BIANCA-BRICK-L2TP-MIB", "l2tpTunnelProfileIndex"))
if mibBuilder.loadTexts: l2tpTunnelProfileEntry.setStatus('mandatory')
l2tpTunnelProfileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileIndex.setStatus('mandatory')
l2tpTunnelProfileName = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 15), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileName.setStatus('mandatory')
l2tpTunnelProfileRemoteIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 20), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileRemoteIpAddress.setStatus('mandatory')
l2tpTunnelProfileRemoteIpAddressBackup = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 25), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileRemoteIpAddressBackup.setStatus('mandatory')
l2tpTunnelProfileRemoteUdpPort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 30), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(1701)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileRemoteUdpPort.setStatus('mandatory')
l2tpTunnelProfileRemoteHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 40), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileRemoteHostname.setStatus('mandatory')
l2tpTunnelProfileLocalIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 50), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileLocalIpAddress.setStatus('mandatory')
l2tpTunnelProfileLocalUdpPort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 60), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileLocalUdpPort.setStatus('mandatory')
l2tpTunnelProfileLocalHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 70), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileLocalHostname.setStatus('mandatory')
l2tpTunnelProfilePassword = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 80), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfilePassword.setStatus('mandatory')
l2tpTunnelProfileReceiveWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 90), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileReceiveWindowSize.setStatus('mandatory')
l2tpTunnelProfileHelloInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 100), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255)).clone(30)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileHelloInterval.setStatus('mandatory')
l2tpTunnelProfileSessionDataSequencing = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 110), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("delete", 1), ("disabled", 2), ("enabled", 3))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileSessionDataSequencing.setStatus('mandatory')
l2tpTunnelProfileMinRetryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 120), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileMinRetryTime.setStatus('mandatory')
l2tpTunnelProfileMaxRetryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 130), Integer32().subtype(subtypeSpec=ValueRangeConstraint(8, 255)).clone(16)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileMaxRetryTime.setStatus('mandatory')
l2tpTunnelProfileMaxRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 140), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileMaxRetries.setStatus('mandatory')
l2tpTunnelProfileRadiusAssignment = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 150), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileRadiusAssignment.setStatus('mandatory')
l2tpTunnelProfileRadiusGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 20, 10, 160), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 9))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelProfileRadiusGroupId.setStatus('mandatory')
l2tpTunnelTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30), )
if mibBuilder.loadTexts: l2tpTunnelTable.setStatus('mandatory')
l2tpTunnelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10), ).setIndexNames((0, "BIANCA-BRICK-L2TP-MIB", "l2tpTunnelLocalTunnelId"))
if mibBuilder.loadTexts: l2tpTunnelEntry.setStatus('mandatory')
l2tpTunnelRemoteIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 10), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpTunnelRemoteIpAddress.setStatus('mandatory')
l2tpTunnelRemoteUdpPort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpTunnelRemoteUdpPort.setStatus('mandatory')
l2tpTunnelRemoteTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 30), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpTunnelRemoteTunnelId.setStatus('mandatory')
l2tpTunnelRemoteReceiveWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 35), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpTunnelRemoteReceiveWindowSize.setStatus('mandatory')
l2tpTunnelRemoteHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 40), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpTunnelRemoteHostname.setStatus('mandatory')
l2tpTunnelRemoteVendorName = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 50), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpTunnelRemoteVendorName.setStatus('mandatory')
l2tpTunnelLocalIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 60), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpTunnelLocalIpAddress.setStatus('mandatory')
l2tpTunnelLocalUdpPort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 70), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpTunnelLocalUdpPort.setStatus('mandatory')
l2tpTunnelLocalTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 80), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpTunnelLocalTunnelId.setStatus('mandatory')
l2tpTunnelLocalReceiveWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 85), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1)).clone(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpTunnelLocalReceiveWindowSize.setStatus('mandatory')
l2tpTunnelLocalHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 90), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpTunnelLocalHostname.setStatus('mandatory')
l2tpTunnelPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 100), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpTunnelPassword.setStatus('mandatory')
l2tpTunnelHelloInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 120), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255)).clone(60)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelHelloInterval.setStatus('mandatory')
l2tpTunnelSessionDataSequencing = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 130), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("disabled", 2), ("enabled", 3))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelSessionDataSequencing.setStatus('mandatory')
l2tpTunnelMinRetryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 140), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelMinRetryTime.setStatus('mandatory')
l2tpTunnelMaxRetryTime = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 150), Integer32().subtype(subtypeSpec=ValueRangeConstraint(8, 255)).clone(16)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelMaxRetryTime.setStatus('mandatory')
l2tpTunnelMaxRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 160), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelMaxRetries.setStatus('mandatory')
l2tpTunnelState = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 30, 10, 170), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("idle", 1), ("waitctlreply", 2), ("waitctlconn", 3), ("established", 4), ("shutdown", 5))).clone('idle')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpTunnelState.setStatus('mandatory')
l2tpSessionTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40), )
if mibBuilder.loadTexts: l2tpSessionTable.setStatus('mandatory')
l2tpSessionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10), ).setIndexNames((0, "BIANCA-BRICK-L2TP-MIB", "l2tpSessionLocalSessionId"))
if mibBuilder.loadTexts: l2tpSessionEntry.setStatus('mandatory')
l2tpSessionRemoteIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 10), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionRemoteIpAddress.setStatus('mandatory')
l2tpSessionRemoteUdpPort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionRemoteUdpPort.setStatus('mandatory')
l2tpSessionRemoteTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 30), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionRemoteTunnelId.setStatus('mandatory')
l2tpSessionRemoteSessionId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 40), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionRemoteSessionId.setStatus('mandatory')
l2tpSessionRemoteHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 50), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionRemoteHostname.setStatus('mandatory')
l2tpSessionLocalIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 60), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionLocalIpAddress.setStatus('mandatory')
l2tpSessionLocalUdpPort = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 70), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionLocalUdpPort.setStatus('mandatory')
l2tpSessionLocalTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 80), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionLocalTunnelId.setStatus('mandatory')
l2tpSessionLocalSessionId = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 90), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionLocalSessionId.setStatus('mandatory')
l2tpSessionLocalHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 100), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionLocalHostname.setStatus('mandatory')
l2tpSessionCallSerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 110), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionCallSerialNumber.setStatus('mandatory')
l2tpSessionDataSequencing = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 120), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("disabled", 2), ("enabled", 3))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpSessionDataSequencing.setStatus('mandatory')
l2tpSessionState = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 130), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("idle", 1), ("waittunnel", 2), ("waitcsanswer", 3), ("waitreply", 4), ("waitconnect", 5), ("established", 6), ("shutdown", 7))).clone('idle')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: l2tpSessionState.setStatus('mandatory')
l2tpSessionInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 140), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionInfo.setStatus('mandatory')
l2tpSessionClientPPPCrcErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 150), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionClientPPPCrcErrors.setStatus('mandatory')
l2tpSessionClientPPPFramingErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 160), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionClientPPPFramingErrors.setStatus('mandatory')
l2tpSessionClientPPPHardwareOverruns = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 170), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionClientPPPHardwareOverruns.setStatus('mandatory')
l2tpSessionClientPPPBufferOverruns = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 180), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionClientPPPBufferOverruns.setStatus('mandatory')
l2tpSessionClientPPPTimeoutErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 190), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionClientPPPTimeoutErrors.setStatus('mandatory')
l2tpSessionClientPPPAlignmentErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 23, 8, 40, 10, 200), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: l2tpSessionClientPPPAlignmentErrors.setStatus('mandatory')
mibBuilder.exportSymbols("BIANCA-BRICK-L2TP-MIB", l2tpSessionTable=l2tpSessionTable, l2tpSessionEntry=l2tpSessionEntry, l2tpSessionClientPPPAlignmentErrors=l2tpSessionClientPPPAlignmentErrors, l2tpTunnelProfileMaxRetries=l2tpTunnelProfileMaxRetries, l2tpSessionRemoteUdpPort=l2tpSessionRemoteUdpPort, l2tpTunnelProfileHelloInterval=l2tpTunnelProfileHelloInterval, bintec=bintec, l2tpSessionLocalTunnelId=l2tpSessionLocalTunnelId, l2tpSessionLocalIpAddress=l2tpSessionLocalIpAddress, l2tpTunnelProfileIndex=l2tpTunnelProfileIndex, l2tpTunnelProfileName=l2tpTunnelProfileName, bibo=bibo, l2tpTunnelTable=l2tpTunnelTable, l2tpTunnelProfileRadiusAssignment=l2tpTunnelProfileRadiusAssignment, l2tpGlobPortUsage=l2tpGlobPortUsage, l2tpTunnelPassword=l2tpTunnelPassword, l2tpTunnelProfileRemoteIpAddress=l2tpTunnelProfileRemoteIpAddress, l2tpTunnelProfileReceiveWindowSize=l2tpTunnelProfileReceiveWindowSize, l2tpTunnelRemoteHostname=l2tpTunnelRemoteHostname, l2tp=l2tp, l2tpGlobUdpPort=l2tpGlobUdpPort, l2tpTunnelRemoteUdpPort=l2tpTunnelRemoteUdpPort, l2tpTunnelMinRetryTime=l2tpTunnelMinRetryTime, l2tpSessionRemoteIpAddress=l2tpSessionRemoteIpAddress, l2tpTunnelEntry=l2tpTunnelEntry, l2tpGlobals=l2tpGlobals, l2tpTunnelRemoteVendorName=l2tpTunnelRemoteVendorName, l2tpSessionRemoteHostname=l2tpSessionRemoteHostname, l2tpSessionLocalHostname=l2tpSessionLocalHostname, l2tpSessionClientPPPTimeoutErrors=l2tpSessionClientPPPTimeoutErrors, l2tpTunnelProfileLocalHostname=l2tpTunnelProfileLocalHostname, l2tpTunnelProfileMinRetryTime=l2tpTunnelProfileMinRetryTime, l2tpTunnelLocalTunnelId=l2tpTunnelLocalTunnelId, l2tpTunnelProfileRemoteIpAddressBackup=l2tpTunnelProfileRemoteIpAddressBackup, l2tpTunnelProfileRadiusGroupId=l2tpTunnelProfileRadiusGroupId, l2tpTunnelProfileTable=l2tpTunnelProfileTable, l2tpTunnelRemoteTunnelId=l2tpTunnelRemoteTunnelId, l2tpTunnelLocalUdpPort=l2tpTunnelLocalUdpPort, l2tpSessionInfo=l2tpSessionInfo, l2tpTunnelProfilePassword=l2tpTunnelProfilePassword, l2tpTunnelProfileLocalUdpPort=l2tpTunnelProfileLocalUdpPort, vpn=vpn, l2tpSessionClientPPPHardwareOverruns=l2tpSessionClientPPPHardwareOverruns, l2tpSessionClientPPPCrcErrors=l2tpSessionClientPPPCrcErrors, l2tpTunnelRemoteIpAddress=l2tpTunnelRemoteIpAddress, l2tpTunnelProfileMaxRetryTime=l2tpTunnelProfileMaxRetryTime, l2tpTunnelProfileEntry=l2tpTunnelProfileEntry, l2tpTunnelLocalHostname=l2tpTunnelLocalHostname, l2tpTunnelLocalIpAddress=l2tpTunnelLocalIpAddress, l2tpSessionLocalUdpPort=l2tpSessionLocalUdpPort, l2tpSessionCallSerialNumber=l2tpSessionCallSerialNumber, l2tpSessionState=l2tpSessionState, l2tpSessionClientPPPFramingErrors=l2tpSessionClientPPPFramingErrors, l2tpTunnelState=l2tpTunnelState, l2tpSessionDataSequencing=l2tpSessionDataSequencing, l2tpTunnelProfileLocalIpAddress=l2tpTunnelProfileLocalIpAddress, l2tpTunnelRemoteReceiveWindowSize=l2tpTunnelRemoteReceiveWindowSize, l2tpSessionLocalSessionId=l2tpSessionLocalSessionId, l2tpSessionClientPPPBufferOverruns=l2tpSessionClientPPPBufferOverruns, l2tpTunnelProfileRemoteHostname=l2tpTunnelProfileRemoteHostname, l2tpSessionRemoteTunnelId=l2tpSessionRemoteTunnelId, l2tpTunnelSessionDataSequencing=l2tpTunnelSessionDataSequencing, l2tpTunnelMaxRetryTime=l2tpTunnelMaxRetryTime, l2tpTunnelHelloInterval=l2tpTunnelHelloInterval, l2tpTunnelProfileSessionDataSequencing=l2tpTunnelProfileSessionDataSequencing, l2tpTunnelProfileRemoteUdpPort=l2tpTunnelProfileRemoteUdpPort, l2tpSessionRemoteSessionId=l2tpSessionRemoteSessionId, l2tpTunnelMaxRetries=l2tpTunnelMaxRetries, 
l2tpTunnelLocalReceiveWindowSize=l2tpTunnelLocalReceiveWindowSize)
|
# All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at the code below!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "13/10/17 20:54"
from config import CONFIG
from optparse import OptionGroup
from libs.core.options import Options
from libs.core.logger import getLoggers
from libs.core.unittest import filter, Tools
from libs.core.exceptions import RuntimeInterruptError
class Exec(Options):
"""
Test execute options.
"""
def __init__(self):
super(Exec, self).__init__()
self.logger, self.syslogger = getLoggers(__file__)
def group(self, parser):
group = OptionGroup(parser, 'Test execute filter')
group.add_option('-t', '--tests', dest='tests',
default=CONFIG.SYSTEM.DEFAULT_TEST if hasattr(CONFIG.SYSTEM, 'DEFAULT_TEST') else None,
help='Filter TestCases to run. Use comma "," as delimiter to specify one or more TestCases. '
+ 'Usage: -t "testCase1, testCase2". '
+ 'TestCases may be duplicated to configure the same TestSuites with different parameters. '
+ 'Use the [--case-list] option to print all available TestCases.')
group.add_option('-i', '--include', dest='include', default=None,
help='Filter TestSuites to run from the TestCases selected by the [--tests] option. '
+ 'Usage: -i "[-]testSuite1,[-]testSuite2; [-]testSuite1". '
+ 'The minus symbol [-] is optional and changes the filter behavior '
+ 'to exclude a TestSuite from the run. '
+ 'Use semicolon ";" as delimiter to combine a set of TestSuites for each TestCase '
+ 'specified in the [--tests] option. '
+ 'TestSuite sets should be in the same order as defined in the [--tests] option. '
+ 'TestSuites inside a set should be delimited by comma ",". '
+ 'Use the [--suite-list] option to print all available TestSuites.')
group.add_option('-p', '--parameters', dest='parameters', default=None,
help='Filter Tests to run from a TestSuite or change variable values defined in '
+ 'the TestSuite class. Usage: -p "#TestSuiteName1: [-/*]testName or testShortDescription, '
+ 'variable1=value1, variable2=value2; #TestSuiteName2: [-]testName". '
+ 'The minus symbol [-] is optional and changes the filter behavior to exclude a Test from the run. '
+ 'The asterisk symbol [*] is optional and affects Test order: all Tests marked with an asterisk '
+ 'are started first. The asterisk symbol cannot be combined with minus! '
+ 'Parameters for different TestCases should be separated by semicolon [;] and must be '
+ 'specified in the same order as defined in the [--tests] option. '
+ 'All parameters are case insensitive. '
+ 'Use the [--test-list] option to print all available Tests.')
group.add_option('--preset', dest='preset', default=None,
help='Run a Preset of tests. Usage: --preset "presetName, testCaseName.presetName". '
+ 'Presets may be global (a TestCases preset) or local (a preset inside a TestCase). '
+ 'Add the names of all parent TestCases to specify a local preset. '
+ 'Example: --preset "benchmarks.3d" # local preset [3d] in the [benchmarks] TestCase')
return group
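# Example invocation (hypothetical values, shown only to illustrate how the filters combine):
#   <runner> -t "benchmarks, stability" -i "gfx,-cpu; all" -p "#Gfx: *test3d, iterations=3"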
@property
def priority(self):
return 990
# def setup(self):
# # raise Exception('asd')
# print('setup test in lib')
# return 1+1
async def validate(self, options):
# Cases not specified
cases = Tools.convert_str_cases_to_list(options.tests)
if len(cases) == 0 and not (options.self_test is True
or options.case_list is True
or options.suite_list is True):
raise RuntimeInterruptError("No TestCase was specified in the [--tests] option! "
+ "Use [--case-list] to print all available TestCases")
# filter by --preset
if options.preset is not None:
await filter.Preset.filter_by_preset(options.preset)
else:
# filter by --tests option
await filter.Case.filter_by_cases(options.tests)
# filter by --include option
await filter.Suite.filter_by_suite(options.include)
# filter by --parameters option
await filter.Params.filter_by_params(options.parameters)
# lock changes after all filters
# CONFIG.UNITTEST.LOCK('SELECTED_TEST_CASES')
# print(CONFIG.UNITTEST.SELECTED_TEST_CASES)
# print(' ')
# print('+++', [[(case['name'], case['index'], suite['name'], id(suite['class']), suite['filters'],
# [(test['id'], id(test['test'])) for test in case['suites'][i]['tests']])
# for i, suite in enumerate(case['suites'])]
# for case in CONFIG.UNITTEST.SELECTED_TEST_CASES], '+++')
# print('+++', [[(suite['name'], [(test['id'], test['index']) for test in case['suites'][i]['tests']])
# for i, suite in enumerate(case['suites'])]
# for case in CONFIG.UNITTEST.SELECTED_TEST_CASES], '+++')
# if self.options.preset:
# self.logger.info('Used preset: {}'.format(self.options.preset))
|
# -*- coding: UTF-8 -*-
from nonebot import on_command, CommandSession,NoticeSession,on_notice,permission as perm
from helper import getlogger,msgSendToBot,CQsessionToStr,TempMemory,argDeal,data_read,data_save
from module.twitter import decode_b64,encode_b64,mintweetID
from plugins.twitter import tweet_event_deal
from module.tweettrans import TweetTrans,rate_limit_bucket
import nonebot
import time
import asyncio
import os
import traceback
import re
import module.permissiongroup as permissiongroup
import config
logger = getlogger(__name__)
__plugin_name__ = '烤推'
__plugin_usage__ = r"""
烤推指令前端
"""
# Thread pool
from concurrent.futures import ThreadPoolExecutor
pool = ThreadPoolExecutor(max_workers=64,thread_name_prefix="trans_Threads")
# Cache of recent tweet-translation tasks
trans_tmemory = TempMemory('trans_tmemory.json',limit=300,autoload=True,autosave=True)
# Tweet-translation permissions
permgroupname = 'transtweet'
permissiongroup.perm_addLegalPermGroup(__name__,'烤推模块',permgroupname)
permissiongroup.perm_addLegalPermUnit(permgroupname,'switch') # permission to toggle translation authorization
permissiongroup.perm_addLegalPermUnit(permgroupname,'trans') # permission to run translations
trans_img_path = os.path.join(config.trans_img_path,'transtweet','transimg','')
transtemplate_filename = 'transtemplate.json'
transtemplate = {
# default template
'0':'<p dir="auto" style="color:#1DA1F2;font-size:0.7em;font-weight: 600;">翻译自日文</p>'
}
def loadTranstemplate():
global transtemplate
res = data_read(transtemplate_filename)
if res[0]:
transtemplate = res[2]
return res
def transtemplateInit():
global transtemplate
res = loadTranstemplate()
if not res[0]:
data_save(transtemplate_filename,transtemplate)
transtemplateInit()
def setTranstemplate(key,value):
transtemplate[key] = value
data_save(transtemplate_filename,transtemplate)
def perm_check(session: CommandSession,permunit:str,Remotely:dict = None,user:bool = False):
if Remotely != None:
return permissiongroup.perm_check(
Remotely['message_type'],
Remotely['sent_id'],
permgroupname,
permunit
)
elif user:
return permissiongroup.perm_check(
'private',
session.event['user_id'],
permgroupname,
permunit
)
return permissiongroup.perm_check(
session.event['message_type'],
(session.event['group_id'] if session.event['message_type'] == 'group' else session.event['user_id']),
permgroupname,
permunit
)
def perm_del(session: CommandSession,permunit:str,Remotely:dict = None):
if Remotely != None:
return permissiongroup.perm_del(
Remotely['message_type'],
Remotely['sent_id'],
Remotely['op_id'],
permgroupname,
permunit
)
return permissiongroup.perm_del(
session.event['message_type'],
(session.event['group_id'] if session.event['message_type'] == 'group' else session.event['user_id']),
session.event['user_id'],
permgroupname,
permunit
)
def perm_add(session: CommandSession,permunit:str,Remotely:dict = None):
if Remotely != None:
return permissiongroup.perm_add(
Remotely['message_type'],
Remotely['sent_id'],
Remotely['op_id'],
permgroupname,
permunit
)
return permissiongroup.perm_add(
session.event['message_type'],
(session.event['group_id'] if session.event['message_type'] == 'group' else session.event['user_id']),
session.event['user_id'],
permgroupname,
permunit
)
# Pre-check: ignore group messages that are not normal messages
def headdeal(session: CommandSession):
if session.event['message_type'] == "group" and session.event.sub_type != 'normal':
return False
return True
@on_command('transReloadTemplate',aliases=['重载烤推模版'], permission=perm.SUPERUSER,only_to_me = True)
async def transReloadTemplate(session: CommandSession):
if not headdeal(session):
return
res = loadTranstemplate()
if res[0]:
await session.send('重载成功')
else:
await session.send(res[1])
async def transswitch_group(session: CommandSession):
if perm_check(session,'-switch',user = True):
await session.send('操作被拒绝,权限不足(p)')
return
if perm_check(session,'-switch'):
await session.send('操作被拒绝,权限不足(g)')
return
if perm_check(session,'*'):
await session.send('操作无效,存在“*”权限')
return
if perm_check(session,'trans'):
perm_del(session,'trans')
await session.send('烤推授权关闭')
else:
perm_add(session,'trans')
await session.send('烤推授权开启')
async def transswitch_private(session: CommandSession):
user_id = session.event['user_id']
arglimit = [
{
'name':'msgtype', # parameter name
'des':'消息类型', # description used in error messages
'type':'str', # parameter type: int, float, str, list or dict (list and dict need secondary processing via func or a regex)
'strip':True, # whether to strip the value
'lower':True, # whether to lowercase the value
'default':None, # default value
'func':None, # optional function used for secondary processing when present
're':None, # optional regular expression (applied with re.match)
'vlimit':{
# allowed-value table (restricts the value; an empty table means no restriction);
# a '*':'' entry accepts any string, and a non-empty value replaces the matched string with that value
#'私聊':'private',
#'private':'private',
'群聊':'group',
'group':'group',
#'好友':'private',
'群':'group',
}
},
{
'name':'send_id', # parameter name
'des':'对象ID', # description used in error messages
'type':'int', # parameter type: int, float, str, list or dict (list and dict need secondary processing via func or a regex)
'strip':True, # whether to strip the value
'lower':False, # whether to lowercase the value
'default':None, # default value
'func':None, # optional function used for secondary processing when present
're':None, # optional regular expression (applied with re.match)
'vlimit':{
# allowed-value table (restricts the value; an empty table means no restriction);
# a '*':'' entry matches any string, and a non-empty value replaces the matched string with that value
}
}
]
res = argDeal(session.current_arg_text.strip(),arglimit)
if not res[0]:
await session.send(res[1]+'=>'+res[2])
return
args = res[1]
remote = {
'message_type':'group',
'sent_id':args['send_id'],
'op_id':user_id
}
if perm_check(session,'-switch'):
await session.send('操作被拒绝,权限不足(p)')
return
if perm_check(session,'-switch',remote):
await session.send('操作被拒绝,权限不足(g)')
return
if perm_check(session,'*',remote):
await session.send('操作无效,存在“*”权限(g)')
return
if perm_check(session,'trans',remote):
perm_del(session,'trans',remote)
await session.send('烤推授权关闭')
else:
perm_add(session,'trans',remote)
await session.send('烤推授权开启')
@on_command('transswitch',aliases=['ts','烤推授权'], permission=perm.SUPERUSER,only_to_me = True)
async def transswitch(session: CommandSession):
if not headdeal(session):
return
message_type = session.event['message_type']
if message_type == 'group':
await transswitch_group(session)
else:
await transswitch_private(session)
def deal_trans(arg,ad) -> dict:
trans = {
'type_html':'',
'source':arg,
'text':{}
}
tests = arg.split('##')
if len(tests) == 1:
kvc = tests[0].partition("#!")
trans['text']['main'] = []
trans['text']['main'].append(kvc[0].strip())
if kvc[2] != '':
trans['text']['main'].append(kvc[2].strip())
else:
for test in tests:
test = test.strip()
if test == '':
continue
kv = re.findall(r'^([0-9]{1,2}|main|m)\s{1}(.+)',test,re.S)
if kv == []:
return None #格式不正确
kv = list(kv[0])
if kv[0].isnumeric():
kv[0] = str(int(kv[0]))
elif kv[0] == 'm':
kv[0] = 'main'
kvc = kv[1].partition("#!")
trans['text'][kv[0]] = []
trans['text'][kv[0]].append(kvc[0].strip())
if kvc[2] != '':
trans['text'][kv[0]].append(kvc[2].strip())
return trans
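# Illustrative example of the format parsed above (the input string is an
# assumed example, not taken from the original project):
#   deal_trans("##1 first layer #! quoted tweet ##main main text", None)
# returns
#   {'type_html': '', 'source': '##1 first layer #! quoted tweet ##main main text',
#    'text': {'1': ['first layer', 'quoted tweet'], 'main': ['main text']}}
# i.e. "##<n>" starts the translation of reply layer n, "##main" (or "##m") the main
# tweet, and "#!" splits a layer's translation from that of its embedded quoted tweet.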
def send_msg(session: CommandSession,msg):
session.bot.sync.send_msg(self_id=session.self_id,group_id=session.event['group_id'],message=msg)
def send_res(session: CommandSession,args):
global transtemplate
group_id =session.event['group_id']
user_id = session.event['user_id']
tweet_id = args['tweet_id']
trans = args['trans']
try:
        # use the base-64 style encoding to keep the task id short
tasktype = encode_b64(int(time.time()),offset = 0)
type_html = transtemplate['0']
if str(group_id) in transtemplate:
type_html = transtemplate[str(group_id)]
trans['type_html'] = type_html
        # check the tweet cache first
tweet_sname = 's'
tweet = tweet_event_deal.tryGetTweet(tweet_id)
if tweet != None:
logger.info('检测到缓存:' + tweet['id_str'] + '(' + tweet['user']['screen_name'] + ')')
#logger.info(tweet)
tweet_cache = tweet
tweet_sname = tweet_cache['user']['screen_name']
tt = TweetTrans()
res = tt.getTransFromTweetID(
str(tweet_id),
args['trans'],
tweet_sname,
encode_b64(group_id,offset=0)+'-'+str(tasktype)
)
if res[0]:
time.sleep(1)
if 'nickname' in session.event.sender:
nick = session.event.sender['nickname']
else:
nick = str(user_id)
trans_tmemory.join({
'id':tweet_id,
'group':group_id,
'mintrans':trans['source'][0:15].replace("\n"," "),
'tweetid':encode_b64(tweet_id),
'tasktype':tasktype,
'trans':trans,
'op':user_id,
'opnick':nick
})
send_msg(session,
trans_img_path + encode_b64(group_id,offset=0)+'-'+str(tasktype) + '.png' +"\n" + \
str('[CQ:image,timeout=' + config.img_time_out + \
',file='+trans_img_path + encode_b64(group_id,offset=0)+'-'+str(tasktype) + '.png' + ']') + "\n"\
"使用 !tl 查看烤推历史"
)
else:
send_msg(session,"错误,"+res[2])
del tt
except:
s = traceback.format_exc(limit=10)
logger.error(s)
send_msg(session,"错误,烤推服务异常!")
@on_command('trans',aliases=['t','烤推'], permission=perm.SUPERUSER | perm.PRIVATE_FRIEND | perm.GROUP_OWNER | perm.GROUP,only_to_me = False)
async def trans(session: CommandSession):
if not headdeal(session):
return
message_type = session.event['message_type']
#group_id = (session.event['group_id'] if message_type == 'group' else None)
#user_id = session.event['user_id']
if message_type != 'group':
return
if perm_check(session,'-switch',user = True):
await session.send('操作被拒绝,权限不足(p)')
return
if not perm_check(session,'trans'):
await session.send('操作未授权')
return
logger.info(CQsessionToStr(session))
if not rate_limit_bucket.consume(1):
await session.send("烤推繁忙,请稍后再试")
return
def checkTweetId(a,ad):
if a[:1] == '#':
ta = a[1:]
if not ta.isdecimal():
return None
res = mintweetID.find(lambda item,val:item[1]==val,int(ta))
if res == None:
return None
return res[0]
elif a.isdecimal() and int(a) > 1253881609540800000:
return a
else:
res = decode_b64(a)
if res == -1:
return None
return res
arglimit = [
{
            'name':'tweet_id', # parameter name
            'des':'推特ID', # description used in error messages
            'type':'int', # parameter type: int/float/str/list/dict (list and dict need a func or regex for post-processing)
            'strip':True, # whether to strip() the value
            'lower':False, # whether to lower-case the value
            'default':None, # default value
            'func':checkTweetId, # optional function for post-processing the value
            're':None, # optional regular expression (checked with re.match)
            'vlimit':{
                # allowed-value table (empty means unrestricted); '*':'' accepts any string, and a non-empty value rewrites the input to that value
}
},{
            'name':'trans', # parameter name
            'des':'翻译内容', # description used in error messages
            'type':'dict', # parameter type: int/float/str/list/dict (list and dict need a func or regex for post-processing)
            'strip':True, # whether to strip() the value
            'lower':False, # whether to lower-case the value
            'default':{
                'type_html':'',
                'source':'',
                'text':{}
            }, # default value
            'func':deal_trans, # optional function for post-processing the value
            're':None, # optional regular expression (checked with re.match)
            'vlimit':{
                # allowed-value table (empty means unrestricted); '*':'' accepts any string, and a non-empty value rewrites the input to that value
}
}
]
args = argDeal(session.current_arg_text.strip(),arglimit)
if not args[0]:
await session.send(args[1] + '=>' + args[2])
return
pool.submit(send_res,session,args[1])
await session.send("图片合成中...")
def getlist(groupid:int,page:int=1):
ttm = trans_tmemory.tm.copy()
length = len(ttm)
cout = 0
s = "昵称,任务标识,推文标识,翻译简写\n"
for i in range(length - 1,-1,-1):
if ttm[i]['group'] == groupid:
if cout >= (page-1)*5 and cout < (page)*5:
s = s + str(ttm[i]['opnick'] if ttm[i]['opnick'] else ttm[i]['op']) + ',' + ttm[i]['tasktype'] + ',' + ttm[i]['tweetid'] + ',' + ttm[i]['mintrans'] + "\n"
cout = cout + 1
totalpage = (cout)//5 + (0 if cout%5 == 0 else 1)
s = s + '页数:'+str(page)+'/'+str(totalpage)+'总记录数:'+str(cout) + '\n'
s = s + '使用!tgt 任务标识 获取指定任务图片' + "\n"
s = s + '使用!gt 推文标识 获取指定推文最后的译文图片'
return s
@on_command('translist',aliases=['tl','烤推列表'], permission=perm.SUPERUSER | perm.PRIVATE_FRIEND | perm.GROUP_OWNER | perm.GROUP,only_to_me = False)
async def translist(session: CommandSession):
if not headdeal(session):
return
message_type = session.event['message_type']
group_id = (session.event['group_id'] if message_type == 'group' else None)
#user_id = session.event['user_id']
if message_type != 'group':
return
if perm_check(session,'-switch',user = True):
await session.send('操作被拒绝,权限不足(p)')
return
if not perm_check(session,'trans'):
await session.send('操作未授权')
return
logger.info(CQsessionToStr(session))
arglimit = [
{
            'name':'page', # parameter name
            'des':'页码', # description used in error messages
            'type':'int', # parameter type: int/float/str/list/dict (list and dict need a func or regex for post-processing)
            'strip':True, # whether to strip() the value
            'lower':False, # whether to lower-case the value
            'default':1, # default value
            'func':None, # optional function for post-processing the value
            're':None, # optional regular expression (checked with re.match)
            'vlimit':{
                # allowed-value table (empty means unrestricted); '*':'' accepts any string, and a non-empty value rewrites the input to that value
}
}
]
args = argDeal(session.current_arg_text.strip(),arglimit)
if not args[0]:
await session.send(args[1] + '=>' + args[2])
return
args = args[1]
page = args['page']
if page < 1:
await session.send("页码不能为负")
return
s = getlist(group_id,page)
await session.send(s)
@on_command('gettrans',aliases=['gt','获取翻译'], permission=perm.SUPERUSER | perm.PRIVATE_FRIEND | perm.GROUP_OWNER | perm.GROUP,only_to_me = False)
async def gettrans(session: CommandSession):
if not headdeal(session):
return
message_type = session.event['message_type']
#group_id = (session.event['group_id'] if message_type == 'group' else None)
#user_id = session.event['user_id']
if message_type != 'group':
return
if perm_check(session,'-switch',user = True):
await session.send('操作被拒绝,权限不足(p)')
return
if not perm_check(session,'trans'):
await session.send('操作未授权')
return
logger.info(CQsessionToStr(session))
def checkTweetId(a,ad):
if a[:1] == '#':
ta = a[1:]
if not ta.isdecimal():
return None
res = mintweetID.find(lambda item,val:item[1]==val,int(ta))
if res == None:
return None
return res[0]
elif a.isdecimal() and int(a) > 1253881609540800000:
return a
else:
res = decode_b64(a)
if res == -1:
return None
return res
arglimit = [
{
            'name':'tweet_id', # parameter name
            'des':'推特ID', # description used in error messages
            'type':'int', # parameter type: int/float/str/list/dict (list and dict need a func or regex for post-processing)
            'strip':True, # whether to strip() the value
            'lower':False, # whether to lower-case the value
            'default':None, # default value
            'func':checkTweetId, # optional function for post-processing the value
            're':None, # optional regular expression (checked with re.match)
            'vlimit':{
                # allowed-value table (empty means unrestricted); '*':'' accepts any string, and a non-empty value rewrites the input to that value
}
}
]
args = argDeal(session.current_arg_text.strip(),arglimit)
if not args[0]:
await session.send(args[1] + '=>' + args[2])
return
args = args[1]
tweet_id = args['tweet_id']
ttm = trans_tmemory.tm.copy()
length = len(ttm)
for i in range(length - 1,-1,-1):
if ttm[i]['id'] == tweet_id:
await session.send(trans_img_path + encode_b64(ttm[i]['group'],offset=0)+'-'+str(ttm[i]['tasktype']) + '.png' +"\n" + \
str('[CQ:image,timeout=' + config.img_time_out + \
',file='+trans_img_path + encode_b64(ttm[i]['group'],offset=0)+'-'+str(ttm[i]['tasktype']) + '.png' + ']'))
return
await session.send("未查找到推文翻译")
@on_command('typeGettrans',aliases=['tgt'], permission=perm.SUPERUSER | perm.PRIVATE_FRIEND | perm.GROUP_OWNER | perm.GROUP,only_to_me = False)
async def typeGettrans(session: CommandSession):
if not headdeal(session):
return
message_type = session.event['message_type']
#group_id = (session.event['group_id'] if message_type == 'group' else None)
#user_id = session.event['user_id']
if message_type != 'group':
return
if perm_check(session,'-switch',user = True):
await session.send('操作被拒绝,权限不足(p)')
return
if not perm_check(session,'trans'):
await session.send('操作未授权')
return
logger.info(CQsessionToStr(session))
arg = session.current_arg_text.strip()
if arg == '':
await session.send('缺少参数')
return
ttm = trans_tmemory.tm.copy()
length = len(ttm)
for i in range(length - 1,-1,-1):
if ttm[i]['tasktype'] == arg:
await session.send(trans_img_path + encode_b64(ttm[i]['group'],offset=0)+'-'+str(ttm[i]['tasktype']) + '.png' +"\n" + \
str('[CQ:image,timeout=' + config.img_time_out + \
',file='+trans_img_path + encode_b64(ttm[i]['group'],offset=0)+'-'+str(ttm[i]['tasktype']) + '.png' + ']'))
return
await session.send("未查找到推文翻译")
@on_command('transabout',aliases=['ta','烤推帮助'],only_to_me = False)
async def transabout(session: CommandSession):
if not headdeal(session):
return
message_type = session.event['message_type']
if message_type != 'group':
return
res = perm_check(session,'trans')
logger.info(CQsessionToStr(session))
msg = '当前版本为烤推机测试版V2.33' + "\n" + \
'授权状态:' + ("已授权" if res else "未授权") + "\n" + \
'!ts -切换烤推授权' + "\n" + \
'!t 推文ID 翻译 -合成翻译' + "\n" + \
'!tl -已翻译推文列表' + "\n" + \
'!gt 推文ID/推文标识 -获取最后翻译' + "\n" + \
'!tgt 任务标识 -获取指定翻译' + "\n" + \
'!gtt 推文ID/推文标识 -获取指定推文内容' + "\n" + \
'多层回复翻译:' + "\n" + \
'##1 第一层翻译' + "\n" + \
'#! 第一层层内推文(转推并评论类型里的内嵌推文)' + "\n" + \
'##2 第二层翻译' + "\n" + \
'##main 主翻译' + "\n" + \
'烤推支持换行参数,如有需要可以更换翻译自日文到任意图片或文字' + "\n" + \
'如果出现问题可以 !反馈 反馈内容 反馈信息'
await session.send(msg)
|
'''Module File'''
|
import discord
from discord.ext import commands
class Mod:
"""Useful moderation commands to keep the server under control."""
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.guild_only()
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, user: discord.Member, *, reason: str = None):
"""Kicks a user from the server."""
if user == ctx.author:
return await ctx.send("Kicking yourself? smh.")
if user == self.bot.user:
return await ctx.send("I can't kick myself.")
res = f", for reason: `{reason}`" if reason else ""
try:
await user.kick(reason=reason)
await ctx.send(f"Kicked {user}{res}")
except discord.Forbidden:
await ctx.send("I don't have permissions to kick that user.")
except Exception as e:
raise e
@commands.command()
@commands.guild_only()
@commands.has_permissions(manage_messages=True)
async def purge(self, ctx, amount):
"""Purges X amount of messages from a channel"""
try:
amount = int(amount)
except ValueError:
return await ctx.send("Enter a number only!")
if amount > 200:
return await ctx.send("You can purge only upto 200 at a time!")
try:
await ctx.channel.purge(limit=amount+1, bulk=True)
await ctx.send(f"Purged **{amount}** messages", delete_after=3)
except discord.Forbidden:
await ctx.send(f"I need the `Manage Messages` permission to do this.")
def setup(bot):
bot.add_cog(Mod(bot))
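# Usage sketch (the module path "cogs.mod" is an assumption about the bot's layout):
# in the bot's entry point this cog would typically be loaded with
#     bot.load_extension("cogs.mod")
# which calls setup() above and registers the kick and purge commands.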
|
# Copyright (c) 2021, PublicSpaces and Contributors
# See license.txt
# import frappe
import unittest
class TestToolToets(unittest.TestCase):
pass
|
"""Base classes for Rattr components."""
import ast
from abc import ABCMeta, abstractmethod, abstractproperty
from ast import NodeTransformer, NodeVisitor # noqa: F401
from itertools import product
from typing import Dict, List, Optional
from rattr import error
from rattr.analyser.context import Context
from rattr.analyser.context.symbol import Import
from rattr.analyser.types import FuncOrAsyncFunc, FunctionIR
class Assertor(NodeVisitor):
"""Assertor base class.
An assertor can either be strict or non-strict, set by the constructor
    argument `is_strict`, having the following effect:
is_strict:
on condition failure, log fatal error (halts execution)
not is_strict:
on condition failure, log warning
"""
def __init__(self, is_strict: bool = True) -> None:
self.is_strict: bool = is_strict
self.context: Context = None
def assert_holds(self, node: ast.AST, context: Context) -> None:
"""Entry point for an Assertor, visit the tree to assert properties."""
self.context = context
super().visit(node)
def failed(self, message: str, culprit: Optional[ast.AST] = None) -> None:
"""Handle assertion failure."""
if self.is_strict:
handler = error.fatal
else:
handler = error.warning
handler(message, culprit)
class CustomFunctionAnalyser(NodeVisitor, metaclass=ABCMeta):
"""Base class for a custom function visitor."""
@abstractproperty
def name(self) -> str:
"""Return the name of the function handled by this analyser."""
return ""
@abstractproperty
def qualified_name(self) -> str:
"""Return the qualified name of the function."""
return ""
@abstractmethod
def on_def(self, name: str, node: FuncOrAsyncFunc, ctx: Context) -> FunctionIR:
"""Return the IR of the definition of the handled function."""
return {
"sets": set(),
"gets": set(),
"dels": set(),
"calls": set(),
}
@abstractmethod
def on_call(self, name: str, node: ast.Call, ctx: Context) -> FunctionIR:
"""Return the IR produced by a call to the handled function.
The returned IR will be union'd with the IR of the caller function.
Argument `fna` is the FunctionAnalyser instance that the call is from.
"""
return {
"sets": set(),
"gets": set(),
"dels": set(),
"calls": set(),
}
class CustomFunctionHandler:
"""Dispatch to CustomFunctionAnalyser.
If a builtin and user-defined function have the same name, then the builtin
function analyser will take precedence.
Initialiser:
Register the given builtin and user-defined custom function analysers.
"""
def __init__(
self,
builtins: Optional[List[CustomFunctionAnalyser]] = None,
user_defined: Optional[List[CustomFunctionAnalyser]] = None,
) -> None:
self._builtins: Dict[str, CustomFunctionAnalyser] = dict()
self._user_def: Dict[str, CustomFunctionAnalyser] = dict()
for analyser in builtins or []:
self._builtins[analyser.name] = analyser
for analyser in user_defined or []:
self._user_def[analyser.name] = analyser
def __get_by_name(self, name: str) -> Optional[CustomFunctionAnalyser]:
analyser = None
if name in self._user_def:
analyser = self._user_def[name]
if name in self._builtins:
analyser = self._builtins[name]
return analyser
def __get_by_symbol(
self, name: str, ctx: Context
) -> Optional[CustomFunctionAnalyser]:
symbols: List[Import] = list()
_imports: filter[Import] = filter(
lambda s: isinstance(s, Import), ctx.symbol_table.symbols()
)
for symbol in _imports:
# From imported
if name in (symbol.name, symbol.qualified_name):
symbols.append(symbol)
# Module imported
if name.startswith(f"{symbol.name}."):
symbols.append(symbol)
for symbol, analyser in product(symbols, self._user_def.values()):
# From imported
if analyser.qualified_name == symbol.qualified_name:
return analyser
# Module imported
if analyser.qualified_name == name.replace(
symbol.name, symbol.qualified_name
):
return analyser
return None
def get(self, name: str, ctx: Context) -> Optional[CustomFunctionAnalyser]:
"""Return the analyser for the function `name`, `None` otherwise."""
analyser = self.__get_by_name(name)
if analyser is None:
analyser = self.__get_by_symbol(name, ctx)
return analyser
def has_analyser(self, name: str, ctx: Context) -> bool:
"""Return `True` if there is a analyser for the function `name`."""
return self.get(name, ctx) is not None
def handle_def(self, name: str, node: FuncOrAsyncFunc, ctx: Context) -> FunctionIR:
"""Dispatch to the to the appropriate analyser."""
analyser = self.get(name, ctx)
if analyser is None:
raise ValueError
return analyser.on_def(name, node, ctx)
def handle_call(self, name: str, node: ast.Call, ctx: Context) -> FunctionIR:
"""Dispatch to the to the appropriate analyser."""
analyser = self.get(name, ctx)
if analyser is None:
raise ValueError
return analyser.on_call(name, node, ctx)
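# Minimal illustrative sketch (not part of rattr itself): a user-defined analyser for
# a hypothetical function `my_pkg.read_config`, registered with CustomFunctionHandler.
# The module and function names here are assumptions made purely for demonstration.
class ReadConfigAnalyser(CustomFunctionAnalyser):
    @property
    def name(self) -> str:
        return "read_config"

    @property
    def qualified_name(self) -> str:
        return "my_pkg.read_config"

    def on_def(self, name: str, node: FuncOrAsyncFunc, ctx: Context) -> FunctionIR:
        # The definition itself contributes no accesses in this sketch.
        return {"sets": set(), "gets": set(), "dels": set(), "calls": set()}

    def on_call(self, name: str, node: ast.Call, ctx: Context) -> FunctionIR:
        # Treat a call as side-effect free; a real analyser would record the
        # attributes read/written by the handled function here.
        return {"sets": set(), "gets": set(), "dels": set(), "calls": set()}


# custom_handler = CustomFunctionHandler(user_defined=[ReadConfigAnalyser()])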
|
#
# Copyright (c) 2011 Daniel Truemper truemped@googlemail.com
#
# settings.py 10-Jan-2011
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Simple class for working with settings.
Adapted from the Django-based settings system.
"""
from spyder import defaultsettings
class Settings(object):
"""
Class for handling spyder settings.
"""
def __init__(self, settings=None):
"""
Initialize the settings.
"""
# load the default settings
for setting in dir(defaultsettings):
if setting == setting.upper():
setattr(self, setting, getattr(defaultsettings, setting))
# now override with user settings
if settings is not None:
for setting in dir(settings):
if setting == setting.upper():
setattr(self, setting, getattr(settings, setting))
|
#!/usr/bin/env python3
# Ryan Hodgson - hodgson@csu.fullerton.edu
class MyPRNG:
'Lehmer Generator'
def __init__(self):
self.m = 2147483647
self.a = 16807
def next_prn(self):
self.seed = (self.a * self.seed) % self.m
return(self.seed)
def setSeed(self, rseed):
self.seed = int(rseed)
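# Usage sketch (12345 is an arbitrary example seed): the generator must be seeded
# via setSeed() before next_prn() is called, since __init__ does not set a seed.
if __name__ == "__main__":
    prng = MyPRNG()
    prng.setSeed(12345)
    # First three pseudo-random numbers of the Lehmer sequence for this seed.
    print([prng.next_prn() for _ in range(3)])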
|
import numpy as np
#Taking Input in Form of list and saving in a list
l=[0]
n=list(map(int,input().split(' ')))
l[0]=n
for i in range(len(n)-1):
l.append(list(map(int,input().split(' '))))
l=np.array(l)
# A celebrity is known by everyone else (column sums to n-1) but knows no one (row sums to 0).
c = None
for i in range(len(n)):
    if sum(l[:, i]) == (len(n) - 1):
        c = i
if c is not None and sum(l[c]) == 0:
    print("id : " + str(c))
else:
    print("No Celebrity")
|
"""
accuracy_assessment.py: contains functions to assess the accuracy of the RF classifier. The following metrics are
evaluated:
- Confusion matrix (CM)
- Kappa statistic (Kappa)
@author: Anastasiia Vynohradova
"""
import gdal
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from statsmodels.stats.inter_rater import cohens_kappa
from sklearn.metrics import classification_report
def get_kappa(cf_matrix):
"""
The function calculates a Kappa coefficient
Parameters
----------
cf_matrix: numpy.ndarray
array with confusion matrix
Returns
-------
res_kappa: str
str with the Kappa Coefficient
"""
kappa = cohens_kappa(cf_matrix).kappa
res_kappa = f'Kappa Coefficient is {kappa}'
    print(res_kappa)
    return res_kappa
def accuracy_assessment(prediction, out_ref_p):
"""
The function calculates the overall accuracy
Parameters
----------
prediction: numpy.ndarray
array with the predicted labels
out_ref_p:
path to resampled/reclassified reference product
Returns
-------
This function has no return
"""
# open reference product
print('\n#################### - Accuracy Assessment - ####################')
ref_p = gdal.Open(out_ref_p)
ref_p = np.array(ref_p.GetRasterBand(1).ReadAsArray())
ref_p = ref_p.flatten()
cf_matrix_pd = pd.crosstab(ref_p, prediction, rownames=['Actual'], colnames=['Predicted'], margins=True)
print('\n########## - Confusion Matrix - ##########')
# display full dataframe without truncation
with pd.option_context('display.max_columns', 100,
'display.width', 640):
print(cf_matrix_pd)
cf_matrix = confusion_matrix(ref_p, prediction, normalize=None)
class_report = classification_report(ref_p, prediction)
print('\n########## - Classification report - ##########')
print(class_report)
get_kappa(cf_matrix)
|
# -*- coding: utf-8 -*-
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic EL functions of the Oozie workflow"""
import re
def first_not_null(str_one, str_two):
"""
It returns the first not null value, or null if both are null.
Note that if the output of this function is null and it is used as string,
the EL library converts it to an empty string. This is the common behavior
when using firstNotNull() in node configuration sections.
"""
if str_one:
return str_one
return str_two if str_two else ""
def concat(str_one, str_two):
"""
Returns the concatenation of 2 strings. A string
with null value is considered as an empty string.
"""
if not str_one:
str_one = ""
if not str_two:
str_two = ""
return str_one + str_two
def replace_all(src_string, regex, replacement):
"""
Replace each occurrence of regular expression match in
the first string with the replacement string and return the
replaced string. A 'regex' string with null value is considered as
    no change. A 'replacement' string with null value is considered as an empty string.
"""
if not regex:
return src_string
if not replacement:
replacement = ""
return re.sub(regex, replacement, src_string)
def append_all(src_str, append, delimiter):
"""
    Append the given string to each of the sub-strings obtained by
    splitting the first string (src) on the delimiter.
    E.g. appendAll("/a/b/,/c/b/,/c/d/", "ADD", ",")
    will return /a/b/ADD,/c/b/ADD,/c/d/ADD. An append string with null
    value is considered as an empty string. A delimiter string with null
    value is considered as no append in the string.
"""
if not delimiter:
return src_str
if not append:
append = ""
split_str = src_str.split(delimiter)
appended_list = []
for split in split_str:
appended_list.append(split + append)
return delimiter.join(appended_list)
def trim(src_str):
"""
It returns the trimmed value of the given string.
A string with null value is considered as an empty string.
"""
if not src_str:
return ""
    # May not behave exactly like Java; the Java documentation is unclear about
    # which types of whitespace are stripped.
return src_str.strip()
def url_encode(src_str):
"""
It returns the URL UTF-8 encoded value of the given string.
A string with null value is considered as an empty string.
"""
if not src_str:
return ""
import urllib.parse
return urllib.parse.quote(src_str, encoding="UTF-8")
def timestamp():
"""
It returns the UTC current date and time
in W3C format down to the second (YYYY-MM-DDThh:mm:ss.sZ).
i.e.: 1997-07-16T19:20:30.45Z
"""
import datetime
import pytz
return datetime.datetime.now(pytz.utc).isoformat()
def to_json_str(py_map):
import json
return json.dumps(py_map)
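# Usage sketch of the EL helpers above (the argument values are illustrative only):
if __name__ == "__main__":
    print(first_not_null(None, "fallback"))             # fallback
    print(concat("foo", None))                          # foo
    print(replace_all("a-b-c", "-", "/"))               # a/b/c
    print(append_all("/a/b/,/c/b/,/c/d/", "ADD", ","))  # /a/b/ADD,/c/b/ADD,/c/d/ADD
    print(trim("  padded  "))                           # padded
    print(url_encode("a b&c"))                          # a%20b%26c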
|
import logging
from flask import Flask
import settings
from routes import objects, pages, api
# from secure.api import api -- not implemented yet
app = Flask(__name__)
# regex paths
app.url_map.strict_slashes = True
app.register_blueprint(pages.pages)
app.register_blueprint(api.api)
app.register_blueprint(objects.modelx)
# app.register_blueprint(api) -- rename this from 'api' to secure or something
# run the app
if __name__ == '__main__':
logging.basicConfig(
filename=settings.LOGFILE,
level=settings.LOG_LEVEL,
datefmt='%Y-%m-%d %H:%M:%S',
format='%(asctime)s %(levelname)s %(filename)s:%(lineno)s %(message)s'
)
# logging.log(logging.INFO, 'PROMS Started, %(host)s:%(port)d' % {'host': settings.BASE_URI, 'port': settings.PORT})
# prod logging
# handler = logging.FileHandler(settings.LOGFILE)
# handler.setLevel(settings.LOG_LEVEL)
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
# app.logger.addHandler(handler)
app.run(
threaded=True,
debug=settings.DEBUG
)
|
from bs4 import *
import re, bs4
from doi_utils import *
# doi 10.1088
def parse_iop(soup):
is_french = False
authors, affiliations = [], {}
for elt in soup.find_all(class_='mb-05'):
is_elt_ok = False
for sub_elt in elt.children:
if(isinstance(sub_elt, bs4.element.Tag)) :
if sub_elt.name=='sup':
nb_sup = sub_elt.get_text()
is_elt_ok = True
elif is_elt_ok:
affiliations[nb_sup] = sub_elt.strip()
if re.search(fr_regex, sub_elt.lower()):
is_french = True
for author_elt in soup.find_all('span', {'itemprop':'author'}):
author = {}
for sub_elt in author_elt:
if isinstance(sub_elt, bs4.element.NavigableString):
full_name = sub_elt
if author_elt.find('sup') is None:
continue
nb_sups = author_elt.find('sup').get_text().split(',')
full_name = author_elt.find('span').get_text()
author['full_name'] = full_name
author['affiliations_info'] = []
for nb_sup in nb_sups:
if nb_sup in affiliations:
author['affiliations_info'].append({'structure_name': affiliations[nb_sup]})
authors.append(author)
return {'authors_from_html':authors, 'affiliations_complete': [affiliations[a] for a in affiliations], 'is_french':is_french}
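# Usage sketch (assumes the HTML of an IOP article page is already in `html`):
#     from bs4 import BeautifulSoup
#     soup = BeautifulSoup(html, "lxml")
#     parsed = parse_iop(soup)
#     parsed["authors_from_html"], parsed["affiliations_complete"], parsed["is_french"]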
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from slp.modules.feedforward import FF
def calc_scores(dk):
def fn(q, k):
return torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(dk)
return fn
class Attention(nn.Module):
"""Some Information about Attention"""
def __init__(self,
attention_size=512,
input_size=None,
dropout=.1,
grad_checkpoint=False):
super(Attention, self).__init__()
if input_size is None:
input_size = attention_size
self.dk = input_size
self.grad_checkpoint = grad_checkpoint
self.k = nn.Linear(input_size, attention_size, bias=False)
self.q = nn.Linear(input_size, attention_size, bias=False)
self.v = nn.Linear(input_size, attention_size, bias=False)
self.drop = nn.Dropout(dropout)
self._reset_parameters()
def forward(self, x, queries=None, values=None, attention_mask=None):
'''
x : (B, L, D)
queries : (B, L, D)
values : (B, L, D)
'''
if queries is None:
queries = x
if values is None:
values = x
k = self.k(x) # (B, L, A)
q = self.q(queries) # (B, L, A)
v = self.v(values) # (B, L, A)
# weights => (B, L, L)
if self.grad_checkpoint:
scores = checkpoint(calc_scores(self.dk), q, k)
else:
scores = torch.bmm(q, k.transpose(1, 2)) / math.sqrt(self.dk)
if attention_mask is not None:
scores = scores + ((1 - attention_mask.unsqueeze(1)) * -1e5)
scores = F.softmax(scores, dim=-1)
scores = self.drop(scores)
# out => (B, L, A)
out = torch.bmm(scores, v)
return out, scores
def _reset_parameters(self):
nn.init.xavier_uniform_(self.k.weight)
nn.init.xavier_uniform_(self.q.weight)
nn.init.xavier_uniform_(self.v.weight)
class MultiheadAttentionSerial(nn.Module):
"""Serial MultiheadAttention"""
def __init__(self,
attention_size=512,
num_heads=8,
input_size=None,
dropout=.1,
grad_checkpoint=False):
super(MultiheadAttentionSerial, self).__init__()
if input_size is None:
input_size = attention_size
self.head_size = int(attention_size / num_heads)
        # Use an nn.ModuleList so the per-head parameters are registered, and pass
        # sizes by keyword (each head projects input_size -> head_size).
        self.heads = nn.ModuleList([
            Attention(attention_size=self.head_size,
                      input_size=input_size,
                      dropout=dropout,
                      grad_checkpoint=grad_checkpoint)
            for _ in range(num_heads)])
def forward(self, x, queries=None, values=None, attention_mask=None):
"""
x : (B, L, D)
queries : (B, L, D)
values : (B, L, D)
"""
        # list of (B, L, A / H); each Attention head returns (out, scores), keep out only
        out = [h(x,
                 queries=queries,
                 values=values,
                 attention_mask=attention_mask)[0]
               for h in self.heads]
# (B, L, A)
out = torch.cat(out, dim=-1)
return out
class MultiheadAttentionParallel(nn.Module):
def __init__(self,
attention_size=512,
num_heads=8,
input_size=None,
dropout=.1,
grad_checkpoint=False):
super(MultiheadAttentionParallel, self).__init__()
if input_size is None:
input_size = attention_size
self.dk = input_size
self.num_heads = num_heads
self.head_size = int(attention_size / num_heads)
self.attention_size = attention_size
self.grad_checkpoint = grad_checkpoint
self.k = nn.Linear(input_size, attention_size, bias=False)
self.q = nn.Linear(input_size, attention_size, bias=False)
self.v = nn.Linear(input_size, attention_size, bias=False)
self.output = FF(attention_size,
attention_size,
activation='none',
layer_norm=True,
dropout=dropout)
self.drop = nn.Dropout(dropout)
self._reset_parameters()
def _split_heads(self, x):
"""
x => (B, L, A)
out => (B, H, L, A/H)
"""
batch_size, max_length, _ = x.size()
return (x
.view(batch_size, max_length,
self.num_heads, self.head_size)
.permute(0, 2, 1, 3))
def _merge_heads(self, x):
"""
x => (B, H, L, A/H)
out => (B, L, A)
"""
batch_size, _, max_length, _ = x.size()
# x => (B, L, H, A/H)
x = x.permute(0, 2, 1, 3).contiguous()
return x.view(batch_size, max_length, -1)
def forward(self, x, queries=None, values=None, attention_mask=None):
"""
x : (B, L, D)
queries : (B, L, D)
values : (B, L, D)
"""
if queries is None:
queries = x
if values is None:
values = x
k = self._split_heads(self.k(x)) # (B, H, L, A/H)
q = self._split_heads(self.q(queries)) # (B, H, L, A/H)
v = self._split_heads(self.v(values)) # (B, H, L, A/H)
# scores => (B, H, L, L)
if self.grad_checkpoint:
scores = checkpoint(calc_scores(self.dk), q, k)
else:
scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.dk)
if attention_mask is not None:
scores = scores + ((1 - attention_mask.unsqueeze(1)) * -1e5)
scores = F.softmax(scores, dim=-1)
scores = self.drop(scores)
# out => (B, H, L, A/H)
out = self._merge_heads(torch.matmul(scores, v))
out = self.output(out)
return out
def _reset_parameters(self):
nn.init.xavier_uniform_(self.k.weight)
nn.init.xavier_uniform_(self.q.weight)
nn.init.xavier_uniform_(self.v.weight)
nn.init.xavier_uniform_(self.output.fc.weight)
nn.init.constant_(self.output.fc.bias, 0.)
MultiheadAttention = MultiheadAttentionParallel
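# Usage sketch (sizes are illustrative; requires the slp package for FF): run a
# random batch through the parallel multi-head attention defined above.
if __name__ == "__main__":
    mha = MultiheadAttention(attention_size=512, num_heads=8, input_size=256)
    x = torch.randn(4, 10, 256)   # (B, L, D)
    out = mha(x)                  # (B, L, A) == (4, 10, 512)
    print(out.shape)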
|
#
# MIT License
#
# Copyright (c) 2022 GT4SD team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""GuacaMol baselines training utilities."""
from dataclasses import dataclass, field
from typing import Any, Dict
from ..core import TrainingPipeline, TrainingPipelineArguments
class GuacaMolBaselinesTrainingPipeline(TrainingPipeline):
"""GuacaMol Baselines training pipelines."""
def train( # type: ignore
self,
training_args: Dict[str, Any],
model_args: Dict[str, Any],
dataset_args: Dict[str, Any],
) -> None:
"""Generic training function for GuacaMol Baselines training.
Args:
training_args: training arguments passed to the configuration.
model_args: model arguments passed to the configuration.
dataset_args: dataset arguments passed to the configuration.
Raises:
NotImplementedError: the generic trainer does not implement the pipeline.
"""
raise NotImplementedError
@dataclass
class GuacaMolDataArguments(TrainingPipelineArguments):
"""Arguments related to data loading."""
__name__ = "dataset_args"
train_smiles_filepath: str = field(
metadata={"help": "Path of SMILES file for Training."}
)
test_smiles_filepath: str = field(
metadata={"help": "Path of SMILES file for Validation."}
)
@dataclass
class GuacaMolSavingArguments(TrainingPipelineArguments):
"""Saving arguments related to GuacaMol trainer."""
__name__ = "saving_args"
model_filepath: str = field(metadata={"help": "Path to the model file."})
model_config_filepath: str = field(
metadata={"help": "Path to the model config file."}
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
import numpy as np
import numpy.random as npr
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.modeling.balanced_positive_negative_sampler import (
BalancedPositiveNegativeSampler
)
from maskrcnn_benchmark.modeling.utils import cat
class FastRCNNSampling(object):
"""
Sampling RoIs
"""
def __init__(
self,
proposal_matcher,
fg_bg_sampler,
box_coder,
):
"""
Arguments:
proposal_matcher (Matcher)
fg_bg_sampler (BalancedPositiveNegativeSampler)
box_coder (BoxCoder)
"""
self.proposal_matcher = proposal_matcher
self.fg_bg_sampler = fg_bg_sampler
self.box_coder = box_coder
def match_targets_to_proposals(self, proposal, target):
match_quality_matrix = boxlist_iou(target, proposal)
matched_idxs = self.proposal_matcher(match_quality_matrix)
# Fast RCNN only need "labels" field for selecting the targets
target = target.copy_with_fields(["labels", "attributes"])
# get the targets corresponding GT for each proposal
# NB: need to clamp the indices because we can have a single
# GT in the image, and matched_idxs can be -2, which goes
# out of bounds
matched_targets = target[matched_idxs.clamp(min=0)]
matched_targets.add_field("matched_idxs", matched_idxs)
return matched_targets
def prepare_targets(self, proposals, targets):
labels = []
attributes = []
regression_targets = []
matched_idxs = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
matched_targets = self.match_targets_to_proposals(
proposals_per_image, targets_per_image
)
matched_idxs_per_image = matched_targets.get_field("matched_idxs")
labels_per_image = matched_targets.get_field("labels")
attris_per_image = matched_targets.get_field("attributes")
labels_per_image = labels_per_image.to(dtype=torch.int64)
attris_per_image = attris_per_image.to(dtype=torch.int64)
# Label background (below the low threshold)
bg_inds = matched_idxs_per_image == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[bg_inds] = 0
attris_per_image[bg_inds,:] = 0
# Label ignore proposals (between low and high thresholds)
ignore_inds = matched_idxs_per_image == Matcher.BETWEEN_THRESHOLDS
labels_per_image[ignore_inds] = -1 # -1 is ignored by sampler
# compute regression targets
regression_targets_per_image = self.box_coder.encode(
matched_targets.bbox, proposals_per_image.bbox
)
labels.append(labels_per_image)
attributes.append(attris_per_image)
regression_targets.append(regression_targets_per_image)
matched_idxs.append(matched_idxs_per_image)
return labels, attributes, regression_targets, matched_idxs
def subsample(self, proposals, targets):
"""
This method performs the positive/negative sampling, and return
the sampled proposals.
Note: this function keeps a state.
Arguments:
proposals (list[BoxList])
targets (list[BoxList])
"""
labels, attributes, regression_targets, matched_idxs = self.prepare_targets(proposals, targets)
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
proposals = list(proposals)
# add corresponding label and regression_targets information to the bounding boxes
for labels_per_image, attributes_per_image, regression_targets_per_image, matched_idxs_per_image, proposals_per_image in zip(
labels, attributes, regression_targets, matched_idxs, proposals
):
proposals_per_image.add_field("labels", labels_per_image)
proposals_per_image.add_field("attributes", attributes_per_image)
proposals_per_image.add_field("regression_targets", regression_targets_per_image)
proposals_per_image.add_field("matched_idxs", matched_idxs_per_image)
# distributed sampled proposals, that were obtained on all feature maps
# concatenated via the fg_bg_sampler, into individual feature map levels
for img_idx, (pos_inds_img, neg_inds_img) in enumerate(zip(sampled_pos_inds, sampled_neg_inds)):
img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
proposals_per_image = proposals[img_idx][img_sampled_inds]
proposals[img_idx] = proposals_per_image
return proposals
def assign_label_to_proposals(self, proposals, targets):
for img_idx, (target, proposal) in enumerate(zip(targets, proposals)):
match_quality_matrix = boxlist_iou(target, proposal)
# proposal.bbox.shape[0]; -1 is below low threshold; -2 is between thresholds; the others are matched gt indices
matched_idxs = self.proposal_matcher(match_quality_matrix)
# Fast RCNN only need "labels" field for selecting the targets
target = target.copy_with_fields(["labels", "attributes"]) # only copy "labels" and "attributes" to extra_fields (dict)
matched_targets = target[matched_idxs.clamp(min=0)] # index items like List
labels_per_image = matched_targets.get_field("labels").to(dtype=torch.int64)
attris_per_image = matched_targets.get_field("attributes").to(dtype=torch.int64)
labels_per_image[matched_idxs < 0] = 0 # background
attris_per_image[matched_idxs < 0, :] = 0 # background
proposals[img_idx].add_field("labels", labels_per_image)
proposals[img_idx].add_field("attributes", attris_per_image)
return proposals
def assign_label_to_proposals_by_dict(self, proposals, targets, cls_dict):
"""
Instead of using box location to match gt objects, use a dictionary to assign gt object labels
"""
device = proposals[0].bbox.device
for img_idx, (target, proposal) in enumerate(zip(targets, proposals)):
# detected cls --> vg cls --> check whether exist in gt cls --> if so, randomly select the gt cls, and then random one gt object
det_dist = proposal.extra_fields['det_dist'] # the dist after softmax
det_cls = torch.argmax(det_dist, dim=1).cpu().numpy()
gt_cls = target.get_field("labels").cpu().numpy()
dict_matched_idxs = []
for i, det_c in enumerate(det_cls):
# for each detector cls, there might be multiple corresponding vg cls
vg_cls = cls_dict[det_c]
cls_cand = [vg_c for vg_c in vg_cls if vg_c in gt_cls]
if len(cls_cand) == 0: # no matched gt cls
dict_matched_idxs.append(-99)
else: # there are gt cls that can be matched to detected objects, then randomly select one
selected_cls = cls_cand[npr.permutation(np.arange(len(cls_cand)))[0]]
# there are multiple gt objects that have same gt cls, then randomly select one,
# though it's class-level selection in this function (not instance-level selection)
obj_cand = [gt_i for gt_i, gt_c in enumerate(gt_cls) if gt_c == selected_cls]
selected_obj = obj_cand[npr.permutation(np.arange(len(obj_cand)))[0]]
dict_matched_idxs.append(selected_obj)
dict_matched_idxs = torch.tensor(dict_matched_idxs, dtype=torch.long).to(device)
################# the following is the same as assign_label_to_proposals #################
#match_quality_matrix = boxlist_iou(target, proposal)
# proposal.bbox.shape[0]; -1 is below low threshold; -2 is between thresholds; the others are matched gt indices
#matched_idxs = self.proposal_matcher(match_quality_matrix)
matched_idxs = dict_matched_idxs
# Fast RCNN only need "labels" field for selecting the targets
target = target.copy_with_fields(["labels", "attributes"]) # only copy "labels" and "attributes" to extra_fields (dict)
matched_targets = target[matched_idxs.clamp(min=0)] # index items like List
labels_per_image = matched_targets.get_field("labels").to(dtype=torch.int64)
attris_per_image = matched_targets.get_field("attributes").to(dtype=torch.int64)
labels_per_image[matched_idxs < 0] = 0 # background
attris_per_image[matched_idxs < 0, :] = 0 # background
proposals[img_idx].add_field("labels", labels_per_image)
proposals[img_idx].add_field("attributes", attris_per_image)
return proposals
def make_roi_box_samp_processor(cfg):
matcher = Matcher(
cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
allow_low_quality_matches=False,
)
bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS
box_coder = BoxCoder(weights=bbox_reg_weights)
fg_bg_sampler = BalancedPositiveNegativeSampler(
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
)
samp_processor = FastRCNNSampling(
matcher,
fg_bg_sampler,
box_coder,
)
return samp_processor
|
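# The Borg (monostate) pattern below shares a single __dict__ across all instances:
# every object created from Borg or one of its subclasses reads and writes the same
# attribute dictionary, so state is shared even though the instances (and their
# id()s) remain distinct, as the demo in __main__ shows.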
class Borg:
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
class MonostateSingleton(Borg):
def __init__(self):
Borg.__init__(self)
self.name = "MySingleton"
def get_name(self) -> str:
return self.name
def set_name(self, name: str):
self.name = name
class NewSingleton(Borg):
def __init__(self):
Borg.__init__(self)
self.name = "MySingleton"
def get_name(self) -> str:
return self.name
def set_name(self, name: str):
self.name = name
if __name__ == "__main__":
my_singleton1 = MonostateSingleton()
print("Singleton1 name: " + my_singleton1.get_name())
my_singleton2 = MonostateSingleton()
my_singleton2.set_name("New Singleton")
print("Singleton2 name: " + my_singleton2.get_name())
print("Singleton1 name: " + my_singleton1.name)
print(my_singleton1)
print(my_singleton2)
print(id(my_singleton1) == id(my_singleton2))
|
from django.urls import path, include
from . import views
app_name = "pressTv"
urlpatterns = [
path("", views.index.as_view(), name="index"),
path("change_news_page/<str:pageNum>", views.changePage, name="change_news_page"),
]
|
"""
It make request http.
"""
import logging
from http_client.send_request import send_request
from http_client.response import get_body
class HttpRoute:
"""
It make request http.
"""
def __init__(self, host, port, method, path):
"""
Initialize the variables.
:param host: string
:param port: int
:param method: string
:param path: string
"""
self.host = host
self.port = port
self.method = method
self.path = path
def get_history(self):
"""
Get the historic of request made.
:return: response dictionary
"""
logging.info('\033[1;34mGetting all the histories\033[m')
try:
response = send_request(
method='GET',
url=f'{self.host}:{self.port}'
f'/=^.^=/history?path={self.path}'
f'&method={self.method}'
)
response_dictionary = get_body(response)
logging.info('\033[1;34mSuccessfully obtained\033[m')
return response_dictionary
except Exception as error:
            logging.error('\033[1;31mFailed to get history, '
                          'Error: %s\033[m', error)
raise Exception(f'Failed to get history\n'
f'Error: {error}') from error
def clear_history(self):
"""
Clean the historic of request made.
:return: response http
"""
logging.info('\033[1;34mClearing the history\033[m')
try:
response = send_request(
method='DELETE',
url=f'{self.host}:{self.port}'
f'/=^.^=/history?path={self.path}'
f'&method={self.method}'
)
logging.info('\033[1;34mSuccessfully clean\033[m')
return response
except Exception as error:
            logging.error('\033[1;31mFailed to clear history, '
                          'Error: %s\033[m', error)
raise Exception(f'Failed to clear history\n'
f'Error: {error}') from error
def delete(self):
"""
Delete a route.
:return: response http
"""
logging.info('\033[1;34mDeleting the route\033[m')
try:
response = send_request(
method='DELETE',
url=f'{self.host}:{self.port}'
f'/=^.^=/route?path={self.path}'
f'&method={self.method}'
)
logging.info('\033[1;34mSuccessfully deleted\033[m')
return response
except Exception as error:
            logging.error('\033[1;31mFailed to delete http route, '
                          'Error: %s\033[m', error)
raise Exception(f'Failed to delete http route\n'
f'Error: {error}') from error
def update(self, response_status, response_body):
"""
Update the route.
:param response_status: int
:param response_body: string
:return: response dictionary
"""
logging.info('\033[1;34mUpdating the route\033[m')
http_route = {
"response": {
"code": response_status,
"body": response_body
}
}
try:
response = send_request(
method='PUT',
url=f'{self.host}:{self.port}'
f'/=^.^=/route?path={self.path}'
f'&method={self.method}',
body=http_route
)
response_dictionary = get_body(response)
logging.info('\033[1;34mSuccessfully updated\033[m')
return response_dictionary
except Exception as error:
            logging.error('\033[1;31mFailed to update route, '
                          'Error: %s\033[m', error)
raise Exception(f'Failed to update route\n'
f'Error: {error}') from error
def details(self):
"""
Get the details of route.
:return: response dictionary
"""
logging.info('\033[1;34mGetting the route\033[m')
try:
response = send_request(
method='GET',
url=f'{self.host}:{self.port}'
f'/=^.^=/route?path={self.path}'
f'&method={self.method}'
)
response_dictionary = get_body(response)
logging.info('\033[1;34mSuccessfully obtained\033[m')
return response_dictionary
except Exception as error:
            logging.error('\033[1;31mFailed to get details http route, '
                          'Error: %s\033[m', error)
raise Exception(f'Failed to get details http route\n'
f'Error: {error}') from error
def request(self, body='', hearders=''):
"""
Make a request in route.
:param body: string
:param hearders: json
:return: response http
"""
        logging.info('\033[1;34mSending a %s request to the route\033[m',
                     self.method)
try:
response = send_request(
method=self.method,
url=f'{self.host}:{self.port}{self.path}',
body=body,
header=hearders
)
logging.info('\033[1;34mSuccessfully requested\033[m')
return response
except Exception as error:
            logging.error('\033[1;31mFailed to request http route, '
                          'Error: %s\033[m', error)
raise Exception(f'Failed to request http route\n'
f'Error: {error}') from error
|
import collections
import os
import sys
import PIL
from . import Image
modules = {
"pil": "PIL._imaging",
"tkinter": "PIL._tkinter_finder",
"freetype2": "PIL._imagingft",
"littlecms2": "PIL._imagingcms",
"webp": "PIL._webp",
}
def check_module(feature):
if not (feature in modules):
raise ValueError("Unknown module %s" % feature)
module = modules[feature]
try:
__import__(module)
return True
except ImportError:
return False
def get_supported_modules():
return [f for f in modules if check_module(f)]
codecs = {"jpg": "jpeg", "jpg_2000": "jpeg2k", "zlib": "zip", "libtiff": "libtiff"}
def check_codec(feature):
if feature not in codecs:
raise ValueError("Unknown codec %s" % feature)
codec = codecs[feature]
return codec + "_encoder" in dir(Image.core)
def get_supported_codecs():
return [f for f in codecs if check_codec(f)]
features = {
"webp_anim": ("PIL._webp", "HAVE_WEBPANIM"),
"webp_mux": ("PIL._webp", "HAVE_WEBPMUX"),
"transp_webp": ("PIL._webp", "HAVE_TRANSPARENCY"),
"raqm": ("PIL._imagingft", "HAVE_RAQM"),
"libjpeg_turbo": ("PIL._imaging", "HAVE_LIBJPEGTURBO"),
"libimagequant": ("PIL._imaging", "HAVE_LIBIMAGEQUANT"),
}
def check_feature(feature):
if feature not in features:
raise ValueError("Unknown feature %s" % feature)
module, flag = features[feature]
try:
imported_module = __import__(module, fromlist=["PIL"])
return getattr(imported_module, flag)
except ImportError:
return None
def get_supported_features():
return [f for f in features if check_feature(f)]
def check(feature):
return (
feature in modules
and check_module(feature)
or feature in codecs
and check_codec(feature)
or feature in features
and check_feature(feature)
)
def get_supported():
ret = get_supported_modules()
ret.extend(get_supported_features())
ret.extend(get_supported_codecs())
return ret
def pilinfo(out=None, supported_formats=True):
if out is None:
out = sys.stdout
Image.init()
print("-" * 68, file=out)
print("Pillow {}".format(PIL.__version__), file=out)
py_version = sys.version.splitlines()
print("Python {}".format(py_version[0].strip()), file=out)
for py_version in py_version[1:]:
print(" {}".format(py_version.strip()), file=out)
print("-" * 68, file=out)
print(
"Python modules loaded from {}".format(os.path.dirname(Image.__file__)),
file=out,
)
print(
"Binary modules loaded from {}".format(os.path.dirname(Image.core.__file__)),
file=out,
)
print("-" * 68, file=out)
for name, feature in [
("pil", "PIL CORE"),
("tkinter", "TKINTER"),
("freetype2", "FREETYPE2"),
("littlecms2", "LITTLECMS2"),
("webp", "WEBP"),
("transp_webp", "WEBP Transparency"),
("webp_mux", "WEBPMUX"),
("webp_anim", "WEBP Animation"),
("jpg", "JPEG"),
("jpg_2000", "OPENJPEG (JPEG2000)"),
("zlib", "ZLIB (PNG/ZIP)"),
("libtiff", "LIBTIFF"),
("raqm", "RAQM (Bidirectional Text)"),
("libimagequant", "LIBIMAGEQUANT (Quantization method)"),
]:
if check(name):
print("---", feature, "support ok", file=out)
else:
print("***", feature, "support not installed", file=out)
print("-" * 68, file=out)
if supported_formats:
extensions = collections.defaultdict(list)
for ext, i in Image.EXTENSION.items():
extensions[i].append(ext)
for i in sorted(Image.ID):
line = "{}".format(i)
if i in Image.MIME:
line = "{} {}".format(line, Image.MIME[i])
print(line, file=out)
if i in extensions:
print(
"Extensions: {}".format(", ".join(sorted(extensions[i]))), file=out
)
features = []
if i in Image.OPEN:
features.append("open")
if i in Image.SAVE:
features.append("save")
if i in Image.SAVE_ALL:
features.append("save_all")
if i in Image.DECODERS:
features.append("decode")
if i in Image.ENCODERS:
features.append("encode")
print("Features: {}".format(", ".join(features)), file=out)
print("-" * 68, file=out)
|
class Foo:
class Boo():
def foo(self):
print "rrrrr"
|
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages
install_requires = ['PyYAML==3.10', 'virtualenvwrapper==4.0'] #virtualenvwrapper 4.1.1 has broken packaging
if sys.version_info < (2, 7):
install_requires += ['ordereddict==1.1', 'argparse==1.2.1']
setup(
name='Batman',
version='0.7.0',
description='A deployment toolbelt',
author='Kevin McCarthy',
author_email='me@kevinmccarthy.org',
url='https://github.com/realgeeks/batman',
packages=find_packages(),
license='MIT',
install_requires=install_requires,
entry_points={
'console_scripts': [
'batman = batman.main:batmain',
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Topic :: System :: Installation/Setup',
'Topic :: Software Development :: Version Control',
'License :: OSI Approved :: MIT License',
],
)
|
# coding = utf-8
import numpy as np
import cv2
def add_gasuss_noise(image, mean=0, var=0.001):
    '''
    Add Gaussian noise.
    mean : mean of the noise
    var : variance of the noise
    '''
image = np.array(image/255, dtype=float)
noise = np.random.normal(mean, var ** 0.5, image.shape)
out = image + noise
if out.min() < 0:
low_clip = -1.
else:
low_clip = 0.
out = np.clip(out, low_clip, 1.0)
out = np.uint8(out*255)
return out
def add_haze(image, t=0.6, A=1):
    '''
    Add haze.
    t : transmittance, 0~1
    A : atmospheric light
    '''
out = image*t + A*255*(1-t)
return out
def ajust_image(image, cont=1, bright=0):
    '''
    Adjust contrast and brightness.
    cont : contrast; should be tuned together with brightness
    bright : brightness
    '''
out = np.uint8(np.clip((cont * image + bright), 0, 255))
    # tmp = np.hstack((img, res))  # stack the two images side by side for comparison
return out
def ajust_image_hsv(image, h=1, s=1, v=0.8):
    '''
    Adjust the HSV channels; scale the V channel to change brightness.
    h, s, v : scaling factors for the respective channels
    '''
HSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
H, S, V = cv2.split(HSV)
H2 = np.uint8(H * h)
S2 = np.uint8(S * s)
V2 = np.uint8(V * v)
hsv_image = cv2.merge([H2, S2, V2])
out = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
return out
def ajust_jpg_quality(image, q=100, save_path=None):
    '''
    Adjust the amount of JPEG compression distortion.
    q : compression quality, 0~100
    '''
if save_path is None:
cv2.imwrite("jpg_tmp.jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), q])
out = cv2.imread('jpg_tmp.jpg')
return out
else:
cv2.imwrite(save_path, image, [int(cv2.IMWRITE_JPEG_QUALITY), q])
def add_gasuss_blur(image, kernel_size=(3, 3), sigma=0.1):
    '''
    Add Gaussian blur.
    kernel_size : blur kernel size
    sigma : standard deviation
    '''
out = cv2.GaussianBlur(image, kernel_size, sigma)
return out
def test_methods():
img = cv2.imread('test.jpg')
out = add_haze(img)
cv2.imwrite("add_haze.jpg", out)
out = add_gasuss_noise(img)
cv2.imwrite("add_gasuss_noise.jpg", out)
out = add_gasuss_blur(img)
cv2.imwrite("add_gasuss_blur.jpg", out)
out = ajust_image(img)
cv2.imwrite("ajust_image.jpg", out)
out = ajust_image_hsv(img)
cv2.imwrite("ajust_image_hsv.jpg", out)
ajust_jpg_quality(img, save_path='ajust_jpg_quality.jpg')
if __name__ == '__main__':
    test_methods()
|
import numpy as np
import pymc3 as pm
import pandas as pd
import arviz as az
from scipy import stats
from scipy.special import logsumexp
from bayesian_routines import standardize
import matplotlib.pyplot as plt
np.random.seed(20897234)
# 7H1
data = pd.read_csv('Laffer.csv', delimiter=';')
rate = standardize(np.array(data['tax_rate']))
revenue = standardize(np.array(data['tax_revenue']))
n_samples = 2000 // 4
n_tuning = 1000
with pm.Model() as linear:
#a = pm.Normal('b0', mu=0., sigma=0.5)
b = pm.Normal('b1', mu=0., sigma=0.5)
s = pm.Exponential('s', lam=0.5)
m = b * rate
r = pm.Normal('r', mu=m, sigma=s, observed=revenue)
traceLinear = pm.sample(n_samples, tune=n_tuning)
with pm.Model() as quadratic:
a = pm.Normal('b0', mu=0., sigma=0.5)
b = pm.Normal('b1', mu=0., sigma=0.5)
c = pm.Normal('b2', mu=0., sigma=0.5)
s = pm.Exponential('s', lam=0.5)
m = a + b * rate + c * (rate ** 2)
r = pm.Normal('r', mu=m, sigma=s, observed=revenue)
traceQuadratic = pm.sample(n_samples, tune=n_tuning)
with pm.Model() as cubic:
a = pm.Normal('b0', mu=0., sigma=0.5)
b = pm.Normal('b1', mu=0., sigma=0.5)
c = pm.Normal('b2', mu=0., sigma=0.5)
d = pm.Normal('b3', mu=0., sigma=0.5)
s = pm.Exponential('s', lam=0.5)
m = a + b * rate + c * (rate ** 2) + d * (rate ** 3)
r = pm.Normal('r', mu=m, sigma=s, observed=revenue)
traceCubic = pm.sample(n_samples, tune=n_tuning)
r = az.compare({'L': traceLinear, 'Q':traceQuadratic, 'C':traceCubic}, 'WAIC')
print(r)
'''
#------------------------------------------------------------------------------------------------------------------
# 7H2
ww = az.waic(traceLinear, pointwise=True)
sInd = np.argmax(revenue)
sampleImportance = np.array(ww[6])
with pm.Model() as linearRobust:
a = pm.Normal('b0', mu=0., sigma=0.5)
b = pm.Normal('b1', mu=0., sigma=0.5)
s = pm.Exponential('s', lam=0.5)
m = a + b * rate
r = pm.StudentT('r', nu=1., mu=m, sigma=s, observed=revenue)
traceLinearRobust = pm.sample(n_samples, tune=n_tuning)
ww = az.waic(traceLinearRobust, pointwise=True)
sampleImportance2 = np.array(ww[6])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(sampleImportance, sampleImportance2)
ax.set_xlabel('Normal')
ax.set_ylabel('Student t')
ax.set_xlim(0, 25)
ax.set_ylim(0, 25)
# effect is reduce ~by half, but still it is pretty big, much bigger than the other points
#------------------------------------------------------------------------------------------------------------------
# 7H3
def entropy(x):
return - np.sum(x * np.log2(x))
def kldivergence(p,q):
return np.sum(p * np.log2(p / q))
d = np.array([[0.2, 0.2, 0.2, 0.2, 0.2], [0.8, 0.1, 0.05, 0.025, 0.025], [0.05, 0.15, 0.7, 0.05, 0.05]])
e = np.zeros(d.shape[0])
for i, island in enumerate(d):
e[i] = entropy(island)
kls = np.zeros((3,3))
for i in range(3):
for j in range(3):
if i != j:
kls[i,j] = kldivergence(d[i], d[j])
# entropy decreases from 1, 3, 2. 1 is the most 'random'. It is very much expecting species A very often
# more entropy means less KL divergence to other distributions, because it's harder to be surprised by what you see
plt.show()
#------------------------------------------------------------------------------------------------------------------
# 7H5
data = pd.read_csv('../chapter-6/foxes.csv', delimiter=';')
area = standardize(np.array(data['area']))
avgfood = standardize(np.array(data['avgfood']))
weight = standardize(np.array(data['weight']))
groupsize = standardize(np.array(data['groupsize']))
mp = 0.
sp = 0.4
lp = 0.5
n_samples = 2000 // 4
n_tuning = 1000
with pm.Model() as fox1:
w_f = pm.Normal('wf', mu=mp, sigma=sp)
w_g = pm.Normal('wg', mu=mp, sigma=sp)
w_a = pm.Normal('wa', mu=mp, sigma=sp)
w_0 = pm.Normal('w0', mu=mp, sigma=sp)
s = pm.Exponential('s', lam=lp)
mu = w_0 + w_f * avgfood + w_g * groupsize + w_a * area
W = pm.Normal('w', mu=mu, sigma=s, observed=weight)
trace1 = pm.sample(n_samples, tune=n_tuning)
with pm.Model() as fox2:
w_f = pm.Normal('wf', mu=mp, sigma=sp)
w_g = pm.Normal('wg', mu=mp, sigma=sp)
w_0 = pm.Normal('w0', mu=mp, sigma=sp)
s = pm.Exponential('s', lam=lp)
mu = w_0 + w_f * avgfood + w_g * groupsize
W = pm.Normal('w', mu=mu, sigma=s, observed=weight)
trace2 = pm.sample(n_samples, tune=n_tuning)
with pm.Model() as fox3:
w_g = pm.Normal('wg', mu=mp, sigma=sp)
w_a = pm.Normal('wa', mu=mp, sigma=sp)
w_0 = pm.Normal('w0', mu=mp, sigma=sp)
s = pm.Exponential('s', lam=lp)
mu = w_0 + w_g * groupsize + w_a * area
W = pm.Normal('w', mu=mu, sigma=s, observed=weight)
trace3 = pm.sample(n_samples, tune=n_tuning)
with pm.Model() as fox4:
w_f = pm.Normal('wf', mu=mp, sigma=sp)
w_0 = pm.Normal('w0', mu=mp, sigma=sp)
s = pm.Exponential('s', lam=lp)
mu = w_0 + w_f * avgfood
W = pm.Normal('w', mu=mu, sigma=s, observed=weight)
trace4 = pm.sample(n_samples, tune=n_tuning)
with pm.Model() as fox5:
w_a = pm.Normal('wa', mu=mp, sigma=sp)
w_0 = pm.Normal('w0', mu=mp, sigma=sp)
s = pm.Exponential('s', lam=lp)
mu = w_0 + w_a * area
W = pm.Normal('w', mu=mu, sigma=s, observed=weight)
trace5 = pm.sample(n_samples, tune=n_tuning)
r = az.compare({'m1': trace1, 'm2':trace2, 'm3':trace3, 'm4': trace4, 'm5': trace5}, 'WAIC')
'''
# the model with all variables is the best
# ignoring group size is really bad, because group size carries a negative effect that masks the positive effect of the other two variables
# using area instead of avgfood is slightly worse, and the difference is more consistent (smaller std error)
# probably because area is an indirect cause of weight, while avgfood is a direct one
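# A short inspection sketch: az.compare returns a ranked table (a pandas DataFrame);
# the conclusions above are read off the WAIC difference and its standard error
# columns (exact column names depend on the ArviZ version).
print(r)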
|
"""Package for defining custom template tags, for use in Django templates."""
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import http.client, urllib.parse
import json
import os
import numpy as np
import hashlib
import base64
def calcFileSha256(filename):
    """Calculate the SHA-256 hex digest of a file."""
    with open(filename, "rb") as f:
sha256obj = hashlib.sha256()
sha256obj.update(f.read())
hash_value = sha256obj.hexdigest()
return hash_value
class FileSyncClient:
def __init__(self, ip_, port_):
self.ip = ip_
self.port = port_
pass
def getAllSyncDirectory(self):
headers = {'Connection': 'keep-alive'}
conn = http.client.HTTPConnection(self.ip, self.port, timeout=10)
conn.request("GET", "/filesync/getAllSyncDirctory", headers=headers)
res = conn.getresponse()
data = res.read().decode('utf8')
print(data)
try:
jobj = json.loads(data)
if jobj is None or type(jobj) is not dict:
raise Exception('invalid data')
else:
return jobj['dirs']
pass
except Exception as e:
raise e
def __getSyncDirectoryMeta(self, sdid_):
headers = {'Connection': 'keep-alive'}
body = "{" \
+ "\"SDID\": \"" + sdid_ + "\"" \
+ "}"
conn = http.client.HTTPConnection(self.ip, self.port, timeout=10)
conn.request("POST", "/filesync/getSyncDirectoryMeta", headers=headers, body=body)
res = conn.getresponse()
data = res.read().decode('utf8')
print(data)
try:
jobj = json.loads(data)
if jobj is None or type(jobj) is not dict:
raise Exception('invalid data')
else:
return jobj
pass
except Exception as e:
raise e
def __updateFile(self, sdid_, fop_, fstate_, file_, buf_):
        # TODO: possible file-encoding issue - binary content should be read and base64-encoded before sending
headers = {'Connection': 'keep-alive',
'Content-Type': 'multipart; boundary=--AAA'
}
body = ''
body_file_header = "{" \
+ "\"filepath\": \"" + file_ + "\"," \
+ "\"SDID\": \"" + sdid_ + "\"," \
+ "\"fop\": " + str(fop_) + "," \
+ "\"fstate\": " + str(fstate_) \
+ "}"
body += '----AAA\r\n' \
+ 'Content-Length: ' + str(len(body_file_header.encode())) + '\r\n\r\n' \
+ body_file_header + '\r\n'\
+ '----AAA\r\n' \
+ 'Content-Length: ' + str(len(str(buf_))) + '\r\n\r\n' \
+ str(buf_) + "\r\n" \
+ '----AAA--\r\n'
conn = http.client.HTTPConnection(self.ip, self.port, timeout=10)
conn.request("POST", "/filesync/updateFile", headers=headers, body=body.encode('utf8'))
res = conn.getresponse()
data = res.read().decode('utf8')
print(data)
try:
jobj = json.loads(data)
if jobj is None or type(jobj) is not dict:
raise Exception('invalid data')
if jobj['status'] != 0:
raise Exception('update file failed.')
except Exception as e:
raise e
pass
def __removeFile(self, sdid_, file_):
        # TODO: possible file-encoding issue - binary content should be read and base64-encoded before sending
headers = {'Connection': 'keep-alive',
'Content-Type': 'multipart; boundary=--AAA'
}
body = ''
body_file_header = "{" \
+ "\"filepath\": \"" + file_ + "\"," \
+ "\"SDID\": \"" + sdid_ + "\"," \
+ "\"fop\": " + str(2) + "," \
+ "\"fstate\": " + str(0) \
+ "}"
buf_ = 'Delete File.'
body += '----AAA\r\n' \
+ 'Content-Length: ' + str(len(body_file_header.encode())) + '\r\n\r\n' \
+ body_file_header + '\r\n'\
+ '----AAA\r\n' \
+ 'Content-Length: ' + str(len(str(buf_))) + '\r\n\r\n' \
+ str(buf_) + "\r\n" \
+ '----AAA--\r\n'
conn = http.client.HTTPConnection(self.ip, self.port, timeout=10)
conn.request("POST", "/filesync/updateFile", headers=headers, body=body.encode('utf8'))
res = conn.getresponse()
data = res.read().decode('utf8')
print(data)
try:
jobj = json.loads(data)
if jobj is None or type(jobj) is not dict:
raise Exception('invalid data')
if jobj['status'] != 0:
raise Exception('update file failed.')
except Exception as e:
raise e
pass
def __uploadFile(self, sdid_, root_, file_) -> bool:
with open(os.path.join(root_, file_), 'rb') as f:
fop = 0
while 1:
buf = f.read(1024 * 1024)
b64 = base64.encodebytes(buf)
msg = b64.decode()
if buf != b'':
try:
self.__updateFile(sdid_, fop, 1, file_, msg)
except Exception as e:
return False
else:
try:
self.__updateFile(sdid_, fop, 0, file_, msg)
except Exception as e:
return False
break
fop = 1
pass
return True
@staticmethod
def __scan_dir(dir_path_):
if not os.path.isdir(dir_path_):
raise Exception("invalid dir_path_ " + dir_path_)
file_list = []
for root, dirs, files in os.walk(dir_path_):
begin = 0
if dir_path_[-1] == '\\' or dir_path_[-1] == '/':
begin = len(dir_path_)
else:
begin = len(dir_path_) + 1
root_path = root[begin:].replace('\\', '/')
for file in files:
if root_path == '':
file_list.append(file.replace("\\", '/'))
else:
file_list.append(root_path + '/' + file.replace("\\", '/'))
return file_list
def sync(self, sdid_, local_dir_):
if type(local_dir_) is not str or type(sdid_) is not str:
print("invalid local path.")
raise Exception("invalid param")
if local_dir_[-1] != '\\' and local_dir_[-1] != '/':
local_dir_ += '/'
meta_ = self.__getSyncDirectoryMeta(sdid_)
# TODO 20210520 scan local dir and
if type(meta_) is not dict:
raise Exception("invalid sdid_ " + sdid_)
        local_list = np.array(self.__scan_dir(local_dir_))
remote_list = np.array(meta_['dir_info'])
for it in local_list:
# if it == 'test_bills/微信支付账单(20190101-20190301).csv':
# print('a')
# pass
ret = np.where(remote_list[:, 0] == it)
if np.size(ret) == 1:
'''remote file exist'''
if os.path.isfile(local_dir_+it):
local_sha256 = calcFileSha256(local_dir_+it)
remote_sha256 = remote_list[ret[0], 1]
if local_sha256 == remote_sha256:
pass
else:
if not self.__uploadFile(sdid_, local_dir_, it):
print("upload file faild " + it)
pass
pass
pass
remote_list = np.delete(remote_list, ret[0], axis=0)
elif np.size(ret) > 1:
print("duplicate file")
continue
else:
if not self.__uploadFile(sdid_, local_dir_, it):
print("upload file faild " + it)
continue
pass
print("---- print will delete")
for it in remote_list:
print(it)
try:
self.__removeFile(sdid_, it[0])
except Exception as e:
pass
pass
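# A hypothetical helper (sketch, not wired into the class above) showing how the
# per-file JSON header could be built with json.dumps instead of manual string
# concatenation, which also escapes quotes/backslashes in file paths.
def build_file_header(filepath, sdid, fop, fstate):
    """Return the JSON header describing one file chunk of the multipart body."""
    return json.dumps({"filepath": filepath, "SDID": sdid, "fop": fop, "fstate": fstate})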
if __name__ == '__main__':
fsyn = FileSyncClient("127.0.0.1", 8444)
sds = fsyn.getAllSyncDirectory()
print('getAllSyncDirectory:\n' + str(sds))
fsyn.sync("c2e102a4-b7ee-11eb-9fa4-3c46d899fc43", "./source2")
pass
|
from CyberSource import *
import os
import json
from importlib.machinery import SourceFileLoader
config_file = os.path.join(os.getcwd(), "data", "Configuration.py")
configuration = SourceFileLoader("module.name", config_file).load_module()
def get_transaction_details_for_given_batch_id():
id = "12345"
try:
config_obj = configuration.Configuration()
client_config = config_obj.get_configuration()
api_instance = TransactionBatchesApi(client_config)
return_data, status, body = api_instance.get_transaction_batch_details(id)
print("API RESPONSE CODE : ", status)
print("API RESPONSE BODY : ", body)
# The Report obtained is being stored in a file
file_extension = (api_instance.api_client.last_response.urllib3_response.headers['Content-Type'])[-3:]
f = open(os.path.join(os.getcwd(), "resources", "BatchDetailsReport." + file_extension), "a+")
f.write(body)
f.close()
print("Batch Details Downloaded at the Location : " + os.path.join(os.getcwd(), "resources", "BatchDetailsReport." + file_extension))
except Exception as e:
print("Exception when calling TransactionBatchesApi->get_transaction_batch_details: %s\n" % e)
if __name__ == "__main__":
get_transaction_details_for_given_batch_id()
|
import paho.mqtt.publish as publish
import paho.mqtt.client as mqtt
topic = "fountainStateOff"
server = "168.128.36.204"
publish.single(
topic, payload=None, qos=0, retain=False, hostname=server,
port=1883, client_id="cabbage_client", keepalive=60, will=None, auth=None, tls=None,
protocol=mqtt.MQTTv31)
|
#!/usr/bin/env python
# Copyright (C) 2018 Michael Pilosov
# Michael Pilosov 01/21/2018
'''
The python script for building the
ConsistentBayes package and subpackages.
'''
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='cbayes',
version='0.3.26',
description='Consistent Bayesian Inversion',
author='Michael Pilosov',
author_email='mpilosov@gmail.com',
license='MIT',
url='https://github.com/mpilosov/ConsistentBayes/',
packages=['cbayes'],
install_requires=['matplotlib', 'scipy', 'numpy', 'nose', 'seaborn',
'ipykernel', 'ipywidgets', 'scikit-learn']
)
|
A_30_01_9 = {0: {'A': -0.273, 'C': 0.188, 'E': 0.612, 'D': 0.566, 'G': 0.063, 'F': 0.287, 'I': -0.054, 'H': -0.485, 'K': -0.921, 'M': -0.389, 'L': -0.073, 'N': 0.183, 'Q': 0.237, 'P': 0.557, 'S': -0.23, 'R': -0.832, 'T': 0.104, 'W': 0.296, 'V': 0.041, 'Y': 0.121}, 1: {'A': -0.287, 'C': 0.263, 'E': 0.858, 'D': 0.713, 'G': -0.157, 'F': -0.334, 'I': -0.231, 'H': 0.256, 'K': 0.006, 'M': -0.198, 'L': 0.008, 'N': 0.265, 'Q': -0.209, 'P': 0.781, 'S': -0.534, 'R': 0.493, 'T': -0.734, 'W': -0.087, 'V': -0.558, 'Y': -0.317}, 2: {'A': 0.278, 'C': 0.26, 'E': 0.49, 'D': 0.656, 'G': 0.316, 'F': -0.316, 'I': 0.086, 'H': -0.599, 'K': -0.83, 'M': -0.113, 'L': -0.087, 'N': -0.009, 'Q': 0.084, 'P': 0.337, 'S': 0.158, 'R': -1.159, 'T': 0.231, 'W': 0.155, 'V': 0.185, 'Y': -0.123}, 3: {'A': -0.118, 'C': -0.122, 'E': 0.192, 'D': 0.179, 'G': 0.187, 'F': -0.064, 'I': -0.021, 'H': 0.011, 'K': -0.009, 'M': -0.209, 'L': 0.045, 'N': -0.034, 'Q': 0.144, 'P': -0.052, 'S': 0.011, 'R': -0.13, 'T': 0.068, 'W': 0.05, 'V': -0.028, 'Y': -0.099}, 4: {'A': -0.156, 'C': 0.141, 'E': 0.15, 'D': 0.267, 'G': 0.056, 'F': -0.109, 'I': 0.012, 'H': -0.129, 'K': 0.045, 'M': -0.065, 'L': 0.062, 'N': 0.028, 'Q': -0.061, 'P': 0.103, 'S': -0.091, 'R': -0.167, 'T': -0.117, 'W': 0.189, 'V': -0.095, 'Y': -0.063}, 5: {'A': 0.093, 'C': 0.118, 'E': 0.193, 'D': 0.252, 'G': 0.013, 'F': -0.098, 'I': 0.036, 'H': -0.028, 'K': 0.083, 'M': -0.022, 'L': 0.082, 'N': -0.032, 'Q': -0.001, 'P': 0.081, 'S': -0.119, 'R': -0.153, 'T': -0.086, 'W': -0.186, 'V': 0.011, 'Y': -0.237}, 6: {'A': 0.017, 'C': -0.073, 'E': 0.38, 'D': 0.302, 'G': 0.087, 'F': 0.022, 'I': -0.222, 'H': -0.083, 'K': 0.168, 'M': -0.217, 'L': -0.104, 'N': 0.003, 'Q': -0.011, 'P': -0.353, 'S': 0.045, 'R': -0.065, 'T': 0.171, 'W': 0.039, 'V': -0.037, 'Y': -0.068}, 7: {'A': 0.073, 'C': 0.043, 'E': 0.039, 'D': 0.209, 'G': 0.07, 'F': -0.407, 'I': -0.051, 'H': -0.048, 'K': 0.2, 'M': 0.162, 'L': -0.056, 'N': 0.023, 'Q': 0.128, 'P': -0.263, 'S': -0.047, 'R': 0.117, 'T': 0.053, 'W': 0.034, 'V': -0.013, 'Y': -0.265}, 8: {'A': -0.746, 'C': 0.283, 'E': 0.384, 'D': 0.349, 'G': -0.216, 'F': 0.251, 'I': -0.299, 'H': 0.249, 'K': -1.203, 'M': -0.051, 'L': -0.189, 'N': 0.392, 'Q': 0.561, 'P': 0.122, 'S': 0.043, 'R': -0.09, 'T': 0.044, 'W': 0.599, 'V': -0.408, 'Y': -0.076}, -1: {'con': 4.23841}}
|
from powerline_shell.utils import BasicSegment
import os
import json
class Segment(BasicSegment):
def add_to_powerline(self):
with open('/root/.bluemix/config.json') as json_file:
config = json.load(json_file)
try:
self.powerline.append(" g:%s " % config['ResourceGroup']['Name'],
self.powerline.theme.IBMCLOUD_GROUP_FG,
self.powerline.theme.IBMCLOUD_GROUP_BG)
except:
pass
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [{
"label":
_("Recruitment"),
"items": [{
"type": "doctype",
"name": "Job Contract",
"description": _("Job Contract"),
}, {
"type": "doctype",
"name": "Contract Renewal",
"description": _("Contract Renewal"),
}]
}, {
"label":
_("Leaves and Holiday"),
"items": [{
"type": "doctype",
"name": "Vacation Settlement",
"description": _("Vacation Settlement"),
}, {
"type": "doctype",
"name": "Final Settlement",
"description": _("Final Settlement"),
}]
}, {
"label":
_("Travel and Expense Claim"),
"items": [{
"type": "doctype",
"name": "Mandate Form",
"description": _("Mandate Form"),
}, {
"type": "doctype",
"name": "Business Trip Fees",
"description": _("Business Trip Fees"),
}]
}, {
"label":
_("Payroll"),
"items": [{
"type": "doctype",
"name": "Increment Request",
"description": _("Increment Request"),
}, {
"type": "doctype",
"name": "Modification Salary",
"label": _("Modification Salary"),
"description": _(" ")
}, {
"type": "doctype",
"name": "Rewards",
"label": _("Rewards"),
"description": _(" ")
}, {
"type": "doctype",
"name": "Deduction",
"description": _("Deduction"),
}]
}, {
"label":
_("Employee and Attendance"),
"items": [{
"type": "doctype",
"name": "Joining Work",
"description": _("Joining Work"),
}]
}, {
"label":
_("Fleet Management"),
"items": [{
"type": "doctype",
"name": "Vehicle Repairing Request",
"description": _("Vehicle Repairing Request"),
}, {
"type": "doctype",
"name": "Automobile GP",
"description": _("Automobile GP"),
}, {
"type": "doctype",
"name": "Receiving Vehicle",
"description": _("Receiving Vehicle"),
}]
}, {
"label":
_("Violation"),
"items": [{
"type": "doctype",
"name": "Violation",
"description": _("Violation"),
}, {
"type": "doctype",
"name": "Violation Type",
"description": _("Violation Type"),
}, {
"type": "doctype",
"name": "Attendance Violation",
"description": _("Attendance Violation"),
}]
}, {
"label":
_("Employee and Attendance"),
"items": [{
"type": "doctype",
"name": "Receipt Custody",
"description": _("Receipt Custody"),
}, {
"type": "doctype",
"name": "ID Card",
"description": _("ID Card"),
}, {
"type": "doctype",
"name": "Employee Certificate",
"description": _("Employee Certificate"),
}, {
"type": "doctype",
"name": "Experience Certificate",
"description": _("Experience Certificate"),
}]
}, {
"label":
_("Insurance"),
"items": [{
"type": "doctype",
"name": "Employee Health Insurance",
"description": _("Employee Health Insurance"),
}, {
"type": "doctype",
"name": "Vehicle Insurance",
"description": _("Vehicle Insurance Provider"),
}, {
"type": "report",
"label": _("Employee Insurance Table"),
"name": "Employee Insurance",
"doctype": "Employee"
}, {
"type": "report",
"label": _("Vehicle Insurance Table"),
"name": "Vehicle Insurance",
"doctype": "Vehicle"
}, {
"type": "doctype",
"name": "Property and Facility",
"description": _("Property and Facility"),
}, {
"type": "doctype",
"name": "Property Insurance Criteria",
"description": _("Property Insurance Criteria"),
}]
}, {
"label":
_("Custodies and Telecom"),
"items": [{
"type": "doctype",
"name": "Custody",
"description": _("Custody"),
}, {
"type": "doctype",
"name": "Custody Type",
"description": _("Custody Type"),
}, {
"type": "doctype",
"name": "Custody Log",
"description": _("Custody Log"),
}, {
"type": "doctype",
"name": "Devices",
"description": _("Devices"),
}, {
"type": "doctype",
"label": _("SIM Cards & Connections"),
"name": "Telecom",
"description": _("SIM Cards & Connections"),
}, {
"type": "doctype",
"name": "Common Item",
"description": _("Common Item"),
}, {
"type": "doctype",
"name": "Software Licenses",
"description": _("Software Licenses"),
}]
}]
|
from __future__ import unicode_literals
from setuptools import setup, find_packages
import os
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_INI = os.path.join(PROJECT_DIR, "project.ini")
config = ConfigParser()
config.read(PROJECT_INI)
def get_config(opt):
return config.get("project", opt)
NAME = get_config("name")
DESCRIPTION = get_config("description")
URL = get_config("url")
AUTHOR = "nonamenix"
AUTHOR_EMAIL = "nonamenix@gmail.com"
README = "README.rst"
README_TXT = "README.txt"
LONG_DESCRIPTION = open(os.path.join(PROJECT_DIR, README)).read()
REQUIREMENTS_FILE = "requirements.txt"
REQUIREMENTS = open(os.path.join(PROJECT_DIR, REQUIREMENTS_FILE)).readlines()
VERSION = get_config("version")
DEV_VERSION = os.environ.get("DEV_VERSION")
if DEV_VERSION:
VERSION = "{}.dev{}".format(VERSION, DEV_VERSION)
config.set("project", "version", VERSION)
with open(PROJECT_INI, "w") as f:
config.write(f)
# create a README.txt file from the .rst README
with open(README_TXT, "wb") as f:
f.write(LONG_DESCRIPTION.encode())
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
include_package_data=True,
packages=find_packages(),
install_requires=REQUIREMENTS,
keywords=[
"aiohttp",
"aiohttp-server",
"aiohttp-swagger",
"hug",
"api",
"api-rest",
"rest",
"swagger",
"schema",
],
project_urls={
"Changelog": "https://github.com/nonamenix/aiohug/blob/master/changelog.md",
"Issues": "https://github.com/nonamenix/aiohug/issues",
},
)
# delete README.txt
os.remove(README_TXT)
|
# -*- coding: utf-8 -*-
# Generated by scripts/generate_labels.py
from __future__ import unicode_literals
# LearningActivities
READ = "wA01urpi"
CREATE = "UXADWcXZ"
PRACTICE = "VwRCom7G"
WATCH = "UD5UGM0z"
REFLECT = "3dSeJhqs"
EXPLORE = "#j8L0eq3"
LISTEN = "mkA1R3NU"
choices = (
(READ, "Read"),
(CREATE, "Create"),
(PRACTICE, "Practice"),
(WATCH, "Watch"),
(REFLECT, "Reflect"),
(EXPLORE, "Explore"),
(LISTEN, "Listen"),
)
LEARNINGACTIVITIESLIST = [
READ,
CREATE,
PRACTICE,
WATCH,
REFLECT,
EXPLORE,
LISTEN,
]
|
import time
class car(object):
def __init__(self,name):
print('init has been used')
self.__name = name
def __del__(self):
print('del has been used')
time.sleep(1)
print('%s has been killed'%self.__name)
car1 = car('bus')
car2 = car1
car3 = car1
print(id(car1), id(car2), id(car3))
print('car1 will be killed')
del(car1)
time.sleep(1)
print('car2 will be killed')
time.sleep(1)
del(car2)
print('car3 will be killed')
del(car3)
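# A small follow-up sketch (assumes CPython reference counting): __del__ only
# runs once the last reference to the object is dropped.
import sys
car4 = car('truck')
car5 = car4
print(sys.getrefcount(car4))  # counts car4, car5 and the temporary argument reference
del(car4)                     # no "killed" message yet - car5 still refers to the object
del(car5)                     # now __del__ runs, sleeps and prints the message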
del(car)
|
from collections import defaultdict
import logging
from operator import itemgetter
from numba import jit
import numpy as np
from shapely.geometry import LineString, MultiLineString, MultiPolygon, Polygon
from shapely.ops import linemerge, unary_union
from shapely.prepared import prep
from .utils import build_spatial_index, flatten, set_geom, take
logger = logging.getLogger(__package__)
def apply_topological_op(features, topo_op, **params):
"""
Apply topo_op to the graph formed by the polygon features.
The input features are an array of tuples (<geom>,<data>). The geoms can be a mix
of polygons and multipolygons
The lines that belong to multiple polygons are merged before being passed
to topo_op
"""
geoms, data = zip(*features)
multi, polygons = _decompose_multi(geoms)
# edges is a single MultiLineString
edges = _decompose(polygons)
faces = _build_topology(polygons, edges)
# topoop_edges must be mapped 1-1 with the output edges
topoop_edges, faces, edges = topo_op(faces, edges, **params)
# topoop_polygons are mapped 1-1 with the polygons
topoop_polygons = _build_geometry(faces, topoop_edges)
topoop_multi = _recompose_multi(multi, topoop_polygons)
topoop_features = zip(topoop_multi, data)
return topoop_features
def smooth_chaikin(features, iterations=2, keep_border=False):
"""
Smooth the edges using the Chaikin method
This op doesn't offer the same guarantees as simplify_dp : It is possible for
edges to cross after this op is applied
"""
sm_features = apply_topological_op(
features, _smooth_chaikin_edges, iterations=iterations, keep_border=keep_border
)
return sm_features
def _smooth_chaikin_edges(
faces, edges: MultiLineString, iterations, keep_border
) -> MultiLineString:
if keep_border:
border_index = _detect_border(faces)
xy_edge_index = _index_edges_by_xy(edges)
sm_edges = []
for edge_index, edge in enumerate(edges):
if keep_border and edge_index in border_index:
# nothing to do
sm_edges.append(edge)
continue
# the 2nd condition is so that there is no smoothing when
# the edge touches another edge at a single point.
# By construction, this point is always the first (and last)
# of coords
smooth_around = edge.is_ring and len(xy_edge_index[edge.coords[0]]) == 1
if smooth_around:
# so that the smoothing also happens between the last and first
# segment
coords = np.array(list(edge.coords) + [edge.coords[1]])
else:
coords = np.array(edge.coords)
# cf https://stackoverflow.com/a/47255374
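        # Chaikin corner cutting: every segment [P, Q] is replaced by the two points
        # 0.75*P + 0.25*Q and 0.25*P + 0.75*Q, so corners get cut off and the polyline
        # approaches a smooth curve as the number of iterations grows.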
for _ in range(iterations):
L = coords.repeat(2, axis=0)
R = np.empty_like(L)
R[0] = L[0]
R[2::2] = L[1:-1:2]
R[1:-1:2] = L[2::2]
R[-1] = L[-1]
coords = L * 0.75 + R * 0.25
coords = coords.tolist()
if smooth_around:
# points have been added in the loop
index_close = 2 ** iterations
coords = coords[index_close - 1 : -index_close]
sm_edges.append(LineString(coords))
# faces and edges have not been modified
return MultiLineString(sm_edges), faces, edges
def densify(features, max_distance: float = None, n: int = 1, keep_border=False):
"""
Add points to the boundaries of the polygons
Can be useful to control the smoothing performed by the Chaikin method
"""
ds_features = apply_topological_op(
features,
_densify_edges,
max_distance=max_distance,
n=n,
keep_border=keep_border,
)
return ds_features
def _densify_edges(
faces, edges: MultiLineString, max_distance, n, keep_border
) -> MultiLineString:
if keep_border:
border_index = _detect_border(faces)
ds_edges = []
for edge_index, edge in enumerate(edges):
if keep_border and edge_index in border_index:
# nothing to do
ds_edges.append(edge)
continue
coords = np.array(edge.coords)
if max_distance:
dists = _point_distance(coords)
ns = np.ceil(dists / max_distance).astype(np.int64)
else:
# np points => np-1 segments
ns = np.full(len(coords) - 1, n + 1)
# + 1 to contain the last point
n_ds_points = np.sum(ns) + 1
ds_coords = np.zeros((n_ds_points, 2))
_densify_edge(coords, ns, ds_coords)
ds_edges.append(LineString(zip(ds_coords[:, 0], ds_coords[:, 1])))
return MultiLineString(ds_edges), faces, edges
@jit(nopython=True)
def _densify_edge(input, ns, output):
curr_j = 0
for i in range(len(input) - 1):
n = ns[i]
start = input[i]
stop = input[i + 1]
diff = stop - start
for j in range(n):
output[curr_j + j] = start + diff * j / n
curr_j += n
# the last point is not processed in the loop: stays the same in the output
output[-1] = input[-1]
def simplify_dp(features, distance, keep_border=False):
"""
Simplify polygon features using the Douglas-Peucker method.
This op simplifies edges shared by multiple polygons in the same way. It will
also prevent edges from crossing each other.
"""
simpl_features = apply_topological_op(
features, _simplify_dp_edges, distance=distance, keep_border=keep_border
)
# remove degenerate polygons (no area)
f_simpl_features = []
for f in simpl_features:
        # if the polygon is degenerate, buffer(0) will make it empty
        # if it is a multipolygon, buffer(0) will remove degenerate member polygons
geom = f[0].buffer(0)
if geom.is_empty:
# degenerate
continue
f_simpl_features.append(set_geom(f, geom))
return f_simpl_features
def _simplify_dp_edges(faces, edges: MultiLineString, distance, keep_border):
if keep_border:
border_index = _detect_border(faces)
# simplify from Shapely does not change polylines composed of only 2 points
# so the edges at the border are cut into 2 point polylines
faces, edges = _segmentize_border(faces, edges, border_index)
    # edges.simplify performs Douglas-Peucker simplification (from Shapely / GEOS)
return edges.simplify(distance, True), faces, edges
def _decompose_multi(geoms):
multi = []
polygons = []
for geom in geoms:
if geom.type == "MultiPolygon":
polygons.extend(geom.geoms)
multi.append(
("MultiPolygon", list(range(len(polygons) - len(geom), len(polygons))))
)
else:
# Polygon
polygons.append(geom)
multi.append(("Polygon", [len(polygons) - 1]))
return multi, polygons
def _recompose_multi(multi, polygons):
rec_multi = []
for gtype, poly_indices in multi:
if gtype == "MultiPolygon":
rec_multi.append(MultiPolygon(take(polygons, poly_indices)))
else:
            # a single geometry
rec_multi.append(polygons[poly_indices[0]])
return rec_multi
def _decompose(polygons):
lines = flatten((_polygon_contour(p) for p in polygons))
union = unary_union(lines)
if isinstance(union, LineString):
edges = MultiLineString([union])
else:
edges = linemerge(union)
return edges
def _build_topology(polygons, edges):
"""
Decompose polygons in an ordered sequence of edges
"""
edge_si = build_spatial_index(edges)
faces = []
for polygon in polygons:
rings = [polygon.exterior] + list(polygon.interiors)
preps = map(prep, rings)
face = []
for ring, pring in zip(rings, preps):
indexes = list(edge_si.intersection(ring.bounds))
indexes = list(filter(lambda i: pring.contains(edges[i]), indexes))
# sequence of oriented edges for current ring
topo_ring = _build_topo_ring(
list(map(lambda i: edges[i], indexes)), indexes
)
face.append(topo_ring)
faces.append(face)
return faces
def _build_topo_ring(edges, edge_global_indexes):
# Second element of tuple indicates if the topo_ring has the same orientation
# as the edge
# init : arbitrary direction for first edge
result, node_xy = [(0, True)], edges[0].coords[-1]
indexes = set(range(1, len(edges)))
while len(indexes) > 0:
# tries to find edge that starts or ends with node_xy
next_ = None
candidates_next = []
for idx in indexes:
if node_xy in (edges[idx].coords[0], edges[idx].coords[-1]):
candidates_next.append(idx)
if len(candidates_next) == 1:
next_ = candidates_next[0]
elif len(candidates_next) == 2:
# can happen if the ring boundary touches itself at a single point
# (not valid but output by rasterio / GDAL in polygonize)
# one of the edges forms a closed ring (but not considered a hole)
# we take that edge as next
logger.warning("Invalid polygon: Self-touching contour")
if (
edges[candidates_next[0]].coords[0]
== edges[candidates_next[0]].coords[-1]
):
next_ = candidates_next[0]
elif (
edges[candidates_next[1]].coords[0]
== edges[candidates_next[1]].coords[-1]
):
next_ = candidates_next[1]
# else : should not happen (invalid polygon)
# if the input polygons are valid and not degenerate
# it should be always possible to find a next_
assert next_ is not None
# TODO have more explicit error messages in case of invalid inputs
if node_xy == edges[next_].coords[0]:
result.append((next_, True))
node_xy = edges[next_].coords[-1]
else:
result.append((next_, False))
node_xy = edges[next_].coords[0]
indexes.remove(next_)
return list(map(lambda r: (edge_global_indexes[r[0]], r[1]), result))
def _detect_border(faces):
edge_counter = defaultdict(int)
for if_, face in enumerate(faces):
for ir, ring in enumerate(face):
for ie, (edge_index, is_same_order) in enumerate(ring):
edge_counter[edge_index] += 1
return set([edge_index for edge_index, c in edge_counter.items() if c == 1])
def _segmentize_border(faces, edges, border_index):
"""
Cuts edges at the border (part of a single polygon)
into its constituent segments
"""
# to simple linestrings
edges = list(edges.geoms)
borders = []
for if_, face in enumerate(faces):
for ir, ring in enumerate(face):
for ie, (edge_index, is_same_order) in enumerate(ring):
if edge_index in border_index:
borders.append((if_, ir, ie, edge_index, is_same_order))
    # the edges at the border are part of only a single ring
# however, a ring can have multiple edges at the border
# we sort so that the greatest "ie" is processed first
# => when splice is performed, it won't disturb the next ones
borders = sorted(borders, reverse=True, key=itemgetter(2))
for border in borders:
if_, ir, ie, edge_index, order = border
coords = edges[edge_index].coords
segments = []
for i in range(len(coords) - 1):
# same order as the origin edge
segments.append(LineString([coords[i], coords[i + 1]]))
if not order:
segments = list(reversed(segments))
num_indices_added = len(segments) - 1
indices_added = list(
zip(
range(len(edges), len(edges) + num_indices_added),
[order] * num_indices_added,
)
)
# so that there are no holes and no need to change the existing indices
# first output segment takes the index of its edge
edges[edge_index] = segments[0]
# add new segments at the end
edges.extend(segments[1:])
# the index of the first segment (at "ie") stays the same (edge_index)
# the others are spliced after in the index list of the ring
splice_index = ie + 1
faces[if_][ir][splice_index:splice_index] = indices_added
return faces, MultiLineString(edges)
def _index_edges_by_xy(edges):
rindex = defaultdict(set)
for i, edge in enumerate(edges):
rindex[edge.coords[0]].add(i)
rindex[edge.coords[-1]].add(i)
return rindex
def _build_geometry(faces, edges):
polygons = []
for face in faces:
rings = []
for topo_ring in face:
ring = _build_geom_ring(topo_ring, edges)
rings.append(ring)
polygons.append(Polygon(rings[0], rings[1:]))
return polygons
def _build_geom_ring(ring, edges):
coords = []
for edge_index, is_same_order in ring:
if is_same_order:
coords.extend(edges[edge_index].coords[:-1])
else:
# revert
coords.extend(edges[edge_index].coords[:0:-1])
# close the contour
coords.append(coords[0])
return coords
def _polygon_contour(polygon):
return [LineString(tuple(polygon.exterior.coords))] + [
LineString(tuple(geom.coords)) for geom in polygon.interiors
]
def _point_distance(coords):
# the coordinates are each on a row with 2 elements
diff = np.diff(coords, axis=0)
dists = np.sqrt(diff[:, 0] ** 2 + diff[:, 1] ** 2)
return dists
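# A minimal usage sketch (assumptions: the package's utils and an rtree-backed
# spatial index are available, shapely<2 so multi-part geometries are indexable,
# and this module is executed as part of its package). Two squares sharing an
# edge are smoothed together; the shared edge is processed only once, so both
# output polygons stay consistent along it.
if __name__ == "__main__":
    left = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    right = Polygon([(1, 0), (2, 0), (2, 1), (1, 1)])
    features = [(left, {"id": "left"}), (right, {"id": "right"})]
    for geom, data in smooth_chaikin(features, iterations=2):
        print(data["id"], geom.wkt)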
|
# Adapted from the simple demo of the PCA9685 PWM servo/LED controller library.
# This script drives the PWM channels (throttle/yaw/pitch/roll) from MQTT messages.
# Author: Tony DiCola
# License: Public Domain
from __future__ import division
import time
import paho.mqtt.client as mqtt
import Adafruit_PCA9685
import serial
from lightTelemetry import LightTelemetry
pwm = Adafruit_PCA9685.PCA9685()
# Helper function to make setting a servo pulse width simpler.
def set_servo_pulse(channel, pulse):
if pulse<1000:
pulse=1000
elif pulse>2000:
pulse=2000
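    # Convert the pulse width in microseconds to 12-bit PWM ticks: at 50 Hz one
    # frame is 20 ms = 20000 us spread over 4096 ticks (~0.205 ticks/us); the
    # 0.2114 factor used here looks hand-calibrated for this board.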
t=0.2114*(pulse)
pwm.set_pwm(channel, 0, int(t))
#print t
def on_connect(client, userdata, flags, rc):
print("Connected to MQTT broker "+str(rc))
client.subscribe("test")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
#print(msg.topic+" "+str(msg.payload))
    pl = msg.payload.decode()  # payload arrives as bytes under Python 3
i=pl.index(":")
channel=pl[:i]
value=int(pl[i+1:])
print (channel,value)
c=-1
if channel=="throttle":
c=2
elif channel=="yaw":
c=3
elif channel=="pitch":
c=0
elif channel=="roll":
c=1
if c>=0:
set_servo_pulse(c,value)
if channel=="kill":
pwm.set_pwm(2, 0, 0)
pwm.set_pwm_freq(50)
set_servo_pulse(2,1000)
set_servo_pulse(0,1500)
set_servo_pulse(1,1500)
set_servo_pulse(3,1500)
client = mqtt.Client("", True, None, mqtt.MQTTv31)
client.on_connect = on_connect
client.on_message = on_message
client.connect("192.168.1.1", 1883, 60)
client.loop_start()
ser = serial.Serial(
port='/dev/ttyS0',
baudrate = 19200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
lt=LightTelemetry()
while True:
b=ser.read();
#print "serial",b
if b:
lt.processByte(b)
#time.sleep(0.01)
|
# Generated by Django 2.0.3 on 2018-07-02 17:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('queueapp', '0004_jenkins_project2'),
]
operations = [
migrations.AddField(
model_name='autofilter',
name='issues_per_cycle',
field=models.PositiveSmallIntegerField(default=4),
),
migrations.AddField(
model_name='jenkinsactuator',
name='multiple_count_lower',
field=models.PositiveSmallIntegerField(default=3, help_text='Inclusive'),
),
migrations.AddField(
model_name='jenkinsactuator',
name='multiple_count_upper',
field=models.PositiveSmallIntegerField(default=5, help_text='Inclusive'),
),
migrations.AlterField(
model_name='jenkinsactuator',
name='project_name2',
field=models.CharField(blank=True, help_text='Many issues at once (optional)', max_length=60),
),
]
|
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
urlpatterns = patterns('impromptu.views',
url(r'^filebrowser$', 'filebrowser', name='filebrowser'),
# NEW..
url(r'^(?P<num>.+)$', 'funpage', name='impromptu_fun_detail'),
url(r'^$', 'index', name='impromptu_home'),
)
|
from subprocess import Popen, STDOUT
from typing import List, Tuple
import itertools
onto_prefixes = ['SNOMEDCT_US','FMA','NCI']
middlefix = 'SCUI_to_keep'
#onto_postfixes = ['large','mini-body','mini-disease-body','mini-pharm-neoplas-body','mini-SF-NF','mini-SN-disease','mini-SN-pharm-neoplas','mini-SN-pharm','mini-SN-neoplas','mini-SN'] # full settings
onto_postfixes = ['large','mini-body','mini-SN-disease','mini-SN-pharm','mini-SN-neoplas'] # selected settings
run_part1_get_SCUI = True
run_part2_get_mappings = True
if run_part1_get_SCUI:
# part 1: run get_SCUI_in_onto_from_STYs.py to get SCUIs to keep
for onto_postfix in onto_postfixes:
command = 'python get_SCUI_in_onto_from_STYs.py -s %s' % onto_postfix
print('running ', command)
p = Popen(command, shell=True, stderr=STDOUT)
p.wait()
if 0 != p.returncode:
print('Command %s wrong!' % command)
continue
if run_part2_get_mappings:
# part 2: run get_mappings.py to get the mappings with the counts for each setting
# get ordered pairwise combinations
def unique_combinations(elements: List[str]) -> List[Tuple[str, str]]:
"""
from https://codereview.stackexchange.com/a/256954/257768
Precondition: `elements` does not contain duplicates.
Postcondition: Returns unique combinations of length 2 from `elements`.
>>> unique_combinations(["apple", "orange", "banana"])
[("apple", "orange"), ("apple", "banana"), ("orange", "banana")]
"""
return list(itertools.combinations(elements, 2))
for onto_prefix1, onto_prefix2 in unique_combinations(onto_prefixes):
for onto_postfix in onto_postfixes:
onto1_scui_fn = '_'.join([onto_prefix1,middlefix,onto_postfix]) + '.csv'
onto2_scui_fn = '_'.join([onto_prefix2,middlefix,onto_postfix]) + '.csv'
command = 'python get_mappings.py -o1 %s -o2 %s' % (onto1_scui_fn,onto2_scui_fn)
print('running ', command)
p = Popen(command, shell=True, stderr=STDOUT)
p.wait()
if 0 != p.returncode:
print('Command %s wrong!' % command)
continue
#else:
# print('Command %s completed successfully!' % command)
#Popen(command)
|
import sys
from tblib import pickling_support
pickling_support.install()
class TraceableException:
""" This class stores an Exception and its traceback, allowing it to be rethrown in another process (or thread) whilst maintaining a useful stack trace """
def __init__(self, error: BaseException):
if error is None:
raise ValueError("An exception must be provided")
self._exception = error
self._traceback = error.__traceback__ or sys.exc_info()[2]
def __repr__(self) -> str:
return f"TraceableException({repr(self._exception)})"
def get_error(self) -> BaseException:
"""Returns the original exception."""
return self._exception.with_traceback(self._traceback)
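# A minimal usage sketch (single-process for brevity; pickling_support above makes
# the traceback picklable, so the same wrapper can travel over e.g. a
# multiprocessing.Queue). The original "boom" frame survives the round trip.
if __name__ == "__main__":
    import pickle
    import traceback

    def _boom():
        raise RuntimeError("boom")

    try:
        _boom()
    except RuntimeError as exc:
        wrapped = TraceableException(exc)

    restored = pickle.loads(pickle.dumps(wrapped))
    try:
        raise restored.get_error()
    except RuntimeError:
        traceback.print_exc()  # still shows the _boom() frame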
|
from .condizione_pagamento import CondizionePagamentoViewSet
from .esigibilita_iva import EsigibilitaIVAViewSet
from .modalita_pagamento import ModalitaPagamentoViewSet
from .natura_operazione_iva import NaturaOperazioneIVAViewSet
from .regime_fiscale import RegimeFiscaleViewSet
__all__ = [
"CondizionePagamentoViewSet",
"EsigibilitaIVAViewSet",
"ModalitaPagamentoViewSet",
"NaturaOperazioneIVAViewSet",
"RegimeFiscaleViewSet",
]
|
"""
SiDE: Feature Learning in Signed Directed Networks
Authors: Junghwan Kim(kjh900809@snu.ac.kr), Haekyu Park(hkpark627@snu.ac.kr),
Ji-Eun Lee(dreamhunter@snu.ac.kr), U Kang (ukang@snu.ac.kr)
Data Mining Lab., Seoul National University
This software is free of charge under research purposes.
For commercial purposes, please contact the authors.
-------------------------------------------------------------------------
File: model.py
- A main model file implementing computation graph of side
Version: 1.0
"""
import tensorflow as tf
from multiprocessing import cpu_count
import threading
import time
import sys
class Side(object):
""" SiDE model inference graph """
def __init__(self, config, sess):
"""
Initializer to set local variables, preload random walk data and build graph
:param config: a dictionary containing keys like
'num_walks', 'embed_dim', 'window_size', 'neg_sample_size', 'damping_factor', 'balance_factor',
'regularization_param', 'batch_size', 'learning_rate', 'clip_norm',
'epochs_to_train', 'summary_interval', 'save_interval', 'final_walk_path', 'embed_path'
:param sess: tensorflow session to execute tensorflow operations
"""
self.embed_dim = config['embed_dim']
self.window_size = config['window_size']
self.neg_sample_size = config['neg_sample_size']
self.beta = config['damping_factor']
self.gamma = config['balance_factor']
self.regularization_param = config['regularization_param']
self.batch_size = config['batch_size']
self.learning_rate = config['learning_rate']
self.clip_norm = config['clip_norm']
self.epochs_to_train = config['epochs_to_train']
self.summary_interval = config['summary_interval']
self.save_interval = config['save_interval']
self.concurrent_step = config['concurrent_step'] if 'concurrent_step' in config else cpu_count()
self.final_walk_path = config['final_walk_path']
self.embed_path = config['embed_path']
self._sess = sess
self._node2idx = dict()
self._idx2node = list()
self.build_graph()
self.save_vocab()
def build_graph(self):
"""
Build computation graph using custom ops defined in side_kernels.cc
"""
t0 = time.time()
word2vec = tf.load_op_library("/home/javari2/semb/baseline-sign/wiki/src/methods/side/embedding/side_ops.so")
# preload random walk data and show statistics
(words, counts, words_per_epoch, self._epoch, self._words_processed,
self._examples, self._labels, self.num_pos, self.num_neg) = word2vec.skipgram_side(filename=self.final_walk_path,
batch_size=self.batch_size,
window_size=self.window_size)
(self.vocab_words, self.vocab_counts, self.words_per_epoch) = self._sess.run([words, counts, words_per_epoch])
self.vocab_size = len(self.vocab_words)
print("read walk file pipeline done in %ds" % (time.time() - t0))
self._idx2node = self.vocab_words
for i, w in enumerate(self._idx2node):
self._node2idx[w] = i
self._W_target = tf.Variable(
tf.random_uniform([self.vocab_size, self.embed_dim],
- 0.5 / self.embed_dim, 0.5 / self.embed_dim),
name="W_target")
self._W_context = tf.Variable(
tf.zeros([self.vocab_size, self.embed_dim]),
name="W_context")
tf.summary.histogram("target_weight", self._W_target)
tf.summary.histogram("context_weight", self._W_context)
self._b_out_pos = tf.Variable(tf.zeros([self.vocab_size]), name="b_out_pos")
self._b_out_neg = tf.Variable(tf.zeros([self.vocab_size]), name="b_out_neg")
self._b_in_pos = tf.Variable(tf.zeros([self.vocab_size]), name="b_in_pos")
self._b_in_neg = tf.Variable(tf.zeros([self.vocab_size]), name="b_in_neg")
tf.summary.histogram("positive_out_bias", self._b_out_pos)
tf.summary.histogram("negative_out_bias", self._b_out_neg)
tf.summary.histogram("positive_in_bias", self._b_in_pos)
tf.summary.histogram("negative_in_bias", self._b_in_neg)
self.multiplier = tf.multiply(
tf.multiply(
tf.pow(tf.constant([-1], dtype=tf.float32), tf.cast(self.num_neg, tf.float32)),
tf.pow(tf.constant([self.beta], dtype=tf.float32), tf.cast(self.num_pos + self.num_neg - 1, tf.float32))),
tf.pow(tf.constant([self.gamma], dtype=tf.float32), tf.cast(self.num_neg - 1, tf.float32))
)
self.global_step = tf.Variable(0, name="global_step")
words_to_train = float(self.words_per_epoch * self.epochs_to_train)
self._lr = self.learning_rate * tf.maximum(0.0001,
1.0 - tf.cast(self._words_processed, tf.float32) / words_to_train)
# define one step of training operation
inc = self.global_step.assign_add(1)
with tf.control_dependencies([inc]):
self._train = word2vec.neg_train_side(self._W_target,
self._W_context,
self._b_in_pos,
self._b_in_neg,
self._b_out_pos,
self._b_out_neg,
self._examples,
self._labels,
self._lr,
self.multiplier,
tf.constant(self.regularization_param),
vocab_count=self.vocab_counts.tolist(),
num_negative_samples=self.neg_sample_size)
self._sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver()
def save_vocab(self):
"""
Save vocabulary file from statistics of random walk data
"""
with open(self.embed_path + ".vocab", "w") as f:
for i in range(self.vocab_size):
                vocab_word = tf.compat.as_text(self.vocab_words[i]).encode("utf-8")
f.write("%s %d \n" % (vocab_word, self.vocab_counts[i]))
def _train_thread_body(self):
"""
Function called by each threads
Execute training operation as long as the current epoch is not changed
"""
initial_epoch, prev_words = self._sess.run([self._epoch, self._words_processed])
while True:
_, epoch, words = self._sess.run([self._train, self._epoch, self._words_processed])
if epoch != initial_epoch:
break
def train(self):
"""
Train the side model using multi threads
"""
initial_epoch, initial_words = self._sess.run([self._epoch, self._words_processed])
self.summary = tf.summary.merge_all()
self.writer = tf.summary.FileWriter(self.embed_path, self._sess.graph)
workers = []
for _ in range(self.concurrent_step):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
# Print statistics while multi-threads execute the training updates
last_words, last_time, last_summary_time, last_checkpoint_time = initial_words, time.time(), 0, 0
while True:
time.sleep(5)
(epoch, step, words, lr) = self._sess.run(
[self._epoch, self.global_step, self._words_processed, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (now - last_time)
print("Epoch %4d Step %8d: lr = %5.7f words/sec = %8.0f\r" % (epoch, step, lr, rate), end="")
sys.stdout.flush()
if now - last_summary_time > self.summary_interval:
summary, global_step = self._sess.run([self.summary, self.global_step])
self.writer.add_summary(summary, global_step)
last_summary_time = now
if epoch != initial_epoch:
break
for t in workers:
t.join()
|
import cv2
s=input("Enter The Name of the File: ")
#print(s)
a=int(input("Enter the Desired Breadth: "))
b=int(input("Enter the Desired Height: "))
t=input("Enter the Name of the New Resized File: ")
t="Files/"+t
s="Files/"+s
img=cv2.imread(s,1)
#cv2.imshow("Picture Box",img)
#cv2.waitKey(0)
resized_image=cv2.resize(img,(a,b))
cv2.imwrite(t,resized_image)
|
import logging
import os
from dataclasses import dataclass
from typing import Any, Iterable, Mapping, Optional
from reconcile.utils import gql
from reconcile.utils import raw_github_api
from reconcile.utils.secret_reader import SecretReader
from reconcile import queries
REPOS_QUERY = """
{
apps_v1 {
codeComponents {
url
resource
}
}
}
"""
QONTRACT_INTEGRATION = "github-repo-invites"
@dataclass
class CodeComponents:
urls: set[str]
known_orgs: set[str]
def _parse_code_components(
raw: Optional[Iterable[Mapping[str, Any]]]
) -> CodeComponents:
urls = set()
known_orgs = set()
for app in raw or []:
code_components = app["codeComponents"]
if not code_components:
continue
for code_component in app["codeComponents"]:
url = code_component["url"]
urls.add(url)
org = url[: url.rindex("/")]
known_orgs.add(org)
return CodeComponents(
urls=urls,
known_orgs=known_orgs,
)
def _accept_invitations(
github: raw_github_api.RawGithubApi, code_components: CodeComponents, dry_run: bool
) -> set[str]:
accepted_invitations = set()
urls = code_components.urls
known_orgs = code_components.known_orgs
for i in github.repo_invitations():
invitation_id = i["id"]
invitation_url = i["html_url"]
url = os.path.dirname(invitation_url)
accept = url in urls or any(url.startswith(org) for org in known_orgs)
if accept:
logging.info(["accept", url])
accepted_invitations.add(url)
if not dry_run:
github.accept_repo_invitation(invitation_id)
else:
logging.debug(["skipping", url])
return accepted_invitations
def run(dry_run):
gqlapi = gql.get_api()
result = gqlapi.query(REPOS_QUERY)
settings = queries.get_app_interface_settings()
secret_reader = SecretReader(settings=settings)
secret = settings["githubRepoInvites"]["credentials"]
token = secret_reader.read(secret)
g = raw_github_api.RawGithubApi(token)
code_components = _parse_code_components(result["apps_v1"])
accepted_invitations = _accept_invitations(g, code_components, dry_run)
return accepted_invitations
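# A small illustration (hypothetical data) of how component URLs are parsed: the
# "org" is everything up to the last "/" of a component URL, and an invitation is
# accepted when its repo URL matches a component or starts with a known org.
if __name__ == "__main__":
    sample = [{"codeComponents": [{"url": "https://github.com/acme/widgets",
                                   "resource": "upstream"}]}]
    parsed = _parse_code_components(sample)
    print(parsed.urls)        # {'https://github.com/acme/widgets'}
    print(parsed.known_orgs)  # {'https://github.com/acme'}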
|
from funcy import print_durations
import itertools
import functools
from collections import Counter
# Puzzle: https://adventofcode.com/2021/day/21
def create_die(maxnum):
    # deterministic die with maxnum sides, rolling 1,2,3,...,maxnum,1,2,3,...; enumerate adds the running roll count
yield from enumerate(itertools.cycle(range(1, maxnum + 1)), 1)
def wrap_around(field):
# game board with spaces 1 to 10, two players
# after 10, wraps around to 1
if field > 10:
field %= 10
if field == 0:
field = 10
return field
def player(startfield, die):
score = 0
field = startfield
while True:
walk = 0
for i in range(3):
diecount, dieval = next(die)
walk += dieval
field += walk
field = wrap_around(field)
score += field
yield diecount, score
@print_durations
def day21a(p1_start, p2_start):
# game board with spaces 1 to 10, two players
# after 10, wraps around to 1
# 100-sided die, rolls 1,2,3,...100,1,2,3...
# each player's turn, the player rolls the die three times
# and adds up the results. Then, the player moves their pawn that many times forward
# score: add current field
# score >= 1000: player wins immediately
# what do you get if you multiply the score of the losing player
# by the number of times the die was rolled during the game
die = create_die(100)
p1 = player(p1_start, die)
p2 = player(p2_start, die)
while True:
diecount, s1 = next(p1)
if s1 >= 1000:
return s2 * diecount
diecount, s2 = next(p2)
if s2 >= 1000:
return s1 * diecount
def step_count_dict():
x = []
for i in range(1, 4):
for j in range(1, 4):
for k in range(1, 4):
x.append(i + j + k)
# {3: 1, 4: 3, 5: 6, 6: 7, 7: 6, 8: 3, 9: 1}
return Counter(x)
scd = step_count_dict()
@functools.cache
def count_num_wins(p1_field, p1_score, p2_field, p2_score):
# p1 is throwing dice
    # when recursing with count_num_wins for the next move, switch p1 and p2 in the arguments
# When adding up -> switch back
p1_sum = 0
p2_sum = 0
for field_movement, num_throws in scd.items():
p1_field_new = p1_field + field_movement
p1_field_new = wrap_around(p1_field_new)
p1_score_new = p1_score + p1_field_new
if p1_score_new >= 21:
# p1 wins 1, p2 wins 0
p1_sum += num_throws
else:
# go again
p2_win, p1_win = count_num_wins(p2_field, p2_score, p1_field_new, p1_score_new)
p1_sum += p1_win * num_throws
p2_sum += p2_win * num_throws
return p1_sum, p2_sum
@print_durations
def day21b(p1_start, p2_start):
# same board, but three-sided die
# die not deterministic anymore
# die throw = new universe created
# count the number of wins for each possible die-throw
# winning score reduced to 21
# find the player that wins in more universes; in how many universes does that player win?
return max(count_num_wins(p1_start, 0, p2_start, 0))
if __name__ == "__main__":
print(day21a(4, 8))
assert day21a(4, 8) == 739785
print(day21a(8, 9))
assert count_num_wins(4, 0, 8, 0) == (444356092776315, 341960390180808)
assert day21b(8, 9) == 346642902541848
|
"""
Base class for connecting to network devices.
Connection methods are based on AsyncSSH and should be run inside an asyncio event loop.
"""
import asyncio
import re
import asyncssh
from netdev.exceptions import TimeoutError, DisconnectError
from netdev.logger import logger
class BaseDevice(object):
"""
Base Abstract Class for working with network devices
"""
def __init__(self, host=u'', username=u'', password=u'', port=22, device_type=u'', known_hosts=None,
local_addr=None, client_keys=None, passphrase=None, timeout=15, loop=None, tunnel=None,
agent_forwarding=False, x509_trusted_certs=None, x509_trusted_cert_paths=None,
client_host_keysign=False, client_host_keys=None, client_host=None, client_username=None,
gss_host=(), gss_delegate_creds=False, agent_path=(), client_version=(), kex_algs=(),
encryption_algs=(), mac_algs=(), compression_algs=(), signature_algs=()):
"""
Initialize base class for asynchronous working with network devices
:param str host: device hostname or ip address for connection
:param str username: username for logging to device
:param str password: user password for logging to device
:param int port: ssh port for connection. Default is 22
:param str device_type: network device type
:param known_hosts: file with known hosts. Default is None (no policy). With () it will use default file
:param str local_addr: local address for binding source of tcp connection
:param client_keys: path for client keys. Default in None. With () it will use default file in OS
:param str passphrase: password for encrypted client keys
:param float timeout: timeout in second for getting information from channel
:param loop: asyncio loop object
:param tunnel:
An existing SSH client connection that this new connection should
be tunneled over. If set, a direct TCP/IP tunnel will be opened
over this connection to the requested host and port rather than
connecting directly via TCP.
:param agent_forwarding: (optional)
Whether or not to allow forwarding of ssh-agent requests from
processes running on the server. By default, ssh-agent forwarding
requests from the server are not allowed.
:param client_host_keysign: (optional)
Whether or not to use `ssh-keysign` to sign host-based
authentication requests. If set to `True`, an attempt will be
made to find `ssh-keysign` in its typical locations. If set to
a string, that will be used as the `ssh-keysign` path. When set,
client_host_keys should be a list of public keys. Otherwise,
client_host_keys should be a list of private keys with optional
paired certificates.
:param client_host_keys: (optional)
A list of keys to use to authenticate this client via host-based
authentication. If `client_host_keysign` is set and no host keys
or certificates are specified, an attempt will be made to find
them in their typical locations. If `client_host_keysign` is
not set, host private keys must be specified explicitly or
host-based authentication will not be performed.
:param client_host: (optional)
The local hostname to use when performing host-based
authentication. If not specified, the hostname associated with
the local IP address of the SSH connection will be used.
:param client_username: (optional)
The local username to use when performing host-based
authentication. If not specified, the username of the currently
logged in user will be used.
:param gss_host: (optional)
The principal name to use for the host in GSS key exchange and
authentication. If not specified, this value will be the same
as the `host` argument. If this argument is explicitly set to
`None`, GSS key exchange and authentication will not be performed.
:param gss_delegate_creds: (optional)
Whether or not to forward GSS credentials to the server being
accessed. By default, GSS credential delegation is disabled.
:param agent_path: (optional)
The path of a UNIX domain socket to use to contact an ssh-agent
process which will perform the operations needed for client
public key authentication, or the :class:`SSHServerConnection`
to use to forward ssh-agent requests over. If this is not
specified and the environment variable `SSH_AUTH_SOCK` is
set, its value will be used as the path. If `client_keys`
is specified or this argument is explicitly set to `None`,
an ssh-agent will not be used.
:param client_version: (optional)
An ASCII string to advertise to the SSH server as the version of
this client, defaulting to `'AsyncSSH'` and its version number.
:param kex_algs: (optional)
A list of allowed key exchange algorithms in the SSH handshake,
taken from :ref:`key exchange algorithms <KexAlgs>`
:param encryption_algs: (optional)
A list of encryption algorithms to use during the SSH handshake,
taken from :ref:`encryption algorithms <EncryptionAlgs>`
:param mac_algs: (optional)
A list of MAC algorithms to use during the SSH handshake, taken
from :ref:`MAC algorithms <MACAlgs>`
:param compression_algs: (optional)
A list of compression algorithms to use during the SSH handshake,
taken from :ref:`compression algorithms <CompressionAlgs>`, or
`None` to disable compression
:param signature_algs: (optional)
A list of public key signature algorithms to use during the SSH
handshake, taken from :ref:`signature algorithms <SignatureAlgs>`
"""
if host:
self._host = host
else:
raise ValueError("Host must be set")
self._port = int(port)
self._device_type = device_type
self._timeout = timeout
if loop is None:
self._loop = asyncio.get_event_loop()
else:
self._loop = loop
"""Convert needed connect params to a dictionary for simplicity"""
self._connect_params_dict = {'host': self._host,
'port': self._port,
'username': username,
'password': password,
'known_hosts': known_hosts,
'local_addr': local_addr,
'client_keys': client_keys,
'passphrase': passphrase,
'tunnel': tunnel,
'agent_forwarding': agent_forwarding,
'loop': loop,
'x509_trusted_certs': x509_trusted_certs,
'x509_trusted_cert_paths': x509_trusted_cert_paths,
'client_host_keysign': client_host_keysign,
'client_host_keys': client_host_keys,
'client_host': client_host,
'client_username': client_username,
'gss_host': gss_host,
'gss_delegate_creds': gss_delegate_creds,
'agent_path': agent_path,
'client_version': client_version,
'kex_algs': kex_algs,
'encryption_algs': encryption_algs,
'mac_algs': mac_algs,
'compression_algs': compression_algs,
'signature_algs': signature_algs}
# Filling internal vars
self._stdin = self._stdout = self._stderr = self._conn = None
self._base_prompt = self._base_pattern = ''
self._MAX_BUFFER = 65535
self._ansi_escape_codes = False
_delimiter_list = ['>', '#']
"""All this characters will stop reading from buffer. It mean the end of device prompt"""
_pattern = r"{}.*?(\(.*?\))?[{}]"
"""Pattern for using in reading buffer. When it found processing ends"""
_disable_paging_command = 'terminal length 0'
"""Command for disabling paging"""
@property
def base_prompt(self):
"""Returning base prompt for this network device"""
return self._base_prompt
async def __aenter__(self):
"""Async Context Manager"""
await self.connect()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""Async Context Manager"""
await self.disconnect()
async def connect(self):
"""
Basic asynchronous connection method
        It connects to the device and performs some preparation steps.
        It usually uses 3 functions:
* _establish_connection() for connecting to device
* _set_base_prompt() for finding and setting device prompt
* _disable_paging() for non interactive output in commands
"""
logger.info("Host {}: Trying to connect to the device".format(self._host))
await self._establish_connection()
await self._set_base_prompt()
await self._disable_paging()
logger.info("Host {}: Has connected to the device".format(self._host))
async def _establish_connection(self):
"""Establishing SSH connection to the network device"""
logger.info('Host {}: Establishing connection to port {}'.format(self._host, self._port))
output = ""
# initiate SSH connection
fut = asyncssh.connect(**self._connect_params_dict)
try:
self._conn = await asyncio.wait_for(fut, self._timeout)
except asyncssh.DisconnectError as e:
raise DisconnectError(self._host, e.code, e.reason)
except asyncio.TimeoutError:
raise TimeoutError(self._host)
self._stdin, self._stdout, self._stderr = await self._conn.open_session(term_type='Dumb', term_size=(200, 24))
logger.info("Host {}: Connection is established".format(self._host))
# Flush unnecessary data
delimiters = map(re.escape, type(self)._delimiter_list)
delimiters = r"|".join(delimiters)
output = await self._read_until_pattern(delimiters)
logger.debug("Host {}: Establish Connection Output: {}".format(self._host, repr(output)))
return output
async def _set_base_prompt(self):
"""
Setting two important vars:
base_prompt - textual prompt in CLI (usually hostname)
        base_pattern - regexp for finding the end of a command; it is a platform-specific parameter
        For Cisco devices, base_pattern is "prompt(\(.*?\))?[#|>]"
"""
logger.info("Host {}: Setting base prompt".format(self._host))
prompt = await self._find_prompt()
# Strip off trailing terminator
self._base_prompt = prompt[:-1]
delimiters = map(re.escape, type(self)._delimiter_list)
delimiters = r"|".join(delimiters)
base_prompt = re.escape(self._base_prompt[:12])
pattern = type(self)._pattern
self._base_pattern = pattern.format(base_prompt, delimiters)
logger.debug("Host {}: Base Prompt: {}".format(self._host, self._base_prompt))
logger.debug("Host {}: Base Pattern: {}".format(self._host, self._base_pattern))
return self._base_prompt
async def _disable_paging(self):
"""Disable paging method"""
logger.info("Host {}: Trying to disable paging".format(self._host))
command = type(self)._disable_paging_command
command = self._normalize_cmd(command)
logger.debug("Host {}: Disable paging command: {}".format(self._host, repr(command)))
self._stdin.write(command)
output = await self._read_until_prompt()
logger.debug("Host {}: Disable paging output: {}".format(self._host, repr(output)))
if self._ansi_escape_codes:
output = self._strip_ansi_escape_codes(output)
return output
async def _find_prompt(self):
"""Finds the current network device prompt, last line only"""
logger.info("Host {}: Finding prompt".format(self._host))
self._stdin.write(self._normalize_cmd("\n"))
prompt = ''
delimiters = map(re.escape, type(self)._delimiter_list)
delimiters = r"|".join(delimiters)
prompt = await self._read_until_pattern(delimiters)
prompt = prompt.strip()
if self._ansi_escape_codes:
prompt = self._strip_ansi_escape_codes(prompt)
if not prompt:
raise ValueError("Host {}: Unable to find prompt: {}".format(self._host, repr(prompt)))
logger.debug("Host {}: Found Prompt: {}".format(self._host, repr(prompt)))
return prompt
async def send_command(self, command_string, pattern='', re_flags=0, strip_command=True, strip_prompt=True):
"""
Send a command to the device (supports interactive commands via pattern)
:param str command_string: command to execute, typically in privileged mode
:param str pattern: pattern to wait for in the output (for interactive commands)
:param re.flags re_flags: re flags for the pattern
:param bool strip_command: whether to strip the echoed command from the output
:param bool strip_prompt: whether to strip the trailing device prompt
:return: The output of the command
"""
logger.info('Host {}: Sending command'.format(self._host))
output = ''
command_string = self._normalize_cmd(command_string)
logger.debug("Host {}: Send command: {}".format(self._host, repr(command_string)))
self._stdin.write(command_string)
output = await self._read_until_prompt_or_pattern(pattern, re_flags)
# Some platforms have ansi_escape codes
if self._ansi_escape_codes:
output = self._strip_ansi_escape_codes(output)
output = self._normalize_linefeeds(output)
if strip_prompt:
output = self._strip_prompt(output)
if strip_command:
output = self._strip_command(command_string, output)
logger.debug("Host {}: Send command output: {}".format(self._host, repr(output)))
return output
def _strip_prompt(self, a_string):
"""Strip the trailing router prompt from the output"""
logger.info('Host {}: Stripping prompt'.format(self._host))
response_list = a_string.split('\n')
last_line = response_list[-1]
if self._base_prompt in last_line:
return '\n'.join(response_list[:-1])
else:
return a_string
async def _read_until_prompt(self):
"""Read channel until self.base_pattern detected. Return ALL data available"""
return await self._read_until_pattern(self._base_pattern)
async def _read_until_pattern(self, pattern='', re_flags=0):
"""Read channel until pattern detected. Return ALL data available"""
output = ''
logger.info("Host {}: Reading until pattern".format(self._host))
if not pattern:
pattern = self._base_pattern
logger.debug("Host {}: Reading pattern: {}".format(self._host, pattern))
while True:
fut = self._stdout.read(self._MAX_BUFFER)
try:
output += await asyncio.wait_for(fut, self._timeout)
except asyncio.TimeoutError:
raise TimeoutError(self._host)
if re.search(pattern, output, flags=re_flags):
logger.debug("Host {}: Reading pattern '{}' was found: {}".format(self._host, pattern, repr(output)))
return output
async def _read_until_prompt_or_pattern(self, pattern='', re_flags=0):
"""Read until either self.base_pattern or pattern is detected. Return ALL data available"""
output = ''
logger.info("Host {}: Reading until prompt or pattern".format(self._host))
if not pattern:
pattern = self._base_pattern
base_prompt_pattern = self._base_pattern
while True:
fut = self._stdout.read(self._MAX_BUFFER)
try:
output += await asyncio.wait_for(fut, self._timeout)
except asyncio.TimeoutError:
raise TimeoutError(self._host)
if re.search(pattern, output, flags=re_flags) or re.search(base_prompt_pattern, output, flags=re_flags):
logger.debug("Host {}: Reading pattern '{}' or '{}' was found: {}".format(self._host, pattern,
base_prompt_pattern,
repr(output)))
return output
@staticmethod
def _strip_backspaces(output):
"""Strip any backspace characters out of the output"""
backspace_char = '\x08'
return output.replace(backspace_char, '')
@staticmethod
def _strip_command(command_string, output):
"""
Strip command_string from output string
Cisco IOS adds backspaces into output for long commands (i.e. for commands that line wrap)
"""
logger.info('Stripping command')
backspace_char = '\x08'
# Check for line wrap (remove backspaces)
if backspace_char in output:
output = output.replace(backspace_char, '')
output_lines = output.split("\n")
new_output = output_lines[1:]
return "\n".join(new_output)
else:
command_length = len(command_string)
return output[command_length:]
@staticmethod
def _normalize_linefeeds(a_string):
"""Convert '\r\r\n','\r\n', '\n\r' to '\n"""
newline = re.compile(r'(\r\r\n|\r\n|\n\r)')
return newline.sub('\n', a_string)
@staticmethod
def _normalize_cmd(command):
"""Normalize CLI commands to have a single trailing newline"""
command = command.rstrip("\n")
command += '\n'
return command
async def send_config_set(self, config_commands=None):
"""
Send configuration commands to the device.
The commands are executed one after the other.
:param list config_commands: iterable of command strings to apply to the network device
:return: The combined output of these commands
"""
logger.info("Host {}: Sending configuration settings".format(self._host))
if config_commands is None:
return ''
if not hasattr(config_commands, '__iter__'):
raise ValueError("Host {}: Invalid argument passed into send_config_set".format(self._host))
# Send config commands
logger.debug("Host {}: Config commands: {}".format(self._host, config_commands))
output = ''
for cmd in config_commands:
self._stdin.write(self._normalize_cmd(cmd))
output += await self._read_until_prompt()
if self._ansi_escape_codes:
output = self._strip_ansi_escape_codes(output)
output = self._normalize_linefeeds(output)
logger.debug("Host {}: Config commands output: {}".format(self._host, repr(output)))
return output
@staticmethod
def _strip_ansi_escape_codes(string_buffer):
"""
Remove some ANSI ESC codes from the output
http://en.wikipedia.org/wiki/ANSI_escape_code
Note: this does not capture ALL possible ANSI escape codes, only the ones
encountered so far.
Current codes that are filtered:
ESC = '\x1b' or chr(27) is the escape character
ESC[24;27H Position cursor
ESC[?25h Show the cursor
ESC[E Next line (HP does ESC-E)
ESC[2K Erase line
ESC[1;24r Enable scrolling from start to row end
ESC7 Save cursor position
ESC[r Scroll all screen
ESC8 Restore cursor position
ESC[nA Move cursor up to n cells
ESC[nB Move cursor down to n cells
Platforms that require this stripping:
HP ProCurve
F5 LTMs
Mikrotik
"""
logger.info("Stripping ansi escape codes")
logger.debug("Unstripped output: {}".format(repr(string_buffer)))
code_save_cursor = chr(27) + r'7'
code_scroll_screen = chr(27) + r'\[r'
code_restore_cursor = chr(27) + r'8'
code_cursor_up = chr(27) + r'\[\d+A'
code_cursor_down = chr(27) + r'\[\d+B'
code_position_cursor = chr(27) + r'\[\d+;\d+H'
code_show_cursor = chr(27) + r'\[\?25h'
code_next_line = chr(27) + r'E'
code_erase_line = chr(27) + r'\[2K'
code_enable_scroll = chr(27) + r'\[\d+;\d+r'
code_set = [code_save_cursor, code_scroll_screen, code_restore_cursor, code_cursor_up, code_cursor_down,
code_position_cursor, code_show_cursor, code_erase_line, code_enable_scroll]
output = string_buffer
for ansi_esc_code in code_set:
output = re.sub(ansi_esc_code, '', output)
# CODE_NEXT_LINE must substitute with '\n'
output = re.sub(code_next_line, '\n', output)
logger.debug('Stripped output: {}'.format(repr(output)))
return output
async def _cleanup(self):
""" Any needed cleanup before closing connection """
logger.info("Host {}: Cleanup session".format(self._host))
pass
async def disconnect(self):
""" Gracefully close the SSH connection """
logger.info("Host {}: Disconnecting".format(self._host))
await self._cleanup()
self._conn.close()
await self._conn.wait_closed()
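# --- Hedged usage sketch (not part of the original module) ---
# The methods above describe the typical lifecycle: the public connect-style
# method chains _establish_connection -> _set_base_prompt -> _disable_paging,
# send_command()/send_config_set() do the work, and disconnect() closes the
# session. The class name and constructor arguments below are assumptions for
# illustration only, since the class definition sits above this excerpt.
#
# import asyncio
#
# async def _usage_example():
#     device = SomeVendorDevice(host="192.168.0.1", username="admin", password="secret")
#     await device.connect()
#     version_output = await device.send_command("show version")
#     await device.send_config_set(["interface Gi0/1", "description uplink"])
#     await device.disconnect()
#     return version_output
#
# asyncio.run(_usage_example())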
|
# Generated by Selenium IDE
import pytest
import time
import json
import re
import os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class TestSavemovie():
def setup_method(self, method):
self.driver = webdriver.Chrome()
self.vars = {}
def teardown_method(self, method):
self.driver.quit()
def test_copytext(self):
prefx = ''
db = 'covid'
dblen = 19200
header = 'name whn confirmed deaths recovered'
self.driver.get("https://sqlzoo.net/wiki/Window_LAG")
self.driver.set_window_size(1009, 768)
file = './src/' + prefx + db + '.csv'
if os.path.exists(file):
print(file + ' already exists.')
exit()
f = open(file, 'a+', encoding='utf-8')
f.write(re.sub(r'\s', ',', header))
i = 0
time.sleep(10)
while i < dblen:
# while i < 51:
self.driver.find_element(By.ID, "txtar_1").clear()
self.driver.find_element(By.ID, "txtar_1").send_keys("SELECT * from %s limit %i, 50" % (db, i))
self.driver.find_element(By.CSS_SELECTOR, "#frm__1 .submitSQL").click()
time.sleep(4.5)
## web page: MORE_JOIN_operations
# txt = self.driver.find_element(By.CSS_SELECTOR, ".qu:nth-child(12) > .res").text
## Web page: AdventureWorks
# txt = self.driver.find_element(By.CSS_SELECTOR, ".qu:nth-child(8) > .res").text
## Web page: Neeps_easy_questions
# txt = self.driver.find_element(By.CSS_SELECTOR, ".qu:nth-child(4) > .res").text
## Web page: Dressmaker
# txt = self.driver.find_element(By.CSS_SELECTOR, ".qu:nth-child(7) > .res").text
## Web page: Window_LAG
txt = self.driver.find_element(By.CSS_SELECTOR, ".qu:nth-child(7) > .res").text
txt = txt.replace(header, '')
txt = txt.replace('Result:', '')
txt = txt.replace('Wrong answer. Too many columns', '')
txt = txt.replace('Show what the answer should be...', '')
i += 50
f.write(re.sub('\n{2,}', '\n', txt))
f.close()
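# Pagination note (worked example, illustrative values): with db = 'covid' the
# loop above issues the following queries, 50 rows at a time, until i reaches
# dblen:
#   SELECT * from covid limit 0, 50
#   SELECT * from covid limit 50, 50
#   SELECT * from covid limit 100, 50
# Each page of results is cleaned of the header/status text and appended to
# the CSV opened above.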
|
import pytest
import sqlalchemy as sa
from h_matchers import Any
from sqlalchemy.engine import CursorResult
from lms.db import BASE, BulkAction
class TestBulkAction:
class TableWithBulkUpsert(BASE):
__tablename__ = "test_table_with_bulk_upsert"
BULK_CONFIG = BulkAction.Config(
upsert_index_elements=["id"],
upsert_update_elements=["name"],
upsert_use_onupdate=False,
)
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String, nullable=False)
other = sa.Column(sa.String)
# Lots of auto update columns
scalar = sa.Column(sa.Integer, onupdate=42)
callable = sa.Column(sa.Integer, onupdate=lambda: 42)
sql = sa.Column(sa.Integer, onupdate=sa.select([42]))
default = sa.Column(sa.Integer, sa.schema.ColumnDefault(42, for_update=True))
def test_upsert(self, db_session):
db_session.add_all(
[
self.TableWithBulkUpsert(id=1, name="pre_existing_1", other="pre_1"),
self.TableWithBulkUpsert(id=2, name="pre_existing_2", other="pre_2"),
]
)
db_session.flush()
result = BulkAction(db_session).upsert(
self.TableWithBulkUpsert,
[
{"id": 1, "name": "update_old", "other": "post_1"},
{"id": 3, "name": "create_with_id", "other": "post_3"},
{"id": 4, "name": "over_block_size", "other": "post_4"},
],
)
assert isinstance(result, CursorResult)
self.assert_has_rows(
db_session,
{"id": 1, "name": "update_old", "other": "pre_1"},
{"id": 2, "name": "pre_existing_2", "other": "pre_2"},
{"id": 3, "name": "create_with_id", "other": "post_3"},
{"id": 4, "name": "over_block_size", "other": "post_4"},
)
def test_upsert_does_nothing_if_given_an_empty_list_of_values(self, db_session):
assert BulkAction(db_session).upsert(self.TableWithBulkUpsert, []) == []
@pytest.mark.parametrize("column", ("scalar", "callable", "sql", "default"))
@pytest.mark.usefixtures("with_upsert_use_onupdate")
def test_upsert_with_onupdate_columns(self, db_session, column):
db_session.add_all(
[
self.TableWithBulkUpsert(id=1, name="pre_existing_1", **{column: 0}),
self.TableWithBulkUpsert(id=2, name="pre_existing_2", **{column: 1}),
]
)
db_session.flush()
BulkAction(db_session).upsert(
self.TableWithBulkUpsert, [{"id": 1, "name": "update_existing"}]
)
self.assert_has_rows(
db_session,
# 42 is the onupdate default value
{"id": 1, "name": "update_existing", column: 42},
{"id": 2, "name": "pre_existing_2", column: 1},
)
def test_it_fails_with_missing_config(self, db_session):
with pytest.raises(AttributeError):
BulkAction(db_session).upsert(
"object_without_config", [{"id": 1, "name": "name", "other": "other"}]
)
def test_you_cannot_add_config_with_the_wrong_name(self):
# Not sure why this isn't ValueError... must be a descriptor thing
with pytest.raises(RuntimeError):
class MisconfiguredModel: # pylint: disable=unused-variable
NOT_THE_RIGHT_NAME = BulkAction.Config(
upsert_index_elements=["id"], upsert_update_elements=["name"]
)
def assert_has_rows(self, db_session, *attrs):
rows = list(db_session.query(self.TableWithBulkUpsert))
assert (
rows
== Any.iterable.containing(
[
Any.instance_of(self.TableWithBulkUpsert).with_attrs(expected)
for expected in attrs
]
).only()
)
@pytest.fixture
def with_upsert_use_onupdate(self):
self.TableWithBulkUpsert.BULK_CONFIG.upsert_use_onupdate = True
yield
self.TableWithBulkUpsert.BULK_CONFIG.upsert_use_onupdate = False
|
import mock
import pytest
from pyetcd import EtcdResult
from etcdb.eval_expr import EtcdbFunction, etcdb_count
from etcdb.execute.dml.select import eval_row, prepare_columns, \
group_result_set, get_row_by_primary_key
from etcdb.resultset import ColumnSet, Column, Row, ResultSet
from etcdb.sqlparser.sql_tree import SQLTree
@pytest.mark.parametrize('expressions, cs', [
(
[
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'id')
)
)
)
),
None),
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'name')
)
)
)
),
None
)
],
ColumnSet().add(Column('id')).add(Column('name'))
),
(
[
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'id')
)
)
)
),
None),
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('function_call', 'COUNT')
)
)
)
),
None
)
],
ColumnSet().add(Column('id')).add(Column('COUNT(*)'))
)
])
def test_prepare_columns(expressions, cs):
tree = SQLTree()
tree.expressions = expressions
actual_cs = prepare_columns(tree)
print('Expected: %s' % cs)
print('Actual: %s' % actual_cs)
assert actual_cs == cs
@pytest.mark.parametrize('cs, row, expressions, result', [
(
ColumnSet().add(Column('id')).add(Column('name')),
Row((1, 'aaa')),
[
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'id')
)
)
)
),
None),
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'name')
)
)
)
),
None
)
],
(1, 'aaa')
),
(
ColumnSet().add(Column('id')).add(Column('COUNT(*)')),
Row((1, 'aaa')),
[
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('IDENTIFIER', 'id')
)
)
)
),
None),
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('function_call', 'COUNT')
)
)
)
),
'__count'
)
],
(1, EtcdbFunction(etcdb_count, group=True))
),
(
ColumnSet().add(Column('COUNT(*)')),
None,
[
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('function_call', 'COUNT')
)
)
)
),
'__count'
)
],
(EtcdbFunction(etcdb_count, group=True),)
)
])
def test_eval_row(cs, row, expressions, result):
tree = SQLTree()
tree.expressions = expressions
assert eval_row(cs, row, tree) == result
@pytest.mark.parametrize('rs, row, expressions, result', [
(
ResultSet(ColumnSet().add(Column(str(
EtcdbFunction(etcdb_count, group=True)))), []),
None,
[
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('function_call', 'COUNT')
)
)
)
),
'__count'
)
],
ResultSet(
ColumnSet().add(Column('__count')),
[Row((0,))]
)
),
(
ResultSet(ColumnSet().add(Column(str(
EtcdbFunction(etcdb_count, group=True)))),
[
Row((5, )),
Row((6, ))
]
),
Row((6, )),
[
(
('bool_primary',
('predicate',
('bit_expr',
('simple_expr',
('function_call', 'COUNT')
)
)
)
),
'__count'
)
],
ResultSet(
ColumnSet().add(Column('__count')),
[Row((2,))]
)
)
])
def test_group_result_set(rs, row, expressions, result):
tree = SQLTree()
tree.expressions = expressions
actual = group_result_set(etcdb_count, rs, row, tree, 0)
print('Expected: %s' % result)
print('Actual: %s' % actual)
assert actual == result
@mock.patch('etcdb.execute.dml.select.get_table_columns')
def test_get_row_by_primary_key_correct_fields_order(mock_get_table_columns):
cs = ColumnSet()
cs.add(Column('id'))
cs.add(Column('name'))
mock_get_table_columns.return_value = cs
mock_etcd_client = mock.Mock()
mock_response = mock.Mock()
mock_response.headers = {'X-Etcd-Index': 111}
mock_response.content = '{"action":"get","node":{"key":"/foo/bar/2","value":"{\\"name\\": \\"aaaa\\", \\"id\\": 2}","modifiedIndex":651557,"createdIndex":651557}}'
mock_etcd_client.read.return_value = EtcdResult(mock_response)
actual = get_row_by_primary_key(mock_etcd_client, 'foo', 'bar', 2)
expected = Row((2, 'aaaa'))
print('Expected: %s' % expected)
print('Actual: %s' % actual)
assert actual == expected
|
class Solution:
def removeDuplicates(self, s: str) -> str:
stack = []
for i in s:
if not stack or stack[-1] != i:
stack.append(i)
elif stack[-1] == i:
stack.pop()
return "".join(stack)
|
import unittest
from collections import defaultdict
import numpy as np
import pandas as pd
import numpy.testing as np_test
from scipy.sparse import coo_matrix
from dummyPy import Encoder, OneHotEncoder
class TestEncoder(unittest.TestCase):
def test_class(self):
encoder = Encoder()
self.assertEqual(encoder.column_mapper, None)
levels = set()
for color in ["red", "blue", "yellow"]:
levels.add(color)
encoder.fit(levels)
self.assertEqual(encoder.column_mapper,
{'blue': 0, 'red': 1, 'yellow': 2})
data = pd.Series(["red", "red", "blue", "yellow", "brown", "red"])
transformed_data1 = encoder.transform(data)
transformed_data2 = coo_matrix((np.ones(5),
([0, 1, 2, 3, 5], [1, 1, 0, 2, 1])),
shape=(6, 3))
np_test.assert_array_equal(transformed_data1.toarray(),
transformed_data2.toarray())
class TestOneHotEncoder(unittest.TestCase):
def setUp(self):
self.data = pd.read_csv("titanic.csv",
usecols=["Pclass", "Sex", "Age", "Fare", "Embarked"])
self.chunked_data = pd.read_csv("titanic.csv",
usecols=["Pclass", "Sex", "Age", "Fare", "Embarked"],
chunksize=10)
def test_class_init(self):
one_hot_encoder = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
self.assertEqual(one_hot_encoder.categorical_columns, ["Pclass", "Sex", "Embarked"])
self.assertEqual(one_hot_encoder.unique_vals, defaultdict(set))
self.assertEqual(one_hot_encoder.encoders,
{"Pclass": Encoder(), "Sex": Encoder(), "Embarked": Encoder()})
def test_update_unique_vals(self):
one_hot_encoder = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
one_hot_encoder._update_unique_vals(self.data)
self.assertEqual(one_hot_encoder.unique_vals["Embarked"], set(['Q', np.nan, 'S', 'C']))
self.assertEqual(one_hot_encoder.unique_vals["Sex"], set(['male', 'female']))
self.assertEqual(one_hot_encoder.unique_vals["Pclass"], set([1, 2, 3]))
def test_fit_encoders(self):
one_hot_encoder = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
one_hot_encoder._update_unique_vals(self.data)
one_hot_encoder._fit_encoders()
embarked_encoder = Encoder()
embarked_encoder.fit(set(['Q', np.nan, 'S', 'C']))
self.assertEqual(one_hot_encoder.encoders["Embarked"], embarked_encoder)
sex_encoder = Encoder()
sex_encoder.fit(set(['male', 'female']))
self.assertEqual(one_hot_encoder.encoders["Sex"], sex_encoder)
pclass_encoder = Encoder()
pclass_encoder.fit(set([1, 2, 3]))
self.assertEqual(one_hot_encoder.encoders["Pclass"], pclass_encoder)
def test_fit(self):
one_hot_encoder1 = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
one_hot_encoder2 = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
one_hot_encoder1.fit(self.data)
one_hot_encoder2._update_unique_vals(self.data)
one_hot_encoder2._fit_encoders()
self.assertEqual(one_hot_encoder1.categorical_columns,
one_hot_encoder2.categorical_columns)
self.assertEqual(one_hot_encoder1.unique_vals,
one_hot_encoder2.unique_vals)
self.assertEqual(one_hot_encoder1.encoders,
one_hot_encoder2.encoders)
def test_fit_chunks(self):
one_hot_encoder1 = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
one_hot_encoder2 = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
one_hot_encoder1.fit(self.chunked_data)
one_hot_encoder2.fit(self.data)
self.assertEqual(one_hot_encoder1.categorical_columns,
one_hot_encoder2.categorical_columns)
self.assertEqual(one_hot_encoder1.unique_vals,
one_hot_encoder2.unique_vals)
self.assertEqual(one_hot_encoder1.encoders,
one_hot_encoder2.encoders)
def test_transform(self):
one_hot_encoder = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
one_hot_encoder.fit(self.data)
transformed_data = np.array([[0.0, 0.0, 1.0, 0.0, 1.0, 22.0, 7.25, 0.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0, 0.0, 38.0, 71.2833, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 0.0, 26.0, 7.925, 0.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0, 0.0, 35.0, 53.1, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0, 35.0, 8.05, 0.0, 0.0, 0.0, 1.0]])
np_test.assert_array_equal(one_hot_encoder.transform(self.data.head()),
transformed_data)
def test_transform_coo(self):
one_hot_encoder = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
one_hot_encoder.fit(self.data)
coo_matrix_1 = one_hot_encoder.transform(self.data.head(), dtype="coo")
coo_matrix_2 = coo_matrix(one_hot_encoder.transform(self.data.head(), dtype="np"))
np_test.assert_array_equal(coo_matrix_1.toarray(),
coo_matrix_2.toarray())
def test_fit_transform(self):
one_hot_encoder1 = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
one_hot_encoder2 = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
one_hot_encoder2.fit(self.data.head())
np_test.assert_array_equal(one_hot_encoder1.fit_transform(self.data.head()),
one_hot_encoder2.transform(self.data.head()))
|
# This file is automatically generated by the rmf-codegen project.
#
# The Python code generator is maintained by Lab Digital. If you want to
# contribute to this project then please do not edit this file directly
# but send a pull request to the Lab Digital fork of rmf-codegen at
# https://github.com/labd/rmf-codegen
import typing
import warnings
from ...models.errors import ErrorResponse
from ...models.importsinks import ImportSink, ImportSinkDraft, ImportSinkPagedResponse
from .by_project_key_import_sinks_by_import_sink_key_request_builder import (
ByProjectKeyImportSinksByImportSinkKeyRequestBuilder,
)
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyImportSinksRequestBuilder:
_client: "BaseClient"
_project_key: str
def __init__(
self,
project_key: str,
client: "BaseClient",
):
self._project_key = project_key
self._client = client
def with_import_sink_key_value(
self, import_sink_key: str
) -> ByProjectKeyImportSinksByImportSinkKeyRequestBuilder:
return ByProjectKeyImportSinksByImportSinkKeyRequestBuilder(
import_sink_key=import_sink_key,
project_key=self._project_key,
client=self._client,
)
def post(
self,
body: "ImportSinkDraft",
*,
headers: typing.Dict[str, str] = None,
options: typing.Dict[str, typing.Any] = None,
) -> "ImportSink":
"""Creates a new import sink."""
headers = {} if headers is None else headers
response = self._client._post(
endpoint=f"/{self._project_key}/import-sinks",
params={},
json=body.serialize(),
headers={"Content-Type": "application/json", **headers},
options=options,
)
if response.status_code in (201, 200):
return ImportSink.deserialize(response.json())
elif response.status_code == 400:
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
warnings.warn("Unhandled status code %d" % response.status_code)
def get(
self,
*,
limit: float,
offset: float,
headers: typing.Dict[str, str] = None,
options: typing.Dict[str, typing.Any] = None,
) -> "ImportSinkPagedResponse":
"""Retrieves all import sinks of a project key."""
headers = {} if headers is None else headers
response = self._client._get(
endpoint=f"/{self._project_key}/import-sinks",
params={"limit": limit, "offset": offset},
headers=headers,
options=options,
)
if response.status_code == 200:
return ImportSinkPagedResponse.deserialize(response.json())
warnings.warn("Unhandled status code %d" % response.status_code)
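# --- Hedged usage sketch (not generated code) ---
# The builder above is normally obtained from the generated client, but the
# constructor shown here also allows direct construction; `client` and `draft`
# are placeholders for a configured BaseClient and an ImportSinkDraft.
#
# builder = ByProjectKeyImportSinksRequestBuilder(project_key="my-project", client=client)
# page = builder.get(limit=20, offset=0)   # -> ImportSinkPagedResponse
# sink = builder.post(body=draft)          # -> ImportSink (raises on a 400 response)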
|
import argparse
import re
parser = argparse.ArgumentParser()
parser.add_argument("hybrid_pileup", help="pileup file for hybrid")
parser.add_argument("parent_pileup", help="pileup file for parent")
parser.add_argument("output", help="file to write shared SNPs to")
parser.add_argument("-v", "--verbose", action="store_true", help="verbose reporting")
parser.add_argument("-c", "--minCov", help="minimum coverage for a variant to be considered", default=3, type=int)
parser.add_argument("-f", "--minFrac", help="minimum agreement fraction for a variant to be considered", default=0.9, type=float)
args = parser.parse_args()
minCov = args.minCov
minFrac = args.minFrac
indel_regex = re.compile(r'[\+\-][0-9]+[ACGTNacgtn]+')  # raw string avoids invalid-escape warnings
mismatch_regex = re.compile('[ACGTacgt]')
def pileup_scanner(file_in):
# Opens the pileup file given by file_in and
# records all the places where a read base disagrees with the reference.
# Returns a nested dictionary:
# {contig_name: {contig_metadata,
# {position: {site_ref, site_cov, {base counts}}}}}
# for pileup specs:
# https://en.wikipedia.org/wiki/Pileup_format#Column_5:_The_bases_string
# or http://samtools.sourceforge.net/pileup.shtml (deprecated)
mpile_file = open(file_in, "r")
contig_dict = {}
for line in (mpile_file):
try:
contig, position, ref_base, coverage, read_base, qual = line.split()
except ValueError: # zero-cov sites don't report a readbase/qual Dx
contig, position, ref_base, coverage = line.split()
read_base, qual = "N", "I"
if contig not in contig_dict.keys():
# intialize the dictionary for each new contig
contig_dict[contig] = dict((
('mini_counter', 0),
('sum_coverage', 0),
('mm_count', 0),
('position_dict', {})))
contig_dict[contig]['mini_counter'] += 1
contig_dict[contig]['sum_coverage'] += int(coverage)
if (
int(coverage) > 1 and "*" not in read_base and not
indel_regex.search(read_base) and ref_base.upper() in ['A','T','C','G'] and
mismatch_regex.search(read_base)):
# if cov >1 and read_base is not an indel and the reference base
# is not null and read bases contain mismatches
# ... then tally the bases at this site
read_base_dict = {
"A": read_base.upper().count("A"),
"T": read_base.upper().count("T"),
"C": read_base.upper().count("C"),
"G": read_base.upper().count("G")}
read_base_dict[ref_base.upper()] += (
read_base.upper().count(",") +
read_base.upper().count("."))
# increment when the read base matches the reference base
contig_dict[contig]['mm_count'] += 1 # increment mismatch count
contig_dict[contig]['position_dict'][int(position)] = {'ref_base':ref_base.upper(), 'pileup_cov':int(coverage), 'base_dict':read_base_dict}
if int(position) % 1000 == 0 and args.verbose:
print("Contig %s: %s KiloBases scanned!" % tuple([contig, int(position)/1000]))
mpile_file.close()
return contig_dict
def contig_report(contig_dict):
for contig in contig_dict.keys():
if contig_dict[contig]['mm_count'] > 0:
mismatch_warn_string = '.'
else:
mismatch_warn_string = ' (only Ns in the pileup reference base column?)'
print( "contig %s had an average coverage depth of %s reads and a raw mismatch count of %s%s" % tuple([contig, contig_dict[contig]['sum_coverage']/contig_dict[contig]['mini_counter'], contig_dict[contig]['mm_count'], mismatch_warn_string]) )
def mismatch_chooser(site_dict):
#{'base_dict': {
# 'A': 0, 'C': 0, 'T': 0, 'G': 28}, 'ref_base': 'A', 'pileup_cov': 28}}
# choice = 'N'
choice = site_dict['ref_base']
meta = None # We may want to later report information on polymorphism
for bass in site_dict['base_dict'].keys():
if int(float(site_dict['pileup_cov'])) >= minCov and site_dict['base_dict'][bass]/float(site_dict['pileup_cov']) >= minFrac :
choice = bass
return choice, meta
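# Worked example of the thresholds above (illustrative numbers, not from real data):
# with minCov=3 and minFrac=0.9, a site {'ref_base': 'A', 'pileup_cov': 10,
# 'base_dict': {'A': 0, 'C': 10, 'T': 0, 'G': 0}} returns ('C', None), since
# coverage 10 >= 3 and 10/10 >= 0.9; if the counts were {'C': 8, 'A': 2},
# 8/10 = 0.8 < 0.9, so the reference base 'A' is kept.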
def contig_dict_comparator(parent_dict, hybrid_dict):
shared_contig_list = list(set(hybrid_dict.keys()).intersection(set(parent_dict.keys())))
comparison_dict = { k : [] for k in shared_contig_list }
for contig in shared_contig_list:
minicount = 0
total_count = len(parent_dict[contig]['position_dict'].keys())
for parent_pos in list(parent_dict[contig]['position_dict']):
minicount += 1
if parent_pos in list(hybrid_dict[contig]['position_dict']):
# If the parent variant site is variant in the hybrid...
parent_minidict = parent_dict[contig]['position_dict'].pop(parent_pos)
hybrid_minidict = hybrid_dict[contig]['position_dict'].pop(parent_pos)
hyb_var, hyb_meta = mismatch_chooser(hybrid_minidict)
par_var, par_meta = mismatch_chooser(parent_minidict)
if hybrid_minidict['ref_base'] != parent_minidict['ref_base']:
# If this happens... something, somewhere has gone terribly wrong x_x
print( "WARNING: reference sequences disagree on contig %s, position %s !!!" % tuple([contig, parent_pos]))
elif par_var == parent_minidict['ref_base']:
#if the site isn't actually variable in the parent, ignore it
pass
elif hyb_var == par_var:
#if the site has the same variant in hybrid and parent, record it as parent-derived
comparison_dict[contig].append([parent_pos, 1])
else:
#if the parent site is the wrong variant in the hybrid, it's not parent-derived
comparison_dict[contig].append([parent_pos, 0])
else:
# If the parent variant site isn't variant in the hybrid, the hybrid site isn't parent-derived.
comparison_dict[contig].append([parent_pos, 0])
parent_minidict = parent_dict[contig]['position_dict'].pop(parent_pos)
if minicount % 10000 == 0 and args.verbose:
print( "%s parent mismatch sites investigated of %s!" % tuple([minicount, total_count]))
print( "Contig %s of %s (%s) compared..." % tuple([shared_contig_list.index(contig), len(shared_contig_list), contig]))
print()
return comparison_dict
def comparison_writer(comp_dict, file_out):
write_file = open(file_out, "w")
for contig in comp_dict.keys():
for coord in comp_dict[contig]:
write_file.write("%s\t%s\t%s\n" % tuple([contig, coord[0], coord[1]]) )
write_file.close()
print( "gathering hybrid contigs...".upper())
hyb_contig_dict = pileup_scanner(args.hybrid_pileup)
contig_report(hyb_contig_dict)
print()
print( "gathering parent contigs...".upper())
par_contig_dict = pileup_scanner(args.parent_pileup)
contig_report(par_contig_dict)
print()
print( "comparing parent and offspring...".upper())
comparison = contig_dict_comparator(par_contig_dict, hyb_contig_dict)
print( "writing comparison to a file....".upper())
comparison_writer(comparison, args.output)
print( "DONE!")
|
import os
import glob
import filetype
import mutagen
import json
from pydub import AudioSegment
from mutagen.easyid3 import EasyID3
from mutagen.easymp4 import EasyMP4
from mutagen.asf import ASFTags
from mutagen.asf import ASF
EXTENSION_MAPPING = None
CONVERSION_TABLE = None
def setExtensionMapping():
global EXTENSION_MAPPING
global CONVERSION_TABLE
with open("./extension_mapping.json") as json_file:
m = json.load(json_file)
EXTENSION_MAPPING = {}
for e in m.items():
k, v = e
for j in v.items():
kj, vj = j
EXTENSION_MAPPING[vj] = k
CONVERSION_TABLE = dict(m)
def getFilesAndExtensions():
"""Walks current directory, gets list of media files and extensions
Returns:
list -- List of tuples, file path and file extension
"""
paths = os.walk(os.curdir)
files_plus_ext = []
print("Retrieving files and their extensions")
for p in paths:
files = ["{0}/{1}".format(
os.path.abspath(p[0]), file_name) for file_name in p[2]]
for file_name in p[2]:
file_abs_path = "{0}/{1}".format(os.path.abspath(p[0]), file_name)
file_ext = filetype.guess(file_abs_path)
if file_ext is not None:
files_plus_ext.append((file_abs_path, file_ext.extension))
return files_plus_ext
def chdirIfMatch(f, extension_list):
"""Changes to the directory of the given file if the extension
is in the extension list
Arguments:
f {tuple} -- The file path and file extension
extension_list {list} -- The valid extensions
Returns:
tuple -- If the file matches the extension list
"""
print("Current file: {0}".format(f[0]))
file_path, file_ext = f
if file_ext in extension_list:
print("Extension matches: {0}".format(file_ext))
file_dir = os.path.abspath(os.path.join(file_path, os.pardir))
if os.curdir != file_dir:
os.chdir(file_dir)
return (file_path, file_ext)
else:
return
def convertToMP3(file_path, file_ext):
"""Converts the given file to mp3
Arguments:
file_path {str} -- Absolute path to the file
file_ext {str} -- The file extension
Returns:
str -- The resulting mp3 path if it doesn't already exist
"""
mp3_filename = os.path.splitext(
os.path.basename(file_path))[0] + ".mp3"
if not os.path.isfile(mp3_filename):
AudioSegment.from_file(file_path).export(mp3_filename, format="mp3")
print("Converted {0} from {1} to mp3".format(file_path, file_ext))
return mp3_filename
def addTagsToMP3(file_path, mp3_file_path, file_ext):
"""Gets the existing tags from the mp4 file and saves them to the mp3
Arguments:
mp4_file_path {str} -- Path for MP4 file
mp3_file_path {str} -- Path for MP3 file
"""
original_file_tags = None
mp3_tags = EasyID3(mp3_file_path)
if file_ext == "m4a":
original_file_tags = EasyMP4(file_path)
for k, v in original_file_tags.items():
found_tag = EXTENSION_MAPPING.get(k, None)
if found_tag is not None:
resulting_tag = CONVERSION_TABLE[found_tag]["mp3"]
mp3_tags[resulting_tag] = [v]
print("Added tag {0}:{1} to mp3 {2}".format(
resulting_tag, v, mp3_file_path))
elif file_ext in ["wma", "wmv"]:
original_file_tags = ASF(file_path).tags
for k, v in original_file_tags.items():
found_tag = EXTENSION_MAPPING.get(k, None)
if found_tag is not None:
resulting_tag = CONVERSION_TABLE[found_tag]["mp3"]
if file_ext in ["wma", "wmv"]:
if resulting_tag == "composer":
if mp3_tags.get(resulting_tag, None):
mp3_tags[resulting_tag] = ["{0}, {1}"].format(
mp3_tags[resulting_tag][0], v[0].value)
else:
mp3_tags[resulting_tag] = [v[0].value]
else:
mp3_tags[resulting_tag] = [v[0].value]
print("Added tag {0}:{1} to mp3 {2}".format(
resulting_tag, v[0], mp3_file_path))
else:
return
mp3_tags.save(mp3_file_path)
print("MP3 tags saved to {0}".format(mp3_file_path))
setExtensionMapping()
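# --- Hedged driver sketch (not part of the original script) ---
# The module defines the helpers above but only calls setExtensionMapping();
# this is one plausible way to wire them together. The source-extension list is
# an assumption based on the branches handled in addTagsToMP3().
def _convert_library_sketch(extensions=("m4a", "wma", "wmv")):
    for entry in getFilesAndExtensions():
        matched = chdirIfMatch(entry, list(extensions))
        if not matched:
            continue
        file_path, file_ext = matched
        mp3_path = convertToMP3(file_path, file_ext)
        addTagsToMP3(file_path, mp3_path, file_ext)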
|
import itertools
def Hamming(Dna, Pat):
return len([i for (i, j) in zip(Dna, Pat) if i!=j])
def GetAllString(mer):
L = 'ACGT'
perms = itertools.product(L, repeat=mer)
all_str = []
for k in perms:
all_str.append(''.join(k))
return all_str
def GetTotal(Dna, Pat, d):
arr = []
for i in range(len(Dna) - len(Pat) + 1):
k = Hamming(Pat, Dna[i:i+len(Pat)])
if k<=d:
arr.append(i)
return len(arr)
def MotifEnumeration(k, d, all_dna):
ans = []
perms = GetAllString(k)
for a in perms:
b = True
for Dna in all_dna:
if GetTotal(Dna, a, d) == 0:
b = False
break
if b == True:
ans.append(a)
return ans
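# Worked example (hedged, not taken from the input file):
# Hamming("ACGT", "ACGA") == 1, and MotifEnumeration(3, 0, ["AAAAA", "AAAAA"])
# returns ["AAA"], because "AAA" is the only 3-mer that appears with at most
# 0 mismatches in every string of all_dna.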
def main(infile, outfile):
# Read the input lines, stripping trailing newlines
inp = [line.rstrip('\n') for line in infile]
print(inp)
output = MotifEnumeration(int(inp[0].split()[0]), int(inp[0].split()[1]), inp[1:])
output = '\n'.join(str(i) for i in output)
# For debugging, print something to console
print(output)
# Write the output.
outfile.write(output)
|