"""
byceps.announce.text_assembly.guest_server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Announce guest server events.
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask_babel import gettext
from ...events.guest_server import GuestServerRegistered
from ...services.party import service as party_service
from ._helpers import get_screen_name_or_fallback, with_locale
@with_locale
def assemble_text_for_guest_server_registered(
event: GuestServerRegistered,
) -> str:
initiator_screen_name = get_screen_name_or_fallback(
event.initiator_screen_name
)
owner_screen_name = get_screen_name_or_fallback(event.owner_screen_name)
party = party_service.get_party(event.party_id)
return gettext(
'%(initiator_screen_name)s has registered a guest server '
        'owned by "%(owner_screen_name)s" for party "%(party_title)s".',
initiator_screen_name=initiator_screen_name,
owner_screen_name=owner_screen_name,
party_title=party.title,
)
|
'''
Given two .txt files that have lists of numbers in them, find the numbers that are overlapping.
One .txt file has a list of all prime numbers under 1000, and the other .txt file has a list of happy numbers up to 1000.
(If you forgot, prime numbers are numbers greater than 1 that cannot be evenly divided by any number other than 1 and themselves.
And yes, happy numbers are a real thing in mathematics - you can look it up on Wikipedia.
The explanation is easier with an example, which I will describe below.)
'''
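# Illustrative sketch (not part of the original exercise files): one common way
# to test whether a number is "happy" is to repeatedly replace it with the sum
# of the squares of its digits; the number is happy if this process reaches 1.
def is_happy(n):
    seen = set()
    while n != 1 and n not in seen:
        seen.add(n)
        n = sum(int(d) ** 2 for d in str(n))
    return n == 1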
def readFromFileAsList(fileName):
    listX = []
    # Read every non-empty line; the original double readline() skipped every other line.
    with open(fileName + '.txt', 'r') as fileX:
        for line in fileX:
            line = line.strip()
            if line:
                listX.append(line)
    return listX
primeNumbers = readFromFileAsList("resources/ex23/One")
happyNumbers = readFromFileAsList("resources/ex23/Other")
overlappingNumbers = [n for n in primeNumbers if n in happyNumbers]
print(overlappingNumbers)
|
#encoding:utf-8
subreddit = 'SrGrafo'
t_channel = '@r_SrGrafo'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
# flake8: noqa
from .body import BodyViewSet
from .jurisdiction import JurisdictionViewSet
from .office import OfficeViewSet
from .party import PartyViewSet
|
age = 12
male = "f"
location = "Russia"
locations = ["Russia", "Ukraine", "Belarus"]
is_programmer = True
is_admin = False
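# Two equivalent ways to split a long condition across lines: implicit
# continuation inside parentheses, and explicit backslash continuation.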
if (age >= 12
and male == "f"
and location in locations
and (is_programmer or is_admin)):
print("Доступ открыт")
if age >= 12 \
and male == "m" \
and location in locations \
and (is_programmer or is_admin):
print("Доступ открыт")
|
from collections import OrderedDict
from unittest import TestCase
import unittest
from justamodel.exceptions import ValidationError, ModelValidationError
from justamodel.model import Model, Field, PolymorphicModel
from justamodel.serializer import DictModelSerializer, JsonModelSerializer, make_field_filter, \
iter_model_fields
from justamodel.types import StringType, IntType, UrlType, ModelType, ListType, SetType, DictType
class TestModel(Model):
string_field = Field(StringType())
int_field = Field(IntType())
url_field = Field(UrlType(scheme='http'))
class TestModelA(Model):
a_field = Field(StringType())
x = Field(IntType())
class TestModelB(Model):
a_field = Field(IntType())
y = Field(IntType())
class TestModelAB(PolymorphicModel):
types_to_model_classes = {
'a': TestModelA,
'b': TestModelB
}
class TestComposedModel(Model):
name = Field(StringType())
submodel = Field(ModelType(TestModelA))
class TestComposedModel2(Model):
name = Field(StringType())
submodel = Field(ModelType(TestModelA), required=False)
class TestComposedModel3(Model):
name = Field(StringType())
submodels = Field(ListType(ModelType(TestModelA)))
class TestComposedModel4(Model):
name = Field(StringType())
submodels = Field(SetType(StringType()))
class TestComposedModel5(Model):
name = Field(StringType())
submodels = Field(DictType(StringType(), ModelType(TestModelA)))
class TestInheritedModel(TestModel):
another_field = Field(StringType())
class TestDictSerialization(TestCase):
def setUp(self):
self.serializer = DictModelSerializer()
def test_deserialization(self):
deserialized = self.serializer.deserialize_model({
'string_field': 'a string',
'int_field': 46,
'url_field': 'http://abc'
}, TestModel)
expected = TestModel(string_field='a string', int_field=46, url_field='http://abc')
self.assertEqual(expected, deserialized)
def test_deserialization_into_instance(self):
deserialized = TestModel(int_field=46)
self.serializer.deserialize_model({
'string_field': 'a string',
'url_field': 'http://abc'
}, deserialized)
expected = TestModel(string_field='a string', int_field=None, url_field='http://abc')
self.assertEqual(expected, deserialized)
def test_deserialization_invalid(self):
with self.assertRaises(ValidationError):
self.serializer.deserialize_model(10, TestModel)
def test_serialization(self):
serialized = self.serializer.serialize_model(TestModel(string_field='a string', int_field=46, url_field='http://abc'))
expected = {
'string_field': 'a string',
'int_field': 46,
'url_field': 'http://abc'
}
self.assertEqual(expected, serialized)
def test_serialization_ordered(self):
serializer = DictModelSerializer(mapping_type=OrderedDict)
serialized = serializer.serialize_model(TestModel(string_field='a string', int_field=46, url_field='http://abc'))
expected = OrderedDict((
('string_field', 'a string'),
('int_field', 46),
('url_field', 'http://abc'),
))
self.assertEqual(expected, serialized)
def test_serialization_invalid(self):
with self.assertRaises(TypeError):
self.serializer.serialize_model(10)
def test_deserialization_polymorphic(self):
deserialized = self.serializer.deserialize_model({
'type': 'a',
'a_field': 'a_field',
'x': 10
}, TestModelAB)
expected = TestModelA(a_field='a_field', x=10)
self.assertEqual(expected, deserialized)
deserialized = self.serializer.deserialize_model({
'type': 'b',
'a_field': 20,
'y': 30
}, TestModelAB)
expected = TestModelB(a_field=20, y=30)
self.assertEqual(expected, deserialized)
def test_deserialization_polymorphic_invalid(self):
with self.assertRaises(ValidationError):
self.serializer.deserialize_model({
'a_field': 'a_field',
'x': 10
}, TestModelAB)
def test_serialization_polymorphic(self):
model = TestModelA(a_field='a string', x=10)
serialized = self.serializer.serialize_model(model, model_type=TestModelAB)
expected = {
'a_field': 'a string',
'x': 10,
'type': 'a'
}
self.assertEqual(expected, serialized)
model = TestModelB(a_field=20, y=30)
serialized = self.serializer.serialize_model(model, model_type=TestModelAB)
expected = {
'a_field': 20,
'y': 30,
'type': 'b'
}
self.assertEqual(expected, serialized)
def test_deserialization_inherited(self):
deserialized = self.serializer.deserialize_model({
'string_field': 'a string',
'int_field': 46,
'url_field': 'http://abc',
'another_field': 'test'
}, TestInheritedModel)
expected = TestInheritedModel(string_field='a string', int_field=46, url_field='http://abc',
another_field='test')
self.assertEqual(expected, deserialized)
def test_serialization_inherited(self):
model = TestInheritedModel(string_field='a string', int_field=46, url_field='http://abc', another_field='test')
serialized = self.serializer.serialize_model(model)
expected = {
'string_field': 'a string',
'int_field': 46,
'url_field': 'http://abc',
'another_field': 'test'
}
self.assertEqual(expected, serialized)
def test_serialization_composed(self):
model = TestComposedModel(name='test', submodel=TestModelA(a_field='abc', x=10))
serialized = self.serializer.serialize_model(model)
expected = {
'name': 'test',
'submodel': {
'a_field': 'abc',
'x': 10
}
}
self.assertEqual(expected, serialized)
def test_deserialization_composed(self):
serialized = {
'name': 'test',
'submodel': {
'a_field': 'abc',
'x': 10
}
}
deserialized = self.serializer.deserialize_model(serialized, TestComposedModel)
expected = TestComposedModel(name='test', submodel=TestModelA(a_field='abc', x=10))
self.assertEqual(expected, deserialized)
def test_serialization_none(self):
model = TestComposedModel2(name='test', submodel=None)
serialized = self.serializer.serialize_model(model)
expected = {
'name': 'test',
'submodel': None
}
self.assertEqual(expected, serialized)
def test_deserialization_none(self):
serialized = {
'name': 'test',
'submodel': None
}
deserialized = self.serializer.deserialize_model(serialized, TestComposedModel2)
expected = TestComposedModel2(name='test', submodel=None)
self.assertEqual(expected, deserialized)
def test_serialization_composed_list(self):
model = TestComposedModel3(name='test',
submodels=[TestModelA(a_field='abc', x=10), TestModelA(a_field='def', x=20)])
serialized = self.serializer.serialize_model(model)
expected = {
'name': 'test',
'submodels': [{
'a_field': 'abc',
'x': 10
}, {
'a_field': 'def',
'x': 20
}]
}
self.assertEqual(expected, serialized)
def test_deserialization_composed_list(self):
serialized = {
'name': 'test',
'submodels': [{
'a_field': 'abc',
'x': 10
}, {
'a_field': 'def',
'x': 20
}]
}
deserialized = self.serializer.deserialize_model(serialized, TestComposedModel3)
expected = TestComposedModel3(name='test',
submodels=[TestModelA(a_field='abc', x=10), TestModelA(a_field='def', x=20)])
self.assertEqual(expected, deserialized)
def test_serialization_composed_set(self):
model = TestComposedModel4(name='test', submodels={'abc', 'def'})
serialized = self.serializer.serialize_model(model)
expected = {
'name': 'test',
'submodels': {'abc', 'def'}
}
self.assertEqual(expected, serialized)
def test_deserialization_composed_set(self):
serialized = {
'name': 'test',
'submodels': {'abc', 'def'}
}
deserialized = self.serializer.deserialize_model(serialized, TestComposedModel4)
expected = TestComposedModel4(name='test', submodels={'abc', 'def'})
self.assertEqual(expected, deserialized)
def test_serialization_composed_dict(self):
model = TestComposedModel5(name='test',
submodels={'a': TestModelA(a_field='abc', x=10), 'b': TestModelA(a_field='def', x=20)})
serialized = self.serializer.serialize_model(model)
expected = {
'name': 'test',
'submodels': {'a': {
'a_field': 'abc',
'x': 10
}, 'b': {
'a_field': 'def',
'x': 20
}}
}
self.assertEqual(expected, serialized)
def test_deserialization_composed_dict(self):
serialized = {
'name': 'test',
'submodels': {'a': {
'a_field': 'abc',
'x': 10
}, 'b': {
'a_field': 'def',
'x': 20
}}
}
deserialized = self.serializer.deserialize_model(serialized, TestComposedModel5)
expected = TestComposedModel5(name='test',
submodels={'a': TestModelA(a_field='abc', x=10),
'b': TestModelA(a_field='def', x=20)})
self.assertEqual(expected, deserialized)
def test_deserialization_validation_errors(self):
serialized = {
'string_field': 'aa',
'int_field': 10,
'url_field': 'abc'
}
err = {
'string_field': ValidationError('Test string_field'),
'int_field': ValidationError('Test int_field')
}
class MockSerializer(DictModelSerializer):
def serialize_value(self, value, value_type, field=None, **kwargs):
pass
def deserialize_value(self, value, value_type, field=None, **kwargs):
if field and field.name in err:
raise err[field.name]
return value
serializer = MockSerializer()
with self.assertRaises(ModelValidationError) as error:
serializer.deserialize_model(serialized, TestModel)
self.assertEqual(error.exception.sub_errors['string_field'].errors, [err['string_field']])
self.assertEqual(error.exception.sub_errors['int_field'].errors, [err['int_field']])
self.assertNotIn('url_field', error.exception.sub_errors)
class TestSerializationJson(TestCase):
def setUp(self):
self.serializer = JsonModelSerializer(sort_keys=True)
def test_deserialization_json(self):
deserialized = self.serializer.deserialize_model('''{
"string_field": "a string",
"int_field": 46,
"url_field": "http://abc"
}''', TestModel)
expected = TestModel(string_field='a string', int_field=46, url_field='http://abc')
self.assertEqual(expected, deserialized)
def test_deserialization_json_invalid(self):
with self.assertRaises(ValidationError):
self.serializer.deserialize_model('{invalid json', TestModel)
def test_serialization_json(self):
serialized = self.serializer.serialize_model(TestModel(string_field='a string', int_field=46, url_field='http://abc'))
expected = '{"int_field": 46, "string_field": "a string", "url_field": "http://abc"}'
self.assertEqual(expected, serialized)
class TestFieldFiltering(TestCase):
def test_make_filter_none(self):
f = make_field_filter(None)
self.assertTrue(f('a'))
self.assertTrue(f('b'))
self.assertTrue(f('c'))
def test_make_filter_list(self):
f = make_field_filter(['a', 'b'])
self.assertTrue(f('a'))
self.assertTrue(f('b'))
self.assertFalse(f('c'))
def test_make_filter_lambda(self):
f = make_field_filter(lambda x: x == 'a' or x == 'c')
self.assertTrue(f('a'))
self.assertFalse(f('b'))
self.assertTrue(f('c'))
def test_iter_fields_with_filter(self):
fields = list(iter_model_fields(TestModel, fields=['url_field', 'string_field']))
self.assertEqual(fields, [('string_field', TestModel.fields['string_field']),
('url_field', TestModel.fields['url_field']),
])
def test_iter_fields_without_filter(self):
fields = list(iter_model_fields(TestModel))
self.assertEqual(fields, [('string_field', TestModel.fields['string_field']),
('int_field', TestModel.fields['int_field']),
('url_field', TestModel.fields['url_field']),
])
|
from threading import Thread
from tensorflow.python import ipu
import tensorflow as tf
NUM_ITERATIONS = 100
#
# Configure the IPU system
#
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = 1
cfg.configure_ipu_system()
#
# The input data and labels
#
def create_dataset():
mnist = tf.keras.datasets.mnist
(x_train, y_train), (_, _) = mnist.load_data()
x_train = x_train / 255.0
train_ds = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).shuffle(10000)
train_ds = train_ds.map(lambda d, l:
(tf.cast(d, tf.float32), tf.cast(l, tf.int32)))
train_ds = train_ds.batch(32, drop_remainder=True)
return train_ds.repeat()
#
# The host side queue
#
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
#
# A custom training loop
#
@tf.function(experimental_compile=True)
def training_step(num_iterations, iterator, in_model, optimizer):
for _ in tf.range(num_iterations):
features, labels = next(iterator)
with tf.GradientTape() as tape:
predictions = in_model(features, training=True)
prediction_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, predictions)
loss = tf.reduce_mean(prediction_loss)
grads = tape.gradient(loss, in_model.trainable_variables)
optimizer.apply_gradients(zip(grads, in_model.trainable_variables))
outfeed_queue.enqueue(loss)
#
# Execute the graph
#
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
# Create the Keras model and optimizer.
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
opt = tf.keras.optimizers.SGD(0.01)
# Create an iterator for the dataset.
train_iterator = iter(create_dataset())
    # Function to continuously dequeue the outfeed until NUM_ITERATIONS losses
    # have been seen.
def dequeue_thread_fn():
counter = 0
while counter != NUM_ITERATIONS:
for loss in outfeed_queue:
print("Step", counter, "loss = ", loss.numpy())
counter += 1
# Start the dequeuing thread.
dequeue_thread = Thread(target=dequeue_thread_fn, args=[])
dequeue_thread.start()
# Run the custom training loop over the dataset.
strategy.run(training_step,
args=[NUM_ITERATIONS, train_iterator, model, opt])
dequeue_thread.join()
|
"""
River Sizes:
You're given a two-dimensional array (a matrix) of potentially unequal height and width containing only 0s and 1s.
Each 0 represents land, and each 1 represents part of a river.
A river consists of any number of 1s that are either horizontally or vertically adjacent (but not diagonally adjacent).
The number of adjacent 1s forming a river determines its size.
Note that a river can twist. In other words, it doesn't have to be a straight vertical line or a straight horizontal line; it can be L-shaped, for example.
Write a function that returns an array of the sizes of all rivers represented in the input matrix.
The sizes don't need to be in any particular order.
Sample Input
matrix = [
[1, 0, 0, 1, 0],
[1, 0, 1, 0, 0],
[0, 0, 1, 0, 1],
[1, 0, 1, 0, 1],
[1, 0, 1, 1, 0],
]
Sample Output
[1, 2, 2, 2, 5] // The numbers could be ordered differently.
// The rivers can be clearly seen here:
// [
// [1, , , 1, ],
// [1, , 1, , ],
// [ , , 1, , 1],
// [1, , 1, , 1],
// [1, , 1, 1, ],
// ]
"""
"""
Solution:
1. iterate through every element in the array
2. if we find a river cell (1), map out its size while marking the river's cells as visited (-1)
"""
# O(wh) time | O(wh) space
def riverSizes(matrix):
river_sizes = []
for row in range(len(matrix)):
for col in range(len(matrix[0])):
if matrix[row][col] == 1: # if river
river_sizes.append(findRiverSize(matrix, row, col))
return river_sizes
def findRiverSize(matrix, row, col):
if row < 0 or col < 0 or row >= len(matrix) or col >= len(matrix[0]) \
or matrix[row][col] != 1: # not river (base case)
return 0
matrix[row][col] = -1 # mark point as visited
left = findRiverSize(matrix, row, col-1)
right = findRiverSize(matrix, row, col+1)
down = findRiverSize(matrix, row+1, col)
up = findRiverSize(matrix, row-1, col)
return 1 + right + left + down + up # visit neighbours
matrix = [
[1, 0, 0, 1, 0],
[1, 0, 1, 0, 0],
[0, 0, 1, 0, 1],
[1, 0, 1, 0, 1],
[1, 0, 1, 1, 0],
]
x = [
[1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0],
[1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1],
[1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0],
[1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1]
] # [2, 1, 21, 5, 2, 1]
print(riverSizes(matrix))
print(riverSizes([[1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0]]))
print(riverSizes(x))
# print(findRiverSize(x, 0, 5))
|
# problem name: Count the Arrays
# problem link: https://codeforces.com/contest/1312/problem/D
# contest link: https://codeforces.com/contest/1312
# time: (?)
# author: reyad
# other_tags: (?)
# N.B.: This solution must be run with Python 3.8 or newer; it does not work with
# earlier versions because the built-in 'pow' function only gained support for
# computing modular inverses (a negative exponent with a modulus) in Python 3.8.
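# For example, pow(3, -1, 7) == 5 because (3 * 5) % 7 == 1; on older versions the
# same inverse can be computed as pow(3, 7 - 2, 7) when the modulus is prime.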
n, m = map(int, input().split())
M = 998244353
f = [1]
for i in range(1, m+1):
f.append((f[-1] * i) % M)
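# Answer: C(m, n-1) ways to choose the distinct values times (n-2) * 2^(n-3)
# arrangements; C(m, n-1) is computed as m! / ((n-1)! * (m-n+1)!) modulo M.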
r = (((n-2) * pow(2, n-3, M)) % M) * ((f[m] * pow((f[n-1] * f[m-n+1]) % M, -1, M)) % M)
print(r % M)
|
"""
Validators for WTForms used in the Digital Marketplace frontend
EmailValidator -- validate that a string is a valid email address
GreaterThan -- compare the values of two fields
FourDigitYear -- validate that a four digit year value is provided
DateValidator -- Error messages for a date input
"""
import datetime
from typing import Any, Dict, Optional
from wtforms.validators import StopValidation, ValidationError
from dmutils.email.helpers import validate_email_address
class EmailValidator:
"""
Tests whether a string is a valid email address.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, message="Please enter a valid email address."):
self.message = message
def __call__(self, form, field):
if not validate_email_address(field.data):
raise ValidationError(self.message)
return
class GreaterThan:
"""
Compares the values of two fields.
:param fieldname:
The name of the other field to compare to.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, fieldname, message=None):
self.fieldname = fieldname
self.message = message
def __call__(self, form, field):
try:
other = form[self.fieldname]
except KeyError:
raise ValidationError(
field.gettext("Invalid field name '%s'." % self.fieldname)
)
if field.data is None or other.data is None:
return
elif field.data > other.data:
return
d = {
"other_label": hasattr(other, "label")
and other.label.text
or self.fieldname,
"other_name": self.fieldname,
}
message = self.message
if message is None:
message = field.gettext("Field must be greater than %(other_name)s.")
raise ValidationError(message % d)
class FourDigitYear:
"""
Validates that a `DateField`'s year field has a four digit year value.
:param message:
Error message to raise in case of a validation error.
"""
def __init__(self, message=None):
self.message = message
def __call__(self, form, field):
try:
digits = len(field.form_field.year.raw_data[0])
except IndexError:
digits = 0
if not digits == 4:
message = self.message
if message is None:
message = field.gettext("Year must be YYYY.")
raise ValidationError(message)
class DateValidator:
"""Error messages for a date input
Implements the guidance in [1].
Instead of taking a `message` DateValidator has two properties,
`whatever_it_is` and `Whatever_it_is`, which are stand-ins for whatever it
is the question is asking the user for. `whatever_it_is` is used in the
middle of a sentence, and should include a participle: for instance, if you
are asking for the date when a buyer needs a supplier to start work,
`whatever_it_is` should be `the start date`. `Whatever_it_is` (with a
capital 'W') is used at the beginning of the sentence, and should not
include a participle. If `Whatever_it_is` is not provided it will be
generated from `whatever_it_is` by dropping the first word and then
capitalising the first letter; in our previous example, for instance,
`Whatever_it_is` would be `Start date`.
Alternatively you can provide the error messages desired directly; there
are three different error messages that can be raised by this validator:
- nothing_is_entered_error_message
- date_is_incomplete_error_message
- date_entered_cant_be_correct_error_message
The string for `date_is_incomplete_error_message` also includes a
placeholder for `whatever_is_missing`, in format string syntax.
[1]: https://design-system.service.gov.uk/components/date-input/#error-messages
"""
def __init__(
self,
whatever_it_is,
Whatever_it_is=None,
*,
nothing_is_entered_error_message=None,
date_is_incomplete_error_message=None,
date_entered_cant_be_correct_error_message=None,
):
self.whatever_it_is = whatever_it_is
if whatever_it_is and not Whatever_it_is:
_, rest = whatever_it_is.split(maxsplit=1)
self.Whatever_it_is = rest[0].upper() + rest[1:]
else:
self.Whatever_it_is = Whatever_it_is
self.nothing_is_entered_error_message = (
nothing_is_entered_error_message or "Enter {whatever_it_is}"
)
self.date_is_incomplete_error_message = (
date_is_incomplete_error_message
or "{Whatever_it_is} must include a {whatever_is_missing}"
)
self.date_entered_cant_be_correct_error_message = (
date_entered_cant_be_correct_error_message
or "{Whatever_it_is} must be a real date"
)
def _error(self, error, fields, **kwargs):
e = ValueError(self.format_error_message(error, **kwargs))
e.error = error
# Design System guidance is that if more than one field has an error
# we should highlight all fields.
if len(fields) > 1:
e.fields = {"year", "month", "day"}
else:
e.fields = fields
return e
def format_error_message(self, error, **kwargs):
return getattr(self, error + "_error_message").format(
whatever_it_is=self.whatever_it_is,
Whatever_it_is=self.Whatever_it_is,
**kwargs,
)
def validate_input(self, raw_data: Dict[str, Any]):
"""Check that year month and day are in `raw_data` and are all integers
:raises ValueError:
"""
if not any(raw_data.values()):
raise self._error(
"nothing_is_entered",
set(raw_data.keys()),
)
elif not all(raw_data.values()):
missing = list(sorted(
name
for name, value in raw_data.items()
if not value
))
whatever_is_missing = " and ".join(missing)
raise self._error(
"date_is_incomplete",
set(missing),
whatever_is_missing=whatever_is_missing,
)
def int_or_none(s: Any) -> Optional[int]:
if not s:
return None
try:
return int(s)
except ValueError:
return None
data = {name: int_or_none(value) for name, value in raw_data.items()}
if not all(data.values()):
raise self._error(
"nothing_is_entered",
{k for k, v in data.items() if v is None},
)
def validate_data(self, data: Dict[str, int]):
"""Check that year month and day in `data` make a valid date
:raises ValueError:
"""
year, month, day = data["year"], data["month"], data["day"]
invalid = set()
if not (1 <= day and day <= 31):
invalid.add("day")
if not (1 <= month and month <= 12):
invalid.add("month")
if not (datetime.MINYEAR <= year and year <= datetime.MAXYEAR):
invalid.add("year")
if not invalid:
try:
datetime.date(year, month, day)
except ValueError:
# assume that since we've eliminated other possibilities
# above it must be that the day given is outside the number
# of days in the given month and year
# see https://docs.python.org/3.6/library/datetime.html#datetime.date
invalid.add("day")
if invalid:
raise self._error(
"date_entered_cant_be_correct",
invalid,
)
def validate_date(self, year, month, day):
self.validate_input({"year": year, "month": month, "day": day})
self.validate_data({"year": int(year), "month": int(month), "day": int(day)})
def __call__(self, form, field):
"""WTForms date validator for DMDateField"""
try:
self.validate_date(
field.form_field.year.data,
field.form_field.month.data,
field.form_field.day.data,
)
except ValueError as e:
error_message = str(e)
error_fields = getattr(e, "fields", {"year", "month", "day"})
# remove processing errors from field
field.errors[:] = []
# add errors to specific form field fields
for form_field_field in error_fields:
getattr(field.form_field, form_field_field).errors = [error_message]
validation_error = StopValidation(error_message)
validation_error.fields = error_fields
raise validation_error
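# A minimal usage sketch (plain WTForms, made-up field names) showing how
# EmailValidator and GreaterThan can be attached to form fields; whether the
# address passes depends on dmutils' validate_email_address.
if __name__ == "__main__":
    from wtforms import Form, IntegerField, StringField

    class ExampleForm(Form):
        email = StringField("Email", validators=[EmailValidator()])
        minimum = IntegerField("Minimum")
        maximum = IntegerField("Maximum", validators=[GreaterThan("minimum")])

    form = ExampleForm(data={"email": "someone@example.com", "minimum": 1, "maximum": 5})
    print(form.validate())  # expected True; any validation errors land in form.errors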
|
import os
from abc import ABCMeta, abstractmethod
import torch
from torch.utils import data
from torchvision import transforms
from PIL import Image
from target_transforms import ClassLabel
from utils import load_value_file
def images_loader(paths: dict) -> list:
images = []
for path in paths:
with open(path, 'rb') as f:
with Image.open(f) as img:
if paths[path] == '3ch':
images.append(img.convert('RGB'))
elif paths[path] == '1ch':
images.append(img.convert('L'))
return images
def video_loader(video_dir_paths: dict, frame_indices: list,
image_format: str) -> list:
video = []
for i in frame_indices:
images_paths = {}
for video_dir_path in video_dir_paths:
path = os.path.join(video_dir_path, image_format.format(i))
assert os.path.exists(path), 'No such file :{}'.format(path)
images_paths[path] = video_dir_paths[video_dir_path]
images = images_loader(images_paths)
video.extend(images)
return video
# Combine multiple images into a single image by concatenating their channels
# example: RGB, Gray, Gray, RGB, Gray, Gray, RGB, Gray... -> 5ch, 5ch...
def channels_coupling(clip: list, image_number: int) -> list:
new_clip = []
for i in range(0, len(clip), image_number):
temp = []
for j in range(image_number):
temp.append(clip[i + j])
new_clip.append(torch.cat(temp, 0))
return new_clip
class BaseLoader(data.Dataset, metaclass=ABCMeta):
@abstractmethod
def load_annotation_data(self, data_file_path: str):
pass
@abstractmethod
def get_class_labels(self, entry):
pass
@abstractmethod
def get_video_names_and_annotations(self, entry, subset: str) -> tuple:
pass
def make_data_set(
self,
paths: dict,
annotation_path: str,
subset: str
):
        # Load the json or csv object from the annotation file
entry = self.load_annotation_data(annotation_path)
        # Get the list of video names and the list of their annotations
video_names, annotations = self.get_video_names_and_annotations(entry, subset)
        # Assign an id to each class
class_to_idx = self.get_class_labels(entry)
        # Build the reverse mapping so a class name can be looked up from its id
idx_to_class = {}
for name, label in class_to_idx.items():
idx_to_class[label] = name
video_information = []
for i in range(len(video_names)):
            # Report progress every 1000 videos
if i % 1000 == 0:
print('data_set loading [{}/{}]'.format(i, len(video_names)))
full_paths = {}
n_frames = 0
for path in paths:
full_path = os.path.join(path, video_names[i])
assert os.path.exists(full_path), 'No such file :{}'.format(full_path)
full_paths[full_path] = paths[path]
n_frames_file_path = os.path.join(full_path, 'n_frames')
if os.path.exists(n_frames_file_path):
n_frames = int(load_value_file(n_frames_file_path))
assert n_frames > 0
video_info = {
'paths': full_paths,
'n_frames': n_frames,
'video_id': video_names[i].split('/')[1],
'label': class_to_idx[annotations[i]['label']],
'frame_indices': list(range(1, n_frames + 1))
}
video_information.append(video_info)
return video_information, idx_to_class
def __init__(
self,
paths: dict,
annotation_path: str,
subset: str,
spatial_transform: transforms = None,
target_transform: ClassLabel = None,
image_format: str = 'image_{0:05d}.jpg'
):
self.data, self.class_names = self.make_data_set(
paths,
annotation_path,
subset
)
self.spatial_transform = spatial_transform
self.target_transform = target_transform
self.image_format = image_format
self.image_number = len(paths)
def __getitem__(self, index: int) -> tuple:
paths = self.data[index]['paths']
clip = video_loader(paths, self.data[index]['frame_indices'],
self.image_format)
clip = [self.spatial_transform(img) for img in clip]
if self.image_number > 1:
clip = channels_coupling(clip, self.image_number)
clip = torch.stack(clip, 0).permute(1, 0, 2, 3)
target = self.data[index]
if self.target_transform is not None:
if self.target_transform.flag:
target, target_name = self.target_transform(target)
return clip, target, target_name
else:
target = self.target_transform(target)
return clip, target
def __len__(self) -> int:
return len(self.data)
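# Small illustrative example (shapes are made up): three per-frame tensors with
# 3, 1 and 1 channels are concatenated by channels_coupling into one 5-channel
# tensor per frame.
if __name__ == "__main__":
    frames = [torch.zeros(3, 8, 8), torch.zeros(1, 8, 8), torch.zeros(1, 8, 8)]
    combined = channels_coupling(frames, image_number=3)
    print(combined[0].shape)  # torch.Size([5, 8, 8])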
|
import discord
from discord.ext import commands
class NoPatron(commands.CheckFailure):
"""Exception raised when you need to donate to use a command."""
pass
async def has_money(bot, userid, money):
async with bot.pool.acquire() as conn:
return await conn.fetchval(
'SELECT money FROM profile WHERE "user"=$1 AND "money">=$2;', userid, money
)
def is_admin():
async def predicate(ctx):
return ctx.author.id in ctx.bot.config.admins
return commands.check(predicate)
def is_patron():
async def predicate(ctx):
response = await ctx.bot.cogs["Sharding"].handler(
"user_is_patreon", 1, args={"member_id": ctx.author.id}
)
if any(response):
return True
raise NoPatron()
return commands.check(predicate)
async def user_is_patron(bot, user):
response = await bot.cogs["Sharding"].handler(
"user_is_patreon", 1, args={"member_id": user.id}
)
return any(response)
def is_supporter():
async def predicate(ctx):
response = await ctx.bot.cogs["Sharding"].handler(
"user_is_helper", 1, args={"member_id": ctx.author.id}
)
return any(response)
return commands.check(predicate)
def is_hypesquad(ctx):
member = ctx.bot.get_guild(ctx.bot.config.support_server_id).get_member(
ctx.author.id
) # cross server stuff
if not member:
return False
return (
discord.utils.get(member.roles, name="Hypesquad") is not None
or discord.utils.get(member.roles, name="Administrators") is not None
)
|
"""Defining graphs using the networkx package."""
import os
import networkx
import numpy as np
import matplotlib.pyplot as plt
from .plot_graph import get_positions, get_colours, get_colours_extend
def nx_create_graph(graph):
"""Covert a graph from the simple_graph package into networkx format."""
G = networkx.DiGraph()
G.add_nodes_from(range(0, len(graph)))
for i, edge_list in enumerate(graph):
if len(edge_list) != 0:
to_add = [(i, e) for e in edge_list]
G.add_edges_from(to_add)
return G
def nx_vis_graph(
nx_graph, region_sizes, start_set, end_set, reachable=None, name="nx_graph.png"
):
"""Visualise the graph with positions in regions linearly spaced."""
pos = get_positions(region_sizes, as_dict=True)
c = get_colours(nx_graph.number_of_nodes(), start_set, end_set, reachable)
plt.clf()
networkx.draw_networkx(nx_graph, with_labels=False, node_color=c, pos=pos)
plt.savefig(name)
def nx_vis_force(
nx_graph,
start_set,
end_set,
sources,
targets,
name="nx_simple.png",
labels=False,
reachable=None,
):
"""Simple force based visual representation of the networkx graph."""
options = {
"node_size": 50,
"linewidths": 0,
"width": 0.1,
}
c = get_colours_extend(
nx_graph.number_of_nodes(), start_set, end_set, sources, targets, reachable
)
plt.clf()
networkx.draw(nx_graph, node_color=c, with_labels=labels, **options)
plt.savefig(name, dpi=400)
def nx_graph_stats(G):
"""Print simple stats about the graph G."""
print("Total number of nodes: ", int(G.number_of_nodes()))
print("Total number of edges: ", int(G.number_of_edges()))
print("Total number of self-loops: ", int(G.number_of_selfloops()))
print("List of all nodes with self-loops: ", list(G.nodes_with_selfloops()))
print(
"List of all nodes we can go to in a single step from node 2: ",
list(G.successors(2)),
)
def nx_find_connected(graph, start_set, end_set, cutoff=np.inf):
"""Return the nodes in end_set connected to start_set."""
reachable = []
for end in end_set:
if nx_is_reachable(graph, end, start_set):
reachable.append(end)
if len(reachable) >= cutoff:
break
return reachable
def nx_is_reachable(graph, end, start_set):
"""Return if there is path from start_set to end in graph."""
for start in start_set:
result = networkx.algorithms.shortest_paths.generic.has_path(graph, start, end)
if result:
return True
return False
def nx_find_connected_limited(graph, start_set, end_set, max_depth=3):
"""Return the neurons in end_set reachable from start_set with limited depth."""
reverse_graph = graph.reverse()
reachable = []
for e in end_set:
preorder_nodes = list(
(
networkx.algorithms.traversal.depth_first_search.dfs_preorder_nodes(
reverse_graph, source=e, depth_limit=max_depth
)
)
)
for s in start_set:
if s in preorder_nodes:
reachable.append(e)
break
return reachable
def export_gml(graph, name="nx_gml.graphml"):
"""Export the networkx graph to gml format for Gephi visualisation."""
here = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(here, "..", "resources", name)
os.makedirs(os.path.dirname(path), exist_ok=True)
networkx.write_graphml(graph, path)
return
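# Quick illustrative usage (the adjacency-list input format is an assumption
# based on how nx_create_graph iterates over it): entry i lists the nodes that
# node i points to.
if __name__ == "__main__":
    simple_graph = [[1], [2], [0, 3], []]
    G = nx_create_graph(simple_graph)
    print(nx_find_connected(G, start_set=[0], end_set=[3]))  # [3]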
|
import xlrd as xl
import numpy as np
import matplotlib.pyplot as plt
from flask import Flask, jsonify, request, render_template, Markup, json, session, redirect, url_for
import io
import base64
from funf import ds1,ds2
from dc import att1,att2
import json
app = Flask(__name__)
app.config["TEMPLATES_AUTO_RELOAD"]=True
@app.route('/')
def data():
return render_template('data.html')
@app.route('/result',methods = ['POST', 'GET'])
def result():
if request.method == 'POST':
global result
result = request.form
global a
global b
global c
global d
a=int(result["dataset-1"])
b=int(result["dataset-2"])
c=int(result["Attribute-1"])
d=int(result["Attribute-2"])
global l
global k
f=ds1(a)
g=ds2(b)
k=att1(f,c)
l=att2(g,d)
global p
global k1
global k2
k1=l.copy()
k2=k.copy()
k1=np.array(k1)
k2=np.array(k2)
p=np.subtract(k1,k2)
p=list(p)
global label
label=['Jammu and Kashmir','Himachal Pradesh','Punjab','Chandigarh','Uttarakhand','Haryana','NCT of Delhi', 'Rajasthan','Uttar Pradesh','Bihar','Sikkim','Arunachal Pradesh','Nagaland','Manipur','Mizoram','Tripura','Meghalaya','Assam','West Bengal','Jharkhand','Orissa','Chhatisgarh','Madhya Pradesh','Gujarat','Daman and Diu','Dadra and Nagar Haveli','Maharashtra','Andhra Pradesh','Karnataka','Goa','Lakshadweep','Kerala','Tamil Nadu','Pondicherry','Andaman and Nicobar Islands']
        global at1, at2
        # Map the selected attribute index to its human-readable name.
        attribute_names = {
            1: "male literate population",
            2: "male literacy rate",
            3: "female literate population",
            4: "female literacy rate",
            5: "total literate population",
        }
        at1 = attribute_names.get(c)
        at2 = attribute_names.get(d)
return render_template('result.html',result = result,a=a,b=b,c=c,d=d,p=p,k=k1,l=k2,label=label)
@app.route('/lc')
def lc():
return render_template("gr.html",k=k,l=l,p=p,label=label,a=a,b=b,c=c,d=d,at1=at1,at2=at2)
@app.route('/pie')
def hst():
return render_template("pie.html",k=k,l=l,label=label,p=p,at1=at1,at2=at2,a=a,b=b,c=c,d=d)
@app.route('/bplot')
def bplot():
return render_template("bplot.html",k=k,l=l,p=p,label=label,at1=at1,at2=at2,a=a,b=b,c=c,d=d)
@app.route('/mlc')
def mlc():
return render_template("mlc.html",k=k,l=l,p=p,label=label,at1=at1,at2=at2,a=a,b=b,c=c,d=d)
@app.route('/scplt')
def scplt():
return render_template("scplt.html",k=k,l=l,p=p,label=label,at1=at1,at2=at2,a=a,b=b,c=c,d=d)
if __name__ == '__main__':
app.run(debug=True)
|
import json
from . import api, root
from .decorators import gateway_belong_to_user, require_basic_or_oauth
from userver.object.gateway import Gateway, Location
from utils.errors import KeyDuplicateError, PatchError
from .forms.form_gateway import AddGatewayForm, PatchGateway
from flask import request, Response
from .forms import get_formdata_from_json_or_form
@api.route(root + 'gateways', methods=['GET', 'POST'])
@require_basic_or_oauth
def gateways(user):
if request.method == 'GET':
gateways_list = []
gateways = Gateway.query.filter_by(user_id=user.id)
for gateway in gateways:
            gateway_dict = gateway.obj_to_dict()
            gateways_list.append(gateway_dict)
data = json.dumps(gateways_list)
return Response(status=200, response=data)
elif request.method == 'POST':
formdata = get_formdata_from_json_or_form(request)
add_gateway = AddGatewayForm(formdata)
if add_gateway.validate():
try:
gateway = import_gateway(user, add_gateway)
gateway.save()
new_gateway = Gateway.query.get(gateway.id)
return Response(status=201, response=json.dumps(new_gateway.obj_to_dict()))
except KeyDuplicateError as error:
errors = {'mac_addr': str(error)}
return Response(status=406, response=json.dumps({"errors": errors}))
except AssertionError as error:
return Response(status=406, response=json.dumps({"errors": {"other": str(error)}}))
else:
errors = {}
for key, value in add_gateway.errors.items():
errors[key] = value[0]
return Response(status=406, response=json.dumps({"errors": errors}))
@api.route(root + 'gateways/<gateway_id>/pull_info', methods=['GET'])
@require_basic_or_oauth
@gateway_belong_to_user
def gateway_pull_info(user, gateway):
"""
:param user:
:param gateway:
:return:
"""
    gateway.get_pull_info()
    # Return an empty 204 response so Flask does not error on a None return value.
    return Response(status=204)
@api.route(root + 'gateways/<gateway_id>', methods=['GET', 'DELETE', 'PATCH', 'POST'])
@require_basic_or_oauth
@gateway_belong_to_user
def gateway(user, gateway):
if request.method == 'GET':
return Response(status=200, response=json.dumps(gateway.obj_to_dict()))
elif request.method == 'PATCH':
try:
formdata = get_formdata_from_json_or_form(request)
PatchGateway.patch(gateway, formdata)
return json.dumps(gateway.obj_to_dict()), 200
except (AssertionError, PatchError, ValueError) as error:
return json.dumps({'errors': str(error)}), 406
elif request.method == 'DELETE':
gateway.delete()
return json.dumps({'success': True}), 200
elif request.method == 'POST':
formdata = get_formdata_from_json_or_form(request)
if formdata and formdata.get('cmd') is not None:
if formdata['cmd'] == 'restart':
gateway.send_restart_request()
return '', 204
else:
return 'Unknown cmd %s ' % formdata['cmd'], 406
else:
return '', 406
def import_gateway(user, add_gateway):
mac_addr = add_gateway['mac_addr'].data
name = add_gateway['name'].data
platform = add_gateway['platform'].data
freq_plan = add_gateway['freq_plan'].data
model = add_gateway['model'].data
location = Location(add_gateway['longitude'].data, add_gateway['latitude'].data, add_gateway['altitude'].data)
    return Gateway(user.id, mac_addr, name, platform, model, freq_plan=freq_plan, location=location)
|
"""Example program to test pyatag."""
import asyncio
import logging
import aiohttp
from pyatag import AtagException, AtagOne
from pyatag.discovery import async_discover_atag
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.DEBUG)
async def main():
"""Initialize session for main program."""
async with aiohttp.ClientSession() as session:
await run(session)
async def run(session):
"""Run example main program."""
atag_ip, device_id = await async_discover_atag()
_LOGGER.info(f"Found Atag device {device_id }at address {atag_ip}")
atag = AtagOne(atag_ip, session)
try:
await atag.authorize()
await atag.update()
except AtagException as err:
_LOGGER.error(err)
return False
for sensor in atag.report:
_LOGGER.debug("%s = %s", sensor.name, sensor.state)
for attribute in dir(atag.climate):
_LOGGER.debug(
"atag.climate.%s = %s", attribute, getattr(atag.climate, attribute)
)
await atag.climate.set_preset_mode("manual")
await atag.climate.set_temp(21)
_LOGGER.debug(atag.report.report_time)
_LOGGER.debug(atag.dhw.temperature)
asyncio.run(main())
|
import math
import random
print(math.log(32))
print(math.log(32, 10))
print(math.log(32, 2))
print(math.log10(64))
print(math.trunc(3.233))
print(math.trunc(3.78))
print(math.trunc(-232.83))
print(math.trunc(-232.1))
l = [1, 2, 3]
random.seed(1234)
print(random.choice(l))
print(random.choice(l))
print(random.choice(l))
print(random.choice(l))
print(random.choice(l))
print(random.choice(l))
print(random.choice(l))
t = (8, 9, 10)
print(random.choice(t))
print(random.choice(t))
print(random.choice(t))
print(random.choice(t))
print(random.choice(t))
print(random.choice(t))
print(random.choice(t))
print(random.choice(t))
|
import json
import logging
import os
import shutil
import subprocess
import sys
from dataclasses import dataclass
from typing import Optional, List, Dict, Any
from rustimport import settings
_logger = logging.getLogger(__name__)
class Cargo:
def __init__(self, executable_path: Optional[str] = None):
self.executable_path = executable_path or settings.cargo_executable or require('cargo')
@dataclass
class BuildResult:
artifact_path: Optional[str]
exit_code: int
success: bool
error_output: List[str]
compiler_messages: List[Dict[str, Any]]
def build(self, crate_path: str,
destination_path: Optional[str] = None,
release: bool = False,
suppress_output: bool = False,
additional_args: Optional[List[str]] = None) -> BuildResult:
"""
        Runs `cargo rustc --lib` for the given `crate_path`.
@param crate_path: The path of the crate's root directory (the directory containing Cargo.toml).
@param destination_path: Copy the built library artifact to this folder or file path.
@param release: Whether to build a release binary (toggles Cargo's "--release" flag)
@param suppress_output: If true, no process output will be printed to stdout. In case of build failure,
the output will be collected and logged using `logging.error()` for debugging.
@param additional_args: Additional command line arguments to supply to the cargo executable.
"""
cmd = [
self.executable_path, 'rustc',
'--lib',
'--message-format', 'json'
]
if suppress_output:
cmd.append("--quiet")
if release:
cmd.append('--release')
if additional_args:
cmd.extend(additional_args)
_logger.debug(f'Building {crate_path}: {" ".join(cmd)}')
proc = subprocess.Popen(
cmd,
cwd=crate_path,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE if suppress_output else None,
)
result = self.__handle_build_process(crate_path, proc)
if not result.success and suppress_output:
_logger.error(f"Compilation failed. Cargo build output:\n\n"
+ '\n'.join(result.error_output) +
f"{proc.stderr.read().decode()}")
_logger.info(f'Cargo exited with code {result.exit_code}.')
if result.success and result.artifact_path and destination_path:
_logger.info(f"Copying artifact {result.artifact_path} to {destination_path}")
shutil.copy2(result.artifact_path, destination_path)
return result
@classmethod
def __handle_build_process(cls, crate_path: str, proc: subprocess.Popen) -> BuildResult:
"""
Handle json messages received from the given cargo process `proc`.
This method extracts build processes main library's artifact path (the python extension), if possible.
@return: A `Cargo.BuildResult`. Note that artifact_path might be `None` if extraction
fails (mostly in case of compilation errors).
"""
abs_crate_path = os.path.realpath(crate_path).rstrip("/")
artifact_path = None
messages = []
error_output = []
while (exit_code := proc.poll()) is None:
line = proc.stdout.readline()
if line.strip():
messages.append(message := json.loads(line))
if message.get('reason') == 'compiler-artifact':
if os.path.dirname(message.get('manifest_path')) == abs_crate_path:
artifact_path = message['filenames'][0]
elif message.get('reason') == 'compiler-message':
if not proc.stderr:
sys.stderr.write(message['message']['rendered'])
else:
error_output.append(message['message']['rendered'])
return cls.BuildResult(
success=exit_code == 0,
exit_code=exit_code,
compiler_messages=messages,
error_output=error_output,
artifact_path=artifact_path,
)
def require(executable_name: str):
path = shutil.which(executable_name)
if path:
return path
else:
sys.stderr.write(
"Could not find the rust toolchain installation. Make sure it is installed and "
"the `PATH` environment variable is set correctly.\n\n"
)
if os.name != 'nt':
sys.stderr.write("You can install the toolchain like this:\n$ curl https://sh.rustup.rs | sh\n\n")
else:
sys.stderr.write(
"To install the toolchain, visit https://forge.rust-lang.org/infra/other-installation-methods.html"
"#other-ways-to-install-rustup\n\n"
)
raise FileNotFoundError(f'Could not find {executable_name} binary.')
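# Minimal usage sketch (the crate path below is a placeholder, not a real
# project): build a crate's library in release mode and inspect the result.
if __name__ == "__main__":
    cargo = Cargo()
    build = cargo.build("/path/to/my_crate", release=True, suppress_output=True)
    print(build.success, build.exit_code, build.artifact_path)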
|
#-*- coding: utf-8 -*-
import os
#os.mkdir('tmp2')
def verifica_ws():
    # Check the working directory and create the required folders if they are missing.
    print(os.getcwd())
    for folder in ('Audio', 'Slow', 'Reproduzir'):
        if os.path.exists(folder):
            print('the {} folder exists'.format(folder))
        else:
            print('the {} folder does not exist'.format(folder))
            print('creating the {} folder'.format(folder))
            os.makedirs(folder)
|
import json
from dad_joke import DadJoke
class Database:
def __init__(self, filename='db.json'):
self.jokes = []
self.filename = filename
self.read_from_file()
def add_record(self, record):
self.jokes.append(record)
def get_records(self):
return self.jokes
def save_to_file(self):
json_string = json.dumps([ob.__dict__ for ob in self.jokes])
with open(self.filename, 'w') as file:
file.write(json_string)
    def read_from_file(self):
        # Start with an empty joke list if the database file does not exist yet.
        try:
            with open(self.filename, 'r') as file:
                self.jokes = json.load(file, object_hook=lambda d: DadJoke(**d))
        except FileNotFoundError:
            self.jokes = []
|
# This file is executed on every boot (including wake-boot from deepsleep)
# import esp
# esp.osdebug(None)
# import webrepl
# webrepl.start()
import gc
import network
import machine
from config import SSID, PASSWORD
def establish_connection():
wifi = network.WLAN(network.STA_IF)
if not wifi.isconnected():
print("Connecting to {}".format(SSID))
wifi.active(True)
wifi.connect(SSID, PASSWORD)
        while not wifi.isconnected():
            # Yield to the system while waiting for the connection to come up.
            machine.idle()
print("Successfully connected to {}".format(SSID))
establish_connection()
gc.collect()
|
#!/usr/bin/env python
#
# Copyright (c) 2013 GhostBSD
#
# See COPYING for licence terms.
#
# create_cfg.py v 1.4 Friday, January 17 2014 Eric Turgeon
#
import os
import pickle
from subprocess import Popen, PIPE
# Directories used by the installer.
tmp = "/tmp/.gbi/"
installer = "/usr/local/lib/gbi/"
# Installer data files.
disk = '%sdisk' % tmp
layout = '%slayout' % tmp
model = '%smodel' % tmp
pcinstallcfg = '%spcinstall.cfg' % tmp
user_passwd = '%suser' % tmp
language = '%slanguage' % tmp
dslice = '%sslice' % tmp
left = '%sleft' % tmp
partlabel = '%spartlabel' % tmp
timezone = '%stimezone' % tmp
KBFile = '%skeyboard' % tmp
boot_file = '%sboot' % tmp
disk_schem = '%sscheme' % tmp
zfs_config = '%szfs_config' % tmp
ufs_config = tmp + 'ufs_config'
class gbsd_cfg():
def __init__(self):
f = open('%spcinstall.cfg' % tmp, 'w')
# Installation Mode
f.writelines('# Installation Mode\n')
f.writelines('installMode=fresh\n')
f.writelines('installInteractive=no\n')
f.writelines('installType=GhostBSD\n')
f.writelines('installMedium=livecd\n')
f.writelines('packageType=livecd\n')
# System Language
langfile = open(language, 'r')
lang = langfile.readlines()[0].rstrip()
f.writelines('\n# System Language\n\n')
f.writelines('localizeLang=%s\n' % lang)
os.remove(language)
# Keyboard Setting
if os.path.exists(model):
f.writelines('\n# Keyboard Setting\n')
os.remove(model)
if os.path.exists(KBFile):
rkb = open(KBFile, 'r')
kb = rkb.readlines()
kbl = kb[0].rstrip()
f.writelines('localizeKeyLayout=%s\n' % kbl)
kbv = kb[1].rstrip()
if kbv != 'None':
f.writelines('localizeKeyVariant=%s\n' % kbv)
kbm = kb[2].rstrip()
if kbm != 'None':
f.writelines('localizeKeyModel=%s\n' % kbm)
# Timezone
if os.path.exists(timezone):
time = open(timezone, 'r')
t_output = time.readlines()[0].strip()
f.writelines('\n# Timezone\n')
f.writelines('timeZone=%s\n' % t_output)
f.writelines('enableNTP=yes\n')
os.remove(timezone)
if os.path.exists(zfs_config):
# Disk Setup
r = open(zfs_config, 'r')
zfsconf = r.readlines()
for line in zfsconf:
if 'partscheme' in line:
f.writelines(line)
read = open(boot_file, 'r')
boot = read.readlines()[0].strip()
if boot == 'refind':
f.writelines('bootManager=none\n')
f.writelines('efiLoader=%s\n' % boot)
else:
f.writelines('bootManager=%s\n' % boot)
f.writelines('efiLoader=none\n')
os.remove(boot_file)
else:
f.writelines(line)
# os.remove(zfs_config)
elif os.path.exists(ufs_config):
# Disk Setup
r = open(ufs_config, 'r')
ufsconf = r.readlines()
for line in ufsconf:
if 'partscheme' in line:
f.writelines(line)
read = open(boot_file, 'r')
boot = read.readlines()[0].strip()
if boot == 'refind':
f.writelines('bootManager=none\n')
f.writelines('efiLoader=%s\n' % boot)
else:
f.writelines('bootManager=%s\n' % boot)
f.writelines('efiLoader=none\n')
os.remove(boot_file)
else:
f.writelines(line)
else:
# Disk Setup
r = open(disk, 'r')
drive = r.readlines()
d_output = drive[0].strip()
f.writelines('\n# Disk Setup\n')
f.writelines('disk0=%s\n' % d_output)
os.remove(disk)
# Partition Slice.
p = open(dslice, 'r')
line = p.readlines()
part = line[0].rstrip()
f.writelines('partition=%s\n' % part)
os.remove(dslice)
# Boot Menu
read = open(boot_file, 'r')
line = read.readlines()
boot = line[0].strip()
if boot == 'refind':
f.writelines('bootManager=none\n')
f.writelines('efiLoader=%s\n' % boot)
else:
f.writelines('bootManager=%s\n' % boot)
f.writelines('efiLoader=none\n')
# os.remove(boot_file)
            # Disk scheme
read = open(disk_schem, 'r')
shem = read.readlines()[0]
f.writelines(shem + '\n')
f.writelines('commitDiskPart\n')
# os.remove(disk_schem)
# Partition Setup
f.writelines('\n# Partition Setup\n')
part = open(partlabel, 'r')
            # If the slice and auto files exist, add the first partition line.
            # Swap needs to be 0 so it takes up the rest of the free space.
for line in part:
if 'BOOT' in line or 'BIOS' in line or 'UEFI' in line:
pass
else:
f.writelines('disk0-part=%s\n' % line.strip())
f.writelines('commitDiskLabel\n')
os.remove(partlabel)
# Network Configuration
f.writelines('\n# Network Configuration\n')
readu = open(user_passwd, 'rb')
uf = pickle.load(readu)
net = uf[5]
f.writelines('hostname=%s\n' % net)
# Set the root pass
f.writelines('\n# Network Configuration\n')
readr = open('%sroot' % tmp, 'rb')
rf = pickle.load(readr)
root = rf[0]
f.writelines('\n# Set the root pass\n')
f.writelines('rootPass=%s\n' % root)
# Setup our users
user = uf[0]
f.writelines('\n# Setup user\n')
f.writelines('userName=%s\n' % user)
name = uf[1]
f.writelines('userComment=%s\n' % name)
passwd = uf[2]
f.writelines('userPass=%s\n' % passwd.rstrip())
shell = uf[3]
f.writelines('userShell=%s\n' % shell)
upath = uf[4]
f.writelines('userHome=%s\n' % upath.rstrip())
f.writelines('defaultGroup=wheel\n')
f.writelines('userGroups=operator\n')
f.writelines('commitUser\n')
ifvbox = open('/tmp/.ifvbox', 'w')
vbguest = Popen('pciconf -lv | grep "VirtualBox Graphics"', shell=True,
stdout=PIPE, close_fds=True, universal_newlines=True)
if "VirtualBox Graphics" in vbguest.stdout.read():
ifvbox.writelines('True\n')
else:
ifvbox.writelines('False\n')
ifvbox.close()
f.writelines('runExtCommand=cat /etc/rc.conf | grep kld_list >> $FSMNT/etc/rc.conf\n')
if os.path.exists("/etc/X11/xorg.conf"):
f.writelines('runExtCommand=cp /etc/X11/xorg.conf $FSMNT/etc/X11/xorg.conf\n')
f.writelines('runScript=/root/iso_to_hd.sh\n')
f.writelines('runCommand=rm -f /root/iso_to_hd.sh\n')
if os.path.exists(zfs_config):
zfsark = """echo 'vfs.zfs.arc_max="512M"' >> /boot/loader.conf"""
f.writelines('runCommand=%s\n' % zfsark)
        # Add the keyboard settings for SLiM
keyboard_conf = '/usr/local/etc/X11/xorg.conf.d/keyboard.conf'
k_conf_list = [
'Section "InputClass"',
' Identifier "Keyboard0"',
' Driver "kbd"',
' Option "XkbLayout" "%s"' % kbl
]
if kbv != 'None':
k_conf_list.append(' Option "XkbVariant" "%s"' % kbv)
if kbm != 'None':
k_conf_list.append(' Option "XkbModel" "%s"' % kbm)
k_conf_list.append('EndSection')
for conf_line in k_conf_list:
if 'Section "InputClass"' == conf_line:
cmd = """echo '%s' > %s""" % (conf_line, keyboard_conf)
else:
cmd = """echo '%s' >> %s""" % (conf_line, keyboard_conf)
f.writelines('runCommand=%s\n' % cmd)
f.close()
os.remove(user_passwd)
|
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.adverse_event_causality_method import (
AdverseEventCausalityMethod as AdverseEventCausalityMethod_,
)
__all__ = ["AdverseEventCausalityMethod"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class AdverseEventCausalityMethod(AdverseEventCausalityMethod_):
"""
AdverseEventCausalityMethod
TODO.
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/adverse-event-causality-method
"""
class Meta:
resource = _resource
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import torch
from pterotactyl.utility import utils
BASE_MESH_SIZE = 1824
BASE_CHART_SIZE = 25
# replay buffer used for learning RL models over the environment
class ReplayMemory:
def __init__(self, args):
self.args = args
# basic info which might be used by a learning method
        # _n denotes observations occurring after the action is performed
self.mask = torch.zeros((self.args.mem_capacity, self.args.num_actions))
self.mask_n = torch.zeros((self.args.mem_capacity, self.args.num_actions))
self.actions = torch.zeros((self.args.mem_capacity))
self.rewards = torch.zeros(self.args.mem_capacity)
self.score = torch.zeros(self.args.mem_capacity)
self.score_n = torch.zeros(self.args.mem_capacity)
self.first_score = torch.zeros(self.args.mem_capacity)
if self.args.use_recon:
num_fingers = 1 if self.args.finger else 4
mesh_shape = BASE_MESH_SIZE + (
BASE_CHART_SIZE * self.args.num_grasps * num_fingers
)
self.mesh = torch.zeros((self.args.mem_capacity, mesh_shape, 4))
self.mesh_n = torch.zeros((self.args.mem_capacity, mesh_shape, 4))
if self.args.use_latent:
latent_size = utils.load_model_config(self.args.auto_location)[
0
].encoding_size
self.latent = torch.zeros((self.args.mem_capacity, latent_size))
self.latent_n = torch.zeros((self.args.mem_capacity, latent_size))
self.first_latent = torch.zeros((self.args.mem_capacity, latent_size))
self.position = 0
self.count_seen = 0
# add a set of transitions to the replay buffer
def push(self, action, observation, next_observation, reward):
for i in range(len(action)):
self.actions[self.position] = action[i]
self.rewards[self.position] = reward[i]
self.score[self.position] = observation["score"][i]
self.score_n[self.position] = next_observation["score"][i]
self.first_score[self.position] = observation["first_score"][i]
self.mask[self.position] = observation["mask"][i]
self.mask_n[self.position] = next_observation["mask"][i]
if self.args.use_recon:
self.mesh[self.position] = observation["mesh"][i]
self.mesh_n[self.position] = next_observation["mesh"][i]
if self.args.use_latent:
self.latent[self.position] = observation["latent"][i]
self.latent_n[self.position] = next_observation["latent"][i]
self.first_latent[self.position] = observation["first_latent"][i]
self.count_seen += 1
self.position = (self.position + 1) % self.args.mem_capacity
# sample a set of transitions from the replay buffer
def sample(self):
if (
self.count_seen < self.args.burn_in
or self.count_seen < self.args.train_batch_size
):
return None
indices = np.random.choice(
min(self.count_seen, self.args.mem_capacity), self.args.train_batch_size
)
data = {
"mask": self.mask[indices],
"mask_n": self.mask_n[indices],
"actions": self.actions[indices],
"rewards": self.rewards[indices],
"score": self.score[indices],
"score_n": self.score_n[indices],
"first_score": self.first_score[indices],
}
if self.args.use_recon:
data["mesh"] = self.mesh[indices]
data["mesh_n"] = self.mesh_n[indices]
if self.args.use_latent:
data["latent"] = self.latent[indices]
data["latent_n"] = self.latent_n[indices]
data["first_latent"] = self.first_latent[indices]
return data
# save the replay buffer to disk
def save(self, directory):
data = {
"mask": self.mask,
"mask_n": self.mask_n,
"actions": self.actions,
"rewards": self.rewards,
"score": self.score,
"first_score": self.first_score,
"position": self.position,
"count_seen": self.count_seen,
}
if self.args.use_recon:
data["mesh"] = self.mesh
data["mesh_n"] = self.mesh_n
if self.args.use_latent:
data["latent"] = self.latent
data["latent_n"] = self.latent_n
data["first_latent"] = self.first_latent
temp_path = directory + "_replay_buffer_temp.pt"
full_path = directory + "_replay_buffer.pt"
torch.save(data, temp_path)
os.rename(temp_path, full_path)
# load the replay buffer from the disk
def load(self, directory):
data = torch.load(directory + "_replay_buffer.pt")
self.mask = data["mask"]
self.mask_n = data["mask_n"]
self.actions = data["actions"]
self.actions = data["actions"]
self.rewards = data["rewards"]
self.score = data["score"]
self.first_score = data["first_score"]
self.position = data["position"]
self.count_seen = data["count_seen"]
if self.args.use_recon:
self.mesh = data["mesh"]
self.mesh_n = data["mesh_n"]
if self.args.use_latent:
self.latent = data["latent"]
self.latent_n = data["latent_n"]
self.first_latent = data["first_latent"]
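# Hedged usage sketch (not part of the original module): drive the buffer with
# a minimal, hypothetical `args` namespace whose fields mirror the attributes
# read in __init__/push above; all values are assumptions.
if __name__ == "__main__":
    from types import SimpleNamespace

    args = SimpleNamespace(
        mem_capacity=100, num_actions=8, use_recon=False, use_latent=False,
        burn_in=4, train_batch_size=4,
    )
    memory = ReplayMemory(args)
    obs = {
        "score": torch.ones(2),
        "first_score": torch.ones(2),
        "mask": torch.zeros(2, args.num_actions),
    }
    next_obs = {"score": torch.zeros(2), "mask": torch.ones(2, args.num_actions)}
    # push a batch of two transitions
    memory.push(torch.tensor([1, 2]), obs, next_obs, torch.tensor([0.5, 0.1]))
    print(memory.sample())  # None until burn_in / train_batch_size transitions are seen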
|
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple
import numpy as np
class HistoryBuffer:
"""
Track a series of scalar values and provide access to smoothed values over a
window or the global average of the series.
"""
def __init__(self, max_length: int = 1000000):
"""
Args:
max_length: maximal number of values that can be stored in the
buffer. When the capacity of the buffer is exhausted, old
values will be removed.
"""
self._max_length: int = max_length
self._data: List[Tuple[float, float]] = [] # (value, iteration) pairs
self._count: int = 0
self._global_avg: float = 0
def update(self, value: float, iteration: float = None):
"""
Add a new scalar value produced at certain iteration. If the length
of the buffer exceeds self._max_length, the oldest element will be
removed from the buffer.
"""
if iteration is None:
iteration = self._count
if len(self._data) == self._max_length:
self._data.pop(0)
self._data.append((value, iteration))
self._count += 1
self._global_avg += (value - self._global_avg) / self._count
def latest(self):
"""
Return the latest scalar value added to the buffer.
"""
return self._data[-1][0]
def median(self, window_size: int):
"""
Return the median of the latest `window_size` values in the buffer.
"""
return np.median([x[0] for x in self._data[-window_size:]])
def avg(self, window_size: int):
"""
Return the mean of the latest `window_size` values in the buffer.
"""
return np.mean([x[0] for x in self._data[-window_size:]])
def global_avg(self):
"""
Return the mean of all the elements in the buffer. Note that this
includes those getting removed due to limited buffer storage.
"""
return self._global_avg
def values(self):
"""
Returns:
list[(number, iteration)]: content of the current buffer.
"""
return self._data
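# Hedged usage sketch with assumed values: track a short loss series and query
# the smoothed statistics exposed above.
if __name__ == "__main__":
    buf = HistoryBuffer(max_length=5)
    for step, loss in enumerate([0.9, 0.8, 0.7, 0.65, 0.6, 0.55]):
        buf.update(loss, iteration=step)
    print(buf.latest())              # 0.55
    print(buf.avg(window_size=3))    # mean over the three most recent values
    print(buf.median(window_size=3))
    print(buf.global_avg())          # ~0.7, includes the value evicted by max_length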
|
'''
Prime numbers are numbers that can only be cleanly divided by themselves and 1.
https://en.wikipedia.org/wiki/Prime_number
You need to write a function that checks whether the number passed into it is a prime number or not.
e.g. 2 is a prime number because it's only divisible by 1 and 2.
But 4 is not a prime number because you can divide it by 1, 2 or 4.
'''
#Write your code below this line 👇
def prime_checker(number):
prime = True
for num in range(2, number):
if number % num == 0:
prime = False
break
    if prime and number > 1:
print("It's a prime number.")
else:
print("It's not a prime number.")
#Write your code above this line 👆
#Do NOT change any of the code below👇
n = int(input("Check this number: "))
prime_checker(number=n) |
from typing import Union
from tps.utils import load_dict, prob2bool
from tps.symbols import punctuation, accent
from tps.modules import Processor
class Replacer(Processor):
def __init__(self, dict_source: Union[str, tuple, list, dict]=None,
name: str="Replacer"):
"""
Base class for replacer-type processors.
:param dict_source: Union[str, tuple, list, dict]
Source of dictionary that contains replacement pairs.
Options:
* str - path to file.
The file extension must explicitly show its format in case of json and yaml files.
In other cases, user must set the format himself (see below).
* tuple, list - (path, format)
path - path to the dictionary file
format - format of the dictionary file (see tps.utils.load_dict function)
* dict - just a dict
"""
super().__init__(None, name)
fmt = None
if isinstance(dict_source, (tuple, list)):
dict_source, fmt = dict_source
self.entries = load_dict(dict_source, fmt)
def process(self, string: str, **kwargs) -> str:
"""
        Splits the passed string into tokens and replaces each one according to the dictionary (if a replacement exists).
        Keep in mind that tokenization is simple here, so it's better to pass a normalized string.
:param string: str
Your text.
:param kwargs:
* mask: Union[bool, float]
Whether to mask each token.
If float, then masking probability will be computed for each token independently.
:return: str
"""
mask = kwargs.get("mask", False)
tokens = self.split_to_tokens(string)
for idx, token in enumerate(tokens):
if token in punctuation:
continue
token = self._process_token(token, mask)
tokens[idx] = token
return self.join_tokens(tokens)
def _process_token(self, token, mask):
return token if prob2bool(mask) else self.entries.get(token, token)
class BlindReplacer(Replacer):
def _process_token(self, token, mask):
        return token if prob2bool(mask) else self.entries.get(token.replace(accent, ""), token)
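# Hedged, standalone sketch of the idea behind Replacer._process_token above:
# look each token up in a replacement dict unless a boolean/probabilistic mask
# says to leave it untouched. It does not use the real tps Processor or
# load_dict machinery; the helper name and sample data are invented.
if __name__ == "__main__":
    import random

    def replace_tokens(tokens, entries, mask=False):
        def keep(m):
            # bool mask: keep everything when True; float mask: keep with probability m
            return m if isinstance(m, bool) else random.random() < m
        return [t if keep(mask) else entries.get(t, t) for t in tokens]

    # -> ['color', 'theater', '!']
    print(replace_tokens(["colour", "theatre", "!"], {"colour": "color", "theatre": "theater"}))
|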
"""This module wraps some state loading, holding, and saving
functionality into python class implementation.
"""
import json
from collections import Counter
from dataclasses import dataclass
from pathlib import Path
@dataclass
class AnonState:
state_path: Path
vf_filename: str = "state_cache.json"
tc_filename: str = "tag_cache.json"
_inited: bool = False
def init_state(self):
self.visited_folders = {}
self.tag_counter = Counter()
self._inited = True
def _assert_inited(self):
if not self._inited:
raise AssertionError(f"Run {self.__repr__()}.init_state() method first")
def load_state(self):
self._assert_inited()
vf_path = self.state_path / self.vf_filename
tc_path = self.state_path / self.tc_filename
if vf_path.exists() and vf_path.is_file():
            with open(vf_path, "r") as fin:
                self.visited_folders = json.load(fin)
        if tc_path.exists() and tc_path.is_file():
            with open(tc_path, "r") as fin:
                self.tag_counter = Counter(json.load(fin))
def save_state(self):
self._assert_inited()
vf_path = self.state_path / self.vf_filename
tc_path = self.state_path / self.tc_filename
        with open(vf_path, "w") as fout:
            json.dump(self.visited_folders, fout)
        with open(tc_path, "w") as fout:
            json.dump(self.tag_counter, fout)
if __name__ == "__main__":
test_state = AnonState(Path.cwd())
test_state.init_state()
test_state.save_state()
test_state.load_state()
print(test_state.visited_folders)
print(test_state.tag_counter)
|
from typing import List
import pandas as pd
from concurrent.futures import ThreadPoolExecutor
import requests
url = "https://dawa.aws.dk/postnumre/reverse"
def get_zipcode(series: pd.Series, client: requests.Session) -> str:
resp = client.get(url, params={"x": series.longitude, "y": series.latitude})
if resp.status_code == 200:
return resp.json()["nr"]
resp.raise_for_status()
def get_all_zipcodes(lats_longs: pd.DataFrame, n_workers: int = 10) -> List[str]:
session = requests.Session()
with ThreadPoolExecutor(max_workers=n_workers) as executor:
futures = [executor.submit(get_zipcode, lat_long, session) for _, lat_long in
lats_longs.iterrows()]
return [future.result() for future in futures]
def get_missing_zip_codes(df: pd.DataFrame) -> pd.DataFrame:
missing_lat_longs = df.loc[df.zipcode.isna(), ["latitude", "longitude"]]
df.loc[df.zipcode.isna(), "zipcode"] = get_all_zipcodes(missing_lat_longs)
return df
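# Hedged usage sketch: fill missing zipcodes on a tiny frame. The coordinates
# are made-up values near Copenhagen, and running this performs real HTTP
# requests against the DAWA endpoint above.
if __name__ == "__main__":
    frame = pd.DataFrame({
        "latitude": [55.676, 55.673],
        "longitude": [12.568, 12.561],
        "zipcode": [None, "1550"],
    })
    print(get_missing_zip_codes(frame))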
|
#!/usr/bin/python
# Copyright (C) 2015, WSID
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
from gi.repository import GObject
from gi.repository import CrankBase
class TestRange(unittest.TestCase):
def test_uint_equal (self):
a = CrankBase.RanUint.init (3, 5)
b = CrankBase.RanUint.init (0, 4)
c = CrankBase.RanUint.init (3, 5)
assert (not a.equal (b))
assert (a.equal (c))
def test_uint_hash (self):
a = CrankBase.RanUint.init (3, 5)
b = CrankBase.RanUint.init (0, 4)
c = CrankBase.RanUint.init (3, 5)
self.assertNotEqual (a.hash (), b.hash ())
        self.assertEqual (a.hash (), c.hash ())
def test_uint_to_string (self):
a = CrankBase.RanUint.init (3, 5)
self.assertEqual (a.to_string (), "[3, 5)")
def test_uint_is_empty (self):
a = CrankBase.RanUint.init (2, 3)
assert (not a.is_empty())
a = CrankBase.RanUint.init (4, 4)
assert (a.is_empty())
def test_uint_is_unit (self):
a = CrankBase.RanUint.init (2, 3)
assert (a.is_unit())
a = CrankBase.RanUint.init (4, 4)
assert (not a.is_unit())
def test_uint_get_length (self):
a = CrankBase.RanUint.init (7, 12)
self.assertEqual (a.get_length (), 5)
def test_uint_contains (self):
a = CrankBase.RanUint.init (10, 23)
assert (a.contains (15))
assert (not a.contains (55))
def test_uint_get (self):
a = CrankBase.RanUint.init (10, 110)
self.assertEqual (a.get (0.25), 35)
def test_uint_index_of (self):
a = CrankBase.RanUint.init (10, 110)
self.assertEqual (a.index_of (85), 0.75)
def test_uint_clamp (self):
a = CrankBase.RanUint.init (10, 100)
self.assertEqual (a.clamp (4), 10)
self.assertEqual (a.clamp (44), 44)
self.assertEqual (a.clamp (200), 100)
def test_uint_intersection (self):
a = CrankBase.RanUint.init (14, 45)
b = CrankBase.RanUint.init (55, 74)
c = CrankBase.RanUint.init (30, 60)
(res, d) = a.intersection (b)
assert (not res)
(res, e) = a.intersection (c)
assert (res)
self.assertEqual (e.start, 30)
self.assertEqual (e.end, 45)
if __name__ == '__main__':
unittest.main ()
|
__author__ = 'cjm'
import logging
import requests
# TODO: inherit a generic ResultSet model
class Concept:
"""
A SG concept (payload of search)
"""
def __init__(self, obj={}):
self.id = obj['curie']
self.deprecated = obj['deprecated']
self.labels = obj['labels']
self.categories = obj['categories']
self.synonyms = obj['synonyms']
self.acronyms = obj['acronyms']
self.abbreviations = obj['abbreviations']
self.definitions = obj['definitions']
def __str__(self):
return self.id+' "'+str(str(self.labels))+'"'
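# Hedged usage sketch: the constructor above expects a dict with exactly these
# keys (mirroring the attribute assignments); the values here are invented.
if __name__ == "__main__":
    example = {
        "curie": "EX:0000001", "deprecated": False, "labels": ["Example concept"],
        "categories": [], "synonyms": [], "acronyms": [],
        "abbreviations": [], "definitions": [],
    }
    print(Concept(example))  # -> EX:0000001 "['Example concept']"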
|
import os
import argparse
import pandas as pd
from typing import Optional
from transformers import AutoTokenizer, AutoModel, T5ForConditionalGeneration
from nltk.translate.chrf_score import corpus_chrf
from tqdm.auto import tqdm
from ..utils import load_model
from .metrics import evaluate_style_transfer
def paraphrase(text, model, tokenizer, n=None, max_length='auto', temperature=0.0, beams=3):
texts = [text] if isinstance(text, str) else text
inputs = tokenizer(texts, return_tensors='pt', padding=True)['input_ids'].to(model.device)
if max_length == 'auto':
max_length = int(inputs.shape[1] * 1.2) + 10
result = model.generate(
inputs,
num_return_sequences=n or 1,
do_sample=False,
temperature=temperature,
repetition_penalty=3.0,
max_length=max_length,
bad_words_ids=[[2]], # unk
num_beams=beams,
)
texts = [tokenizer.decode(r, skip_special_tokens=True) for r in result]
if not n and isinstance(text, str):
return texts[0]
return texts
def inference(
data_path: str,
model_name: str,
tokenizer_model_name: str,
result_path: str,
device: str = '0',
batch_size: int = 64
):
os.environ['CUDA_VISIBLE_DEVICES'] = device
data = pd.read_csv(data_path, sep='\t')
toxic = data['toxic_comment'].tolist()
tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name).cuda()
para_results = []
    # problematic_batch = []  # if something goes wrong you can track such batches
for i in tqdm(range(0, len(toxic), batch_size)):
batch = [sentence for sentence in toxic[i:i + batch_size]]
try:
para_results.extend(paraphrase(batch, model, tokenizer, temperature=0.0))
except Exception as e:
print(i)
            para_results.extend(toxic[i:i + batch_size])
with open(result_path, 'w') as file:
file.writelines([sentence + '\n' for sentence in para_results])
def evaluate_and_dump(
gold_label_path: str,
predicts_path: str,
name: str,
output_path: Optional[str] = None,
batch_size: int = 16,
device: Optional[str] = None,
target_metric='accuracy',
):
use_cuda = True if device is not None else False
if device is not None:
os.environ['CUDA_VISIBLE_DEVICES'] = device
print('Load data...')
df = pd.read_csv(gold_label_path, sep='\t').fillna('')
toxic_inputs = df['toxic_comment'].tolist()
with open(predicts_path, 'r') as f:
rewritten = f.readlines()
rewritten = [sentence.strip() for sentence in rewritten]
print('Load models...')
style_model, style_tokenizer = load_model('SkolkovoInstitute/russian_toxicity_classifier', use_cuda=use_cuda)
meaning_model, meaning_tokenizer = load_model('cointegrated/LaBSE-en-ru', use_cuda=use_cuda, model_class=AutoModel)
    fluency_model, fluency_tokenizer = load_model('SkolkovoInstitute/rubert-base-corruption-detector', use_cuda=use_cuda)
results = evaluate_style_transfer(
original_texts=toxic_inputs,
rewritten_texts=rewritten,
style_model=style_model,
style_tokenizer=style_tokenizer,
meaning_model=meaning_model,
meaning_tokenizer=meaning_tokenizer,
cola_model=fluency_model,
        cola_tokenizer=fluency_tokenizer,
style_target_label=0,
batch_size=batch_size,
aggregate=True
)
neutral_references = []
for index, row in df.iterrows():
neutral_references.append([row['neutral_comment1'], row['neutral_comment2'], row['neutral_comment3']])
results['chrf'] = corpus_chrf(neutral_references, rewritten)
results[f'{target_metric}_chrf'] = results[target_metric] * results['chrf']
if output_path is not None:
if not os.path.exists(output_path):
with open(output_path, 'w') as f:
f.writelines(f'| Model | ACC | SIM | FL | J | ChrF1 | {target_metric}*ChrF1 |\n')
f.writelines('| ----- | --- | --- | -- | - | ----- | --------- |\n')
with open(output_path, 'a') as res_file:
res_file.writelines(
f"{name}|{results['accuracy']:.4f}|{results['similarity']:.4f}|{results['fluency']:.4f}|"
f"{results['joint']:.4f}|{results['chrf']:.4f}|{results[f'{target_metric}_chrf']:.4f}|\n"
)
return results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', "--inputs", help="path to test sentences", required=True)
parser.add_argument('-p', "--preds", help="path to predictions of a model", required=True)
    parser.add_argument('-r', "--output_path", help="path to result .md file", default=None, type=str)
parser.add_argument('-n', "--name", help="model name", default='test', type=str)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--device", default=None, type=Optional[str])
args = parser.parse_args()
evaluate_and_dump(
gold_label_path=args.inputs,
predicts_path=args.preds,
name=args.name,
output_path=args.output_path,
batch_size=args.batch_size,
device=args.device,
)
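# Hedged CLI sketch using the arguments defined above; the script name, paths
# and model name are placeholders, not files that ship with the project:
#   python <this_script>.py -i test.tsv -p predictions.txt \
#       -r results.md -n my_model --batch_size 32 --device 0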
|
#!/usr/bin/python
#
# test decoder error measures
#
#
import os
import sys
#import random as random
import math as m
import numpy as np
import unittest
import mock
import tests.common as tc
import abt_constants
from abtclass import *
import hmm_bt as hbt # this test did it right(!)
# b3 class modified by BH, local version in current dir
import b3 as b3 # behavior trees
import random
class Test_DecodeEval(unittest.TestCase):
def test_DE01(self):
assertct = 0
#Build a model
tmod = model(5) # with 5 states
tmod.names = ['a','b','c','d','e']
tmod.statenos = {'a':1, 'b': 2, 'c':3, 'd':4, 'e':5}
## Generate some state seqs with and without errors
#
#
random.seed(430298219) # we want same seqs every time so we can assert for right answers
Lmax = 10
Nseqs = 10
lengths = []
ls3 = []
seqs1 = []
seqs2 = []
seqs3 = []
for i in range(Nseqs): # 10 test sequences
#minimum of 5 in sequence
            l1 = Lmax // 2 + int(random.random() * (Lmax / 2) + 0.5)
lengths.append(l1)
ls3.append(l1)
#print 'length: ', l1, '|',
for i in range(l1): # generate a random sequence of l1 names
s = tmod.statenos[random.choice(tmod.names)]
seqs1.append(s)
seqs2.append(s)
seqs3.append(s)
        assert len(seqs1) == sum(lengths), 'Something is wrong with sequence setup'
assertct += 1
####################################################################################
# First no deletions or insertions!!!! just changes
# Add some errors to FIRST THREE Seqs
# seqsx[0] has 1 error
seqs2[0] = 1
# seqsx[1] has 2 errors
seqs2[7] = 2
seqs2[8] = 3
# seqsx[2] has 3 errors
seqs2[16] = 1
seqs2[17] = 2
seqs2[18] = 3
#s1 = np.matrix(seqs1) # needs to be list
s2 = np.array(seqs2).reshape((sum(lengths))) # needs to be numpy matrix of 1D
#print 'Setup: ',s2.shape
[avgd, maxd, count] = hbt.Veterbi_Eval(s2, seqs1, tmod.names, lengths, tmod.statenos)
# avgd: average sed per symbol
# maxd: max sed for any seq
# count: total sed for all seqs
        if False:
            print('avgd: ', '{:6.3f}'.format(avgd))
            print('maxd: ', '{:6.3f}'.format(maxd))
            print('count ', '{:6d}'.format(count))
test_eps = 0.001
fs = 'test_decode_eval: string change test: assertion failure'
tc.assert_feq(avgd, 0.099, fs, test_eps)
assertct += 1
tc.assert_feq(maxd,0.600,fs,test_eps)
assertct += 1
assert count == 6, fs
assertct += 1
        print('\n')
        print(' test_decode_eval.py ')
        print('\n Passed all', assertct, ' assertions ')
#i = 0
#for l in ls3:
#for j in range(l):
#print '[',str(i)+','+str(j)+']'+str(seqs1[i+j])+'/'+str(seqs3[i+j]) ,
#print ''
#i += j+1
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
"""
Created by howie.hu at 2021/4/25.
Description: model implementation
Changelog: all notable changes to this file will be documented
"""
from keras import layers
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.models import Sequential
from src.classifier.model_lib.char_cnn.keras_utils import FitCallback
class CharCNN:
def __init__(
self,
*,
conv_layers: list = None,
fully_layers: list = None,
input_size: int = 1014,
alphabet_size: int = 69,
num_of_classes: int = 4,
dropout_p: float = 0.5,
threshold: float = 1e-6,
loss="categorical_crossentropy",
optimizer="adam",
):
"""
        A character-level convolutional neural network built on Keras
        :param conv_layers: convolutional layer configuration
        :param fully_layers: fully connected layer configuration
        :param input_size: input length, 1014 in the paper
        :param alphabet_size: size of the alphabet
        :param num_of_classes: number of classes
        :param dropout_p: dropout probability
        :param threshold: threshold value for ThresholdedReLU
        :param loss: loss function
        :param optimizer: optimizer
"""
        # Convolutional layer definitions
if conv_layers is None:
self.conv_layers = [
[256, 7, 3],
[256, 7, 3],
[256, 3, None],
[256, 3, None],
[256, 3, None],
[256, 3, 3],
]
else:
self.conv_layers = conv_layers
        # Fully connected layers
if fully_layers is None:
self.fully_layers = [1024, 1024]
else:
self.fully_layers = fully_layers
self.alphabet_size = alphabet_size
self.input_size = input_size
self.num_of_classes = num_of_classes
self.dropout_p = dropout_p
self.threshold = threshold
self.loss = loss
self.optimizer = optimizer
self.shape = (input_size, alphabet_size, 1)
self.model = self._build_model()
def _build_model(self):
"""
        The model structure from the paper
:return:
"""
model = Sequential()
        # Embedding layer
model.add(
layers.Embedding(self.alphabet_size + 1, 128, input_length=self.input_size)
)
        # Convolutional layers
for cl in self.conv_layers:
model.add(layers.Conv1D(filters=cl[0], kernel_size=cl[1]))
model.add(layers.ThresholdedReLU(self.threshold))
if cl[-1] is not None:
model.add(layers.MaxPool1D(pool_size=cl[-1]))
model.add(layers.Flatten())
        # Fully connected layers
for fl in self.fully_layers:
# model.add(layers.Dense(fl, activity_regularizer=regularizers.l2(0.01)))
model.add(layers.Dense(fl))
model.add(layers.ThresholdedReLU(self.threshold))
model.add(layers.Dropout(self.dropout_p))
        # Output layer
model.add(layers.Dense(self.num_of_classes, activation="softmax"))
model.compile(optimizer=self.optimizer, loss=self.loss, metrics=["accuracy"])
print("CharCNN model built success")
model.summary()
return model
def train(
self,
*,
training_inputs,
training_labels,
validation_inputs,
validation_labels,
epochs,
batch_size,
model_file_path,
verbose=2,
checkpoint_every=100,
evaluate_every=100,
):
"""
        Train the model.
        :param training_inputs: training examples
        :param training_labels: training labels
        :param validation_inputs: validation examples
        :param validation_labels: validation labels
        :param epochs: number of epochs
        :param batch_size: batch size
        :param model_file_path: path where the model is saved
        :param verbose: Integer. 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch.
        :param checkpoint_every: run a checkpoint every this many steps
        :param evaluate_every: run an evaluation every this many steps
:return:
"""
tensorboard = TensorBoard(
log_dir="./logs",
histogram_freq=checkpoint_every,
batch_size=batch_size,
write_graph=True,
write_grads=True,
write_images=True,
embeddings_freq=0,
embeddings_layer_names=None,
)
fit_callback = FitCallback(
test_data=(validation_inputs, validation_labels),
evaluate_every=evaluate_every,
)
checkpoint = ModelCheckpoint(
model_file_path,
monitor="val_loss",
verbose=1,
save_best_only=True,
mode="min",
)
        # Start training
print("Training Started ===>")
self.model.fit(
training_inputs,
training_labels,
validation_data=(validation_inputs, validation_labels),
epochs=epochs,
batch_size=batch_size,
verbose=verbose,
callbacks=[tensorboard, fit_callback, checkpoint],
)
if __name__ == "__main__":
char_cnn_model = CharCNN()
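    # Hedged sketch (assumed alphabet, not the one from the paper): encode a
    # string into the fixed-length character-index sequence this model consumes.
    def encode_text(text, alphabet="abcdefghijklmnopqrstuvwxyz0123456789 ", input_size=1014):
        index = {ch: i + 1 for i, ch in enumerate(alphabet)}  # 0 is reserved for padding
        ids = [index.get(ch, 0) for ch in text.lower()[:input_size]]
        return ids + [0] * (input_size - len(ids))

    print(encode_text("hello world")[:12])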
|
""" Tests for utils. """
import collections
from datetime import datetime, timedelta
from unittest import mock
from unittest.mock import Mock, patch
from uuid import uuid4
import ddt
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from opaque_keys.edx.locator import CourseLocator, LibraryLocator
from path import Path as path
from pytz import UTC
from user_tasks.models import UserTaskArtifact, UserTaskStatus
from cms.djangoapps.contentstore import utils
from cms.djangoapps.contentstore.tasks import ALL_ALLOWED_XBLOCKS, validate_course_olx
from cms.djangoapps.contentstore.tests.utils import TEST_DATA_DIR, CourseTestCase
from openedx.core.djangoapps.site_configuration.tests.test_util import with_site_configuration_context
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
class LMSLinksTestCase(TestCase):
""" Tests for LMS links. """
def lms_link_test(self):
""" Tests get_lms_link_for_item. """
course_key = CourseLocator('mitX', '101', 'test')
location = course_key.make_usage_key('vertical', 'contacting_us')
link = utils.get_lms_link_for_item(location, False)
self.assertEqual(link, "//localhost:8000/courses/course-v1:mitX+101+test/jump_to/block-v1:mitX+101+test+type"
"@vertical+block@contacting_us")
# test preview
link = utils.get_lms_link_for_item(location, True)
self.assertEqual(
link,
"//preview.localhost/courses/course-v1:mitX+101+test/jump_to/block-v1:mitX+101+test+type@vertical+block"
"@contacting_us "
)
        # now test with the course's location
location = course_key.make_usage_key('course', 'test')
link = utils.get_lms_link_for_item(location)
self.assertEqual(link, "//localhost:8000/courses/course-v1:mitX+101+test/jump_to/block-v1:mitX+101+test+type"
"@course+block@test")
def lms_link_for_certificate_web_view_test(self):
""" Tests get_lms_link_for_certificate_web_view. """
course_key = CourseLocator('mitX', '101', 'test')
dummy_user = ModuleStoreEnum.UserID.test
mode = 'professional'
self.assertEqual(
utils.get_lms_link_for_certificate_web_view(course_key, mode),
"//localhost:8000/certificates/course/{course_key}?preview={mode}".format(
course_key=course_key,
mode=mode
)
)
with with_site_configuration_context(configuration={"course_org_filter": "mitX", "LMS_BASE": "dummyhost:8000"}):
self.assertEqual(
utils.get_lms_link_for_certificate_web_view(course_key, mode),
"//dummyhost:8000/certificates/course/{course_key}?preview={mode}".format(
course_key=course_key,
mode=mode
)
)
class ExtraPanelTabTestCase(TestCase):
""" Tests adding and removing extra course tabs. """
def get_tab_type_dicts(self, tab_types):
""" Returns an array of tab dictionaries. """
if tab_types:
return [{'tab_type': tab_type} for tab_type in tab_types.split(',')]
else:
return []
def get_course_with_tabs(self, tabs=None):
""" Returns a mock course object with a tabs attribute. """
if tabs is None:
tabs = []
course = collections.namedtuple('MockCourse', ['tabs'])
if isinstance(tabs, str):
course.tabs = self.get_tab_type_dicts(tabs)
else:
course.tabs = tabs
return course
class XBlockVisibilityTestCase(SharedModuleStoreTestCase):
"""Tests for xblock visibility for students."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.dummy_user = ModuleStoreEnum.UserID.test
cls.past = datetime(1970, 1, 1, tzinfo=UTC)
cls.future = datetime.now(UTC) + timedelta(days=1)
cls.course = CourseFactory.create()
def test_private_unreleased_xblock(self):
"""Verifies that a private unreleased xblock is not visible"""
self._test_visible_to_students(False, 'private_unreleased', self.future)
def test_private_released_xblock(self):
"""Verifies that a private released xblock is not visible"""
self._test_visible_to_students(False, 'private_released', self.past)
def test_public_unreleased_xblock(self):
"""Verifies that a public (published) unreleased xblock is not visible"""
self._test_visible_to_students(False, 'public_unreleased', self.future, publish=True)
def test_public_released_xblock(self):
"""Verifies that public (published) released xblock is visible if staff lock is not enabled."""
self._test_visible_to_students(True, 'public_released', self.past, publish=True)
def test_private_no_start_xblock(self):
"""Verifies that a private xblock with no start date is not visible"""
self._test_visible_to_students(False, 'private_no_start', None)
def test_public_no_start_xblock(self):
"""Verifies that a public (published) xblock with no start date is visible unless staff lock is enabled"""
self._test_visible_to_students(True, 'public_no_start', None, publish=True)
def test_draft_released_xblock(self):
"""Verifies that a xblock with an unreleased draft and a released published version is visible"""
vertical = self._create_xblock_with_start_date('draft_released', self.past, publish=True)
# Create an unreleased draft version of the xblock
vertical.start = self.future
modulestore().update_item(vertical, self.dummy_user)
self.assertTrue(utils.is_currently_visible_to_students(vertical))
def _test_visible_to_students(self, expected_visible_without_lock, name, start_date, publish=False):
"""
Helper method that checks that is_xblock_visible_to_students returns the correct value both
with and without visible_to_staff_only set.
"""
no_staff_lock = self._create_xblock_with_start_date(name, start_date, publish, visible_to_staff_only=False)
self.assertEqual(expected_visible_without_lock, utils.is_currently_visible_to_students(no_staff_lock))
# any xblock with visible_to_staff_only set to True should not be visible to students.
staff_lock = self._create_xblock_with_start_date(
name + "_locked", start_date, publish, visible_to_staff_only=True
)
self.assertFalse(utils.is_currently_visible_to_students(staff_lock))
def _create_xblock_with_start_date(self, name, start_date, publish=False, visible_to_staff_only=False):
"""Helper to create an xblock with a start date, optionally publishing it"""
vertical = modulestore().create_item(
self.dummy_user, self.course.location.course_key, 'vertical', name,
fields={'start': start_date, 'visible_to_staff_only': visible_to_staff_only}
)
if publish:
modulestore().publish(vertical.location, self.dummy_user)
return vertical
class ReleaseDateSourceTest(CourseTestCase):
"""Tests for finding the source of an xblock's release date."""
def setUp(self):
super().setUp()
self.chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
self.sequential = ItemFactory.create(category='sequential', parent_location=self.chapter.location)
self.vertical = ItemFactory.create(category='vertical', parent_location=self.sequential.location)
# Read again so that children lists are accurate
self.chapter = self.store.get_item(self.chapter.location)
self.sequential = self.store.get_item(self.sequential.location)
self.vertical = self.store.get_item(self.vertical.location)
self.date_one = datetime(1980, 1, 1, tzinfo=UTC)
self.date_two = datetime(2020, 1, 1, tzinfo=UTC)
def _update_release_dates(self, chapter_start, sequential_start, vertical_start):
"""Sets the release dates of the chapter, sequential, and vertical"""
self.chapter.start = chapter_start
self.chapter = self.store.update_item(self.chapter, ModuleStoreEnum.UserID.test)
self.sequential.start = sequential_start
self.sequential = self.store.update_item(self.sequential, ModuleStoreEnum.UserID.test)
self.vertical.start = vertical_start
self.vertical = self.store.update_item(self.vertical, ModuleStoreEnum.UserID.test)
def _verify_release_date_source(self, item, expected_source):
"""Helper to verify that the release date source of a given item matches the expected source"""
source = utils.find_release_date_source(item)
self.assertEqual(source.location, expected_source.location)
self.assertEqual(source.start, expected_source.start)
def test_chapter_source_for_vertical(self):
"""Tests a vertical's release date being set by its chapter"""
self._update_release_dates(self.date_one, self.date_one, self.date_one)
self._verify_release_date_source(self.vertical, self.chapter)
def test_sequential_source_for_vertical(self):
"""Tests a vertical's release date being set by its sequential"""
self._update_release_dates(self.date_one, self.date_two, self.date_two)
self._verify_release_date_source(self.vertical, self.sequential)
def test_chapter_source_for_sequential(self):
"""Tests a sequential's release date being set by its chapter"""
self._update_release_dates(self.date_one, self.date_one, self.date_one)
self._verify_release_date_source(self.sequential, self.chapter)
def test_sequential_source_for_sequential(self):
"""Tests a sequential's release date being set by itself"""
self._update_release_dates(self.date_one, self.date_two, self.date_two)
self._verify_release_date_source(self.sequential, self.sequential)
class StaffLockTest(CourseTestCase):
"""Base class for testing staff lock functions."""
def setUp(self):
super().setUp()
self.chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
self.sequential = ItemFactory.create(category='sequential', parent_location=self.chapter.location)
self.vertical = ItemFactory.create(category='vertical', parent_location=self.sequential.location)
self.orphan = ItemFactory.create(category='vertical', parent_location=self.sequential.location)
# Read again so that children lists are accurate
self.chapter = self.store.get_item(self.chapter.location)
self.sequential = self.store.get_item(self.sequential.location)
self.vertical = self.store.get_item(self.vertical.location)
# Orphan the orphaned xblock
self.sequential.children = [self.vertical.location]
self.sequential = self.store.update_item(self.sequential, ModuleStoreEnum.UserID.test)
def _set_staff_lock(self, xblock, is_locked):
"""If is_locked is True, xblock is staff locked. Otherwise, the xblock staff lock field is removed."""
field = xblock.fields['visible_to_staff_only']
if is_locked:
field.write_to(xblock, True)
else:
field.delete_from(xblock)
return self.store.update_item(xblock, ModuleStoreEnum.UserID.test)
def _update_staff_locks(self, chapter_locked, sequential_locked, vertical_locked):
"""
Sets the staff lock on the chapter, sequential, and vertical
If the corresponding argument is False, then the field is deleted from the xblock
"""
self.chapter = self._set_staff_lock(self.chapter, chapter_locked)
self.sequential = self._set_staff_lock(self.sequential, sequential_locked)
self.vertical = self._set_staff_lock(self.vertical, vertical_locked)
class StaffLockSourceTest(StaffLockTest):
"""Tests for finding the source of an xblock's staff lock."""
def _verify_staff_lock_source(self, item, expected_source):
"""Helper to verify that the staff lock source of a given item matches the expected source"""
source = utils.find_staff_lock_source(item)
self.assertEqual(source.location, expected_source.location)
self.assertTrue(source.visible_to_staff_only)
def test_chapter_source_for_vertical(self):
"""Tests a vertical's staff lock being set by its chapter"""
self._update_staff_locks(True, False, False)
self._verify_staff_lock_source(self.vertical, self.chapter)
def test_sequential_source_for_vertical(self):
"""Tests a vertical's staff lock being set by its sequential"""
self._update_staff_locks(True, True, False)
self._verify_staff_lock_source(self.vertical, self.sequential)
self._update_staff_locks(False, True, False)
self._verify_staff_lock_source(self.vertical, self.sequential)
def test_vertical_source_for_vertical(self):
"""Tests a vertical's staff lock being set by itself"""
self._update_staff_locks(True, True, True)
self._verify_staff_lock_source(self.vertical, self.vertical)
self._update_staff_locks(False, True, True)
self._verify_staff_lock_source(self.vertical, self.vertical)
self._update_staff_locks(False, False, True)
self._verify_staff_lock_source(self.vertical, self.vertical)
def test_orphan_has_no_source(self):
"""Tests that a orphaned xblock has no staff lock source"""
self.assertIsNone(utils.find_staff_lock_source(self.orphan))
def test_no_source_for_vertical(self):
"""Tests a vertical with no staff lock set anywhere"""
self._update_staff_locks(False, False, False)
self.assertIsNone(utils.find_staff_lock_source(self.vertical))
class InheritedStaffLockTest(StaffLockTest):
"""Tests for determining if an xblock inherits a staff lock."""
def test_no_inheritance(self):
"""Tests that a locked or unlocked vertical with no locked ancestors does not have an inherited lock"""
self._update_staff_locks(False, False, False)
self.assertFalse(utils.ancestor_has_staff_lock(self.vertical))
self._update_staff_locks(False, False, True)
self.assertFalse(utils.ancestor_has_staff_lock(self.vertical))
def test_inheritance_in_locked_section(self):
"""Tests that a locked or unlocked vertical in a locked section has an inherited lock"""
self._update_staff_locks(True, False, False)
self.assertTrue(utils.ancestor_has_staff_lock(self.vertical))
self._update_staff_locks(True, False, True)
self.assertTrue(utils.ancestor_has_staff_lock(self.vertical))
def test_inheritance_in_locked_subsection(self):
"""Tests that a locked or unlocked vertical in a locked subsection has an inherited lock"""
self._update_staff_locks(False, True, False)
self.assertTrue(utils.ancestor_has_staff_lock(self.vertical))
self._update_staff_locks(False, True, True)
self.assertTrue(utils.ancestor_has_staff_lock(self.vertical))
def test_no_inheritance_for_orphan(self):
"""Tests that an orphaned xblock does not inherit staff lock"""
self.assertFalse(utils.ancestor_has_staff_lock(self.orphan))
class GroupVisibilityTest(CourseTestCase):
"""
Test content group access rules.
"""
def setUp(self):
super().setUp()
chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
vertical = ItemFactory.create(category='vertical', parent_location=sequential.location)
html = ItemFactory.create(category='html', parent_location=vertical.location)
problem = ItemFactory.create(
category='problem', parent_location=vertical.location, data="<problem></problem>"
)
self.sequential = self.store.get_item(sequential.location)
self.vertical = self.store.get_item(vertical.location)
self.html = self.store.get_item(html.location)
self.problem = self.store.get_item(problem.location)
# Add partitions to the course
self.course.user_partitions = [
UserPartition(
id=0,
name="Partition 0",
description="Partition 0",
scheme=UserPartition.get_scheme("random"),
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
UserPartition(
id=1,
name="Partition 1",
description="Partition 1",
scheme=UserPartition.get_scheme("random"),
groups=[
Group(id=0, name="Group C"),
Group(id=1, name="Group D"),
],
),
UserPartition(
id=2,
name="Partition 2",
description="Partition 2",
scheme=UserPartition.get_scheme("random"),
groups=[
Group(id=0, name="Group E"),
Group(id=1, name="Group F"),
Group(id=2, name="Group G"),
Group(id=3, name="Group H"),
],
),
]
self.course = self.store.update_item(self.course, ModuleStoreEnum.UserID.test)
def set_group_access(self, xblock, value):
""" Sets group_access to specified value and calls update_item to persist the change. """
xblock.group_access = value
self.store.update_item(xblock, self.user.id)
def test_no_visibility_set(self):
""" Tests when group_access has not been set on anything. """
def verify_all_components_visible_to_all():
""" Verifies when group_access has not been set on anything. """
for item in (self.sequential, self.vertical, self.html, self.problem):
self.assertFalse(utils.has_children_visible_to_specific_partition_groups(item))
self.assertFalse(utils.is_visible_to_specific_partition_groups(item))
verify_all_components_visible_to_all()
# Test with group_access set to Falsey values.
self.set_group_access(self.vertical, {1: []})
self.set_group_access(self.html, {2: None})
verify_all_components_visible_to_all()
def test_sequential_and_problem_have_group_access(self):
""" Tests when group_access is set on a few different components. """
self.set_group_access(self.sequential, {1: [0]})
# This is a no-op.
self.set_group_access(self.vertical, {1: []})
self.set_group_access(self.problem, {2: [3, 4]})
# Note that "has_children_visible_to_specific_partition_groups" only checks immediate children.
self.assertFalse(utils.has_children_visible_to_specific_partition_groups(self.sequential))
self.assertTrue(utils.has_children_visible_to_specific_partition_groups(self.vertical))
self.assertFalse(utils.has_children_visible_to_specific_partition_groups(self.html))
self.assertFalse(utils.has_children_visible_to_specific_partition_groups(self.problem))
self.assertTrue(utils.is_visible_to_specific_partition_groups(self.sequential))
self.assertFalse(utils.is_visible_to_specific_partition_groups(self.vertical))
self.assertFalse(utils.is_visible_to_specific_partition_groups(self.html))
self.assertTrue(utils.is_visible_to_specific_partition_groups(self.problem))
class GetUserPartitionInfoTest(ModuleStoreTestCase):
"""
Tests for utility function that retrieves user partition info
and formats it for consumption by the editing UI.
"""
def setUp(self):
"""Create a dummy course. """
super().setUp()
self.course = CourseFactory()
self.block = ItemFactory.create(category="problem", parent_location=self.course.location)
# Set up some default partitions
self._set_partitions([
UserPartition(
id=0,
name="Cohort user partition",
scheme=UserPartition.get_scheme("cohort"),
description="Cohorted user partition",
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
UserPartition(
id=1,
name="Random user partition",
scheme=UserPartition.get_scheme("random"),
description="Random user partition",
groups=[
Group(id=0, name="Group C"),
],
),
])
def test_retrieves_partition_info_with_selected_groups(self):
# Initially, no group access is set on the block, so no groups should
# be marked as selected.
expected = [
{
"id": 0,
"name": "Cohort user partition",
"scheme": "cohort",
"groups": [
{
"id": 0,
"name": "Group A",
"selected": False,
"deleted": False,
},
{
"id": 1,
"name": "Group B",
"selected": False,
"deleted": False,
},
]
},
{
"id": 1,
"name": "Random user partition",
"scheme": "random",
"groups": [
{
"id": 0,
"name": "Group C",
"selected": False,
"deleted": False,
},
]
}
]
self.assertEqual(self._get_partition_info(schemes=["cohort", "random"]), expected)
# Update group access and expect that now one group is marked as selected.
self._set_group_access({0: [1]})
expected[0]["groups"][1]["selected"] = True
self.assertEqual(self._get_partition_info(schemes=["cohort", "random"]), expected)
def test_deleted_groups(self):
# Select a group that is not defined in the partition
self._set_group_access({0: [3]})
# Expect that the group appears as selected but is marked as deleted
partitions = self._get_partition_info()
groups = partitions[0]["groups"]
self.assertEqual(len(groups), 3)
self.assertEqual(groups[2], {
"id": 3,
"name": "Deleted Group",
"selected": True,
"deleted": True
})
def test_singular_deleted_group(self):
"""
Verify that a partition with only one deleted group is
shown in the partition info with the group marked as deleted
"""
self._set_partitions([
UserPartition(
id=0,
name="Cohort user partition",
scheme=UserPartition.get_scheme("cohort"),
description="Cohorted user partition",
groups=[],
),
])
self._set_group_access({0: [1]})
partitions = self._get_partition_info()
groups = partitions[0]["groups"]
self.assertEqual(len(groups), 1)
self.assertEqual(groups[0], {
"id": 1,
"name": "Deleted Group",
"selected": True,
"deleted": True,
})
def test_filter_by_partition_scheme(self):
partitions = self._get_partition_info(schemes=["random"])
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions[0]["scheme"], "random")
def test_exclude_inactive_partitions(self):
# Include an inactive verification scheme
self._set_partitions([
UserPartition(
id=0,
name="Cohort user partition",
scheme=UserPartition.get_scheme("cohort"),
description="Cohorted user partition",
groups=[
Group(id=0, name="Group A"),
Group(id=1, name="Group B"),
],
),
UserPartition(
id=1,
name="Completely random user partition",
scheme=UserPartition.get_scheme("random"),
description="Random user partition",
groups=[
Group(id=0, name="Group C"),
],
active=False,
),
])
# Expect that the inactive scheme is excluded from the results
partitions = self._get_partition_info(schemes=["cohort", "verification"])
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions[0]["scheme"], "cohort")
def test_exclude_partitions_with_no_groups(self):
# The cohort partition has no groups defined
self._set_partitions([
UserPartition(
id=0,
name="Cohort user partition",
scheme=UserPartition.get_scheme("cohort"),
description="Cohorted user partition",
groups=[],
),
UserPartition(
id=1,
name="Completely random user partition",
scheme=UserPartition.get_scheme("random"),
description="Random user partition",
groups=[
Group(id=0, name="Group C"),
],
),
])
# Expect that the partition with no groups is excluded from the results
partitions = self._get_partition_info(schemes=["cohort", "random"])
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions[0]["scheme"], "random")
def _set_partitions(self, partitions):
"""Set the user partitions of the course descriptor. """
self.course.user_partitions = partitions
self.course = self.store.update_item(self.course, ModuleStoreEnum.UserID.test)
def _set_group_access(self, group_access):
"""Set group access of the block. """
self.block.group_access = group_access
self.block = self.store.update_item(self.block, ModuleStoreEnum.UserID.test)
def _get_partition_info(self, schemes=None):
"""Retrieve partition info and selected groups. """
return utils.get_user_partition_info(self.block, schemes=schemes)
@patch.dict(settings.FEATURES, ENABLE_COURSE_OLX_VALIDATION=True)
@mock.patch('olxcleaner.validate')
@ddt.ddt
class ValidateCourseOlxTests(CourseTestCase):
"""Tests for olx validation"""
def setUp(self):
super().setUp()
self.LOGGER = 'cms.djangoapps.contentstore.tasks.LOGGER'
self.data_dir = path(TEST_DATA_DIR)
self.toy_course_path = self.data_dir / 'course_ignore'
self.status = UserTaskStatus.objects.create(
user=self.user, task_id=str(uuid4()), task_class='sample_task', name='CourseImport', total_steps=4
)
def test_with_library_locator(self, mock_olxcleaner_validate):
"""
        Tests that olx validation is skipped for a library locator.
"""
library_key = LibraryLocator(org='TestOrg', library='TestProbs')
self.assertTrue(validate_course_olx(library_key, self.toy_course_path, self.status))
self.assertFalse(mock_olxcleaner_validate.called)
def test_config_settings_enabled(self, mock_olxcleaner_validate):
"""
        Tests that olx validation is skipped when the config setting is disabled.
"""
with patch.dict(settings.FEATURES, ENABLE_COURSE_OLX_VALIDATION=False):
self.assertTrue(validate_course_olx(self.course.id, self.toy_course_path, self.status))
self.assertFalse(mock_olxcleaner_validate.called)
def test_config_settings_disabled(self, mock_olxcleaner_validate):
"""
        Tests that olx validation runs when the config setting is enabled.
"""
with patch.dict(settings.FEATURES, ENABLE_COURSE_OLX_VALIDATION=True):
self.assertTrue(validate_course_olx(self.course.id, self.toy_course_path, self.status))
self.assertTrue(mock_olxcleaner_validate.called)
def test_exception_during_validation(self, mock_olxcleaner_validate):
"""
Tests olx validation in case of unexpected error.
In case of any unexpected exception during the olx validation,
the course import continues and information is logged on the server.
"""
mock_olxcleaner_validate.side_effect = Exception
with mock.patch(self.LOGGER) as patched_log:
self.assertTrue(validate_course_olx(self.course.id, self.toy_course_path, self.status))
self.assertTrue(mock_olxcleaner_validate.called)
patched_log.exception.assert_called_once_with(
f'Course import {self.course.id}: CourseOlx could not be validated')
def test_no_errors(self, mock_olxcleaner_validate):
"""
Tests olx validation with no errors.
        Verify that when there are no validation errors, no artifact object is created.
"""
mock_olxcleaner_validate.return_value = [
Mock(),
Mock(errors=[], return_error=Mock(return_value=False)),
Mock()
]
self.assertTrue(validate_course_olx(self.course.id, self.toy_course_path, self.status))
task_artifact = UserTaskArtifact.objects.filter(status=self.status, name='OLX_VALIDATION_ERROR').first()
self.assertIsNone(task_artifact)
self.assertTrue(mock_olxcleaner_validate.called)
@mock.patch('cms.djangoapps.contentstore.tasks.report_error_summary')
@mock.patch('cms.djangoapps.contentstore.tasks.report_errors')
def test_creates_artifact(self, mock_report_errors, mock_report_error_summary, mock_olxcleaner_validate):
"""
Tests olx validation in case of errors.
Verify that in case of olx validation errors, course import does fail & errors
are logged in task artifact.
"""
errors = [Mock(description='DuplicateURLNameError', level_val=3)]
mock_olxcleaner_validate.return_value = [
Mock(),
Mock(errors=errors, return_error=Mock(return_value=True)),
Mock()
]
mock_report_errors.return_value = [f'ERROR {error.description} found in content' for error in errors]
mock_report_error_summary.return_value = [f'Errors: {len(errors)}']
with patch(self.LOGGER) as patched_log:
self.assertFalse(validate_course_olx(self.course.id, self.toy_course_path, self.status))
patched_log.error.assert_called_once_with(
f'Course import {self.course.id}: CourseOlx validation failed.')
task_artifact = UserTaskArtifact.objects.filter(status=self.status, name='OLX_VALIDATION_ERROR').first()
self.assertIsNotNone(task_artifact)
def test_validate_calls_with(self, mock_olxcleaner_validate):
"""
Tests that olx library is called with expected keyword arguments.
"""
allowed_xblocks = ALL_ALLOWED_XBLOCKS
steps = 2
ignore = ['edx-xblock']
mock_olxcleaner_validate.return_value = [Mock(), Mock(errors=[], return_error=Mock(return_value=False)), Mock()]
with override_settings(COURSE_OLX_VALIDATION_STAGE=steps, COURSE_OLX_VALIDATION_IGNORE_LIST=ignore):
validate_course_olx(self.course.id, self.toy_course_path, self.status)
mock_olxcleaner_validate.assert_called_with(
filename=self.toy_course_path,
steps=steps,
ignore=ignore,
allowed_xblocks=allowed_xblocks
)
|
from django.test import TestCase
from django.template import Template, Context
class TemplateTestCase(TestCase):
BASE_TEMPLATE = """ """
def setUp(self):
self.template_txt = self.BASE_TEMPLATE
def test_can_override_django_h5bp_css_block(self):
context = Context({})
self.template_txt += """
        {% block django-h5bp-css %}<div>NO-BLOCK</div>{% endblock %}
"""
template = Template(self.template_txt)
rendered = template.render(context)
self.assertInHTML(
"""<div>NO-BOCK</div>""",
rendered,
msg_prefix="Could not override django-h5bp-css block. "
)
def test_can_override_django_h5bp_js_block(self):
context = Context({})
self.template_txt += """
        {% block django-h5bp-js %}<div>NO-BLOCK</div>{% endblock %}
"""
template = Template(self.template_txt)
rendered = template.render(context)
self.assertInHTML(
"""<div>NO-BOCK</div>""",
rendered,
msg_prefix="Could not override django-h5bp-js block. "
)
class BootstrapTemplateTest(TemplateTestCase):
BASE_TEMPLATE = """
{% extends "bootstrap/bootstrap_h5bp.html" %}
"""
class ClassicTemplateTest(TemplateTestCase):
BASE_TEMPLATE = """
{% extends "classic/classic_h5bp.html" %}
"""
class ResponsiveTemplateTest(TemplateTestCase):
BASE_TEMPLATE = """
{% extends "responsive/responsive_h5bp.html" %}
"""
|
from misc2_module.misc import MiscModule
|
#!/usr/bin/python
import urllib
import urllib2
import ssl
import json
import copy
from ansible.module_utils.basic import AnsibleModule
def get_workspace(host, token, name):
url = "http://" + host + "/api/workspaces"
form_values = {'token': token }
form_data = urllib.urlencode(form_values)
request = urllib2.Request(url, form_data)
response = urllib2.urlopen(request)
response_data = response.read()
response_json = json.loads(response_data)
workspace = next((x for x in response_json['workspace'] if x['ioName'] == name), None)
return workspace
def get_workspace_id(outId):
    # no further explanation can be given here; this is just how it works.
    # more details can be found in this story:
    # https://avid-ondemand.atlassian.net/browse/ECD-617
    # (note: 4294967295 is the decimal value of 0xFFFFFFFF)
return int(outId) & 4294967295
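# Hedged worked example of the masking above: an outID of 4294967338
# (= 2**32 + 42) maps to get_workspace_id(...) == 42, i.e. only the low
# 32 bits of the identifier survive.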
def get_default_workspace(host, token):
url = "http://" + host + "/api/workspace/Create"
form_values = {'token': token }
form_data = urllib.urlencode(form_values)
request = urllib2.Request(url, form_data)
response = urllib2.urlopen(request)
response_data = response.read()
response_json = json.loads(response_data)
return response_json
def create_workspace(host, token, original, modified):
url = "http://" + host + "/api/workspace/Create"
form_values = { 'token': token,
'original': original,
'modified': modified }
form_data = json.dumps(form_values)
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(url, data=form_data)
request.add_header('Content-Type', 'application/json')
request.add_header('Connection', 'keep-alive')
request.get_method = lambda: 'PUT'
response = opener.open(request, timeout=3000)
def run_module():
module_args = dict(
host=dict(type='str', required=True),
token=dict(type='str', required=True),
name=dict(type='str', required=True),
size=dict(type='str', required=True)
)
result = dict(
changed=False
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=False
)
try:
# check if workspace exists
workspace = get_workspace(module.params['host'], module.params['token'], module.params['name'])
if workspace:
result['id'] = get_workspace_id(workspace['outID'])
module.exit_json(**result)
# create workspace
        workspace_original = get_default_workspace(module.params['host'], module.params['token'])
workspace_modified = copy.deepcopy(workspace_original)
workspace_modified['ioName'] = module.params['name']
workspace_modified['ioByteCount'] = module.params['size']
result['changed'] = True
create_workspace(module.params['host'], module.params['token'], workspace_original, workspace_modified)
# get workspace details
workspace = get_workspace(module.params['host'], module.params['token'], module.params['name'])
if not workspace:
raise Exception("unable to get workspace details after creation")
result['id'] = get_workspace_id(workspace['outID'])
except Exception as e:
result['failed'] = True
        result['error'] = str(e)
module.exit_json(**result)
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main() |
import os
import sys
from nndct_shared.base import SingletonMeta
from nndct_shared.utils import io, NndctScreenLogger
try:
import matplotlib.pyplot as plt
except ImportError:
_enable_plot = False
else:
_enable_plot = True
class Plotter(metaclass=SingletonMeta):
counter = 0
figure_dict = {}
def __init__(self):
if not _enable_plot:
NndctScreenLogger().warning("Please install matplotlib for visualization.")
sys.exit(1)
self._dir = '.nndct_quant_stat_figures'
io.create_work_dir(self._dir)
def plot_hist(self, name, data):
plot_title = "_".join([name, 'hist'])
if plot_title in self.figure_dict:
NndctScreenLogger().info("Finish visualization.")
sys.exit(0)
self.figure_dict[plot_title] = True
plt.figure(self.counter)
self.counter += 1
plt.hist(data, bins=20, facecolor='blue', edgecolor='black', alpha=0.7)
plt.xlabel('value')
plt.ylabel('frequency')
plt.title(plot_title)
plot_title = plot_title.replace('/', '_')
plt.savefig(os.path.join(self._dir, '.'.join([plot_title, 'svg'])))
plt.close()
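# Hedged usage sketch: histogram a fake tensor of quantization statistics. The
# figure is written to the hidden .nndct_quant_stat_figures directory created
# by the constructor above (requires matplotlib and the nndct utilities).
if __name__ == "__main__":
    import numpy as np
    Plotter().plot_hist("conv1/weights", np.random.randn(1000))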
|
import dash_bootstrap_components as dbc
import dash_daq as daq
import dash_html_components as html
import pretty_midi
dropdown_color = 'black'
dropdown_backgroundColor = 'lightgray'
def add(orchestra, instrument_list, custom_id):
def set_id(index, set_type):
return {
'type': set_type,
'index': index
}
children = []
for i in range(len(instrument_list)):
instrument = instrument_list[i]['inst']
techs = instrument_list[i]['tech']
notes = instrument_list[i]['note']
dynamics = instrument_list[i]['dynamic']
tgt = instrument_list[i]['target']
onoff = instrument_list[i]['onoff']
if techs in list(orchestra[instrument].keys()):
if dynamics in list(orchestra[instrument][techs].keys()):
if notes in list(orchestra[instrument][techs][dynamics].keys()):
children.append(html.Div([html.Div('{}: {} {}'.format(str(len(children)+1), instrument, pretty_midi.note_number_to_name(notes)), style={'display':'inline-block', 'marginRight':'4px', 'textShadow':'2px 2px 2px black'}),
dbc.Button(
"Octave up", id=set_id(i, "octave-up{}".format(custom_id)),
size='sm', color="sienna",
style={'border': 'none', 'display': 'inline-block', 'width':'100px'}
),
dbc.Button(
"Octave down", id=set_id(i, "octave-down{}".format(custom_id)),
size='sm', color="sienna",
style={'border': 'none', 'display': 'inline-block', 'width':'100px'}
),
dbc.Select(
options=[{'label': val, 'value': val} for val in list(orchestra.keys())],
value=instrument,
style={'backgroundColor':dropdown_backgroundColor, 'width':100, 'color': dropdown_color, 'display':'inline-block'},
bs_size='sm',
id=set_id(i, 'inst{}'.format(custom_id)),
),
dbc.Select(
options=[{'label': val, 'value': val} for val in list(orchestra[instrument].keys())],
value=techs,
style={'backgroundColor': dropdown_backgroundColor, 'width':100, 'color': dropdown_color, 'display':'inline-block'},
bs_size='sm',
id=set_id(i, 'tech{}'.format(custom_id)),
),
dbc.Select(
options=[{'label': val, 'value': val} for val in
list(orchestra[instrument][techs].keys())],
value=dynamics,
style={'backgroundColor': dropdown_backgroundColor, 'width': 60,
'color': dropdown_color,},
bs_size='sm',
id=set_id(i, 'dyn{}'.format(custom_id)),
),
dbc.Select(
options=[{'label': pretty_midi.note_number_to_name(val), 'value': val} for val in
list(orchestra[instrument][techs][dynamics].keys())],
value=notes,
style={'backgroundColor': dropdown_backgroundColor, 'width': 60,
'color': dropdown_color, 'display': 'inline-block'},
bs_size='sm',
id=set_id(i, 'note{}'.format(custom_id)),
),
daq.ToggleSwitch(
label='target',
color='red',
size=15,
value=tgt,
vertical=True,
id=set_id(i, 'target{}'.format(custom_id)),
style={'display': 'inline-block', 'textShadow':'2px 2px 2px black'}
),html.Div('\xa0 \xa0', style={'display': 'inline-block'}),
daq.ToggleSwitch(
label='on/off',
color='green',
size=15,
value=onoff,
vertical=True,
id=set_id(i, 'onoff{}'.format(custom_id)),
style={'display': 'inline-block', 'textShadow':'2px 2px 2px black'}
),
html.Hr(style={'borderTop': '1px solid #bbb'}),
], id=set_id(i, 'orch_outer{}'.format(custom_id)), style={'backgroundColor':'primary'}))
return children |
from marltoolbox.algos.adaptive_mechanism_design.amd import \
AdaptiveMechanismDesign
|
import torch
from colossalai.zero.sharded_param import ShardedParamV2
from colossalai.utils import get_current_device
from typing import List
class BucketizedTensorCopy(object):
def __init__(
self,
chunk_size: int,
):
r"""
torch.nn.Parameter CPU (fp32) -> ShardedParam GPU (fp16)
TODO(jiaruifang) The class is a little bit hardcoded
I will make it more general later.
"""
self.chunk_size = chunk_size
self._offset = 0
self._cpu_buffer = torch.empty(chunk_size, dtype=torch.float, device=torch.device("cpu:0"), pin_memory=True)
self._cuda_buffer = torch.empty(chunk_size,
dtype=torch.half,
device=torch.device(f"cuda:{get_current_device()}"))
self._buffered_param_list: List[ShardedParamV2] = []
self._numel_list = []
def copy(self, src_param: torch.nn.Parameter, target_param: ShardedParamV2):
assert isinstance(target_param, ShardedParamV2)
assert isinstance(src_param, torch.nn.Parameter)
numel = src_param.numel()
if self._offset + numel > self.chunk_size:
self.flush()
assert src_param.data.device.type == 'cpu'
self._cpu_buffer.narrow(0, self._offset, numel).copy_(src_param.data.view(-1))
self._buffered_param_list.append(target_param)
self._numel_list.append(numel)
self._offset += numel
def flush(self):
"""
flush to cuda memory
"""
self._cuda_buffer.copy_(self._cpu_buffer)
flush_offset = 0
for sparam, numel in zip(self._buffered_param_list, self._numel_list):
            sparam.data.copy_payload(self._cuda_buffer.narrow(0, flush_offset, numel))
flush_offset += numel
self.reset()
def reset(self):
self._buffered_param_list = []
self._numel_list = []
self._offset = 0
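# Rough usage sketch (assumes pairs of CPU fp32 nn.Parameter objects and their
# ShardedParamV2 targets; the variable names are illustrative only):
#
#   copier = BucketizedTensorCopy(chunk_size=20 * 1024 ** 2)
#   for cpu_param, sharded_param in param_pairs:
#       copier.copy(cpu_param, sharded_param)
#   copier.flush()  # push whatever remains in the bucket to the GPU shards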
|
from output.models.nist_data.atomic.any_uri.schema_instance.nistschema_sv_iv_atomic_any_uri_pattern_3_xsd.nistschema_sv_iv_atomic_any_uri_pattern_3 import NistschemaSvIvAtomicAnyUriPattern3
__all__ = [
"NistschemaSvIvAtomicAnyUriPattern3",
]
|
import unittest
from unittest.mock import MagicMock
# pylint: disable=unused-wildcard-import
from simulation.apps import *
from simulation.loopix import *
from simulation.messages import *
from simulation.multicast.base import *
from simulation.simrandom import *
from simulation.simulation import *
from simulation.utils import *
from tests.utils import *
class TestAppPayload(unittest.TestCase):
def test_WHEN_repr_THEN_contains_info(self):
payload = App.Payload(nonce=42, created_at=11)
self.assertIn("42", str(payload))
self.assertIn("11", str(payload))
class TestApp(unittest.TestCase):
def setUp(self):
self.sim = create_test_simulation()
self.group = Group("group", self.sim.users)
self.multicast = MagicMock
self.app = App("app", self.sim, self.group, self.multicast)
def test_WHEN_get_payload_THEN_has_been_send_and_e2e_logged(self):
payload = App.Payload(nonce=42, created_at=11)
m = ApplicationMessage(self.group.users[0], TAG_PAYLOAD, payload, self.group.id)
m.set_deliver_online_state(Message.DELIVERED_ONLINE)
self.sim.time = 20
self.app.on_payload(m.recipient, m, m.body)
self.assertIn((m.recipient, payload.nonce), self.app.seen_deliveries.set)
self.assertIn((20, 20-11), self.sim.output.e2e_delays[self.app])
def test_WHEN_create_payload_THEN_new_nonce_every_time(self):
self.sim.time = 1
payload_1 = self.app._create_payload()
self.sim.time = 2
payload_2 = self.app._create_payload()
self.assertEqual(1, payload_1.created_at)
self.assertEqual(2, payload_2.created_at)
self.assertGreater(payload_2.nonce, payload_1.nonce)
class TestInteractiveApp(unittest.TestCase):
def create_app(self):
self.sim = create_test_simulation()
self.users = [
create_static_mobile_user("U_a", online=[False]),
create_static_mobile_user("U_b", online=[True]),
create_static_mobile_user("U_c", online=[False]),
create_static_mobile_user("U_d", online=[True]),
create_static_mobile_user("U_e", online=[True]),
]
return InteractiveApp(
name="name", sim=self.sim,
group=Group("group", self.users),
multicast_factory=MagicMock,
init_rate_per_second=0.2,
heavy_user_percentage=40, heavy_user_weight=4
)
def test_WHEN_created_THEN_weights_set_correctly(self):
app = self.create_app()
self.assertDictEqual(
{
self.users[0]: 4,
self.users[1]: 4,
self.users[2]: 1,
self.users[3]: 1,
self.users[4]: 1,
},
app.user_to_weight
)
def test_WHEN_choosing_sender_and_recipient_THEN_close_to_expectations(self):
app = self.create_app()
n = 100_000
sender_counts = {u: 0 for u in self.users}
for _ in range(n):
sender = app._choose_online_sender()
sender_counts[sender] += 1
self.assertEqual(sender_counts[self.users[0]], 0) # offline
self.assertEqual(sender_counts[self.users[2]], 0) # offline
self.assertAlmostEqual(sender_counts[self.users[1]], 4/6*n, delta=1000) # heavy user
self.assertAlmostEqual(sender_counts[self.users[3]], 1/6*n, delta=1000) # normal user
self.assertAlmostEqual(sender_counts[self.users[4]], 1/6*n, delta=1000) # normal user
class TestInteractiveMultimessageApp(unittest.TestCase):
def create_app(self):
self.sim = create_test_simulation()
self.users = [
create_static_mobile_user("U_a", online=[True]),
create_static_mobile_user("U_b", online=[True]),
]
return InteractiveMultimessageApp(
name="name", sim=self.sim,
group=Group("group", self.users),
multicast_factory=MagicMock,
init_rate_per_second=0.2,
multi_message=10,
heavy_user_percentage=40, heavy_user_weight=4
)
def test_WHEN_sending_multimessages_THEN_close_to_expectations(self):
app = self.create_app()
app.send_payload_to_group = MagicMock()
n = 10_000
for _ in range(n):
app.send_a_message(self.users[0])
self.assertEqual(app.multi_message * n, app.send_payload_to_group.call_count)
|
import pytest
from principal import soma
def test_soma():
assert soma(2,4) == 6
assert soma(8, -4) == 4
assert soma(0, 0) == 0
|
#!/usr/bin/env python
import os
import versioneer
from setuptools import setup
from distutils import log
from distutils.command.clean import clean
from distutils.dir_util import remove_tree
base_path = os.path.dirname(os.path.abspath(__file__))
long_description = """
This package consists of a couple of optimised tools for doing things that can roughly be
considered "group-indexing operations". The most prominent tool is `aggregate`.
`aggregate` takes an array of values, and an array giving the group number for each of those
values. It then returns the sum (or mean, or std, or any, ...etc.) of the values in each group.
You have probably come across this idea before, using `matlab` accumarray, `pandas` groupby,
or generally MapReduce algorithms and histograms.
There are different implementations of `aggregate` provided, based on plain `numpy`, `numba`
and `weave`. Performance is a main concern, and so far we comfortably beat similar
implementations in other packages (check the benchmarks).
"""
class NumpyGroupiesClean(clean):
"""Custom clean command to tidy up the project root."""
def run(self):
clean.run(self)
for folder in ('build', 'numpy_groupies.egg-info'):
path = os.path.join(base_path, folder)
if os.path.isdir(path):
remove_tree(path, dry_run=self.dry_run)
if not self.dry_run:
self._rm_walk()
def _rm_walk(self):
for path, dirs, files in os.walk(base_path):
if any(p.startswith('.') for p in path.split(os.path.sep)):
# Skip hidden directories like the git folder right away
continue
if path.endswith('__pycache__'):
remove_tree(path, dry_run=self.dry_run)
else:
for fname in files:
if fname.endswith('.pyc') or fname.endswith('.so'):
fpath = os.path.join(path, fname)
os.remove(fpath)
log.info("removing '%s'", fpath)
setup(name='numpy_groupies',
version=versioneer.get_version(),
author="@ml31415 and @d1manson",
author_email="npgroupies@occam.com.ua",
license='BSD',
description="Optimised tools for group-indexing operations: aggregated sum and more.",
long_description=long_description,
url="https://github.com/ml31415/numpy-groupies",
download_url="https://github.com/ml31415/numpy-groupies/archive/master.zip",
keywords=[ "accumarray", "aggregate", "groupby", "grouping", "indexing"],
packages=['numpy_groupies'],
install_requires=[],
setup_requires=['pytest-runner'],
tests_require=['pytest', 'numpy', 'numba'],
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
cmdclass=dict(clean=NumpyGroupiesClean, **versioneer.get_cmdclass()),
)
|
from ._version import __version__, version_info
from .NullAuthenticator import NullAuthenticator |
from django.db import models
# Create your models here.
class User(models.Model):
code = models.CharField(max_length = 200)
email = models.CharField(max_length = 100)
password = models.CharField(max_length = 20)
birth_date = models.DateTimeField()
class Profile(models.Model):
name = models.CharField(max_length = 100)
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="profiles")
relationship = models.ManyToManyField("self")
class Reaction(models.Model):
REACTION_TYPE_CHOICES = (
('LIKE', 'like'),
('LOVE', 'love'),
('LAUGH', 'laugh'),
('IMPRESSIVE', 'impressive'),
('SAD', 'sad'),
('ANGRY', 'angry')
)
    reaction_type = models.CharField(max_length=20, choices=REACTION_TYPE_CHOICES, default='LIKE')  # max_length must fit the longest stored key ('IMPRESSIVE')
profile = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name = "reactions")
class Post(models.Model):
text = models.CharField(max_length = 255)
created_date = models.DateTimeField(auto_now_add=True)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name = "posts")
reactions = models.ManyToManyField(Reaction, through='PostReaction')
class Comment(models.Model):
text = models.CharField(max_length = 255)
created_date = models.DateTimeField(auto_now_add=True)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name = "comments")
post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name = "comments")
class PostReaction(models.Model):
weight = models.IntegerField()
created_date = models.DateTimeField(auto_now_add=True)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
reaction = models.ForeignKey(Reaction, on_delete=models.CASCADE)
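# Illustrative use of the through model above (a sketch; assumes existing Profile
# and Post rows bound to `profile` and `post`):
#
#   reaction = Reaction.objects.create(reaction_type='LOVE', profile=profile)
#   PostReaction.objects.create(post=post, reaction=reaction, weight=1)
#   post.reactions.count()  # reactions attached via the PostReaction through table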
def get_date(day, month, year):
from django.utils import timezone
import datetime, pytz
return datetime.datetime(year, month, day, tzinfo=pytz.UTC) |
# -*- coding: utf-8 -*-
"""052 - Progressão Aritmética
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1XxsY2LOjklht2ABKc1B0VXPOppV9E7Dh
"""
num = int(input('\nEnter the first term of the AP: '))
razão = int(input('Enter the common difference of the AP: '))
for c in range(1, 11):
print(num, end=' ')
num += razão
print('Done') |
from src import *
class PointNetEncoder(nn.Module):
def __init__(self, dim_latent, breadth=128, is_variational = False):
super(PointNetEncoder, self).__init__()
self.is_variational = is_variational
if is_variational:
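            # NOTE: relies on a `filename` attribute being provided elsewhere
            # (presumably via the wildcard import from src); it is not defined in this class.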
self.filename = 'variational-' + self.filename
self.encoder = PN1_Module(input_num_chann=3,
num_pn_output=breadth,
num_mlp_output=dim_latent)
def forward(self, x):
x = x.permute(0,2,1)
return self.encoder(x)
class PN1_Module(nn.Module):
def __init__(self,
input_num_chann=3,
num_pn_output=128,
dim_mlp_append=0,
num_mlp_output=16):
"""
* Use spatial softmax instead of max pool by default
"""
super(PN1_Module, self).__init__()
self.dim_mlp_append = dim_mlp_append
self.num_mlp_output = num_mlp_output
# CNN
self.conv_1 = nn.Sequential(
nn.Conv1d(input_num_chann, num_pn_output//4, 1),
nn.ReLU(),
)
self.conv_2 = nn.Sequential(
nn.Conv1d(num_pn_output//4, num_pn_output//2,1),
nn.ReLU(),
)
self.conv_3 = nn.Sequential(
nn.Conv1d(num_pn_output//2, num_pn_output, 1),
)
# Spatial softmax
self.sm = nn.Softmax(dim=2)
# MLP
num_linear_1_input = num_pn_output*3 + dim_mlp_append
self.linear_1 = nn.Sequential(
nn.Linear(num_linear_1_input, num_pn_output),
nn.ReLU(),
)
self.linear_2 = nn.Sequential(
nn.Linear(num_pn_output, num_pn_output//2),
nn.ReLU(),
)
# Output layers
self.linear_out = nn.Linear(num_pn_output//2, num_mlp_output)
def forward(self, x):
B = x.shape[0]
# CNN
out = self.conv_1(x)
out = self.conv_2(out)
out = self.conv_3(out)
# Spatial softmax
s = self.sm(out) # B x 128 x 1536, normalized among points
xyz = x[:,:3,:]
out_pn = torch.bmm(s, xyz.permute(0,2,1)).view(B, -1)
# MLP
x = self.linear_1(out_pn)
x = self.linear_2(x)
out_mlp = self.linear_out(x).squeeze(1)
return out_mlp
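# Shape walkthrough for the modules above (a sketch, assuming a batch of 8 clouds
# with 1024 points each):
#
#   encoder = PointNetEncoder(dim_latent=16, breadth=128)
#   z = encoder(torch.randn(8, 1024, 3))  # permuted to (8, 3, 1024) internally
#
# The conv stack yields (8, 128, 1024); the spatial softmax plus bmm collapses the
# points to (8, 128 * 3); the MLP then maps to the latent (8, 16).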
|
from PyQt5 import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from deritradeterminal.util.deribit_api import RestClient
from deritradeterminal.managers.ConfigManager import ConfigManager
class StopMarketSellThread(QThread):
signeler = pyqtSignal(bool,str,str)
def processOrder(self):
try:
config = ConfigManager.get_config()
client = RestClient(config.tradeApis[self.accountid][0], config.tradeApis[self.accountid][1], ConfigManager.get_config().apiUrl)
client.sell_stop_market_order(ConfigManager.get_config().tradeInsturment, float(self.amount), self.price)
self.signeler.emit(True, "Stop Market Sell Order Success", "Stop Market Sell On Account: " + str(self.accountid) + " For Amount: " + str(self.amount) + " At Price: " + str(self.price))
except Exception as e:
self.signeler.emit(False, "Stop Market Sell Order Error" , "Failed to stop market sell on " + str(self.accountid) + " for amount: " + str(self.amount) + "\n" + str(e))
def __init__(self, accountid, price, amount):
QThread.__init__(self)
self.accountid = accountid
self.price = price
self.amount = amount
def run(self):
self.processOrder()
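# Typical wiring from the calling widget (a sketch; the slot name and arguments
# are illustrative only):
#
#   thread = StopMarketSellThread(accountid='main', price=41250.0, amount=100)
#   thread.signeler.connect(on_order_result)  # slot receives (bool, str, str)
#   thread.start()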
|
class BallMovement:
""" Represents movement for the Ball """
def __init__(self, speed):
""" Initialize with the speed """
self.speed = speed
self.initialSpeed = speed
def reset(self):
""" Reset the ball movement to the initial speed """
self.speed = self.initialSpeed
def update(self):
""" Update the ball """
velocity = self.body.velocity
desiredVelocity = velocity.unitVector*self.speed
impulse = (desiredVelocity - velocity)*self.body.mass
self.body.applyImpulse(impulse)
@property
def body(self):
""" Return the body for the ball """
return self.entity.body |
import unittest
import string
import inspect
from enum import Enum
from uuid import uuid4
from functools import wraps
from datetime import datetime
from odm import mapper
from odm.types import JSONType, UUIDType, ChoiceType
from sqlalchemy.orm import relationship
from sqlalchemy import Column, String, Boolean, DateTime, Integer, ForeignKey
from pulsar.utils.string import random_string
from pulsar.apps.greenio import GreenPool
Model = mapper.model_base('foooo')
class TaskType(Enum):
work = 1
personal = 2
social = 3
class PersonalTasks(Model):
id = Column(UUIDType, primary_key=True)
subject = Column(String(250))
done = Column(Boolean, default=False)
created = Column(DateTime, default=datetime.utcnow)
__create_sql__ = """
create or replace view {0[name]} as (
with
-- QUERY
_tasks as (
select *
from task
where type = 2
),
_personal_view as (
select
t.id,
t.subject,
t.done,
t.created
from _tasks t
)
select * from _personal_view ps
)
"""
__drop_sql__ = """
drop view {0[name]}
"""
class Employee(Model):
id = Column(Integer, primary_key=True)
name = Column(String(80))
type = Column(String(50))
sex = Column(ChoiceType({'female': 'female', 'male': 'male'}))
@mapper.declared_attr
def __mapper_args__(cls):
name = cls.__name__.lower()
if cls.__name__ == 'Employee':
return {
'polymorphic_identity': name,
'polymorphic_on': cls.type
}
else:
return {
'polymorphic_identity': name
}
class Engineer(Employee):
engineer_name = Column(String(30))
@mapper.declared_attr
def id(self):
return Column(Integer, ForeignKey('employee.id'), primary_key=True)
class Task(Model):
id = Column(UUIDType, primary_key=True)
subject = Column(String(250))
done = Column(Boolean, default=False)
created = Column(DateTime, default=datetime.utcnow)
info = Column(JSONType)
info2 = Column(JSONType(binary=False))
type = Column(ChoiceType(TaskType, impl=Integer),
default=TaskType.work)
@mapper.declared_attr
def employee_id(cls):
return Column(Integer, ForeignKey('employee.id'))
@mapper.declared_attr
def employee(cls):
return relationship('Employee', backref='tasks')
def randomname(prefix):
name = random_string(min_len=8, max_len=8, characters=string.ascii_letters)
return ('%s%s' % (prefix, name)).lower()
def green(method):
if inspect.isclass(method):
cls = method
for name in dir(cls):
if name.startswith('test'):
method = getattr(cls, name)
setattr(cls, name, green(method))
return cls
else:
@wraps(method)
def _(self):
return self.green_pool.submit(method, self)
return _
class TestCase(unittest.TestCase):
prefixdb = 'odmtest_'
# Tuple of SqlAlchemy models to register
mapper = None
@classmethod
async def setUpClass(cls):
# Create the application
cls.dbs = {}
cls.dbname = randomname(cls.prefixdb)
cls.init_mapper = mapper.Mapper(cls.url())
cls.green_pool = GreenPool()
cls.mapper = await cls.green_pool.submit(
cls.init_mapper.database_create,
cls.dbname
)
cls.mapper.register_module(__name__)
await cls.green_pool.submit(cls.mapper.table_create)
@classmethod
async def tearDownClass(cls):
# Create the application
if cls.mapper:
pool = cls.green_pool
await pool.submit(cls.mapper.close)
await pool.submit(cls.init_mapper.database_drop, cls.dbname)
@classmethod
def url(cls):
'''Url for database to test
'''
raise NotImplementedError
class MapperMixin:
def test_mapper(self):
mapper = self.mapper
self.assertTrue(mapper.binds)
def test_databases(self):
dbs = self.mapper.database_all()
self.assertIsInstance(dbs, dict)
def test_tables(self):
tables = self.mapper.tables()
self.assertTrue(tables)
self.assertEqual(len(tables[0][1]), 3)
def test_database_drop_fail(self):
self.assertRaises(AssertionError,
self.mapper.database_drop,
lambda e: None)
def test_create_task(self):
with self.mapper.begin() as session:
task = self.mapper.task(id=uuid4(),
subject='simple task',
type=TaskType.personal)
session.add(task)
self.assertTrue(task.id)
self.assertEqual(task.type, TaskType.personal)
with self.mapper.begin() as session:
task = session.query(self.mapper.task).get(task.id)
self.assertEqual(task.type, TaskType.personal)
def test_update_task(self):
with self.mapper.begin() as session:
task = self.mapper.task(id=uuid4(),
subject='simple task to update')
task.info = dict(extra='extra info')
session.add(task)
self.assertTrue(task.id)
self.assertFalse(task.done)
self.assertEqual(task.info['extra'], 'extra info')
with self.mapper.begin() as session:
task.done = True
session.add(task)
with self.mapper.begin() as session:
task = session.query(self.mapper.task).get(task.id)
self.assertTrue(task.done)
self.assertEqual(task.info['extra'], 'extra info')
def test_task_employee(self):
mapper = self.mapper
with mapper.begin() as session:
user = mapper.employee(name='pippo', sex='male')
session.add(user)
with mapper.begin() as session:
task = mapper.task(id=uuid4(),
employee_id=user.id,
subject='simple task to update')
session.add(task)
with mapper.begin() as session:
user = session.query(mapper.employee).get(user.id)
tasks = user.tasks
self.assertTrue(tasks)
self.assertEqual(user.sex, 'male')
def test_view(self):
mapper = self.mapper
with mapper.begin() as session:
session.add(mapper.task(id=uuid4(),
subject='simple task 1',
type=TaskType.personal))
session.add(mapper.task(id=uuid4(),
subject='simple task 2',
type=TaskType.personal))
session.add(mapper.task(id=uuid4(),
subject='simple task 3',
type=TaskType.work))
with mapper.begin() as session:
ptasks = session.query(mapper.personaltasks).all()
self.assertTrue(len(ptasks) >= 2)
def test_database_exist(self):
binds = self.mapper.database_exist()
self.assertTrue(binds)
self.assertTrue(binds['default'])
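# A concrete backend test case could be declared like this (a sketch; the
# connection URL is illustrative only):
#
#   @green
#   class TestPostgreSql(MapperMixin, TestCase):
#       @classmethod
#       def url(cls):
#           return 'postgresql+green://odm:odm@127.0.0.1:5432/'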
|
import datetime
from keepsake.packages import get_imported_packages
def test_get_imported_packages():
assert "keepsake" in get_imported_packages()
|
import numpy as np
from numgrad._utils._expand_to import _expand_to
from numgrad._utils._unbroadcast import _unbroadcast_to
from numgrad._variable import Variable
from numgrad._vjp import _register_vjp, differentiable
# https://numpy.org/doc/stable/reference/arrays.ndarray.html#special-methods
def _getitem_vjp(dy, _y, x, key):
dx = np.zeros_like(x)
dx[key] = dy
return dx
Variable.__getitem__ = differentiable(_getitem_vjp)(
lambda self, key: self[key])
Variable.__getitem__.__doc__ = np.ndarray.__getitem__.__doc__
# https://numpy.org/doc/stable/reference/routines.array-creation.html#building-matrices
_register_vjp(np.diag, lambda dy, _y, x, k=0: _unbroadcast_to(
np.diag(dy, k=k), x.shape))
_register_vjp(np.diagflat, lambda dy, _y, x, k=0: np.diag(dy, k=k).reshape(
*x.shape))
_register_vjp(np.tril, lambda dy, _y, _x, k=0: np.tril(dy, k))
_register_vjp(np.triu, lambda dy, _y, _x, k=0: np.triu(dy, k))
# https://numpy.org/doc/stable/reference/routines.array-manipulation.html#changing-array-shape
_register_vjp(np.reshape, lambda dy, _y, x, _newshape, order=None: dy.reshape(
*x.shape, order=order))
_register_vjp(np.ravel, lambda dy, _y, x, order=None: dy.reshape(
*x.shape, order=order))
# https://numpy.org/doc/stable/reference/routines.array-manipulation.html#transpose-like-operations
_register_vjp(np.moveaxis, lambda dy, _y, _x, source, destination: np.moveaxis(
dy, source=destination, destination=source))
_register_vjp(np.swapaxes, lambda dy, _y, _x, axis1, axis2: np.swapaxes(
dy, axis1, axis2))
_register_vjp(
np.transpose,
lambda dy, _y, _x, axes=None: (
np.transpose(dy) if axes is None
else np.transpose(dy, np.argsort(axes))
),
)
# https://numpy.org/doc/stable/reference/routines.array-manipulation.html#changing-number-of-dimensions
_register_vjp(np.broadcast_to, lambda dy, _y, x, shape: ( # noqa: U100
_unbroadcast_to(dy, x.shape)))
_register_vjp(np.expand_dims, lambda dy, _y, _x, axis: np.squeeze(dy, axis))
_register_vjp(np.squeeze, lambda dy, _y, x, axis=None: (
np.expand_dims(dy, [ax for ax, len_ in enumerate(x.shape) if len_ == 1])
if axis is None else np.expand_dims(dy, axis)))
# https://numpy.org/doc/stable/reference/routines.array-manipulation.html#rearranging-elements
_register_vjp(np.flip, lambda dy, _y, _x, axis=None: np.flip(dy, axis))
_register_vjp(np.fliplr, lambda dy, _y, _x: np.fliplr(dy))
_register_vjp(np.flipud, lambda dy, _y, _x: np.flipud(dy))
_register_vjp(np.roll, lambda dy, _y, _x, shift, axis=None: np.roll(
dy, -shift if isinstance(shift, int) else [-s for s in shift], axis))
_register_vjp(np.rot90, lambda dy, _y, _x, k=1, axes=(0, 1): np.rot90(
dy, -k, axes))
# https://numpy.org/doc/stable/reference/routines.linalg.html
def _matmul_vjp_x1(dy, _y, x1, x2):
x1, x2 = np.asarray(x1), np.asarray(x2)
if x2.ndim == 1:
return np.broadcast_to(dy[..., None], x1.shape) * x2
if x1.ndim == 1:
return _unbroadcast_to((dy[..., None, :] * x2).sum(-1), x1.shape)
return _unbroadcast_to(dy @ np.swapaxes(x2, -1, -2), x1.shape)
def _matmul_vjp_x2(dy, _y, x1, x2):
x1, x2 = np.asarray(x1), np.asarray(x2)
if x2.ndim == 1:
return _unbroadcast_to(dy[..., None] * x1, x2.shape)
if x1.ndim == 1:
return np.broadcast_to(dy[..., None, :], x2.shape) * x1[:, None]
return _unbroadcast_to(np.swapaxes(x1, -1, -2) @ dy, x2.shape)
_register_vjp(np.matmul, _matmul_vjp_x1, _matmul_vjp_x2)
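# Sanity check for the matmul rules above: for 2-D x1 and x2 with y = x1 @ x2,
# the VJPs reduce to dy @ x2.T and x1.T @ dy; the swapaxes/unbroadcast calls
# extend this to batched and broadcast operands, and the 1-D branches handle
# vector-matrix and matrix-vector products.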
# https://numpy.org/doc/stable/reference/routines.math.html#trigonometric-functions
_register_vjp(np.sin, lambda dy, _y, x: dy * np.cos(x))
_register_vjp(np.cos, lambda dy, _y, x: dy * -np.sin(x))
_register_vjp(np.tan, lambda dy, y, _x: dy * (1 + np.square(y)))
_register_vjp(np.arcsin, lambda dy, y, _x: dy / np.cos(y))
_register_vjp(np.arccos, lambda dy, y, _x: dy / -np.sin(y))
_register_vjp(np.arctan, lambda dy, y, _x: dy * (np.cos(y) ** 2))
_register_vjp(
np.hypot,
lambda dy, y, x1, _x2: _unbroadcast_to(dy * x1 / y, x1.shape),
lambda dy, y, _x1, x2: _unbroadcast_to(dy * x2 / y, x2.shape),
)
# https://numpy.org/doc/stable/reference/routines.math.html#hyperbolic-functions
_register_vjp(np.sinh, lambda dy, _y, x: dy * np.cosh(x))
_register_vjp(np.cosh, lambda dy, _y, x: dy * np.sinh(x))
_register_vjp(np.tanh, lambda dy, y, _x: dy * (1 - np.square(y)))
_register_vjp(np.arcsinh, lambda dy, y, _x: dy / np.cosh(y))
_register_vjp(np.arccosh, lambda dy, y, _x: dy / np.sinh(y))
_register_vjp(np.arctanh, lambda dy, _y, x: dy / (1 - np.square(x)))
# https://numpy.org/doc/stable/reference/routines.math.html#sums-products-differences
_register_vjp(
np.sum,
lambda dy, _y, x, axis=None, keepdims=False, **kwargs: _expand_to(
dy, x.shape, axis, keepdims),
)
# https://numpy.org/doc/stable/reference/routines.math.html#exponents-and-logarithms
_register_vjp(np.exp, lambda dy, y, _x: dy * y)
_register_vjp(np.expm1, lambda dy, y, _x: dy * (y + 1))
_register_vjp(np.exp2, lambda dy, y, _x: dy * y * np.log(2))
_register_vjp(np.log, lambda dy, _y, x: dy / x)
_register_vjp(np.log10, lambda dy, _y, x: dy / (x * np.log(10)))
_register_vjp(np.log2, lambda dy, _y, x: dy / (x * np.log(2)))
_register_vjp(np.log1p, lambda dy, _y, x: dy / (1 + x))
_register_vjp(
np.logaddexp,
lambda dy, y, x1, _x2: _unbroadcast_to(dy * np.exp(x1 - y), x1.shape),
lambda dy, y, _x1, x2: _unbroadcast_to(dy * np.exp(x2 - y), x2.shape),
)
_register_vjp(
np.logaddexp2,
lambda dy, y, x1, _x2: _unbroadcast_to(dy * np.exp2(x1 - y), x1.shape),
lambda dy, y, _x1, x2: _unbroadcast_to(dy * np.exp2(x2 - y), x2.shape),
)
# https://numpy.org/doc/stable/reference/routines.math.html#arithmetic-operations
_register_vjp(
np.add,
lambda dy, _y, x1, _x2: _unbroadcast_to(dy, x1.shape),
lambda dy, _y, _x1, x2: _unbroadcast_to(dy, x2.shape),
)
_register_vjp(np.reciprocal, lambda dy, y, _x: dy * -(y ** 2))
_register_vjp(np.positive, lambda dy, _y, _x: dy)
_register_vjp(np.negative, lambda dy, _y, _x: -dy)
_register_vjp(
np.multiply,
lambda dy, _y, x1, x2: _unbroadcast_to(dy * x2, x1.shape),
lambda dy, _y, x1, x2: _unbroadcast_to(dy * x1, x2.shape),
)
_register_vjp(
np.divide,
lambda dy, _y, x1, x2: _unbroadcast_to(dy / x2, x1.shape),
lambda dy, _y, x1, x2: _unbroadcast_to(dy * x1 / -(x2 ** 2), x2.shape),
)
_register_vjp(
np.power,
lambda dy, y, x1, x2: _unbroadcast_to(dy * x2 * y / x1, x1.shape),
lambda dy, y, x1, x2: (
None if np.any(np.asarray(x1) < 0)
else _unbroadcast_to(dy * y * np.log(x1), x2.shape)
),
)
_register_vjp(
np.subtract,
lambda dy, _y, x1, _x2: _unbroadcast_to(dy, x1.shape),
lambda dy, _y, _x1, x2: _unbroadcast_to(-dy, x2.shape),
)
_register_vjp(
np.float_power,
lambda dy, y, x1, x2: _unbroadcast_to(dy * x2 * y / x1, x1.shape),
lambda dy, y, x1, x2: (
None if np.any(np.asarray(x1) < 0)
else _unbroadcast_to(dy * y * np.log(x1), x2.shape)
),
)
# https://numpy.org/doc/stable/reference/routines.math.html#extrema-finding
_register_vjp(
np.maximum,
lambda dy, y, x1, _x2: _unbroadcast_to(
np.where(np.asarray(x1) != np.asarray(y), 0, dy), x1.shape),
lambda dy, y, _x1, x2: _unbroadcast_to(
np.where(np.asarray(x2) != np.asarray(y), 0, dy), x2.shape),
)
_register_vjp(
np.fmax,
lambda dy, y, x1, _x2: _unbroadcast_to(
np.where(np.asarray(x1) != np.asarray(y), 0, dy), x1.shape),
lambda dy, y, _x1, x2: _unbroadcast_to(
np.where(np.asarray(x2) != np.asarray(y), 0, dy), x2.shape),
)
_register_vjp(
np.amax,
lambda dy, y, x, axis=None, keepdims=False, **kwargs: np.where(
np.asarray(x) == (
np.asarray(y) if keepdims else
np.asarray(x).max(axis, keepdims=True)
),
_expand_to(dy, x.shape, axis, keepdims), 0,
),
)
_register_vjp(
np.nanmax,
lambda dy, y, x, axis=None, keepdims=False: np.where(
np.asarray(x) == (
np.asarray(y) if keepdims else
np.nanmax(np.asarray(x), axis, keepdims=True)
),
_expand_to(dy, x.shape, axis, keepdims), 0,
),
)
_register_vjp(
np.minimum,
lambda dy, y, x1, _x2: _unbroadcast_to(
np.where(np.asarray(x1) != np.asarray(y), 0, dy), x1.shape),
lambda dy, y, _x1, x2: _unbroadcast_to(
np.where(np.asarray(x2) != np.asarray(y), 0, dy), x2.shape),
)
_register_vjp(
np.fmin,
lambda dy, y, x1, _x2: _unbroadcast_to(
np.where(np.asarray(x1) != np.asarray(y), 0, dy), x1.shape),
lambda dy, y, _x1, x2: _unbroadcast_to(
np.where(np.asarray(x2) != np.asarray(y), 0, dy), x2.shape),
)
_register_vjp(
np.amin,
lambda dy, y, x, axis=None, keepdims=False, **kwargs: np.where(
np.asarray(x) == (
np.asarray(y) if keepdims else
np.asarray(x).min(axis, keepdims=True)
),
_expand_to(dy, x.shape, axis, keepdims), 0,
),
)
_register_vjp(
np.nanmin,
lambda dy, y, x, axis=None, keepdims=False: np.where(
np.asarray(x) == (
np.asarray(y) if keepdims else
np.nanmin(np.asarray(x), axis, keepdims=True)
),
_expand_to(dy, x.shape, axis, keepdims), 0,
),
)
# https://numpy.org/doc/stable/reference/routines.math.html#miscellaneous
_register_vjp(np.sqrt, lambda dy, y, _x: dy * 0.5 / y)
_register_vjp(np.cbrt, lambda dy, y, _x: dy / (3 * y ** 2))
_register_vjp(np.square, lambda dy, _y, x: dy * 2 * x)
_register_vjp(np.absolute, lambda dy, _y, x: dy * np.sign(x))
_register_vjp(np.fabs, lambda dy, _y, x: dy * np.sign(x))
# https://numpy.org/doc/stable/reference/random/legacy.html#functions-in-numpy-random
_register_vjp(
np.random.exponential,
lambda dy, y, scale, size=None: (
dx := dy * y / scale,
dx if size is None else _unbroadcast_to(dx, scale.shape),
)[1],
module_name='numpy.random', func_name='exponential',
)
_register_vjp(
np.random.normal,
lambda dy, _y, loc, scale, size=None, **kwargs: ( # noqa: U100
_unbroadcast_to(dy, loc.shape)),
lambda dy, y, loc, scale, size=None: _unbroadcast_to( # noqa: U100
dy * (np.asarray(y) - np.asarray(loc)) / np.asarray(scale),
scale.shape,
),
module_name='numpy.random', func_name='normal',
)
_register_vjp(
np.random.uniform,
lambda dy, y, low, high, size=None: ( # noqa: U100
u := (np.asarray(y) - np.asarray(low)) / (
np.asarray(high) - np.asarray(low)),
_unbroadcast_to(dy - dy * u, low.shape),
)[1],
lambda dy, y, low, high, size=None: ( # noqa: U100
u := (np.asarray(y) - np.asarray(low)) / (
np.asarray(high) - np.asarray(low)),
_unbroadcast_to(dy * u, high.shape),
)[1],
module_name='numpy.random', func_name='uniform',
)
# https://numpy.org/doc/stable/reference/routines.statistics.html#averages-and-variances
_register_vjp(
np.mean,
lambda dy, _y, x, axis=None, keepdims=False: (
_expand_to(dy, x.shape, axis, keepdims) * dy.size / x.size
),
)
_register_vjp(
np.nanmean,
lambda dy, _y, x, axis=None, *, keepdims=False: (
nan_mask := np.isnan(x),
np.where(
nan_mask, 0,
_expand_to(dy, x.shape, axis, keepdims) / np.sum(
~nan_mask, axis, keepdims=True),
),
)[1],
)
__all__ = []
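# Note on the registrations above: each _register_vjp call stores a
# vector-Jacobian product, i.e. a function mapping the upstream gradient dy to
# the gradient with respect to one input.  For example, for y = np.square(x) the
# VJP is dy * 2 * x, which is exactly the lambda registered for np.square.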
|
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.datasets as datasets
import torchvision.transforms as T
import bcolz
use_gpu = True
cuda_available = torch.cuda.is_available()
device = torch.device("cuda" if (cuda_available and use_gpu) else "cpu")
def save_array(fname, arr):
c = bcolz.carray(arr, rootdir=fname, mode='w')
c.flush()
def load_array(fname):
return bcolz.open(fname)[:]
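# Example round trip (a sketch; the file name is illustrative):
#
#   save_array('features.bc', features)
#   features = load_array('features.bc')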
def to_var(x, volatile=False):
return x.to(device)
def create_img_dataloader(image_folder, transform=None, batch_size=25, shuffle=False, num_workers=2):
if transform is None:
transform = T.Compose([
T.Resize((224, 224)),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
img_dataset = datasets.ImageFolder(image_folder, transform)
    # use keyword arguments: the fourth positional DataLoader argument is sampler, not num_workers
    img_dataloader = torch.utils.data.DataLoader(img_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
return img_dataset, img_dataloader |
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2020, Bianca Henderson <bianca@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_inventory_source_update
author: "Bianca Henderson (@beeankha)"
short_description: Update inventory source(s).
description:
- Update Ansible Tower inventory source(s). See
U(https://www.ansible.com/tower) for an overview.
options:
inventory:
description:
- Name of the inventory that contains the inventory source(s) to update.
required: True
type: str
inventory_source:
description:
- The name of the inventory source to update.
required: True
type: str
organization:
description:
- Name of the inventory source's inventory's organization.
type: str
wait:
description:
- Wait for the job to complete.
default: False
type: bool
interval:
description:
- The interval to request an update from Tower.
required: False
default: 1
type: float
timeout:
description:
- If waiting for the job to complete this will abort after this
amount of seconds
type: int
extends_documentation_fragment: awx.awx.auth
'''
EXAMPLES = '''
- name: Update a single inventory source
tower_inventory_source_update:
inventory: "My Inventory"
inventory_source: "Example Inventory Source"
organization: Default
- name: Update all inventory sources
tower_inventory_source_update:
inventory: "My Other Inventory"
inventory_source: "{{ item }}"
loop: "{{ query('awx.awx.tower_api', 'inventory_sources', query_params={ 'inventory': 30 }, return_ids=True ) }}"
'''
RETURN = '''
id:
description: id of the inventory update
returned: success
type: int
sample: 86
status:
description: status of the inventory update
returned: success
type: str
sample: pending
'''
from ..module_utils.tower_api import TowerAPIModule
def main():
# Any additional arguments that are not fields of the item can be added here
argument_spec = dict(
inventory=dict(required=True),
inventory_source=dict(required=True),
organization=dict(),
wait=dict(default=False, type='bool'),
interval=dict(default=1.0, type='float'),
timeout=dict(default=None, type='int'),
)
# Create a module for ourselves
module = TowerAPIModule(argument_spec=argument_spec)
# Extract our parameters
inventory = module.params.get('inventory')
inventory_source = module.params.get('inventory_source')
organization = module.params.get('organization')
wait = module.params.get('wait')
interval = module.params.get('interval')
timeout = module.params.get('timeout')
lookup_data = {'name': inventory}
if organization:
lookup_data['organization'] = module.resolve_name_to_id('organizations', organization)
inventory_object = module.get_one('inventories', data=lookup_data)
if not inventory_object:
module.fail_json(msg='The specified inventory, {0}, was not found.'.format(lookup_data))
inventory_source_object = module.get_one('inventory_sources', **{
'data': {
'name': inventory_source,
'inventory': inventory_object['id'],
}
})
if not inventory_source_object:
module.fail_json(msg='The specified inventory source was not found.')
# Sync the inventory source(s)
inventory_source_update_results = module.post_endpoint(inventory_source_object['related']['update'], **{'data': {}})
if inventory_source_update_results['status_code'] != 202:
module.fail_json(msg="Failed to update inventory source, see response for details", **{'response': inventory_source_update_results})
module.json_output['changed'] = True
module.json_output['id'] = inventory_source_update_results['json']['id']
module.json_output['status'] = inventory_source_update_results['json']['status']
if not wait:
module.exit_json(**module.json_output)
# Invoke wait function
module.wait_on_url(
url=inventory_source_update_results['json']['url'],
object_name=inventory_object,
object_type='inventory_update',
timeout=timeout, interval=interval
)
module.exit_json(**module.json_output)
if __name__ == '__main__':
main()
|
import os
def main():
    # The four original calls differed only in the --iteration flag; run them in a loop.
    for iteration in range(9, 13):
        os.system("python \"iid_train.py\" --server_config=2 --cuda_device=\"cuda:1\" --img_to_load=-1 --load_previous=0 --test_mode=0 --patch_size=256 --batch_size=64 --net_config=2 --num_blocks=0 "
                  "--plot_enabled=0 --debug_mode=0 --version_name=\"maps2rgb_rgb2maps_v4.13\" --iteration={}".format(iteration))
if __name__ == "__main__":
main() |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-01-22 12:28
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('instapp', '0003_profile'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post_image', models.ImageField(upload_to='posts/')),
('caption', models.CharField(max_length=240)),
('location', models.CharField(max_length=30)),
('date_posted', models.DateTimeField(auto_now_add=True)),
('like', models.IntegerField(default=0)),
('profile', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='instapp.Profile')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
from .base import FunctionalTest
class PostLikeTest(FunctionalTest):
def test_search_post_title(self):
self.browser.get(self.live_server_url)
self.move_to_default_board()
        # Hyunjun posts a dad joke hoping to get likes.
self.add_post('반성문을 영어로 해석하면??', '글로벌\n(글로 벌 받는것)\nㅋㅋㅋㅋㅋ꿀잼')
        # A single post is visible.
searched_posts = self.browser.find_elements_by_css_selector('#id_post_list_table tbody tr')
self.assertEqual(len(searched_posts), 1)
self.check_for_row_in_list_table('id_post_list_table', '반성문을 영어로 해석하면??')
        # Having received no likes, he clicks his own post to upvote it himself.
post = self.browser.find_element_by_css_selector('tr > td > a')
post.click()
        # The like count is 0.
like_button = self.browser.find_element_by_class_name('like-count')
self.assertEqual('0', like_button.text)
        # He clicks the like button.
like_button.click()
        # A message confirms the like; he clicks OK.
alert = self.browser.switch_to_alert()
alert.accept()
        # The like count has increased to 1.
like_button = self.browser.find_element_by_class_name('like-count')
self.assertEqual('1', like_button.text)
        # Hyunjun closes the page in tears.
|
from __future__ import division
def odds_stats(odds):
    # we first calculate the total of the odds at stake
    # there are only three possible outcomes in a match: 1, 2 or 3
hometeam_percentage = (odds[0] * 100) / (odds[0] + odds[1])
awayteam_percentage = (odds[1] * 100) / (odds[0] + odds[1])
    print 'According to the betting odds, the percentages of the home team and away team losing are', hometeam_percentage, awayteam_percentage
return {'H': hometeam_percentage, 'A': awayteam_percentage}
def get_best_odds(odds):
    # return the outcome most likely to occur, i.e. the one with the lowest bookmaker odds (rarely a draw)
value = odds.index(min(odds))
if value == 0:
return ('H', value)
elif value == 1:
return ('D', value)
else:
return ('A', value)
def get_match_winner(actual_scores):
if actual_scores[0] > actual_scores[1]:
# the home team won
return "H"
elif actual_scores[1] > actual_scores[0]:
return "A"
else:
return "D"
def elo_rating_analytics(home_team, away_team):
r1 = (pow(10, (home_team['rating'] / 400)))
r2 = (pow(10, (away_team['rating'] / 400)))
e1 = (r1 / (r1 + r2))
e2 = (r2 / (r1 + r2))
if e1 == e2:
print "teams are equally balanced out therefore there is a high percent chance for a draw."
else:
print "Chances of wining are;", e1* 100, e2* 100
return {'H': e1, 'A': e2}
##post match math
def apply_elo_rating(home_team, away_team, actual_scores):
if actual_scores[0] > actual_scores[1]:
analytics = elo_rating_analytics(home_team, away_team)
rating1 = home_team['rating'] + (32 * (1 - analytics['H']))
rating2 = away_team['rating'] + (32 * (0 - analytics['A']))
elif actual_scores[1] > actual_scores[0]:
analytics = elo_rating_analytics(home_team, away_team)
rating1 = home_team['rating'] + (32 * (0 - analytics['H']))
rating2 = away_team['rating'] + (32 * (1 - analytics['A']))
elif actual_scores[0] == actual_scores[1]:
analytics = elo_rating_analytics(home_team, away_team)
rating1 = home_team['rating'] + (32 * (0.5- analytics['H']))
rating2 = away_team['rating'] + (32 * (0.5 - analytics['A']))
print "New rating, ", rating1, rating2
return (rating1,rating2)
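# Worked example of the update above (illustrative numbers): with ratings 1600
# (home) and 1400 (away), e1 = 1 / (1 + 10 ** ((1400 - 1600) / 400)) ~= 0.76, so a
# home win changes the ratings by roughly +32 * (1 - 0.76) = +7.7 (home) and
# 32 * (0 - 0.24) = -7.7 (away).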
'''
def define_match_result(match, best_odds):
if best_odds[0] == ''' |
#!/usr/bin/env python
from ruffus import *
import sys
import os
import shutil
import cgatcore.experiment as E
from cgatcore import pipeline as P
PARAMS = P.get_parameters("pipeline.yml")
@originate('dependency_check.done')
def dependency_check(outfile):
deps = ["wget", "unzip", "tar", "udocker", "time"]
for cmd in deps:
if shutil.which(cmd) is None:
raise EnvironmentError("Required dependency \"{}\" not found".format(cmd))
open(outfile, 'a').close()
@follows(dependency_check)
@originate('download_code.done')
def download_code(outfile):
statement = '''wget {} && touch download_code.done'''.format(PARAMS['input']['code'])
P.run(statement)
@follows(dependency_check)
@originate('download_data.done')
def download_data(outfile):
statement = '''wget {} && touch download_data.done'''.format(PARAMS['input']['data'])
P.run(statement)
@transform(download_code, suffix('download_code.done'), 'prepare_code.done')
def prepare_code(infile, outfile):
statement = '''unzip master.zip &&
rm master.zip &&
touch prepare_code.done'''
P.run(statement)
@transform(download_data, suffix('download_data.done'), 'prepare_data.done')
def prepare_data(infile, outfile):
statement = '''tar xzf hcg16-data.tar.gz &&
rm hcg16-data.tar.gz &&
touch prepare_data.done'''
P.run(statement)
@follows(prepare_code, prepare_data)
@merge('AW*.xp1', 'calibration.log')
def calibration(infiles, outfile):
statement = '''\\time -o calibration.time -v
udocker run -v "$(pwd)":/data -t amigahub/casa:v1.0 --nogui --logfile calibration.log -c hcg-16-master/casa/calibration_flag.py
1> calibration.stdout
2> calibration.stderr'''
P.run(statement)
@transform(calibration, suffix('calibration.log'), 'imaging.log')
def imaging(infile, outfile):
statement = '''\\time -o imaging.time -v
udocker run -v "$(pwd)":/data -t amigahub/casa:v1.0 --nogui --logfile imaging.log -c hcg-16-master/casa/imaging.py
1> imaging.stdout
2> imaging.stderr'''
P.run(statement)
@split(imaging, ['HCG16_CD_rob2_MS.3.5s.dil', 'HCG16_CD_rob2_MS.5.0s.nodil', 'HIPASS_cube_params'])
def masking(infile, outfiles):
for mask in outfiles:
statement = '''\\time -o masking.{}.time -v
udocker run -v "$(pwd)":/data -t amigahub/sofia:v1.0 hcg-16-master/sofia/{}.session
1> masking.{}.stdout
2> masking.{}.stderr'''.format(mask, mask, mask, mask, mask)
P.run(statement)
open(mask, 'a').close()
@originate('HCG16_DECaLS_cutout.jpeg')
def get_decals_jpeg(outfile):
statement = '''
wget "{}" -O HCG16_DECaLS_cutout.jpeg
'''.format(PARAMS['decals']['jpeg'])
P.run(statement)
@originate('HCG16_DECaLS_r_cutout.fits')
def get_decals_fits(outfile):
statement = '''
wget "{}" -O HCG16_DECaLS_r_cutout.fits
'''.format(PARAMS['decals']['fits'])
P.run(statement)
@follows(imaging, masking, get_decals_jpeg, get_decals_fits)
@merge(imaging, 'plotting.done')
def plotting(infiles, outfile):
statement = '''\\time -o plotting.time -v
cp hcg-16-master/plot_scripts/*.ipynb . &&
cp hcg-16-master/plot_scripts/*.py . &&
cp hcg-16-master/sofia/HIPASS_cube_params.session . &&
for n in `ls *.ipynb`; do jupyter nbconvert --to python $n; newname=$(echo $n | sed 's/.ipynb/.py/g'); python $newname; done &&
touch plotting.done
'''
P.run(statement)
@files(None, 'reset.log')
def cleanup(infile, outfile):
statement = '''rm -rf HCG16_C* HCG16_D*
hcg-16-master/ HCG16_source_mask/ AW*.xp1
*gcal* *bcal* *.last *.log *.time *.stdout *.stderr *.done
rflag* ctmp* delays.cal/ flux.cal/ gaincurve.cal/ *dil
*fits *ascii S* N* E_clump* PGC8210* H* cd_bridge* Fig* Tab2* general_functions.py __pycache__/'''
P.run(statement)
def main(argv=None):
if argv is None:
argv = sys.argv
P.main(argv)
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
|
# -*- coding: utf-8 -*-
import json_lines
__author__ = 'drumcap'
import scrapy
from vanilla_scrap.items import MovieCommentItem
from urllib.parse import urlparse, parse_qs
from datetime import datetime
import re
import random
import time
import json
extract_nums = lambda s: re.search(r'\d+', s).group(0)
sanitize_str = lambda s: s.strip()
rand_sleep = lambda max: time.sleep(int(random.randrange(1, max)))
NAVER_BASEURL = 'http://movie.naver.com/movie/point/af/list.nhn'
NAVER_RATINGURL = NAVER_BASEURL + '?&page=%s'
NAVER_MOVIEURL = NAVER_BASEURL + '?st=mcode&target=after&sword=%s&page=%s'
NAVER_MOVIE_RANK = 'https://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=pnt&tg=%s&page=%s'
class MovieCommentSpider(scrapy.Spider):
name = "movie-comment"
    def extract_nums(self, s): return re.search(r'\d+', s).group(0)
def start_requests(self):
filename = 'movie-info-items.jl'
with json_lines.open(filename) as f:
for item in f:
yield scrapy.Request(NAVER_MOVIEURL % (item.get('movie_id'), 1), self.parse_naver_cmt)
def parse_naver_cmt(self, response):
dtnow = datetime.now()
for sel in response.css('#old_content > table > tbody > tr'):
item = MovieCommentItem()
item['source'] = 'naver'
item['review_id'] = sel.xpath('./td[@class="ac num"]/text()').extract_first()
item['rating'] = sel.xpath('./td[@class="point"]/text()').extract_first()
item['movie_id'] = extract_nums(sel.xpath('./td[@class="title"]/a/@href').extract_first())
item['movie_name'] = sel.xpath('./td[@class="title"]/a/text()').extract_first()
item['review_txt'] = ' '.join(sel.xpath('./td[@class="title"]/text()').extract()).strip()
item['author'] = sel.xpath('./td[@class="num"]/a/text()').extract_first()
item['date'] = datetime.strptime(sel.xpath('./td[@class="num"]/text()').extract_first(),'%y.%m.%d').astimezone().isoformat()
yield item
next_page = response.css('.paging .pg_next::attr(href)').extract_first()
next_page_n = parse_qs(urlparse(next_page).query).get('page')
next_page_num = int(next_page_n[0]) if next_page_n is not None else 0
if next_page is not None and next_page_num < 1000:
print("2 ######## go next page {}".format(next_page))
yield response.follow(next_page, callback=self.parse_naver_cmt) |
import os # NOQA
from mock import patch
import confluent.docker_utils as utils
def test_imports():
""" Basic sanity tests until we write some real tests """
import confluent.docker_utils # noqa
def test_add_registry_and_tag():
""" Inject registry and tag values from environment """
base_image = "confluentinc/example"
fake_environ = {
"DOCKER_REGISTRY": "default-registry/",
"DOCKER_TAG": "default-tag",
"DOCKER_UPSTREAM_REGISTRY": "upstream-registry/",
"DOCKER_UPSTREAM_TAG": "upstream-tag",
"DOCKER_TEST_REGISTRY": "test-registry/",
"DOCKER_TEST_TAG": "test-tag",
}
with patch.dict('os.environ', fake_environ):
assert utils.add_registry_and_tag(base_image) == 'default-registry/confluentinc/example:default-tag'
assert utils.add_registry_and_tag(base_image, scope="UPSTREAM") == 'upstream-registry/confluentinc/example:upstream-tag'
assert utils.add_registry_and_tag(base_image, scope="TEST") == 'test-registry/confluentinc/example:test-tag'
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2020 Florent Rougon
#
# This file is distributed under the terms of the DO WHAT THE FUCK YOU WANT TO
# PUBLIC LICENSE version 2, dated December 2004, by Sam Hocevar. You should
# have received a copy of this license along with this file. You can also find
# it at <http://www.wtfpl.net/>.
from setuptools import setup, find_packages
import sys
import os
import subprocess
import traceback
setuptools_pkg = "FFGo"
pypkg_name = "ffgo"
here = os.path.abspath(os.path.dirname(__file__))
namespace = {}
version_file = os.path.join(here, pypkg_name, "version.py")
with open(version_file, "r", encoding="utf-8") as f:
exec(f.read(), namespace)
version = namespace["__version__"]
def do_setup():
with open("README.rst", "r", encoding="utf-8") as f:
long_description = f.read()
setup(
name=setuptools_pkg,
version=version,
description="A powerful graphical launcher for the FlightGear "
"flight simulator",
long_description=long_description,
long_description_content_type="text/x-rst",
url="http://frougon.net/projects/{}/".format(setuptools_pkg),
license='WTFPLv2',
project_urls={
"Screenshots": "http://frougon.net/projects/FFGo/gallery/",
"FFGo doc. on the FlightGear wiki":
"http://wiki.flightgear.org/FFGo",
"FFGo conditional config doc.":
"http://frougon.net/projects/FFGo/doc/README-conditional-config/",
"Git repository": "https://github.com/frougon/FFGo",
"Issue tracker": "https://github.com/frougon/FFGo/issues",
},
# According to
# <https://packaging.python.org/specifications/core-metadata/> and the
# rendering on PyPI, it appears that only the original author can be
# listed in the 'author' field. FGo! and its author Robert Leda are
# mentioned at the very beginning of README.rst, and therefore also of
# the FFGo home page. Given the limitations for the 'author' field, I
# believe this is reasonable and fair.
author="Florent Rougon",
author_email="f.rougon@free.fr",
# See <https://pypi.org/pypi?%3Aaction=list_classifiers>
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'License :: DFSG approved',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Games/Entertainment :: Simulation',
],
keywords=\
'FlightGear,flightgear,flight,simulator,launcher,FFGo,FGo!',
packages=[pypkg_name],
include_package_data=True,
# Files to exclude from installation
exclude_package_data = { '':
['*/COPYING.txt', '*.po', '*.pot',
'*.xcf', '*/thumbnail-no-Pillow.svg',
'*/Makefile', '*/Makefile.py-functions'] },
python_requires='~=3.4',
install_requires=['CondConfigParser'],
extras_require = {'images': ['Pillow'], 'geo': ['geographiclib']},
entry_points={'console_scripts': ['ffgo = ffgo.main:main'],
'gui_scripts': ['ffgo-noconsole = ffgo.main:main']},
# We need real files and directories for gettext l10n files, but
# pkg_resources.resource_filename() doesn't work if the package is
# imported from a zip file ("resource_filename() only supported for
# .egg, not .zip"). As a consequence:
# - this project can't be "zip safe" without ugly hacks;
# - the pkg_resources module doesn't bring any value here; we can
# happily use __file__ to find our resources, and avoid depending
# on pkg_resources to spare our beloved users the hassle of
# installing one more dependency.
zip_safe=False
)
if __name__ == "__main__": do_setup()
|
# -*- coding: utf-8 -*-
from setup import *
def plot_signal_location(report, plot_width=900, plot_height=200):
fig = bplt.figure(title='Signal location', plot_width=plot_width,
plot_height=plot_height,
tools='xpan,xzoom_in,xzoom_out,xwheel_zoom,reset',
toolbar_location='above', active_drag='xpan',
active_scroll='xwheel_zoom')
x = np.array(report['pos']) / 1e6
y_data = np.array(report['values'])
y_fit = np.array(report['best_fit'])
peak_span = bmod.BoxAnnotation(left=report['peak_start']/1e6,
right=report['peak_end']/1e6,
level='underlay',
fill_color='blue', fill_alpha=.1)
focus_span = bmod.BoxAnnotation(left=report['focus_start']/1e6,
right=report['focus_end']/1e6,
level='underlay',
fill_color='red', fill_alpha=.3)
epicenter_span = bmod.BoxAnnotation(left=(report['epicenter'] - 10000)/1e6,
right=(report['epicenter'] + 10000)/1e6,
level='underlay', fill_color='red',
fill_alpha=.3)
fig.add_layout(peak_span)
fig.add_layout(focus_span)
fig.add_layout(epicenter_span)
fig.line(x, y_fit, line_color='black', line_dash='dashed')
fig.circle(x, y_data, alpha=1)
# bound the range to prevent zooming out too far
fig.x_range = bmod.Range1d(x[0], x[-1], bounds=(x[0], x[-1]))
fig.xaxis.axis_label = \
'Chromosome {} position (Mbp)'.format(report['chromosome'])
fig.yaxis.axis_label = report['statistic']['id']
return fig
def plot_genes(genes, chrom, start, end, fig=None, offset=0, x_range=None,
plot_width=900, plot_height=100):
# setup figure
if fig is None:
hover = bmod.HoverTool(
tooltips="<p>@label<br/>@seqid:@start{,}-@end{,}</p>")
fig = bplt.figure(title='Genes', plot_width=plot_width,
plot_height=plot_height, x_range=x_range,
tools='xpan,xzoom_in,xzoom_out,xwheel_zoom,'
'reset,tap'.split() + [hover],
toolbar_location='above', active_drag='xpan',
active_scroll='xwheel_zoom')
fig.xaxis.axis_label = 'Chromosome {} position (Mbp)'.format(chrom)
url = '../../../../../gene/@id.html'
taptool = fig.select(type=bmod.TapTool)
taptool.callback = bmod.OpenURL(url=url)
# handle joined chromosomes
# TODO (thumps desk) there must be a better way!
if chrom in '23':
# plot R arm (on the left)
rarm = '{}R'.format(chrom)
rarm_len = len(genome[rarm])
if start < rarm_len:
rarm_start = start
rarm_end = min(rarm_len, end)
plot_genes(genes, rarm, rarm_start, rarm_end, fig=fig)
# plot L arm (on the right)
larm = '{}L'.format(chrom)
if end > rarm_len:
larm_start = max(0, start - rarm_len)
larm_end = end - rarm_len
plot_genes(genes, larm, larm_start, larm_end, fig=fig,
offset=rarm_len)
return fig
# from here assume single arm
seqid = chrom
df = genes[(genes.seqid == seqid) &
(genes.end >= start) &
(genes.start <= end)]
labels = [('{}'.format(gene.ID) +
(' ({})'.format(gene.Name) if gene.Name else '') +
(' - {}'.format(gene.description.split('[Source:')[0])
if gene.description else ''))
for _, gene in df.iterrows()]
# hover = bmod.HoverTool(tooltips=[
# ("ID", '@id'),
# ("Name", '@name'),
# ("Description", '@description'),
# ("Location", "@seqid:@start-@end"),
# ])
bottom = np.zeros(len(df))
bottom[df.strand == '+'] = 1
source = bmod.ColumnDataSource(data={
'seqid': df.seqid,
'start': df.start,
'end': df.end,
'left': (df.start + offset) / 1e6,
'right': (df.end + offset) / 1e6,
'bottom': bottom,
'top': bottom + .8,
'id': df.ID,
'name': df.Name,
'description': df.description,
'label': labels,
})
fig.quad(bottom='bottom', top='top', left='left', right='right',
source=source, line_width=0)
fig.y_range = bmod.Range1d(-.5, 2.3)
yticks = [0.4, 1.4]
yticklabels = ['reverse', 'forward']
fig.yaxis.ticker = yticks
fig.yaxis.major_label_overrides = {k: v for k, v in zip(yticks, yticklabels)}
fig.ygrid.visible = False
return fig
def fig_signal_location(report, genes):
fig1 = plot_signal_location(report)
chrom = report['chromosome']
start = report['pos'][0]
end = report['pos'][-1]
fig1.xaxis.visible = False
fig2 = plot_genes(genes, chrom, start, end, x_range=fig1.x_range)
gfig = blay.gridplot([[fig1], [fig2]], toolbar_location='above')
return gfig
def build_signal_outputs(path, template, genes, signals, ir_candidates):
# load the basic signal report
with open(path, mode='rb') as report_file:
        report = yaml.safe_load(report_file)
# figure out what chromosome arm
chromosome = report['chromosome']
epicenter = report['epicenter']
epicenter_seqid, epicenter_coord = split_arms(chromosome, epicenter)
# obtain focus
focus_start = report['focus_start']
focus_start_seqid, focus_start_coord = split_arms(chromosome, focus_start)
focus_end = report['focus_end']
focus_end_seqid, focus_end_coord = split_arms(chromosome, focus_end)
# crude way to deal with rare case where focus spans centromere
# TODO handle whole chromosomes
if focus_start_seqid != epicenter_seqid:
focus_start_coord = 1
if focus_end_seqid != epicenter_seqid:
focus_end_coord = len(genome[epicenter_seqid])
report['min_flank_delta_aic'] = min(report['delta_aic_left'], report['delta_aic_right'])
# augment report with gene information
overlapping_genes = genes[(
(genes.seqid == epicenter_seqid) &
(genes.start <= focus_end_coord) &
(genes.end >= focus_start_coord)
)]
report['overlapping_genes'] = [
{'id': gene.ID,
'name': gene.Name,
'description': gene.description.split('[Source:')[0].strip()}
for _, gene in overlapping_genes.iterrows()
]
adjacent_genes = genes[(
(genes.seqid == epicenter_seqid) &
((genes.end < focus_start_coord) | (genes.start > focus_end_coord)) &
(genes.start <= (focus_end_coord + 50000)) &
(genes.end >= (focus_start_coord - 50000))
)]
report['adjacent_genes'] = [
{'id': gene.ID,
'name': gene.Name,
'description': gene.description.split('[Source:')[0].strip()}
for _, gene in adjacent_genes.iterrows()
]
# augment report with related signals information
# TODO this doesn't properly handle overlapping signals spanning a
# centromere
overlapping_signals = signals[(
(signals.epicenter_seqid == epicenter_seqid) &
(signals.focus_start_coord <= focus_end_coord) &
(signals.focus_end_coord >= focus_start_coord) &
# don't include self
((signals.pop_key != report['pop_key']) |
(signals.statistic != report['statistic']['id']))
)]
report['overlapping_signals'] = overlapping_signals.to_dict(orient='records')
overlapping_loci = [locus for locus in known_loci
if (locus['seqid'] == epicenter_seqid and
locus['start_coord'] <= focus_end_coord and
locus['end_coord'] >= focus_start_coord)]
overlapping_loci_names = set([locus['short_name'] for locus in overlapping_loci])
adjacent_loci = [locus for locus in known_loci
if (locus['seqid'] == epicenter_seqid and
locus['start_coord'] <= (focus_end_coord + 50000) and
locus['end_coord'] >= (focus_start_coord - 50000) and
locus['short_name'] not in overlapping_loci_names)]
report['overlapping_loci'] = overlapping_loci
report['adjacent_loci'] = adjacent_loci
report['ir_candidates'] = ir_candidates
# render the report
out_dir = os.path.join(
'docs',
os.path.dirname(path)[len('docs/_static/data/'):]
)
os.makedirs(out_dir, exist_ok=True)
page_path = os.path.join(out_dir, 'index.rst')
print('rendering', page_path)
with open(page_path, mode='w') as page_file:
print(template.render(**report), file=page_file)
# render a bokeh signal plot
fig = fig_signal_location(report, genes)
script, div = bemb.components(fig)
plot_path = os.path.join(out_dir, 'peak_location.html')
print('rendering', plot_path)
with open(plot_path, mode='w') as plot_file:
print('<div class="bokeh-figure peak-location">', file=plot_file)
print(script, file=plot_file)
print(div, file=plot_file)
print('</div>', file=plot_file)
def main():
# setup jinja
loader = jinja2.FileSystemLoader('templates')
env = jinja2.Environment(loader=loader)
template = env.get_template('signal.rst')
# setup signals
signals = pd.read_csv('docs/_static/data/signals.csv')
# setup IR candidates
ir_candidates = {
slug: (
etl
.fromtsv('docs/_static/data/ir-candidate-genes/{}.csv'.format(slug))
.values(0).set()
)
for slug in ['metabolic', 'target_site', 'behavioural', 'cuticular']
}
# iterate over signal reports
for path in sorted(glob('docs/_static/data/signal/*/*/*/*/report.yml')):
build_signal_outputs(path, template, genes, signals, ir_candidates)
if __name__ == '__main__':
main()
|
CLIENT_TO_SERVER_RPC = 1
CLIENT_CREATE_ENTITY = 2
SERVER_TO_CLIENT_RPC = 3
CLIENT_DESTROY_ENTITY = 4
|
# -*- coding: UTF-8 -*-
"""Tests for Plato base class."""
from __future__ import print_function, unicode_literals
import unittest
from .base import Plato
from .kle_parser import Key
class TestPlato(unittest.TestCase):
"""Tests for Plato."""
def test_calculate_layout_width_height(self):
"""Test Plato calculate_layout works out width and height."""
plato = Plato(unit_mm=19)
keys = [Key('q', (0, 0), (1, 1)), Key('m', (12, 5), (2, 3))]
plato.calculate_layout(keys)
self.assertAlmostEqual(plato.width_in_units, 14)
self.assertAlmostEqual(plato.height_in_units, 8)
self.assertAlmostEqual(plato.centre_col, 7)
self.assertAlmostEqual(plato.centre_row, 4)
def test_key_bbox(self):
"""Test key_bbox of a list of keys is centred on 0,0."""
plato = Plato(unit_mm=19)
keys = [Key('q', (0, 0), (1, 1)), Key('m', (12, 5), (2, 1))]
plato.calculate_layout(keys)
(x, y), (wd, ht) = plato.key_bbox()
self.assertAlmostEqual(wd, 14 * 19) # Because of wide letter m
self.assertAlmostEqual(ht, 6 * 19)
self.assertAlmostEqual(x, -7 * 19)
self.assertAlmostEqual(y, -3 * 19) # Bottom left
def test_key_bbox_sans_keys(self):
"""Test key_bbox of a list of keys is centred on 0,0."""
plato = Plato(unit_mm=19, width_in_units=10, height_in_units=3)
(x, y), (wd, ht) = plato.key_bbox()
self.assertAlmostEqual(wd, 10 * 19) # As supplied by caller
self.assertAlmostEqual(ht, 3 * 19)
self.assertAlmostEqual(x, -5 * 19)
self.assertAlmostEqual(y, -1.5 * 19)
def test_key_coords_of_0_0_is_top_left(self):
"""Test Plato key_coords of (0, 0) is top left."""
# Define a 3×2 layout.
plato = Plato(unit_mm=16)
keys = [Key('q', (0, 0), (1, 1)), Key('s', (2, 1), (1, 1))]
plato.calculate_layout(keys)
x, y = plato.key_coords(Key('q', (0, 0), (1, 1)))
self.assertAlmostEqual(x, -16) # 1 unit to left of centre because 3 keys across
self.assertAlmostEqual(y, 8) # half unit above centre because 2 units high
x, y = plato.key_coords(Key('s', (2, 1), (1, 1)))
self.assertAlmostEqual(x, 16)
self.assertAlmostEqual(y, -8)
x, y = plato.key_coords(Key('Ent', (1, 0), (2, 1)))
self.assertAlmostEqual(x, 8) # Adjusted for width
self.assertAlmostEqual(y, 8)
def test_key_coords_offset_if_not_key_in_col_zero(self):
"""Test Plato key_coords of (0, 0) is top left."""
# Define a 3×2 where for some reason there is space at top and left.
plato = Plato(unit_mm=16)
keys = [Key('q', (2, 1), (1, 1)), Key('s', (4, 2), (1, 1))]
plato.calculate_layout(keys)
x, y = plato.key_coords(Key('q', (2, 1), (1, 1))) # the q is top left of this 2x2 layout
self.assertAlmostEqual(x, -16) # 1 unit to left of centre because 3 keys across
self.assertAlmostEqual(y, 8) # half unit above centre because 2 units high
|
import os
import argparse
import numpy as np
import torch
from torch.utils.data import DataLoader
from datasets import Cifar10Dataset
from networks import Generator, Discriminator, weights_init_normal
from helpers import print_args, print_losses
from helpers import save_sample, adjust_learning_rate
def init_training(args):
"""Initialize the data loader, the networks, the optimizers and the loss functions."""
datasets = Cifar10Dataset.get_datasets_from_scratch(args.data_path)
for phase in ['train', 'test']:
print('{} dataset len: {}'.format(phase, len(datasets[phase])))
# define loaders
data_loaders = {
'train': DataLoader(datasets['train'], batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers),
'test': DataLoader(datasets['test'], batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
}
# check CUDA availability and set device
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Use GPU: {}'.format(str(device) != 'cpu'))
# set up models
generator = Generator(args.gen_norm).to(device)
discriminator = Discriminator(args.disc_norm).to(device)
# initialize weights
if args.apply_weight_init:
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# adam optimizer with reduced momentum
optimizers = {
'gen': torch.optim.Adam(generator.parameters(), lr=args.base_lr_gen, betas=(0.5, 0.999)),
'disc': torch.optim.Adam(discriminator.parameters(), lr=args.base_lr_disc, betas=(0.5, 0.999))
}
# losses
losses = {
'l1': torch.nn.L1Loss(reduction='mean'),
'disc': torch.nn.BCELoss(reduction='mean')
}
# make save dir, if it does not exists
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
# load weights if the training is not starting from the beginning
global_step = args.start_epoch * len(data_loaders['train']) if args.start_epoch > 0 else 0
if args.start_epoch > 0:
generator.load_state_dict(torch.load(
os.path.join(args.save_path, 'checkpoint_ep{}_gen.pt'.format(args.start_epoch - 1)),
map_location=device
))
discriminator.load_state_dict(torch.load(
os.path.join(args.save_path, 'checkpoint_ep{}_disc.pt'.format(args.start_epoch - 1)),
map_location=device
))
return global_step, device, data_loaders, generator, discriminator, optimizers, losses
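# NOTE: adjust_learning_rate, print_args, print_losses and save_sample are imported
# from helpers.py, which is not included here. Judging from the arguments passed in
# the training loop below, adjust_learning_rate is assumed to apply a decay roughly
# of the form lr = base_lr * lr_decay_rate ** (global_step / lr_decay_steps); the
# exact schedule is whatever helpers.py defines.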
def run_training(args):
"""Initialize and run the training process."""
global_step, device, data_loaders, generator, discriminator, optimizers, losses = init_training(args)
# run training process
for epoch in range(args.start_epoch, args.max_epoch):
print('\n========== EPOCH {} =========='.format(epoch))
for phase in ['train', 'test']:
# running losses for generator
epoch_gen_adv_loss = 0.0
epoch_gen_l1_loss = 0.0
# running losses for discriminator
epoch_disc_real_loss = 0.0
epoch_disc_fake_loss = 0.0
epoch_disc_real_acc = 0.0
epoch_disc_fake_acc = 0.0
if phase == 'train':
print('TRAINING:')
else:
print('VALIDATION:')
for idx, sample in enumerate(data_loaders[phase]):
# get data
img_l, real_img_lab = sample[:, 0:1, :, :].float().to(device), sample.float().to(device)
# generate targets
target_ones = torch.ones(real_img_lab.size(0), 1).to(device)
target_zeros = torch.zeros(real_img_lab.size(0), 1).to(device)
if phase == 'train':
# adjust LR
global_step += 1
adjust_learning_rate(optimizers['gen'], global_step, base_lr=args.base_lr_gen,
lr_decay_rate=args.lr_decay_rate, lr_decay_steps=args.lr_decay_steps)
adjust_learning_rate(optimizers['disc'], global_step, base_lr=args.base_lr_disc,
lr_decay_rate=args.lr_decay_rate, lr_decay_steps=args.lr_decay_steps)
# reset generator gradients
optimizers['gen'].zero_grad()
# train / inference the generator
with torch.set_grad_enabled(phase == 'train'):
fake_img_ab = generator(img_l)
fake_img_lab = torch.cat([img_l, fake_img_ab], dim=1).to(device)
# adv loss
adv_loss = losses['disc'](discriminator(fake_img_lab), target_ones)
# l1 loss
l1_loss = losses['l1'](real_img_lab[:, 1:, :, :], fake_img_ab)
# full gen loss
full_gen_loss = (1.0 - args.l1_weight) * adv_loss + (args.l1_weight * l1_loss)
if phase == 'train':
full_gen_loss.backward()
optimizers['gen'].step()
epoch_gen_adv_loss += adv_loss.item()
epoch_gen_l1_loss += l1_loss.item()
if phase == 'train':
# reset discriminator gradients
optimizers['disc'].zero_grad()
# train / inference the discriminator
with torch.set_grad_enabled(phase == 'train'):
prediction_real = discriminator(real_img_lab)
prediction_fake = discriminator(fake_img_lab.detach())
loss_real = losses['disc'](prediction_real, target_ones * args.smoothing)
loss_fake = losses['disc'](prediction_fake, target_zeros)
full_disc_loss = loss_real + loss_fake
if phase == 'train':
full_disc_loss.backward()
optimizers['disc'].step()
epoch_disc_real_loss += loss_real.item()
epoch_disc_fake_loss += loss_fake.item()
epoch_disc_real_acc += np.mean(prediction_real.detach().cpu().numpy() > 0.5)
epoch_disc_fake_acc += np.mean(prediction_fake.detach().cpu().numpy() <= 0.5)
# save the first sample for later
if phase == 'test' and idx == 0:
sample_real_img_lab = real_img_lab
sample_fake_img_lab = fake_img_lab
# display losses
print_losses(epoch_gen_adv_loss, epoch_gen_l1_loss,
epoch_disc_real_loss, epoch_disc_fake_loss,
epoch_disc_real_acc, epoch_disc_fake_acc,
len(data_loaders[phase]), args.l1_weight)
# save after every nth epoch
if phase == 'test':
if epoch % args.save_freq == 0 or epoch == args.max_epoch - 1:
gen_path = os.path.join(args.save_path, 'checkpoint_ep{}_gen.pt'.format(epoch))
disc_path = os.path.join(args.save_path, 'checkpoint_ep{}_disc.pt'.format(epoch))
torch.save(generator.state_dict(), gen_path)
torch.save(discriminator.state_dict(), disc_path)
print('Checkpoint.')
# display sample images
save_sample(
sample_real_img_lab,
sample_fake_img_lab,
os.path.join(args.save_path, 'sample_ep{}.png'.format(epoch))
)
def get_arguments():
"""Get command line arguments."""
parser = argparse.ArgumentParser(
description='Image colorization with GANs',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--data_path', type=str, default='./data',
help='Download and extraction path for the dataset.')
parser.add_argument('--save_path', type=str, default='./checkpoints',
help='Save and load path for the network weights.')
parser.add_argument('--save_freq', type=int, default=5, help='Save frequency during training.')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--start_epoch', type=int, default=0,
help='If start_epoch>0, load previously saved weights from the save_path.')
parser.add_argument('--max_epoch', type=int, default=200)
parser.add_argument('--smoothing', type=float, default=0.9)
parser.add_argument('--l1_weight', type=float, default=0.99)
parser.add_argument('--base_lr_gen', type=float, default=3e-4, help='Base learning rate for the generator.')
parser.add_argument('--base_lr_disc', type=float, default=6e-5, help='Base learning rate for the discriminator.')
parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='Learning rate decay rate for both networks.')
parser.add_argument('--lr_decay_steps', type=float, default=6e4, help='Learning rate decay steps for both networks.')
parser.add_argument('--gen_norm', type=str, default='batch', choices=['batch', 'instance'],
help='Defines the type of normalization used in the generator.')
parser.add_argument('--disc_norm', type=str, default='batch', choices=['batch', 'instance', 'spectral'],
help='Defines the type of normalization used in the discriminator.')
parser.add_argument('--apply_weight_init', type=int, default=0, choices=[0, 1],
help='If set to 1, applies the "weights_init_normal" function from networks.py.')
return parser.parse_args()
if __name__ == '__main__':
args = get_arguments()
# display arguments
print_args(args)
run_training(args)
|
import numpy as np
import pandas as pd
class DataModel:
"""
This class implements a data model - values at time points and provides methods for working with these data.
"""
def __init__(self, n=0, values=None, times=None):
"""
A constructor that takes values and a time point.
:param values: Array of values process
:param times: Array of a time points
"""
if (values is None) or (times is None):
self._times = np.zeros((n, ))
self._values = np.zeros((n, ))
else:
if len(values) != len(times):
print("Different size of values and times")
else:
self._times = np.array(times, dtype=float)
self._values = np.array(values, dtype=float)
def print(self, n=None):
_n = n if n is not None else self._times.shape[0]
for i in range(_n):
print("Time: {}___Value: {}".format(self._times[i], self._values[i]))
@property
def mean(self):
"""
:return: Mean of values
"""
return self._values.mean()
def get_values(self):
return self._values
def get_times(self):
return self._times
def add_value(self, value, index):
# self._values.__add__(value)
self._values[index] = value
def add_time(self, time, index):
# self._times.__add__(time)
self._times[index] = time
def get_value(self, index):
return self._values[index]
def get_time(self, index):
return self._times[index] |
import logging
from typing import NamedTuple, Optional, List, Tuple
from lightbus.api import Api
from lightbus.message import EventMessage, RpcMessage, ResultMessage
from lightbus.utilities.internal_queue import InternalQueue
logger = logging.getLogger(__name__)
class SendEventCommand(NamedTuple):
message: EventMessage
options: dict = {}
class ConsumeEventsCommand(NamedTuple):
events: List[Tuple[str, str]]
listener_name: str
destination_queue: InternalQueue[EventMessage]
options: dict = {}
class AcknowledgeEventCommand(NamedTuple):
message: EventMessage
options: dict = {}
class CallRpcCommand(NamedTuple):
message: RpcMessage
options: dict = {}
class ConsumeRpcsCommand(NamedTuple):
api_names: List[str]
options: dict = {}
class ExecuteRpcCommand(NamedTuple):
"""An RPC call has been received and must be executed locally"""
message: RpcMessage
class PublishApiSchemaCommand(NamedTuple):
api: Api
class CloseCommand(NamedTuple):
pass
class SendResultCommand(NamedTuple):
rpc_message: RpcMessage
message: ResultMessage
class ReceiveResultCommand(NamedTuple):
message: RpcMessage
destination_queue: InternalQueue
options: dict
class ReceiveSchemaUpdateCommand(NamedTuple):
schema: dict
class ShutdownCommand(NamedTuple):
exception: Optional[BaseException]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python APRS Module Tests."""
__author__ = 'Greg Albrecht W2GMD <oss@undef.net>' # NOQA pylint: disable=R0801
__copyright__ = 'Copyright 2017 Greg Albrecht and Contributors' # NOQA pylint: disable=R0801
__license__ = 'Apache License, Version 2.0' # NOQA pylint: disable=R0801
|
import logging
from functools import lru_cache
@lru_cache(maxsize=2048)
class Logger(object):
def __init__(self, name, level=logging.INFO, *args, **kwargs):
format = '%(asctime)s: %(name)s: %(process)d: %(levelname)-8s: %(threadName)-11s: %(message)s'
logging.basicConfig(format=format, level=level)
self._logger = logging.getLogger(name=name)
def setLogger(self,_name):
self._logger = logging.getLogger(_name)
def getLogger(self):
return self._logger
def debug(self,message):
#LEVEL 10
self.getLogger().debug(message)
def info(self,message):
#LEVEL 20
self._logger.info(message)
def warn(self,message):
#LEVEL 30
self._logger.warning(message)
def error(self,message):
#LEVEL 40
self._logger.error(message)
def critical(self,message):
#LEVEL 50
self._logger.critical(message)
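# Usage sketch: because the class is wrapped in lru_cache, constructing Logger with
# the same name returns the same cached instance, so modules can share one logger:
#   log = Logger('worker')
#   log.info('started')
#   assert Logger('worker') is log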
|
from flask import Flask, render_template, request
import re
import base64
import io
import numpy as np
from PIL import Image
from classifier import model as ml
model, graph = ml()
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
def convertImage(imgData1):
img = Image.open(io.BytesIO(
base64.b64decode(re.search(r'base64,(.*)', str(imgData1)).group(1))))
return img
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == "GET":
return render_template("index.html")
else:
imgData = request.get_data()
img = convertImage(imgData).convert('L')
img = img.resize((28, 28))
img = np.asarray(img).astype('float32')/255
img.resize(1, 28, 28, 1)
with graph.as_default():
return str(model.predict(img).argmax())
if __name__ == '__main__':
app.run(port=5000,host='0.0.0.0')
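# Client sketch (assumption: the server above is running on localhost:5000).
# The POST body must contain a base64 data URL of the drawing, e.g. with requests:
#   import requests
#   requests.post('http://localhost:5000/', data='data:image/png;base64,<...>').text
# The response body is the predicted digit as a string.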
|
import datetime
import os
import sys
from api.infrastructure.mysql import connection
try:
sys.path.insert(1, os.path.join(sys.path[0], '../..'))
from users import getCustomersPerUser
except:
pass
try:
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from users import getCustomersPerUser
except:
pass
try:
from api.qymatix.users import getCustomersPerUser
except:
pass
def getCustomersList(cur, username):
cur.execute("select database()")
dbname = cur.fetchone()[0]
dbname = dbname.replace('data_', '')
cust = getCustomersPerUser(dbname=dbname, username=username)
cust = cust[next(iter(cust))].replace('[', '(').replace(']', ')')
return cust
def monthdelta(date, delta):
m, y = (date.month + delta) % 12, date.year + ((date.month) + delta - 1) // 12
if not m: m = 12
d = min(date.day,
[31, 29 if y % 4 == 0 and (y % 100 != 0 or y % 400 == 0) else 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][m - 1])
return date.replace(day=d, month=m, year=y)
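# Example: shifting 2021-03-31 back one month clamps the day to February's length:
# monthdelta(datetime.datetime(2021, 3, 31), -1) -> datetime.datetime(2021, 2, 28, 0, 0)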
def accounts(cur, username=''):
'''Return the customer ids visible to `username` (all customers for an empty or admin user).'''
if username in ['', 'admin']:
script_nop = "\
SELECT customers.id\
FROM customers;\
"
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT customers.id\
FROM customers\
WHERE customers.id IN {0};\
".format(cust)
cur.execute(script_nop)
_data = cur.fetchall()
d = [i[0] for i in _data]
return d
def accounts_name(cur, username=''):
'''Return the customer names visible to `username` (all customers for an empty or admin user).'''
if username in ['', 'admin']:
script_nop = "\
SELECT customers.name\
FROM customers;\
"
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT customers.name\
FROM customers\
WHERE customers.id IN {0};\
".format(cust)
cur.execute(script_nop)
_data = cur.fetchall()
d = [i[0] for i in _data]
return d
def accountsThreeYD(cur, account='all', username=''):
'''Return a dict mapping customer id to total sales (price) from 1 January three years ago until today.'''
today = str(datetime.datetime.now()).split(" ")[0]
today = datetime.datetime.now()
tyb = datetime.datetime(year=today.year - 3, month=1, day=1)
if username in ['', 'admin']:
script_nop = "\
SELECT customers.id, SUM(sales.{})\
FROM sales\
LEFT JOIN customers\
ON sales.customer_id=customers.id\
WHERE sales.date BETWEEN DATE('{}') AND DATE('{}')\
GROUP BY customers.id;\
".format('price', tyb, today)
else:
# cust = getCustomersList(cur, username)
cust = tuple(accounts(cur, username))
script_nop = "\
SELECT customers.id, SUM(sales.{})\
FROM sales\
LEFT JOIN customers\
ON sales.customer_id=customers.id\
WHERE sales.date BETWEEN DATE('{}') AND DATE('{}') AND customers.id IN {}\
GROUP BY customers.id;\
".format('price', tyb, today, cust)
cur.execute(script_nop)
_data = cur.fetchall()
d = dict()
for i in range(len(_data)):
d[_data[i][0]] = round(_data[i][1], 2)
return d
def activeAccounts(cur, param='price', today=None, username=''):
'''Return a dict mapping customer id to the summed sales of `param` over the three months ending at `today`.'''
if today != None:
y = today.year
m = today.month
d = today.day
today = datetime.datetime(year=y, month=m, day=d)
tmb = datetime.datetime(year=y, month=m, day=d)
else:
today = str(datetime.datetime.now()).split(" ")[0]
today = datetime.datetime.now()
tmb = datetime.datetime.now()
'''
dif = today.month - 3
if dif <= 0:
m = 12 + dif
y = today.year - 1
tmb = datetime.datetime(year=y, month=m, day=today.day)
else:
#tmb = datetime.datetime(year=today.year, month=dif, day=today.day)
'''
tmb = monthdelta(today, -3)
if username in ['', 'admin']:
script_nop = "\
SELECT customers.id, SUM(sales.{})\
FROM sales\
LEFT JOIN customers\
ON sales.customer_id=customers.id\
WHERE sales.date BETWEEN DATE('{}') AND DATE('{}')\
GROUP BY customers.id;\
".format(param, tmb, today)
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT customers.id, SUM(sales.{})\
FROM sales\
LEFT JOIN customers\
ON sales.customer_id=customers.id\
WHERE sales.date BETWEEN DATE('{}') AND DATE('{}') AND customers.id IN {}\
GROUP BY customers.id;\
".format(param, tmb, today, cust)
cur.execute(script_nop)
_data = cur.fetchall()
active_accounts = dict()
for i in range(len(_data)):
active_accounts[_data[i][0]] = round(_data[i][1], 2)
return active_accounts
def activeAccountsCRM(cur, param='price', when=None, username=''):
'''Return a dict mapping account to the number of CRM tasks due or created in the three months ending at `when`.'''
if when == None:
today = str(datetime.datetime.now()).split(" ")[0]
today = datetime.datetime.now()
# tmb = datetime.datetime.now()
else:
y = when.year
m = when.month
d = when.day
today = datetime.datetime(year=y, month=m, day=d)
# tmb = datetime.datetime(year=y, month=m, day=d)
tmb = monthdelta(today, -3)
script_nop = "\
SELECT plans.account, SUM(plans.goal)\
FROM plans\
WHERE plans.due BETWEEN DATE('{0}') AND DATE('{1}') OR plans.created >= DATE('{0}')\
GROUP BY plans.account;\
".format(tmb, today)
if username in ['', 'admin']:
script_nop = "\
SELECT tasks.account, COUNT(tasks.due)\
FROM tasks\
WHERE tasks.due BETWEEN DATE('{0}') AND DATE('{1}') OR tasks.created >= DATE('{0}')\
GROUP BY tasks.account;\
".format(tmb, today)
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT tasks.account, COUNT(tasks.due)\
FROM tasks\
WHERE tasks.due BETWEEN DATE('{0}') AND DATE('{1}') OR tasks.created >= DATE('{0}') AND tasks.id IN {2}\
GROUP BY tasks.account;\
".format(tmb, today, cust)
# print(script_nop)
try:
cur.execute(script_nop)
_data = cur.fetchall()
d = dict()
for i in range(len(_data)):
d[_data[i][0]] = round(_data[i][1], 2)
except Exception as e:
# raise
print(e)
d = {}
return d
def goalsPerQuarter(cur, minYear=None, maxYear=None, account='all', username=''):
'''Return a dict mapping quarter (1-4) to the summed plan goals between `minYear` and `maxYear` (missing quarters are filled with 0).'''
if minYear == None:
minYear = str(datetime.datetime.now().year)
# minYear = 2016
if maxYear == None:
maxYear = minYear
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT QUARTER(due), SUM(goal) from plans\
WHERE YEAR(due) BETWEEN {} AND {}\
GROUP BY QUARTER(due);\
".format(minYear, maxYear)
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT QUARTER(due), SUM(goal) from plans\
WHERE YEAR(due) BETWEEN {} AND {} AND account IN {}\
GROUP BY QUARTER(due);\
".format(minYear, maxYear, cust)
else:
script_nop = "\
SELECT QUARTER(due), SUM(goal) from plans\
WHERE YEAR(due) BETWEEN {} AND {} AND account='{}'\
GROUP BY QUARTER(due);\
".format(minYear, maxYear, account)
# GROUP BY YEAR(due), MONTH(due);\
cur.execute(script_nop)
_data = cur.fetchall()
try:
d = dict()
print(_data)
for i in range(len(_data)):
d[int(_data[i][0])] = int(_data[i][1])
if d == {}:
today = datetime.datetime.now()
quarter = (today.month - 1) // 3 + 1
for q in range(1, quarter + 1):
d[q] = 0
except:
return 0
if 1 not in d.keys():
d[1] = 0.0
if 2 not in d.keys():
d[2] = 0.0
if 3 not in d.keys():
d[3] = 0.0
if 4 not in d.keys():
d[4] = 0.0
return d
def actionsPerYear(cur, account='all', username=None):
'''
'''
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT YEAR(due), COUNT(action) from tasks\
GROUP BY YEAR(due);\
"
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT YEAR(due), COUNT(action) from tasks\
WHERE account IN {}\
GROUP BY YEAR(due);\
".format(cust)
else:
script_nop = "\
SELECT YEAR(due), COUNT(action) from tasks\
WHERE account = '{}'\
GROUP BY YEAR(due);\
".format(account)
cur.execute(script_nop)
_data = cur.fetchall()
try:
d = dict()
for i in range(len(_data)):
d[int(_data[i][0])] = int(_data[i][1])
except:
return 0
return d
def actionsQTD(cur, year=None, account='all', username=''):
'''
'''
if year == None:
year = str(datetime.datetime.now().year)
today = str(datetime.datetime.now()).split(" ")[0]
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT COUNT(action) from tasks\
WHERE QUARTER(tasks.due)=QUARTER('{0}') AND DATE(tasks.due)<='{0}' AND YEAR(tasks.due)='{1}';\
".format(today, year)
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT COUNT(action) from tasks\
WHERE QUARTER(tasks.due)=QUARTER('{0}') AND DATE(tasks.due)<='{0}' AND YEAR(tasks.due)='{1}' AND account IN {2};\
".format(today, year, cust)
else:
script_nop = "\
SELECT COUNT(action) from tasks\
WHERE QUARTER(tasks.due)=QUARTER('{0}') AND DATE(tasks.due)<='{0}' AND YEAR(tasks.due)='{1}' AND account='{2}';\
".format(today, year, account)
cur.execute(script_nop)
_data = cur.fetchall()
return int(_data[0][0])
def actionsMTD(cur, year=None, account='all', username=''):
'''
'''
if year == None:
year = str(datetime.datetime.now().year)
month = str(datetime.datetime.now().month)
day = str(datetime.datetime.now().day)
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
# WHERE due BETWEEN DATE_SUB(CURDATE(), INTERVAL {3} DAY) AND CURDATE();\
if username in ['', 'admin']:
script_nop = "\
SELECT COUNT(id) from tasks\
WHERE MONTH(tasks.due)={1} AND YEAR(tasks.due)={0} AND DAY(tasks.due)<={2};\
".format(year, month, int(day))
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT COUNT(id) from tasks\
WHERE MONTH(tasks.due)={1} AND YEAR(tasks.due)={0} AND DAY(tasks.due)<={2} AND account IN {3};\
".format(year, month, day, cust)
else:
# WHERE due BETWEEN DATE_SUB(CURDATE(), INTERVAL 31 DAY) AND CURDATE() AND account='{3}';\
script_nop = "\
SELECT COUNT(id) from tasks\
WHERE MONTH(tasks.due)={1} AND YEAR(tasks.due)={0} AND DAY(tasks.due)<={2} AND account='{3}';\
".format(year, month, day, account)
cur.execute(script_nop)
_data = cur.fetchall()
try:
return int(_data[0][0])
except:
return 0
def actionsYTD(cur, account='all', username=''):
'''
'''
today = str(datetime.datetime.now()).split(" ")[0]
firstday = str(datetime.date(datetime.datetime.now().year, 1, 1))
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT COUNT(id) from tasks\
WHERE DATE(tasks.due) BETWEEN '{}' AND '{}';\
".format(firstday, today)
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT COUNT(id) from tasks\
WHERE DATE(tasks.due) BETWEEN '{}' AND '{}' AND account IN {};\
".format(firstday, today, cust)
else:
script_nop = "\
SELECT COUNT(id) from tasks\
WHERE DATE(tasks.due) BETWEEN '{}' AND '{}' AND account='{}';\
".format(firstday, today, account)
cur.execute(script_nop)
_data = cur.fetchall()
try:
return int(_data[0][0])
except:
return 0
def actionsPerMonth(cur, year=None, account='all', username=''):
'''
'''
if year == None:
year = str(datetime.datetime.now().year)
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT MONTH(due), COUNT(action) from tasks\
WHERE YEAR(tasks.due) BETWEEN {} AND {}\
GROUP BY MONTH(due);\
".format(year, year)
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT MONTH(due), COUNT(action) from tasks\
WHERE YEAR(tasks.due) BETWEEN {} AND {} AND account IN {}\
GROUP BY MONTH(due);\
".format(year, year, cust)
else:
script_nop = "\
SELECT MONTH(due), COUNT(action) from tasks\
WHERE YEAR(tasks.due) BETWEEN {} AND {} AND account='{}'\
GROUP BY MONTH(due);\
".format(year, year, account)
cur.execute(script_nop)
_data = cur.fetchall()
try:
d = dict()
for i in range(len(_data)):
d[int(_data[i][0])] = int(_data[i][1])
return d
except:
return 0
def actionsPerDay(cur, year=None, yearMax=None, account='all', username=''):
'''
'''
if year == None:
year = str(datetime.datetime.now().year)
if yearMax == None:
yearMax = year
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT DATE(due), COUNT(action) from tasks\
WHERE YEAR(tasks.due) BETWEEN {} AND {}\
GROUP BY DATE(due);\
".format(year, yearMax)
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT DATE(due), COUNT(action) from tasks\
WHERE YEAR(tasks.due) BETWEEN {} AND {} AND account IN {}\
GROUP BY DATE(due);\
".format(year, yearMax, cust)
else:
script_nop = "\
SELECT DATE(due), COUNT(action) from tasks\
WHERE YEAR(tasks.due) BETWEEN {} AND {} AND account='{}'\
GROUP BY DATE(due);\
".format(year, yearMax, account)
cur.execute(script_nop)
_data = cur.fetchall()
try:
d = dict()
for i in range(len(_data)):
d[str(_data[i][0])] = int(_data[i][1])
return d
except:
return 0
def totalPlanGoals(cur, account='all', username=''):
'''
'''
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT SUM(goal) FROM plans;\
"
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT SUM(goal) FROM plans\
WHERE account IN {};\
".format(cust)
else:
script_nop = "\
SELECT SUM(goal) FROM plans\
WHERE account='{}';\
".format(account)
cur.execute(script_nop)
_data = cur.fetchall()
try:
return float(_data[0][0])
except:
return 0
def totalVisitsGoal(cur, year=None, account='all', username=''):
'''
'''
if year == None:
year = str(datetime.datetime.now().year)
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT SUM(visits) FROM plans\
WHERE YEAR(due) BETWEEN {} AND {};\
".format(year, year)
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT SUM(visits) FROM plans\
WHERE YEAR(due) BETWEEN {} AND {} AND account IN {};\
".format(year, year, cust)
else:
script_nop = "\
SELECT SUM(visits) FROM plans\
WHERE YEAR(due) BETWEEN {} AND {} AND account='{}';\
".format(year, year, account)
cur.execute(script_nop)
_data = cur.fetchall()
try:
return int(_data[0][0])
except:
return 0
def totalCallsGoal(cur, year=None, account='all', username=''):
'''
'''
if year == None:
year = str(datetime.datetime.now().year)
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT SUM(calls) FROM plans\
WHERE YEAR(due) BETWEEN {} AND {};\
".format(year, year)
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT SUM(calls) FROM plans\
WHERE YEAR(due) BETWEEN {} AND {} AND account IN {};\
".format(year, year, cust)
else:
script_nop = "\
SELECT SUM(calls) FROM plans\
WHERE YEAR(due) BETWEEN {} AND {} AND account='{}';\
".format(year, year, account)
cur.execute(script_nop)
_data = cur.fetchall()
try:
return int(_data[0][0])
except:
return 0
def totalOffersGoal(cur, year=None, account='all', username=''):
'''
'''
if year == None:
year = str(datetime.datetime.now().year)
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT SUM(offers) FROM plans\
WHERE YEAR(due) BETWEEN {} AND {};\
".format(year, year)
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT SUM(offers) FROM plans\
WHERE YEAR(due) BETWEEN {} AND {} AND account IN {};\
".format(year, year, cust)
else:
script_nop = "\
SELECT SUM(offers) FROM plans\
WHERE YEAR(due) BETWEEN {} AND {} AND account='{}';\
".format(year, year, account)
cur.execute(script_nop)
_data = cur.fetchall()
try:
return int(_data[0][0])
except:
return 0
def totalSalesPlans(cur, account='all', username=''):
'''
'''
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT COUNT(*) FROM plans;\
"
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT COUNT(*) FROM plans\
WHERE account IN {};\
".format(cust)
else:
script_nop = "\
SELECT COUNT(*) FROM plans\
WHERE account='{}';\
".format(account)
cur.execute(script_nop)
try:
return int(cur.fetchall()[0][0])
except:
return 0
def plansPerAccount(cur, username=''):
'''
'''
cur.execute("select database()")
dbname = cur.fetchone()[0]
dbname = dbname.replace('data_', '')
if username in ['', 'admin']:
# SELECT account, COUNT(*) FROM plans GROUP BY account;\
# "
script_nop = "\
SELECT c.id, COUNT(*) FROM plans AS p\
LEFT JOIN {0}.customers AS c ON p.account = c.id\
GROUP BY account;\
".format('data_' + dbname)
else:
print('running the else-branch query')
cust = getCustomersList(cur, username)
script_nop = "\
SELECT c.id, COUNT(*) FROM plans AS p\
LEFT JOIN {0}.customers AS c ON p.account = c.id\
WHERE account IN {1}\
GROUP BY account;\
".format('data_' + dbname, cust)
cur.execute(script_nop)
_data = cur.fetchall()
plan_per_account = dict()
for i in range(len(_data)):
# plan_per_account[_data[i][0]] = int(_data[i][1])
plan_per_account["{}".format(_data[i][0])] = int(_data[i][1])
# data['actions per account'] = np.asarray(cur.fetchall()[0])
# data['actions per account'] = np.asarray[cur.fetchall()]
return plan_per_account
def actionsPerAccount(cur, username):
'''
'''
if username in ['', 'admin']:
script_nop = "\
SELECT account, COUNT(*) FROM tasks GROUP BY account;\
"
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT account, COUNT(*) FROM tasks WHERE account IN {} GROUP BY account;\
".format(cust)
cur.execute(script_nop)
_data = cur.fetchall()
actions_per_account = dict()
for i in range(len(_data)):
actions_per_account[_data[i][0]] = int(_data[i][1])
return actions_per_account
def activityGoals(cur, account='all', username=''):
'''
'''
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT action, COUNT(*) FROM tasks GROUP BY action;\
"
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT action, COUNT(*) FROM tasks\
WHERE account IN {}\
GROUP BY action;\
".format(cust)
else:
script_nop = "\
SELECT action, COUNT(*) FROM tasks\
WHERE account = '{}'\
GROUP BY action;\
".format(account)
cur.execute(script_nop)
_data = cur.fetchall()
try:
d = dict()
for i in range(len(_data)):
d[_data[i][0]] = int(_data[i][1])
return d
except:
raise
# return 0
def averageDealTime(cur, account='all', username=''):
'''Return the average number of days between a plan's creation date and its due date.'''
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
script_nop = "\
SELECT plans.created, plans.due FROM plans;\
"
else:
cust = getCustomersList(cur, username)
script_nop = "\
SELECT plans.created, plans.due FROM plans\
WHERE account IN {};\
".format(cust)
else:
script_nop = "\
SELECT plans.created, plans.due FROM plans\
WHERE account='{}';\
".format(account)
cur.execute(script_nop)
_data = cur.fetchall()
try:
_diff = [d[1] - d[0] for d in _data]
avgLifetime = sum([d.days for d in _diff]) / len(_diff)
return avgLifetime
# return _data
except:
return 0
def closedPlans(cur, account='all', status='all', username=''):
'''Return the number of plans closed as 'Closed Won'/'Closed Lost', or with the given `status`.'''
'''
try:
account = account.decode('utf-8')
except:
pass
account = account.encode('latin-1')
'''
if account == 'all':
if username in ['', 'admin']:
if status == 'all':
script_nop = "\
SELECT COUNT(plans.id) FROM plans WHERE plans.status='Closed Lost' OR plans.status='Closed Won';\
"
else:
script_nop = "\
SELECT COUNT(plans.id) FROM plans WHERE plans.status='{}';\
".format(status)
else:
cust = getCustomersList(cur, username)
if status == 'all':
script_nop = "\
SELECT plans.id FROM plans\
WHERE (plans.status='Closed Lost' OR plans.status='Closed Won') AND account IN {};\
".format(cust)
else:
script_nop = "\
SELECT plans.id FROM plans\
WHERE plans.status='{1}' AND account IN {0};\
".format(cust, status)
else:
if status == 'all':
script_nop = "\
SELECT plans.id FROM plans\
WHERE (plans.status='Closed Lost' OR plans.status='Closed Won') AND account='{}';\
".format(account)
else:
script_nop = "\
SELECT plans.id FROM plans\
WHERE plans.status='{1}' AND account='{0}';\
".format(account, status)
cur.execute(script_nop)
_data = cur.fetchall()
try:
closed = int(_data[0][0])
return closed
# return _data
except:
return 0
if __name__ == "__main__":
dbname = 'data_crmtest1'
dbname = 'data_martinmasip'
dbname = 'data_demo'
dbname = 'data_qymatix_best'
dbname = 'data_qymatix_de'
dbname = 'data_qymatix_de'
mysql_connection = connection.MySQLConnection(dbname)
con = mysql_connection.connect()
cur = con.cursor()
# today = datetime.datetime.now()
# account = 'Acrion'
# ans = averageDealTime(cur)
# print(ans)
##ans = actionsPerDay(cur)
# print(ans)
# status = 'Closed Won'
# status = 'Closed Lost'
# status = 'all'
# ans = closedPlans(cur, status=status)
# print(ans)
# ans = activeAccountsCRM(cur)
# ans = activeAccounts(cur)
ans = accounts(cur, username='lucas_pedretti__qymatix_de')
# ans = accounts(cur, username='admin')
print("<<<")
print(ans)
# print(str(tuple(ans)))
# ans = goalsPerQuarter(cur, account='all', username='lucas_pedretti__qymatix_de')
# print(ans)
# ans = actionsPerYear(cur, account='all', username='lucas_pedretti__qymatix_de')
# ans = actionsPerYear(cur, account='all', username='admin')
# print(ans)
|
A = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
def lista_menor_10(lista):
numero_do_usuario = int(input('Enter the desired number: '))
menores = []
for elemento in lista:
if elemento < numero_do_usuario:
menores.append(elemento)
print(menores)
if __name__ == "__main__":
lista_menor_10(A) |
import requests
import time
import os
import json
'''Configuration'''
BASE_URL = 'http://yao.example.com'
''''''
sess = requests.Session()
sess.headers.update({'Referer': BASE_URL})
status_map = [
'Created', # 0
'Starting', # 1
'Running', # 2
'Stopped', # 3
'Finished', # 4
'Failed', # 5
]
def login(user='', pwd=''):
# Get CSRF Token
r = sess.get(BASE_URL)
# print(r.content)
# Login
url = BASE_URL + '/service?action=user_login'
r = sess.post(url, data={})
# print(r.content)
return
def get_sys_status():
# Retrieve Status
r = sess.get(BASE_URL + '/service?action=summary_get')
print(r.content)
# b'{"jobs":{"finished":1,"running":0,"pending":0},"gpu":{"free":20,"using":0},"errno":0,"msg":"Success !"}'
# Get pool Util history
r = sess.get(BASE_URL + '/service?action=summary_get_pool_history')
# print(r.content)
return
def submit_job(job):
r = sess.post(BASE_URL + '/service?action=job_submit', data=job)
data = str(r.content, 'utf-8')
msg = json.loads(data)
print(msg)
return msg
def job_list():
print("\nList of jobs:")
r = sess.get(BASE_URL + '/service?action=job_list&who=self&sort=nobody&order=desc&offset=0&limit=10')
data = str(r.content, 'utf-8')
msg = json.loads(data)
if len(msg['jobs']) > 0:
for job in msg['jobs']:
name = job['name']
status = status_map[job['status']]
print("Status of job: {} is {}".format(name, status))
print("\n")
def job_status(job_name):
r = sess.get(BASE_URL + '/service?action=job_status&name=' + job_name)
data = str(r.content, 'utf-8')
msg = json.loads(data)
print("Status of tasks in {}:".format(job_name))
if len(msg['tasks']) > 0:
for task in msg['tasks']:
print("{} ({}) is/was running on {}".format(task['hostname'], task['status'], task['node']))
if __name__ == '__main__':
os.environ["TZ"] = 'Asia/Shanghai'
if hasattr(time, 'tzset'):
time.tzset()
login()
get_sys_status()
tasks = [{
"name": "node1",
"image": "quickdeploy/yao-tensorflow:1.14-gpu",
"cmd": "python /workspace/scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py \\ --num_gpus=1 \\ --batch_size=32 \\ --model=resnet50 \\ --num_batches=200 \\ --train_dir=/tmp \\ --variable_update=parameter_server",
"cpu_number": "4",
"memory": "4096",
"gpu_number": "1",
"gpu_memory": "8192",
"is_ps": "0",
"gpu_model": "k80",
}]
# print(json.dumps(tasks))
job = {
'name': 'test',
'workspace': 'https://github.com/tensorflow/benchmarks.git',
'cluster': 'default',
'priority': '25',
'run_before': '',
'locality': '0',
'tasks': json.dumps(tasks),
}
msg = submit_job(job)
if msg['errno'] == 0:
job_status(msg['job_name'])
job_list()
|
__version__ = "0.1"
__all__ = ["twitter_client","tweet_analyzer","user_analyzer"]
from .tweet_analyzer import TweetAnalyzer
from .user_analyzer import UserAnalyzer
from .twitter_client import TwitterClient
|
import tkinter as tk
import tkinter.font as tkFont
class Font:
def __init__(self, family, h1_size=26, h2_size=22, h3_size=14, body_size=12):
self.family = family
self.h1 = tkFont.Font(family=family, size=h1_size, weight='bold')
self.h2 = tkFont.Font(family=family, size=h2_size, weight='bold')
self.h3 = tkFont.Font(family=family, size=h3_size, weight='bold')
self.body = tkFont.Font(family=family, size=body_size, weight='normal')
self.custom = {}
def create_custom(self, name, family=None, size=None, weight='normal', slant='roman', underline=0, overstrike=0):
if family is None: family = self.family
if size is None: size = self.body.cget('size')
self.custom[name] = tkFont.Font(family=family, size=size, weight=weight, slant=slant, underline=underline, overstrike=overstrike)
return self.custom[name]
def get_custom(self, name):
return self.custom[name]
class ColorPalette:
def __init__(self, primary='#1D2025', secondary='#2E3640', accent='#607371', accent_alt='#4C5359', text='#DDE5E5', textcursor='white'):
self.primary = primary
self.secondary = secondary
self.accent = accent
self.accent_alt = accent_alt
self.text = text
self.textcursor = textcursor
self.custom = {}
def create_custom(self, name, color):
self.custom[name] = color
return self.custom[name]
def get_custom(self, name):
return self.custom[name]
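# Usage sketch: the defaults describe a dark theme; individual colours can be
# overridden at construction time or added later, e.g.
#   palette = ColorPalette(accent='#88C0D0')
#   palette.create_custom('warning_bg', 'yellow2')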
class Style:
def __init__(self, font, color, padding):
if not isinstance(font, Font):
raise TypeError(f'font must be a Font class, not {type(font)}')
if not isinstance(color, ColorPalette):
raise TypeError(f'color must be a ColorPalette class, not {type(color)}')
if type(padding) != tuple or len(padding) != 2:
raise TypeError(f'padding must be a tuple of length 2')
self.font = font
self.color = color
self.padding = {'padx': padding[0], 'pady': padding[1]}
self.custom = {}
self.frame = {'bg': self.color.primary}
self.label = {'bg': self.color.primary, 'activebackground': self.color.accent_alt, 'fg': self.color.text}
self.button = {'bg': self.color.accent, 'activebackground': self.color.accent_alt, 'fg': self.color.text, 'activeforeground': self.color.text, 'disabledforeground': self.color.accent_alt}
self.checkbutton = {'bg': self.color.primary, 'activebackground': self.color.primary, 'selectcolor': self.color.accent, 'fg': self.color.text, 'activeforeground': self.color.text}
self.radiobutton = {'bg': self.color.primary, 'activebackground': self.color.primary, 'selectcolor': self.color.accent, 'fg': self.color.text, 'activeforeground': self.color.text}
self.listbox = {'bg': self.color.secondary, 'selectbackground': self.color.accent, 'fg': self.color.text, 'activestyle': 'none'}
self.entry = {
'normal': {'bg': self.color.secondary, 'readonlybackground': self.color.accent, 'disabledbackground': self.color.secondary, 'fg': self.color.text, 'disabledforeground': self.color.accent_alt, 'insertbackground': self.color.textcursor},
'warning': {'bg': 'yellow2', 'fg': 'black', 'insertbackground': 'black'},
'error': {'bg': 'firebrick1', 'fg': 'black', 'insertbackground': 'black'}
}
self.textbox = {
# textbox does not support disabled styles, so SimpleTextbox fudges it using config commands
'normal': {'bg': self.color.primary, 'fg': self.color.text, 'insertbackground': self.color.textcursor},
'readonly': {'bg': self.color.accent},
'disabled': {'bg': self.color.secondary}
}
self.style_types = [self.frame, self.label, self.button, self.checkbutton, self.radiobutton, self.listbox, self.entry['normal'], self.textbox['normal']]
def create_custom(self, name, properties):
if not type(properties) == dict:
raise TypeError(f'Properties must be of type {dict}, not {type(properties)}')
self.custom[name] = properties
return self.custom[name]
def get_custom(self, name):
return self.custom[name]
def copy(self):
return Style(self.font, self.color, (self.padding['padx'], self.padding['pady'])) |
import abc
import logging
import os
import re
import signal
import sys
import typing
from contextlib import contextmanager
import sh
from cached_property import cached_property
from kubeyard import base_command
from kubeyard import minikube
from kubeyard import settings
from kubeyard.commands import custom_script
logger = logging.getLogger(__name__)
MAX_JOB_RETRIES = 2
class BaseDevelCommand(base_command.InitialisedRepositoryCommand):
context_vars = ['image_name', 'tag']
def __init__(self, *, use_default_implementation, image_name, tag, **kwargs):
super().__init__(**kwargs)
self._image_name = image_name
self._tag = tag
self.use_default_implementation = use_default_implementation
if self.is_development:
self.cluster = self._prepare_cluster(self.context)
self.context.update(self.cluster.docker_env())
self.context['HOST_VOLUMES'] = ' '.join(self.volumes)
self.docker_runner = DockerRunner(self.context)
@staticmethod
def _prepare_cluster(context):
logger.info('Checking if cluster is running and configured...')
cluster = minikube.ClusterFactory().get(context)
cluster.ensure_started()
logger.info('Cluster is ready')
return cluster
@property
def args(self) -> list:
return []
def run(self):
super().run()
custom_script_runner = custom_script.CustomScriptRunner(self.project_dir, self.custom_command_context)
custom_script_exists = custom_script_runner.exists(self.custom_script_name)
if self.use_default_implementation or not custom_script_exists:
self.run_default()
else:
custom_script_runner.run(self.custom_script_name, self.args)
def docker(self, *args, **kwargs):
return self.docker_runner.run(*args, **kwargs)
def docker_with_output(self, *args, **kwargs):
return self.docker_runner.run_with_output(*args, **kwargs)
@property
def image(self):
return '{}/{}:{}'.format(self.docker_repository, self.image_name, self.tag)
@property
def latest_image(self):
return '{}/{}:latest'.format(self.docker_repository, self.image_name)
@property
def docker_repository(self):
return self.context.get("DOCKER_REGISTRY_NAME") or settings.DEFAULT_DOCKER_REGISTRY_NAME
@property
def image_name(self):
return self._image_name or self.context["DOCKER_IMAGE_NAME"]
@property
def tag(self):
return self._tag or self.default_tag
@property
def default_tag(self):
if self.is_development:
return 'dev'
else:
return 'latest'
@property
def is_development(self):
return self.context['KUBEYARD_MODE'] == 'development'
def run_default(self):
raise NotImplementedError
@property
@abc.abstractmethod
def custom_script_name(self):
raise NotImplementedError
@property
def volumes(self) -> typing.Iterable[str]:
if self.is_development:
mounted_project_dir = self.cluster.get_mounted_project_dir(self.project_dir)
for volume in self.context.get('DEV_MOUNTED_PATHS', []):
if 'mount-in-tests' in volume and volume['mount-in-tests']['image-name'] == self.image_name:
host_path = str(mounted_project_dir / volume['host-path'])
container_path = volume['mount-in-tests']['path']
mount_mode = self.get_mount_mode(volume['mount-in-tests'])
yield from ['-v', '{}:{}:{}'.format(host_path, container_path, mount_mode)]
def get_mount_mode(self, configuration):
mount_mode = configuration.get('mount-mode', 'ro')
if mount_mode not in {'ro', 'rw'}:
raise base_command.CommandException('Volume "mount-mode" should be one of: "ro", "rw".')
return mount_mode
@cached_property
def _id(self):
return str(sh.id())
@cached_property
def uid(self) -> str:
return re.findall(r"uid=(\d+)", self._id)[0]
@cached_property
def gid(self) -> str:
return re.findall(r"gid=(\d+)", self._id)[0]
class DockerRunner:
def __init__(self, context):
self.context = context
def run(self, *args, **kwargs):
if self.run_can_be_waited(*args, **kwargs):
process: sh.RunningCommand = sh.docker(*args, _env=self.sh_env, _bg=True, **kwargs)
try:
process.wait()
except KeyboardInterrupt as e:
logger.info("Stopping running command...")
process.signal(signal.SIGINT)
raise e
else:
process: sh.RunningCommand = sh.docker(*args, _env=self.sh_env, **kwargs)
return process
def run_can_be_waited(self, *args, _piped=False, _iter=False, _iter_noblock=False, **kwargs) -> bool:
"""Check special cases when sh require to not use .wait() method"""
return not any((_piped, _iter, _iter_noblock))
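# For example, a call made with _iter=True (or _piped / _iter_noblock) returns a
# lazy command whose output is consumed incrementally, so run() skips .wait() for it.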
def run_with_output(self, *args, **kwargs):
return self.run(*args, _out=sys.stdout.buffer, _err=sys.stdout.buffer, **kwargs)
@cached_property
def sh_env(self):
env = os.environ.copy()
env.update(self.context.as_environment())
return env
@contextmanager
def temporary_volume(self):
volume_name = self.run('volume', 'create').strip()
logger.debug('volume_name: {}'.format(volume_name))
try:
yield volume_name
finally:
# make sure the scratch volume is removed even if the caller raises
self.run('volume', 'remove', volume_name)
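# Usage sketch (image and paths are illustrative):
#   runner = DockerRunner(context)
#   with runner.temporary_volume() as volume:
#       runner.run_with_output('run', '--rm', '-v', '{}:/data'.format(volume), 'busybox', 'ls', '/data')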
|
from .address import AddressEntry, SearchAddressResponse
from .base import BaseModel
from .coordinates import Coordinates
from .item import Item, ItemCreate, ItemInDB, ItemUpdate
from .msg import Msg
from .need import NeedCreate, PublicNeed, PublicNeedPpeType
from .posttag import PosttagPostcodeFullResponse, PosttagResponseData
from .record import Record
from .supply import PublicSupply, PublicSupplyPpeType, SupplyCreate
from .token import Token, TokenPayload
from .user import User, UserCreate, UserInDB, UserUpdate
from .city import City
from .city_map_data import CityMapData
from .paginated import PaginatedResponse
|
# -*- coding: utf-8 -*-
# @Author: Lutz Reiter, Design Research Lab, Universität der Künste Berlin
# @Date: 2016-10-22 16:07:52
# @Last Modified by: lutzer
# @Last Modified time: 2016-10-25 00:31:45
from __future__ import with_statement
from threading import Lock
import logging
from PIL import Image,ImageChops
import sys
from Adafruit_Thermal import *
from fontRenderer import *
logger = logging.getLogger(__name__)
PRINTER_PAPER_WIDTH = 384
class LinePrinter:
printer = None
fontRenderer = None
fontRendererBig = None
def __init__(self,disablePrinter=False):
self.printerDisabled = disablePrinter
if not self.printerDisabled:
self.printer = Adafruit_Thermal("/dev/serial0", 19200, timeout=5)
self.printer.sleep()
currentDir = sys.path[0]
self.fontRenderer = FontRenderer(currentDir +'/font/cutivemono32bold.png', currentDir + '/font/cutivemono32bold.json')
self.fontRendererBig = FontRenderer(currentDir +'/font/cutivemono42bold.png', currentDir + '/font/cutivemono42bold.json')
self.queue = [] # message queue
self.queueLock = Lock()
logger.info("printer initialized")
# adds message to queue
def addMessage(self,text):
job = dict(type="message", text=text)
with self.queueLock:
self.queue.append(job)
# add horizontal line to queue
def addLine(self):
job = dict(type="line", text='---')
with self.queueLock:
self.queue.append(job)
# add question text
def addQuestion(self,text):
job = dict(type="question", text=text)
with self.queueLock:
self.queue.append(job)
def hasJobs(self):
return len(self.queue) > 0
def printNextJob(self):
with self.queueLock:
if len(self.queue) > 0:
job = self.queue.pop(0)
else:
return
logger.info("printing" + str(job))
self.wake();
if job['type'] == "question":
self.printText(job['text'],self.fontRendererBig)
self.feed(6)
elif job['type'] == "line":
self.printText("---=---",center=True)
self.feed(6)
else:
self.printText(job['text'])
self.feed(6)
self.sleep();
def printText(self,text,font=False,center=False):
if not font:
font = self.fontRenderer
fontHeight = font.fontHeight
# holds all the images of the columns
columns = []
column = Image.new("RGB", (PRINTER_PAPER_WIDTH, fontHeight), (255, 255, 255))
startX = PRINTER_PAPER_WIDTH # start from right
for character in text:
#first create character
symbol = font.getCharacterImage(character)
symbol = symbol.rotate(180, 0, True)
symbol = font.makeBgWhite(symbol)
charWidth = symbol.size[0]
startX -= charWidth
if startX > 0:
# add character to column
column.paste(symbol, box=(startX, 0))
else:
# prepend to columns array
columns.insert(0,column)
# start new column
column = Image.new("RGB", (PRINTER_PAPER_WIDTH, fontHeight), (255, 255, 255))
# add character
startX = PRINTER_PAPER_WIDTH - charWidth
column.paste(symbol, box=(startX, 0))
if center:
# shift column to the left
column = ImageChops.offset(column,-startX/2,0)
#insert last column
columns.insert(0,column)
# print all the columns
for img in columns:
self.__printImage(img)
def wake(self):
if not self.printerDisabled:
self.printer.wake()
def sleep(self):
if not self.printerDisabled:
self.printer.sleep()
def feed(self,amount):
if not self.printerDisabled:
self.printer.feed(amount)
def __printImage(self,img):
if not self.printerDisabled:
self.printer.printImage(img,LaaT=False);
else:
img.show()
|
import unittest
import solver
class TestSolution(unittest.TestCase):
def test_solve(self):
self.assertEqual(solver.solve(2, 2), 1)
self.assertEqual(solver.solve(3, 3), 4)
self.assertEqual(solver.solve(5, 5), 15)
|
import numpy as np
from . import xray
import numexpr as ne
import pandas as pd
import time as ttime
# def get_transition_grid(E_step, E_range, n, ascend=True):
# dE = (E_range*2/n - 2*E_step)/(n-1)
# steps = E_step + np.arange(n)*dE
# if not ascend:
# steps = steps[::-1]
# return np.cumsum(steps)
def get_transition_grid(dE_start, dE_end, E_range, round_up=True):
if round_up:
n = np.ceil(2 * E_range / (dE_start + dE_end))
else:
n = np.floor(2 * E_range / (dE_start + dE_end))
delta = (E_range*2/n - 2*dE_start)/(n-1)
steps = dE_start + np.arange(n)*delta
# if not ascend:
# steps = steps[::-1]
return np.cumsum(steps)
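# Worked example: get_transition_grid(1.0, 3.0, 10.0) uses n = ceil(2*10/(1+3)) = 5
# steps whose sizes grow linearly from 1.0 to 3.0 ([1.0, 1.5, 2.0, 2.5, 3.0]) and
# returns their cumulative sum [1.0, 2.5, 4.5, 7.0, 10.0], i.e. a transition grid
# that spans exactly E_range with smoothly increasing spacing.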
def xas_energy_grid(energy_range, e0, edge_start, edge_end, preedge_spacing, xanes_spacing, exafs_k_spacing,
E_range_before=15, E_range_after = 20, n_before = 10, n_after = 20):
# energy_range_lo= np.min(energy_range)
# energy_range_hi = np.max(energy_range)
energy_range_lo = np.min([e0 - 300, np.min(energy_range)])
energy_range_hi = np.max([e0 + 2500, np.max(energy_range)])
# preedge = np.arange(energy_range_lo, e0 + edge_start-1, preedge_spacing)
preedge = np.arange(energy_range_lo, e0 + edge_start, preedge_spacing)
# before_edge = np.arange(e0+edge_start,e0 + edge_start+7, 1)
before_edge = preedge[-1] + get_transition_grid(preedge_spacing, xanes_spacing, E_range_before, round_up=False)
edge = np.arange(before_edge[-1], e0+edge_end-E_range_after, xanes_spacing)
# after_edge = np.arange(e0 + edge_end - 7, e0 + edge_end, 0.7)
eenergy = xray.k2e(xray.e2k(e0+edge_end, e0), e0)
post_edge = np.array([])
while (eenergy < energy_range_hi):
kenergy = xray.e2k(eenergy, e0)
kenergy += exafs_k_spacing
eenergy = xray.k2e(kenergy, e0)
post_edge = np.append(post_edge, eenergy)
after_edge = edge[-1] + get_transition_grid(xanes_spacing, post_edge[1] - post_edge[0], post_edge[0] - edge[-1], round_up=True)
energy_grid = np.unique(np.concatenate((preedge, before_edge, edge, after_edge, post_edge)))
energy_grid = energy_grid[(energy_grid >= np.min(energy_range)) & (energy_grid <= np.max(energy_range))]
return energy_grid
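# Usage sketch (illustrative values for a Cu K-edge scan at e0 = 8979 eV):
#   grid = xas_energy_grid(np.array([8700.0, 9400.0]), e0=8979, edge_start=-30,
#                          edge_end=50, preedge_spacing=5,
#                          xanes_spacing=0.2, exafs_k_spacing=0.04)
# The grid is coarse before the edge, dense across the XANES region, and uniform
# in k-space (hence increasingly coarse in energy) through the EXAFS region.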
def _generate_convolution_bin_matrix(sample_points, data_x):
fwhm = _compute_window_width(sample_points)
delta_en = _compute_window_width(data_x)
mat = _generate_sampled_gauss_window(data_x.reshape(1, -1),
fwhm.reshape(-1, 1),
sample_points.reshape(-1, 1))
mat *= delta_en.reshape(1, -1)
return mat
_GAUSS_SIGMA_FACTOR = 1 / (2 * (2 * np.log(2)) ** .5)
def _generate_sampled_gauss_window(x, fwhm, x0):
sigma = fwhm * _GAUSS_SIGMA_FACTOR
a = 1 / (sigma * (2 * np.pi) ** .5)
data_y = ne.evaluate('a * exp(-.5 * ((x - x0) / sigma) ** 2)')
# data_y = np.exp(-.5 * ((x - x0) / sigma) ** 2)
# data_y /= np.sum(data_y)
return data_y
def _compute_window_width(sample_points):
'''Given smaple points compute windows via approx 1D voronoi
Parameters
----------
sample_points : array
Assumed to be monotonic
Returns
-------
windows : array
Average of distances to neighbors
'''
d = np.diff(sample_points)
fw = (d[1:] + d[:-1]) / 2
return np.concatenate((fw[0:1], fw, fw[-1:]))
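# Worked example: for monotonic sample points [0., 1., 3., 6.] the neighbour
# distances are d = [1, 2, 3], the interior widths are (d[1:] + d[:-1]) / 2 = [1.5, 2.5],
# and the edge widths are copied, so
# _compute_window_width(np.array([0., 1., 3., 6.])) -> array([1.5, 1.5, 2.5, 2.5]).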
def bin(interpolated_dataset, e0, edge_start=-30, edge_end=50, preedge_spacing=5,
        xanes_spacing=-1, exafs_k_spacing=0.04, skip_binning=False):
if skip_binning:
binned_df = interpolated_dataset
col = binned_df.pop("energy")
n = len(binned_df.columns)
binned_df.insert(n, col.name, col)
binned_df = binned_df.sort_values('energy')
else:
print(f'({ttime.ctime()}) Binning the data: BEGIN')
        if xanes_spacing == -1:
            if e0 < 14000:
                xanes_spacing = 0.2
            elif e0 < 21000:
                xanes_spacing = 0.3
            else:
                xanes_spacing = 0.4
interpolated_energy_grid = interpolated_dataset['energy'].values
binned_energy_grid = xas_energy_grid(interpolated_energy_grid, e0, edge_start, edge_end,
preedge_spacing, xanes_spacing, exafs_k_spacing)
convo_mat = _generate_convolution_bin_matrix(binned_energy_grid, interpolated_energy_grid)
ret = {k: convo_mat @ v.values for k, v in interpolated_dataset.items() if k != 'energy'}
ret['energy'] = binned_energy_grid
binned_df = pd.DataFrame(ret)
print(f'({ttime.ctime()}) Binning the data: DONE')
    binned_df = binned_df.drop(columns='timestamp')
return binned_df
# def bin_pilatus_images(interpolated_dataset, db, uid):
# pass
# # make handler return images
# work from there
#
# fname = '/nsls2/xf08id/users/2020/3/300001/Cu_sine_1s_ud_10x 0001.raw'
# N = 10
# output_fname = '/nsls2/xf08id/Sandbox/ISS_beamline_paper/Cu_sine_1s_ud_x10.dat'
# T = 1
# T_offset = -0.1
#
# columns = ['timestamp', 'i0', 'it', 'ir', 'iff', 'aux1', 'aux2', 'aux3', 'aux4', 'energy', 'mu_t', 'mu_f', 'mu_r']
#
# data = np.genfromtxt(fname)
# df = pd.DataFrame(data, columns=columns)
# df.sort_values('timestamp', inplace=True)
# df['timestamp'] -= df['timestamp'].min()
# # T = df['timestamp'].max() / N / 2
#
#
# mus = []
#
# plt.figure(1)
# plt.clf()
#
# plt.figure(2)
# plt.clf()
#
# for i in range(N*2):
# print(i)
# df_loc = df[((df['timestamp']-T_offset) >= i*T) &
# ((df['timestamp']-T_offset) < (i+1)*T)]
#
# plt.figure(1)
# plt.plot(df_loc['timestamp'], df_loc['energy'])
#
# df_int = bin(df_loc, 8979)
# _e = df_int['energy'].values
# _mu = np.log(df_int['it'] / df_int['ir']).values
# plt.figure(2)
# plt.plot(_e, _mu)
# if i == 0:
# energy = _e.copy()
# _mu = np.interp(energy, _e, _mu)
# _mu -= np.mean(_mu[energy<8900])
# _mu /= np.mean(_mu[energy>9800])
# mus.append(_mu)
#
# mus = np.array(mus).T
#
# ddd = np.hstack((energy[:, None], mus))
# np.savetxt(output_fname, ddd, header=('energy ' + 'd u '*N))
#
# #
# #
# # df_down_1 =
# # df_up_1 =
# #
# # df_down_5 = df[(df['timestamp'] >= 8*T) & (df['timestamp'] < 9*T)]
# # df_up_5 = df[(df['timestamp'] >= 9*T) & (df['timestamp'] < 10*T)]
# #
####
# SCRATCH
# fnames = ['/nsls2/xf08id/data/2021/03/09/en_a9f590ab',
# '/nsls2/xf08id/data/2021/03/09/en_7377b91b',
# '/nsls2/xf08id/data/2021/03/09/en_c6436896',
# '/nsls2/xf08id/data/2021/03/04/en_ae93dac5',
# '/nsls2/xf08id/data/2021/03/04/en_ae2b1fb6',
# '/nsls2/xf08id/data/2021/03/04/en_79d333e4']
# fnames = ['/mnt/xf08ida-ioc1/test_5000',
# '/mnt/xf08ida-ioc1/test_50000',
# '/mnt/xf08ida-ioc1/test_89500',
# '/mnt/xf08ida-ioc1/test_150000',
# '/mnt/xf08ida-ioc1/test_200000']
#
# for f in fnames:
# _d = np.genfromtxt(f)
# print(1/np.median(np.diff(_d[:, 1]*1e-9)))
###
|
import os
import sys
def run_macrobase(cmd='pipeline', conf='conf/batch.conf', profiler=None,
**kwargs):
extra_args = ' '.join(['-D{key}={value}'.format(key=key, value=value)
for key, value in kwargs.items()])
if profiler == 'yourkit':
extra_args += ' -agentpath:/Applications/YourKit-Java-Profiler-2016.02.app/Contents/Resources/bin/mac/libyjpagent.jnilib' # noqa
macrobase_cmd = '''java {extra_args} -Xms128m -Xmx16G \\
-cp "core/target/classes:frontend/target/classes:frontend/src/main/resources/:contrib/target/classes:assembly/target/*:$CLASSPATH" \\
macrobase.MacroBase {cmd} {conf_file}'''.format(
cmd=cmd, conf_file=conf, extra_args=extra_args)
    print('running the following command:')
    print(macrobase_cmd)
exit_status = os.system(macrobase_cmd)
if exit_status != 0:
sys.exit(exit_status)
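# Example call (illustrative; the -D property name below is hypothetical, real keys come
# from whichever MacroBase configuration is in use):
#   run_macrobase(cmd='pipeline', conf='conf/batch.conf', targetPercentile=0.99)
# would add "-DtargetPercentile=0.99" to the java command line before launching MacroBase.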
|
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from models.resnet import resnet_18, resnet_34, resnet_50, resnet_101, resnet_152
import config
from prepare_data import generate_datasets
import math
def get_model():
model = resnet_50()
if config.model == "resnet18":
model = resnet_18()
if config.model == "resnet34":
model = resnet_34()
if config.model == "resnet101":
model = resnet_101()
if config.model == "resnet152":
model = resnet_152()
model.build(input_shape=(None, config.image_height, config.image_width, config.channels))
model.summary()
return model
if __name__ == '__main__':
# GPU settings
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
# get the original_dataset
train_dataset, valid_dataset, test_dataset, train_count, valid_count, test_count = generate_datasets()
# create model
model = get_model()
# define loss and optimizer
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adadelta()
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
valid_loss = tf.keras.metrics.Mean(name='valid_loss')
valid_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='valid_accuracy')
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = model(images, training=True)
loss = loss_object(y_true=labels, y_pred=predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
@tf.function
def valid_step(images, labels):
predictions = model(images, training=False)
v_loss = loss_object(labels, predictions)
valid_loss(v_loss)
valid_accuracy(labels, predictions)
# start training
for epoch in range(config.EPOCHS):
        print(f"\nEpoch: {epoch + 1}/{config.EPOCHS}")
train_loss.reset_states()
train_accuracy.reset_states()
valid_loss.reset_states()
valid_accuracy.reset_states()
progbar = tf.keras.utils.Progbar(len(train_dataset), stateful_metrics=["train_loss", "train_acc", "val_loss", "val_acc"])
for idx, (images, labels) in enumerate(train_dataset):
train_step(images, labels)
values = [("train_loss", train_loss.result()), ("train_acc", train_accuracy.result())]
progbar.update(idx+1, values=values)
for valid_images, valid_labels in valid_dataset:
valid_step(valid_images, valid_labels)
values = [ ("train_loss", train_loss.result()), ("train_acc", train_accuracy.result()), ("val_loss", valid_loss.result()), ("val_acc", valid_accuracy.result()) ]
progbar.update(idx+1, values=values)
model.save_weights(filepath=config.save_model_dir, save_format='tf')
|
import numpy
import h5py
import json
h5 = h5py.File("data/geomodelgrids/USGS_SFCVM_detailed_v21-1.h5", "r")
auxiliary = json.loads(h5.attrs["auxiliary"])
fault_blocks = sorted(auxiliary["fault_block_ids"].items(), key=lambda x: x[1])
print("Fault Blocks")
for label, value in fault_blocks:
lines = [
" <attrdomv>",
" <edom>",
f" <edomv>{value}</edomv>",
f" <edomvd>{label}</edomvd>",
" <edomvds>Producer-defined</edomvds>",
" </edom>",
" </attrdomv>",
]
print("\n".join(lines))
zones = sorted(auxiliary["zone_ids"].items(), key=lambda x: x[1])
print("Zone Ids")
for label, value in zones:
lines = [
" <attrdomv>",
" <edom>",
f" <edomv>{value}</edomv>",
f" <edomvd>{label}</edomvd>",
" <edomvds>Producer-defined</edomvds>",
" </edom>",
" </attrdomv>",
]
print("\n".join(lines))
for surface in h5["surfaces"]:
dset = h5["surfaces"][surface][:]
value_min = numpy.min(dset)
value_max = numpy.max(dset)
print(f"{surface} min={value_min:.1f} max={value_max:.1f}")
value_ranges = {}
for block in h5["blocks"]:
dset = h5["blocks"][block]
for index in range(5):
value_name = h5.attrs["data_values"][index]
value = dset[:,:,:,index].ravel()
mask = value > -1.0e+8
value_min = numpy.min(value[mask])
value_max = numpy.max(value[mask])
        value_nodata = bool(numpy.sum(~mask) > 0)
if not value_name in value_ranges:
value_ranges[value_name] = (value_min, value_max, value_nodata)
else:
current_min, current_max, current_nodata = value_ranges[value_name]
new_min = min(value_min, current_min)
new_max = max(value_max, current_max)
new_nodata = value_nodata or current_nodata
value_ranges[value_name] = (new_min, new_max, new_nodata)
for value_name, (value_min, value_max, value_nodata) in value_ranges.items():
print(f"{value_name} min={value_min:.1f} max={value_max:.1f}, nodata={value_nodata}")
|
# coding: utf-8
from django.db import models
from django.utils.translation import ugettext_lazy as _
TYPE_CHOICES = (
(1, _(u'Participante')),
(2, _(u'Organizador')),
)
class Subscription(models.Model):
email = models.EmailField(_(u'Email'))
type = models.IntegerField(_(u'Tipo'), choices=TYPE_CHOICES, default=0)
created_at = models.DateTimeField(_(u'Criado em'), auto_now_add=True)
def __unicode__(self):
return unicode(self.email)
class Meta:
verbose_name = _(u'Inscrição')
verbose_name_plural = _(u'Inscrições')
|
# time_hello_split.py
from webpie import WPApp, WPHandler
import time
class Greeter(WPHandler):
def hello(self, request, relpath):
return "Hello, World!\n"
class Clock(WPHandler):
def time(self, request, relpath): # 1
return time.ctime()+"\n", "text/plain" # 2
class TopHandler(WPHandler):
def __init__(self, *params):
WPHandler.__init__(self, *params)
self.greet = Greeter(*params)
self.clock = Clock(*params)
def version(self, request, relpath):
return "1.0.2"
application = WPApp(TopHandler)
application.run_server(8080)
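# As I understand webpie's routing, WPHandler attribute names become URL path segments,
# so this split layout serves (illustrative request paths):
#   GET /greet/hello  -> Greeter.hello
#   GET /clock/time   -> Clock.time
#   GET /version      -> TopHandler.version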
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import imageClassify_pb2 as imageClassify__pb2
class ImageClassifyGrpcStub(object):
"""The greeting service definition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ImageLocalClassify = channel.unary_unary(
'/imageFilter.ImageClassifyGrpc/ImageLocalClassify',
request_serializer=imageClassify__pb2.ImageLocalGrpcRequest2.SerializeToString,
response_deserializer=imageClassify__pb2.ImageClassifyGrpcReply2.FromString,
)
self.ImageUploadClassify = channel.unary_unary(
'/imageFilter.ImageClassifyGrpc/ImageUploadClassify',
request_serializer=imageClassify__pb2.ImageUploadGrpcRequest2.SerializeToString,
response_deserializer=imageClassify__pb2.ImageClassifyGrpcReply2.FromString,
)
self.ImageLocalClassifyAsync = channel.unary_unary(
'/imageFilter.ImageClassifyGrpc/ImageLocalClassifyAsync',
request_serializer=imageClassify__pb2.ImageLocalAsyncGrpcRequest2.SerializeToString,
response_deserializer=imageClassify__pb2.ImageRequestIdGrpcReply2.FromString,
)
self.ImageUploadClassifyAsync = channel.unary_unary(
'/imageFilter.ImageClassifyGrpc/ImageUploadClassifyAsync',
request_serializer=imageClassify__pb2.ImageUploadAsyncGrpcRequest2.SerializeToString,
response_deserializer=imageClassify__pb2.ImageRequestIdGrpcReply2.FromString,
)
self.BrowserFilter = channel.unary_unary(
'/imageFilter.ImageClassifyGrpc/BrowserFilter',
request_serializer=imageClassify__pb2.BrowserGrpcRequest.SerializeToString,
response_deserializer=imageClassify__pb2.ImageBrowserFilterGrpcReply.FromString,
)
self.BrowserClassify = channel.unary_unary(
'/imageFilter.ImageClassifyGrpc/BrowserClassify',
request_serializer=imageClassify__pb2.BrowserGrpcRequest.SerializeToString,
response_deserializer=imageClassify__pb2.ImageBrowserClassifyGrpcReply.FromString,
)
self.BrowserFilterAsync = channel.unary_unary(
'/imageFilter.ImageClassifyGrpc/BrowserFilterAsync',
request_serializer=imageClassify__pb2.BrowserAsyncGrpcRequest.SerializeToString,
response_deserializer=imageClassify__pb2.ImageRequestIdGrpcReply2.FromString,
)
self.BrowserClassifyAsync = channel.unary_unary(
'/imageFilter.ImageClassifyGrpc/BrowserClassifyAsync',
request_serializer=imageClassify__pb2.BrowserAsyncGrpcRequest.SerializeToString,
response_deserializer=imageClassify__pb2.ImageRequestIdGrpcReply2.FromString,
)
class ImageClassifyGrpcServicer(object):
"""The greeting service definition.
"""
def ImageLocalClassify(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ImageUploadClassify(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ImageLocalClassifyAsync(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ImageUploadClassifyAsync(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BrowserFilter(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BrowserClassify(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BrowserFilterAsync(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BrowserClassifyAsync(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ImageClassifyGrpcServicer_to_server(servicer, server):
rpc_method_handlers = {
'ImageLocalClassify': grpc.unary_unary_rpc_method_handler(
servicer.ImageLocalClassify,
request_deserializer=imageClassify__pb2.ImageLocalGrpcRequest2.FromString,
response_serializer=imageClassify__pb2.ImageClassifyGrpcReply2.SerializeToString,
),
'ImageUploadClassify': grpc.unary_unary_rpc_method_handler(
servicer.ImageUploadClassify,
request_deserializer=imageClassify__pb2.ImageUploadGrpcRequest2.FromString,
response_serializer=imageClassify__pb2.ImageClassifyGrpcReply2.SerializeToString,
),
'ImageLocalClassifyAsync': grpc.unary_unary_rpc_method_handler(
servicer.ImageLocalClassifyAsync,
request_deserializer=imageClassify__pb2.ImageLocalAsyncGrpcRequest2.FromString,
response_serializer=imageClassify__pb2.ImageRequestIdGrpcReply2.SerializeToString,
),
'ImageUploadClassifyAsync': grpc.unary_unary_rpc_method_handler(
servicer.ImageUploadClassifyAsync,
request_deserializer=imageClassify__pb2.ImageUploadAsyncGrpcRequest2.FromString,
response_serializer=imageClassify__pb2.ImageRequestIdGrpcReply2.SerializeToString,
),
'BrowserFilter': grpc.unary_unary_rpc_method_handler(
servicer.BrowserFilter,
request_deserializer=imageClassify__pb2.BrowserGrpcRequest.FromString,
response_serializer=imageClassify__pb2.ImageBrowserFilterGrpcReply.SerializeToString,
),
'BrowserClassify': grpc.unary_unary_rpc_method_handler(
servicer.BrowserClassify,
request_deserializer=imageClassify__pb2.BrowserGrpcRequest.FromString,
response_serializer=imageClassify__pb2.ImageBrowserClassifyGrpcReply.SerializeToString,
),
'BrowserFilterAsync': grpc.unary_unary_rpc_method_handler(
servicer.BrowserFilterAsync,
request_deserializer=imageClassify__pb2.BrowserAsyncGrpcRequest.FromString,
response_serializer=imageClassify__pb2.ImageRequestIdGrpcReply2.SerializeToString,
),
'BrowserClassifyAsync': grpc.unary_unary_rpc_method_handler(
servicer.BrowserClassifyAsync,
request_deserializer=imageClassify__pb2.BrowserAsyncGrpcRequest.FromString,
response_serializer=imageClassify__pb2.ImageRequestIdGrpcReply2.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'imageFilter.ImageClassifyGrpc', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ImageClassifyGrpc(object):
"""The greeting service definition.
"""
@staticmethod
def ImageLocalClassify(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/imageFilter.ImageClassifyGrpc/ImageLocalClassify',
imageClassify__pb2.ImageLocalGrpcRequest2.SerializeToString,
imageClassify__pb2.ImageClassifyGrpcReply2.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ImageUploadClassify(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/imageFilter.ImageClassifyGrpc/ImageUploadClassify',
imageClassify__pb2.ImageUploadGrpcRequest2.SerializeToString,
imageClassify__pb2.ImageClassifyGrpcReply2.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ImageLocalClassifyAsync(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/imageFilter.ImageClassifyGrpc/ImageLocalClassifyAsync',
imageClassify__pb2.ImageLocalAsyncGrpcRequest2.SerializeToString,
imageClassify__pb2.ImageRequestIdGrpcReply2.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ImageUploadClassifyAsync(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/imageFilter.ImageClassifyGrpc/ImageUploadClassifyAsync',
imageClassify__pb2.ImageUploadAsyncGrpcRequest2.SerializeToString,
imageClassify__pb2.ImageRequestIdGrpcReply2.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def BrowserFilter(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/imageFilter.ImageClassifyGrpc/BrowserFilter',
imageClassify__pb2.BrowserGrpcRequest.SerializeToString,
imageClassify__pb2.ImageBrowserFilterGrpcReply.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def BrowserClassify(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/imageFilter.ImageClassifyGrpc/BrowserClassify',
imageClassify__pb2.BrowserGrpcRequest.SerializeToString,
imageClassify__pb2.ImageBrowserClassifyGrpcReply.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def BrowserFilterAsync(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/imageFilter.ImageClassifyGrpc/BrowserFilterAsync',
imageClassify__pb2.BrowserAsyncGrpcRequest.SerializeToString,
imageClassify__pb2.ImageRequestIdGrpcReply2.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def BrowserClassifyAsync(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/imageFilter.ImageClassifyGrpc/BrowserClassifyAsync',
imageClassify__pb2.BrowserAsyncGrpcRequest.SerializeToString,
imageClassify__pb2.ImageRequestIdGrpcReply2.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
import os
import json
import io
import markdown
from flask import Flask, render_template, request
from jinja2 import Environment
app = Flask(__name__)
@app.route("/")
def home():
return render_template("home.html")
@app.route('/projects')
def projects():
data = get_static_json("static/projects/projects.json")
projects = data['projects']
projects.sort(key=order_projects_by_weight, reverse=True)
tag = request.args.get('tags')
if tag is not None:
        projects = [p for p in projects if tag.lower() in [t.lower() for t in p['tags']]]
badgebase = data['badge']['base']
badgetail = data['badge']['sm_tail']
return render_template('projects.html', projects=projects, tag=tag,
badgebase=badgebase, badgetail=badgetail)
@app.route('/projects/<title>')
def project(title):
data = get_static_json("static/projects/projects.json")
projects = data['projects']
in_project = next((p for p in projects if p['name'] == title), None)
if in_project is None:
return render_template('404.html'), 404
else:
selected = in_project
if 'description' not in selected:
cwd = "static/projects"
projdir = (cwd, selected['name'])
md = open_static('%s/%s/reflection.md' % projdir)
selected['description'] = markdown.markdown(md,
extensions=[
'codehilite',
'pymdownx.superfences',
'pymdownx.arithmatex'
],
extension_configs = {
'pymdownx.arithmatex' : {
'generic' : 'True'
}
}
)
name = selected['name']
last_commit = data['badge']['base'] + name + data['badge']['lg_tail']
return render_template('project.html', project=selected, last_commit=last_commit)
@app.route('/goals')
def goals():
return render_template('goals.html')
@app.route('/video')
def video():
return render_template('video.html')
@app.route('/resume')
def resume():
data = get_static_json("static/cv/cv.json")
items = data['items']
return render_template('cv.html', items=items)
def get_static_file(path):
site_root = os.path.realpath(os.path.dirname(__file__))
return os.path.join(site_root, path)
def get_static_json(path):
return json.load(open(get_static_file(path)))
def order_projects_by_weight(project):
    try:
        return int(project['weight'])
    except KeyError:
        return 0
def render_markdown_template(md_template, context):
env = Environment()
template = env.from_string(md_template)
rendered_md = template.render(context=context)
return rendered_md
def open_static(path):
    with io.open(get_static_file(path), "r", encoding="utf-8") as f:
        return f.read()
|
from grafanalib.core import (
Alert,
Graph,
Template,
Templating,
)
from grafanalib.cloudwatch import CloudwatchMetricsTarget
from lib.elasticsearch import (
generate_elasticsearch_cpu_graph,
generate_elasticsearch_jvm_memory_pressure_graph,
generate_elasticsearch_documents_graph,
generate_elasticsearch_storage_graph,
generate_elasticsearch_requests_graph,
generate_elasticsearch_status_red_alert_graph,
generate_elasticsearch_nodes_alert_graph,
# generate_elasticsearch_storage_alert_graph,
generate_elasticsearch_writes_blocked_alert_graph,
generate_elasticsearch_automated_snapshot_failure_alert_graph,
# generate_elasticsearch_jvm_memory_pressure_alert_graph,
generate_elasticsearch_dashboard,
# generate_elasticsearch_alerts_dashboard,
)
class TestElasticsearchDashboards:
def test_should_generate_elasticsearch_cpu_graph(self):
name = "es"
client_id = "1234567890"
cloudwatch_data_source = "cw"
generated_graph = generate_elasticsearch_cpu_graph(
name=name,
client_id=client_id,
cloudwatch_data_source=cloudwatch_data_source,
)
generated_graph.title.should.match(r"CPU utilization")
generated_graph.dataSource.should.match(cloudwatch_data_source)
generated_graph.targets.should.have.length_of(1)
generated_graph.targets[0].should.be.a(CloudwatchMetricsTarget)
generated_graph.targets[0].namespace.should.equal("AWS/ES")
generated_graph.targets[0].period.should.equal("1m")
generated_graph.targets[0].metricName.should.equal("CPUUtilization")
generated_graph.targets[0].statistics.should.equal(["Maximum"])
generated_graph.targets[0].dimensions.should.equal(
{"DomainName": name, "ClientId": client_id}
)
def test_should_generate_elasticsearch_jvm_memory_pressure_graph(self):
name = "es"
client_id = "1234567890"
cloudwatch_data_source = "cw"
notifications = []
generated_graph = generate_elasticsearch_jvm_memory_pressure_graph(
name=name,
client_id=client_id,
notifications=notifications,
cloudwatch_data_source=cloudwatch_data_source,
)
generated_graph.should.be.a(Graph)
generated_graph.title.should.match(r"JVM memory pressure")
generated_graph.dataSource.should.match(cloudwatch_data_source)
generated_graph.targets.should.have.length_of(1)
generated_graph.targets[0].should.be.a(CloudwatchMetricsTarget)
generated_graph.targets[0].period.should.equal("1m")
generated_graph.targets[0].metricName.should.equal("JVMMemoryPressure")
generated_graph.targets[0].statistics.should.equal(["Maximum"])
generated_graph.targets[0].dimensions.should.equal(
{"DomainName": name, "ClientId": client_id}
)
def test_should_generate_elasticsearch_jvm_memory_pressure_graph_with_notifications(
self,
):
name = "es"
client_id = "1234567890"
cloudwatch_data_source = "cw"
notifications = ["foo", "bar"]
generated_graph = generate_elasticsearch_jvm_memory_pressure_graph(
name=name,
client_id=client_id,
notifications=notifications,
cloudwatch_data_source=cloudwatch_data_source,
)
generated_graph.targets[0].refId.should.equal("A")
generated_graph.alert.should.be.a(Alert)
generated_graph.alert.frequency.should.equal("2m")
generated_graph.alert.gracePeriod.should.equal("2m")
generated_graph.alert.notifications.should.equal(notifications)
def test_should_generate_elasticsearch_documents_graph(self):
name = "es"
client_id = "1234567890"
cloudwatch_data_source = "cw"
generated_graph = generate_elasticsearch_documents_graph(
name=name,
client_id=client_id,
cloudwatch_data_source=cloudwatch_data_source,
)
generated_graph.should.be.a(Graph)
generated_graph.title.should.match(r"Documents")
generated_graph.dataSource.should.match(cloudwatch_data_source)
generated_graph.targets.should.have.length_of(2)
for target in generated_graph.targets:
target.should.be.a(CloudwatchMetricsTarget)
target.period.should.equal("1m")
target.statistics.should.equal(["Maximum"])
target.dimensions.should.equal({"DomainName": name, "ClientId": client_id})
generated_graph.targets[0].metricName.should.equal("SearchableDocuments")
generated_graph.targets[1].metricName.should.equal("DeletedDocuments")
def test_should_generate_elasticsearch_storage_graph(self):
name = "es"
client_id = "1234567890"
cloudwatch_data_source = "cw"
notifications = ["foo", "bar"]
generated_graph = generate_elasticsearch_storage_graph(
name=name,
client_id=client_id,
cloudwatch_data_source=cloudwatch_data_source,
notifications=notifications,
)
generated_graph.should.be.a(Graph)
generated_graph.title.should.match(r"Storage")
generated_graph.dataSource.should.match(cloudwatch_data_source)
generated_graph.targets.should.have.length_of(2)
for target in generated_graph.targets:
target.should.be.a(CloudwatchMetricsTarget)
target.period.should.equal("1m")
target.dimensions.should.equal({"DomainName": name, "ClientId": client_id})
generated_graph.targets[0].statistics.should.equal(["Minimum"])
generated_graph.targets[0].metricName.should.equal("FreeStorageSpace")
generated_graph.targets[1].statistics.should.equal(["Maximum"])
generated_graph.targets[1].metricName.should.equal("ClusterUsedSpace")
generated_graph.targets[0].refId.should.equal("A")
generated_graph.alert.should.be.a(Alert)
generated_graph.alert.frequency.should.equal("2m")
generated_graph.alert.gracePeriod.should.equal("2m")
generated_graph.alert.notifications.should.equal(notifications)
def test_should_generate_elasticsearch_requests_graph(self):
name = "es"
client_id = "1234567890"
cloudwatch_data_source = "cw"
generated_graph = generate_elasticsearch_requests_graph(
name=name,
client_id=client_id,
cloudwatch_data_source=cloudwatch_data_source,
)
generated_graph.should.be.a(Graph)
generated_graph.title.should.match(r"Requests")
generated_graph.dataSource.should.match(cloudwatch_data_source)
generated_graph.targets.should.have.length_of(4)
for target in generated_graph.targets:
target.should.be.a(CloudwatchMetricsTarget)
target.period.should.equal("1m")
target.dimensions.should.equal({"DomainName": name, "ClientId": client_id})
target.statistics.should.equal(["Sum"])
target.namespace.should.equal("AWS/ES")
generated_graph.targets[0].metricName.should.equal("2xx")
generated_graph.targets[1].metricName.should.equal("3xx")
generated_graph.targets[2].metricName.should.equal("4xx")
generated_graph.targets[3].metricName.should.equal("5xx")
def test_should_generate_elasticsearch_status_red_alert_graph(self):
name = "es"
client_id = "1234567890"
cloudwatch_data_source = "cw"
notifications = ["slack-1", "slack-2"]
generated_graph = generate_elasticsearch_status_red_alert_graph(
name=name,
client_id=client_id,
cloudwatch_data_source=cloudwatch_data_source,
notifications=notifications,
)
generated_graph.should.be.a(Graph)
generated_graph.title.should.match(r"Status RED alerts")
generated_graph.dataSource.should.match(cloudwatch_data_source)
generated_graph.targets.should.have.length_of(1)
generated_graph.targets[0].should.be.a(CloudwatchMetricsTarget)
generated_graph.targets[0].statistics.should.equal(["Maximum"])
generated_graph.targets[0].metricName.should.equal("ClusterStatus.red")
generated_graph.targets[0].period.should.equal("1m")
generated_graph.targets[0].namespace.should.equal("AWS/ES")
generated_graph.targets[0].refId.should.equal("A")
generated_graph.alert.should.be.a(Alert)
generated_graph.alert.frequency.should.equal("2m")
generated_graph.alert.gracePeriod.should.equal("2m")
generated_graph.alert.notifications.should.equal(notifications)
def test_should_generate_elasticsearch_nodes_alert_graph(self):
name = "es"
client_id = "1234567890"
cloudwatch_data_source = "cw"
notifications = ["slack-1", "slack-2"]
generated_graph = generate_elasticsearch_nodes_alert_graph(
name=name,
client_id=client_id,
cloudwatch_data_source=cloudwatch_data_source,
notifications=notifications,
)
generated_graph.should.be.a(Graph)
generated_graph.title.should.match(r"Elasticsearch node alerts")
generated_graph.dataSource.should.match(cloudwatch_data_source)
generated_graph.targets.should.have.length_of(2)
generated_graph.targets[0].should.be.a(CloudwatchMetricsTarget)
generated_graph.targets[0].statistics.should.equal(["Minimum"])
generated_graph.targets[0].period.should.equal("1m")
generated_graph.targets[0].namespace.should.equal("AWS/ES")
generated_graph.targets[0].metricName.should.equal("Nodes")
generated_graph.targets[0].dimensions.should.equal(
{"DomainName": name, "ClientId": client_id}
)
generated_graph.targets[0].refId.should.equal("A")
generated_graph.alert.should.be.a(Alert)
generated_graph.alert.frequency.should.equal("2m")
generated_graph.alert.gracePeriod.should.equal("2m")
generated_graph.alert.notifications.should.equal(notifications)
def test_should_generate_elasticsearch_writes_blocked_alert_graph(self):
name = "es"
client_id = "1234567890"
cloudwatch_data_source = "cw"
notifications = ["slack-1", "slack-2"]
generated_graph = generate_elasticsearch_writes_blocked_alert_graph(
name=name,
client_id=client_id,
cloudwatch_data_source=cloudwatch_data_source,
notifications=notifications,
)
generated_graph.should.be.a(Graph)
generated_graph.title.should.match(r"Elasticsearch write blocked alerts")
generated_graph.dataSource.should.match(cloudwatch_data_source)
generated_graph.targets.should.have.length_of(1)
generated_graph.targets[0].should.be.a(CloudwatchMetricsTarget)
generated_graph.targets[0].statistics.should.equal(["Maximum"])
generated_graph.targets[0].period.should.equal("1m")
generated_graph.targets[0].namespace.should.equal("AWS/ES")
generated_graph.targets[0].metricName.should.equal("ClusterIndexWritesBlocked")
generated_graph.targets[0].dimensions.should.equal(
{"DomainName": name, "ClientId": client_id}
)
generated_graph.targets[0].refId.should.equal("A")
generated_graph.alert.should.be.a(Alert)
generated_graph.alert.frequency.should.equal("2m")
generated_graph.alert.gracePeriod.should.equal("2m")
generated_graph.alert.notifications.should.equal(notifications)
def test_should_generate_elasticsearch_automated_snapshot_failure_alert_graph(self):
name = "es"
client_id = "1234567890"
cloudwatch_data_source = "cw"
notifications = ["slack-1", "slack-2"]
generated_graph = generate_elasticsearch_automated_snapshot_failure_alert_graph(
name=name,
client_id=client_id,
cloudwatch_data_source=cloudwatch_data_source,
notifications=notifications,
)
generated_graph.should.be.a(Graph)
generated_graph.title.should.match(
r"Elasticsearch automated snapshot failure alerts"
)
generated_graph.dataSource.should.match(cloudwatch_data_source)
generated_graph.targets.should.have.length_of(1)
generated_graph.targets[0].should.be.a(CloudwatchMetricsTarget)
generated_graph.targets[0].namespace.should.equal("AWS/ES")
generated_graph.targets[0].period.should.equal("1m")
generated_graph.targets[0].statistics.should.equal(["Maximum"])
generated_graph.targets[0].dimensions.should.equal(
{"DomainName": name, "ClientId": client_id}
)
generated_graph.targets[0].metricName.should.equal("AutomatedSnapshotFailure")
generated_graph.targets[0].refId.should.equal("A")
generated_graph.alert.should.be.a(Alert)
generated_graph.alert.frequency.should.equal("2m")
generated_graph.alert.gracePeriod.should.equal("2m")
generated_graph.alert.notifications.should.equal(notifications)
def test_should_generate_elasticsearch_dashboard(self):
name = "es"
client_id = "1234567890"
environment = "prod"
influxdb_data_source = "influxdb"
cloudwatch_data_source = "cw"
notifications = ["slack-1", "slack-2"]
generated_dashboard = generate_elasticsearch_dashboard(
name=name,
client_id=client_id,
environment=environment,
influxdb_data_source=influxdb_data_source,
cloudwatch_data_source=cloudwatch_data_source,
notifications=notifications,
)
generated_dashboard.title.should.equal("Elasticsearch: {}".format(name))
generated_dashboard.templating.should.be.a(Templating)
generated_dashboard.templating.list.should.have.length_of(1)
generated_dashboard.templating.list[0].should.be.a(Template)
generated_dashboard.tags.should.have.length_of(2)
generated_dashboard.rows.should.have.length_of(5)
generated_dashboard.links.should.have.length_of(1)
|
# coding: utf8
from __future__ import unicode_literals, print_function, division
from xml.etree.ElementTree import fromstring
from pyramid.config import Configurator
from mock import Mock
def main(global_config, **settings):
"""called when bootstrapping a pyramid app using clld/tests/test.ini."""
from clld import interfaces
from clld.web.app import MapMarker
from clld.web.adapters.base import Representation
settings['mako.directories'] = ['clld:web/templates']
config = Configurator(settings=settings)
config.include('clld.web.app')
config.registry.registerUtility(MapMarker(), interfaces.IMapMarker)
config.register_staticresource('css', 'clld:web/static/notexisting.css')
config.register_staticresource('js', 'clld:web/static/notexisting.js')
config.register_adapter(Representation, Mock, name='test')
config.register_menu(('home', lambda ctx, req: (req.resource_url(req.dataset), 'tt')))
return config.make_wsgi_app()
def handle_dt(req, dt_cls, model, **kw):
dt = dt_cls(req, model, **kw)
dt.render()
for item in dt.get_query():
for col in dt.cols:
col.format(item)
assert isinstance(dt.options, dict)
return dt
class XmlResponse(object):
"""Wrapper for XML responses."""
ns = None
def __init__(self, response):
self.raw = response.body
self.root = fromstring(response.body)
def findall(self, name):
if not name.startswith('{') and self.ns:
name = '{%s}%s' % (self.ns, name)
return self.root.findall('.//%s' % name)
def findone(self, name):
_all = self.findall(name)
if _all:
return _all[0]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
###
### IMPORTANT: This is NOT a Buck binary, it runs using the system python3
###
import ctypes
import fcntl
import os
import signal
import struct
import sys
import termios
_prctl = ctypes.CDLL("libc.so.6").prctl
_PR_SET_PDEATHSIG = 1
ptm, pts = os.openpty()
# rows, columns, xpixel, ypixel
s = struct.pack("HHHH", 20, 1000, 0, 0)
fcntl.ioctl(pts, termios.TIOCSWINSZ, s)
pid = os.fork()
if pid == 0:
# Make sure the child is killed when we exit
_prctl(_PR_SET_PDEATHSIG, signal.SIGKILL)
os.dup2(pts, 1)
os.dup2(pts, 2)
os.execvp(sys.argv[1], sys.argv[1:])
os.close(pts)
# If the child gets interrupted, so do we. Ignore, we'll exit with the child.
signal.signal(signal.SIGINT, signal.SIG_IGN)
while True:
try:
chunk = os.read(ptm, 4096) # Raises OSError when the child exits
os.write(1, chunk)
except OSError:
break # Exit, killing the child (it's already dead on OSError)
_, status = os.waitpid(pid, 0)
exit(status)
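# Illustrative invocation (the script filename is hypothetical; the wrapped command is
# whatever is passed as argv[1:]):
#   python3 wrap_with_pty.py ls --color=auto
# The child then sees a pseudo-terminal that is 20 rows by 1000 columns, so its output
# is not wrapped to the width of the calling terminal.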
|
# parse is a utility module
# https://docs.python.org/3/library/urllib.parse.html#module-urllib.parse
from urllib import parse
# b'name=Ashe&hobbies=%5B%27%E6%91%84%E5%BD%B1%27%2C+%27%E6%97%85%E8%A1%8C%27%5D'
print(parse.urlencode({'name': 'Ashe', 'hobbies': ['摄影', '旅行']}).encode('utf-8'))
o = parse.urlparse('https://zlikun:123456@zlikun.com:8443/python/guide.html')
# ParseResult(scheme='https', netloc='zlikun:123456@zlikun.com:8443', path='/python/guide.html', params='', query='', fragment='')
print(o)
url = parse.urljoin('https://zlikun.com/python/guide.html', 'manual.html')
# https://zlikun.com/python/manual.html
print(url)
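# Related helper, shown here only as an illustration (not part of the original snippet):
# parse.parse_qs decodes a query string into a dict of value lists, e.g.
#   parse.parse_qs('name=Ashe&skill=archer&skill=scout')
#   -> {'name': ['Ashe'], 'skill': ['archer', 'scout']}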
|
"""
This is main.
"""
from utils import ProjectUtils
if __name__ == '__main__':
utils = ProjectUtils()
df_news_cleaned, df_news_cleaned_path = utils.clean_export_news_file()
print('df_news_cleaned_path:', df_news_cleaned_path)
df_news_partial, df_news_partial_path = utils.create_sentiment_positivity_negativity(df_news_cleaned)
print('df_news_partial_path:', df_news_partial_path)
df_news_full, df_news_full_path = utils.create_polarity_subjectivity(df_news_partial)
print('df_news_full_path:', df_news_full_path)
df_price_labeled, df_price_labeled_path = utils.create_price_change_cols()
print('df_price_labeled_path:', df_price_labeled_path)
df_merged, df_merged_path = utils.merge_format_price_news(df_price=df_price_labeled, df_news=df_news_full)
print('df_merged_path:', df_merged_path)
df_merged = utils.convert_data_type(df_merged=df_merged)
df_normalized, df_normalized_path = utils.normalize_df_merged(df_merged=df_merged)
print('df_normalized_path:', df_normalized_path)
x_train, x_dev, x_test, y_train, y_dev, y_test = utils.read_split_model_data(model_data_path=df_normalized_path)
model_dict = utils.get_classifier_dict()
accuracy_dict, confusion_matrix_dict = utils.fit_and_measure(
x_train=x_train,
x_dev=x_dev,
x_test=x_test,
y_train=y_train,
y_dev=y_dev,
y_test=y_test
)
|
from mock import patch
from unittest import TestCase
from infi import asi
@asi.gevent_friendly
def simple_function():
return 1
def a_generator():
inner_call = lambda n: n
for i in range(2):
yield asi.gevent_friendly(inner_call)(i)
class GeventFriendlyTestCase(TestCase):
def test_simple_function(self):
with patch.object(asi, "_gevent_friendly_sleep") as _gevent_friendly_sleep:
self.assertEqual(simple_function(), 1)
self.assertTrue(_gevent_friendly_sleep.called)
def test_inside_generator(self):
with patch.object(asi, "_gevent_friendly_sleep") as _gevent_friendly_sleep:
self.assertEqual(list(a_generator()), [0, 1])
self.assertEqual(_gevent_friendly_sleep.call_count, 2)
|
"""General utilities."""
def _var_names(var_names):
"""Handle var_names input across arviz.
Parameters
----------
var_names: str, list, or None
Returns
-------
var_name: list or None
"""
if var_names is None:
return None
elif isinstance(var_names, str):
return [var_names]
else:
return var_names
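# Illustrative behaviour (examples added for clarity, mirroring the branches above):
#   _var_names(None)          -> None
#   _var_names("mu")          -> ["mu"]
#   _var_names(["mu", "sd"])  -> ["mu", "sd"]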
|
import CI
import CI.builder.system
import os
description = 'configure the Fish shell'
features = CI.Features(
copy_standard_config = CI.Boolean(),
change_user_shell = CI.Boolean(),
)
CI.builder.system.features.packages = [
'fish',
]
class FishChangeUserShellAction(object):
description = "choose Fish as the user shell"
def __init__(self):
self.fish_path = None
def check(self, runner):
okay = True
with open('/etc/passwd') as f:
for line in f:
fields = line.strip().split(':')
if len(fields) > 6 and int(fields[2]) == os.getuid():
okay = os.path.basename(fields[6]) != 'fish'
#TODO: Handle non-standard locations?
self.fish_path = '/usr/bin/fish'
break
return okay
def perform(self, runner):
if self.fish_path:
runner.run('sudo', 'chsh', '-s', self.fish_path, os.getlogin())
def actions(runner):
if features.copy_standard_config:
yield CI.action.CopyFile('/usr/share/fish/config.fish', '~/.config/fish/config.fish',
overwrite=False, permissions=None, create_directory=True)
if features.change_user_shell:
yield FishChangeUserShellAction()
|
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from freezer_dr.common.yaml_parser import YamlParser
from freezer_dr.fencers.common.driver import FencerBaseDriver
import libvirt
from oslo_config import cfg
from oslo_log import log
import time
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class LibvirtDriver(FencerBaseDriver):
def __init__(self, nodes, fencer_conf):
super(LibvirtDriver, self).__init__(nodes, fencer_conf)
self.parser = YamlParser(self.fencer_conf['credentials_file'])
# initiate libvirt connection
conn_name = self.fencer_conf.get('name', None)
self.connection = libvirt.open(name=conn_name)
def force_shutdown(self, node):
target = self.connection.lookupByName(name=node.get('domain-name'))
return target.destroy()
def graceful_shutdown(self, node):
target = self.connection.lookupByName(name=node.get('domain-name'))
return target.shutdown()
def status(self, node):
target = self.connection.lookupByName(name=node.get('domain-name'))
return target.isActive()
def get_node_details(self, node):
"""Loads the node's fencing information from ``credentials_file``
:param node: a dict contains node ip or hostname
:return: a dict contains node fencing information
"""
node_details = self.parser.find_server_by_ip(node.get('ip')) or \
self.parser.find_server_by_hostname(node.get('host'))
return node_details
def fence(self):
"""Implements the fencing procedure for server fencing using ipmi
:return: a list of nodes and weather they're fenced or not!
"""
fenced_nodes = []
for node in self.nodes:
LOG.debug("fencing node {0}".format(node))
# load node details
node_details = self.get_node_details(node)
# loop on the node number of n times trying to fence it gently,
# if not force it!
for retry in range(0, self.fencer_conf['retries']):
if self.status(node=node_details):
try:
self.graceful_shutdown(node=node_details)
except Exception as e:
LOG.debug(e)
else:
node['status'] = True
break
time.sleep(self.fencer_conf['hold_period'])
LOG.info('wait for %d seconds before retrying to gracefully '
'shutdown' % self.fencer_conf['hold_period'])
try:
self.force_shutdown(node=node_details)
except Exception as e:
LOG.error(e)
if not self.status(node=node_details):
node['status'] = True
else:
node['status'] = False
fenced_nodes.append(node)
return fenced_nodes
def get_info(self):
return {
'name': 'Libvirt Interface driver',
'version': 1.1,
'author': 'Hewlett-Packard Enterprise Company, L.P'
}
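# Illustrative configuration (keys inferred from the code above; values are hypothetical):
#   fencer_conf = {'credentials_file': '/etc/freezer/servers.yml',
#                  'name': 'qemu:///system', 'retries': 3, 'hold_period': 10}
#   nodes = [{'ip': '10.0.0.5', 'host': 'compute-1'}]
#   fenced = LibvirtDriver(nodes, fencer_conf).fence()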
|