max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
session/static_configs.py | SirjanK/Multivac | 0 | 12767751 | <filename>session/static_configs.py
# Default port a Redis server listens on.
DEFAULT_REDIS_PORT = 6379
# Number of seconds to sleep upon successful end to allow graceful termination of subprocesses.
TERMINATION_TIME = 3
# Max caps on parameters
# Upper bound on the number of steps per session.
MAX_NUM_STEPS = 10000
# Upper bound on the observation-delta parameter (units not visible here -- confirm with callers).
MAX_OBSERVATION_DELTA = 5000
# Upper bound on the requested video frames per second.
MAX_VIDEO_FPS = 60
| 1.203125 | 1 |
images_augmentation.py | KafKaf/CarND-Semantic-Segmentation | 0 | 12767752 | from glob import glob
import os.path
import cv2
def purge_augmentation(data_folder):
    """Delete previously generated augmentation images under *data_folder*.

    Removes 'equ_*' and 'flipped_*' PNGs from image_2 and 'flipped_*'
    PNGs from gt_image_2 so a fresh augmentation run starts clean.
    """
    targets = (
        ('image_2', 'equ_*.png'),
        ('image_2', 'flipped_*.png'),
        ('gt_image_2', 'flipped_*.png'),
    )
    for subdir, pattern in targets:
        for path in glob(os.path.join(data_folder, subdir, pattern)):
            os.remove(path)
def histogram_equalization(img):
    """Return *img* (BGR) with its luminance histogram equalized.

    Only the luma (Y) channel is equalized; chroma is untouched, so colors
    are preserved while contrast is stretched.
    """
    yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])
    # Convert back to OpenCV's default BGR channel order.
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
def add_images_of_histogram_equalization(data_folder, image_paths):
    """Write a histogram-equalized copy of each image into image_2/.

    Output files keep the original basename with an 'equ_' prefix.
    """
    for source_path in image_paths:
        equalized = histogram_equalization(cv2.imread(source_path))
        out_name = 'equ_' + os.path.basename(source_path)
        cv2.imwrite(os.path.join(data_folder, 'image_2', out_name), equalized)
def add_images_of_flip(data_folder, image_paths):
    """Write a horizontally flipped copy of each image into *data_folder*.

    Output files keep the original basename with a 'flipped_' prefix.
    Unlike add_images_of_histogram_equalization, the target subdirectory
    is chosen by the caller (data_folder is used directly).
    """
    for source_path in image_paths:
        mirrored = cv2.flip(cv2.imread(source_path), 1)
        out_name = 'flipped_' + os.path.basename(source_path)
        cv2.imwrite(os.path.join(data_folder, out_name), mirrored)
def augment_images():
    """Regenerate the augmented training images for the KITTI road dataset.

    Purges any previous augmentation output, then adds histogram-equalized
    RGB images and horizontally flipped RGB/ground-truth images.
    """
    training_dir = 'data/data_road/training'
    purge_augmentation(training_dir)
    rgb_dir = os.path.join(training_dir, 'image_2')
    gt_dir = os.path.join(training_dir, 'gt_image_2')
    # Glob before augmenting so newly written files are not re-processed.
    rgb_paths = glob(os.path.join(rgb_dir, '*.png'))
    gt_paths = glob(os.path.join(gt_dir, '*.png'))
    add_images_of_histogram_equalization(training_dir, rgb_paths)
    add_images_of_flip(rgb_dir, rgb_paths)
    add_images_of_flip(gt_dir, gt_paths)
augment_images() | 2.828125 | 3 |
src/WaveBlocks/IOM_plugin_grid.py | WaveBlocks/WaveBlocks | 0 | 12767753 | <filename>src/WaveBlocks/IOM_plugin_grid.py
"""The WaveBlocks Project
IOM plugin providing functions for handling grid data.
@author: <NAME>
@copyright: Copyright (C) 2010, 2011 <NAME>
@license: Modified BSD License
"""
import numpy as np
import h5py as hdf
def add_grid(self, parameters, blockid=0):
    r"""
    Add storage for a grid.

    Creates a float dataset named "grid" of shape
    (parameters["dimension"], parameters["ngn"]) inside the HDF5 group of
    the given data block.

    :param parameters: mapping providing "dimension" and "ngn" entries.
    :param blockid: id of the data block the grid belongs to.
    """
    self._srf[self._prefixb+str(blockid)].create_dataset("grid", (parameters["dimension"], parameters["ngn"]), np.floating)
def delete_grid(self, blockid=0):
    r"""
    Remove the stored grid.

    Deleting a grid that does not exist is a silent no-op.

    :param blockid: id of the data block whose grid is removed.
    """
    grid_path = self._prefixb + str(blockid) + "/grid"
    try:
        del self._srf[grid_path]
    except KeyError:
        pass
def has_grid(self, blockid=0):
    r"""
    Ask if the specified data block has the desired data tensor.

    :param blockid: id of the data block to inspect.
    :return: ``True`` if the block contains a "grid" dataset.
    """
    # Membership test directly on the group; avoids materializing .keys().
    return "grid" in self._srf[self._prefixb+str(blockid)]
def save_grid(self, grid, blockid=0):
    r"""
    Save the grid nodes.

    Only the real part of *grid* is stored, written in place into the
    pre-allocated "grid" dataset of the given data block.

    :param grid: array of grid nodes to store.
    :param blockid: id of the data block to write into.
    """
    dataset = self._srf["/" + self._prefixb + str(blockid) + "/grid"]
    dataset[:] = np.real(grid)
def load_grid(self, blockid=0):
    r"""
    Load the grid nodes.

    :param blockid: id of the data block to read from.
    :return: the stored grid with singleton dimensions squeezed out.
    """
    dataset = self._srf["/" + self._prefixb + str(blockid) + "/grid"]
    return np.squeeze(dataset)
| 2.453125 | 2 |
ps2000Examples/streaming_mode/streaming_mode_gathering.py | LauritzRaisch/picosdk-python-wrappers | 114 | 12767754 | from time import time_ns
from ctypes import POINTER, c_int16, c_uint32
import matplotlib.pyplot as plt
import numpy as np
from picosdk.ps2000 import ps2000
from picosdk.functions import assert_pico2000_ok
from picosdk.ctypes_wrapper import C_CALLBACK_FUNCTION_FACTORY
from enum import IntEnum
class Channel(IntEnum):
    """ps2000 input-channel identifiers (values mirror the PicoSDK C enum)."""
    PS2000_CHANNEL_A = 0
    PS2000_CHANNEL_B = 1
class PotentialRange(IntEnum):
    """ps2000 input voltage-range selectors.

    The member values also index the millivolt table in ``adc_to_mv``,
    so the two must stay in sync.
    """
    PS2000_10MV = 0
    PS2000_20MV = 1
    PS2000_50MV = 2
    PS2000_100MV = 3
    PS2000_200MV = 4
    PS2000_500MV = 5
    PS2000_1V = 6
    PS2000_2V = 7
    PS2000_5V = 8
    PS2000_10V = 9
    PS2000_20V = 10
class TimeUnit(IntEnum):
    """Time-unit codes passed to ps2000_run_streaming_ns."""
    FEMTOSECOND = 0
    PICOSECOND = 1
    NANOSECOND = 2
    MICROSECOND = 3
    MILLISECOND = 4
    SECOND = 5
# ctypes prototype for the streaming overview-buffer callback passed to
# ps2000_get_streaming_last_values: (buffers**, overflow, triggered_at,
# triggered, auto_stop, n_values) -> None.
# NOTE(review): argument meanings inferred from the callback defined in
# StreamingDevice.gather below -- confirm against the ps2000 programmer's guide.
CALLBACK = C_CALLBACK_FUNCTION_FACTORY(None, POINTER(POINTER(c_int16)), c_int16, c_uint32, c_int16, c_int16, c_uint32)
# reimplement this because the other one only takes ctypes
def adc_to_mv(values, range_, bitness=16):
    """Convert raw ADC readings to millivolts.

    *range_* indexes the full-scale millivolt table (it matches the
    PotentialRange enum values); *bitness* is the ADC resolution in bits.
    """
    # Full-scale voltage in mV for each PotentialRange index.
    full_scale_mv = (10, 20, 50, 100, 200, 500, 1_000, 2_000, 5_000, 10_000, 20_000)
    max_code = 2 ** (bitness - 1) - 1
    return [(reading * full_scale_mv[range_]) / max_code for reading in values]
def determine_time_unit(interval_ns):
    """Scale a nanosecond interval to a human-friendly (value, unit) pair.

    Repeatedly divides by 1000 while the value exceeds 5000, stepping
    through ns -> us -> ms -> s.

    Bug fix: the original kept dividing past 's' for intervals longer than
    5000 seconds and then raised IndexError; the loop now stops at the
    largest available unit.
    """
    units = ['ns', 'us', 'ms', 's']
    unit = 0
    while interval_ns > 5_000 and unit < len(units) - 1:
        interval_ns /= 1000
        unit += 1
    return interval_ns, units[unit]
class StreamingDevice:
    """Wraps a ps2000 unit running in fast-streaming mode on channel A.

    ``gather()`` polls the driver and accumulates raw ADC samples through a
    ctypes callback until at least ``gather_values`` samples are collected;
    the result is returned in millivolts.
    """
    def __init__(self, gather_values, potential_range=PotentialRange.PS2000_50MV):
        self.device = ps2000.open_unit()
        self.potential_range = potential_range
        self.gather_values = gather_values

        res = ps2000.ps2000_set_channel(self.device.handle, Channel.PS2000_CHANNEL_A, True, True, potential_range)
        assert_pico2000_ok(res)

        # start 'fast-streaming' mode
        # NOTE(review): positional arguments presumably are sample interval
        # (500 ns), time unit, max samples, auto_stop, aggregation and overview
        # buffer size -- confirm against the ps2000 API documentation.
        res = ps2000.ps2000_run_streaming_ns(
            self.device.handle,
            500,
            TimeUnit.NANOSECOND,
            100_000,
            False,
            1,
            50_000
        )
        assert_pico2000_ok(res)

        # Wall-clock bounds of the capture, used later to label the time axis.
        self.start_time = time_ns()
        self.end_time = time_ns()

    def close(self):
        """Stop streaming and release the device handle."""
        ps2000.ps2000_stop(self.device.handle)
        self.device.close()

    def gather(self):
        """Poll until enough samples arrived; return them converted to mV."""
        adc_values = []

        def get_overview_buffers(buffers, _overflow, _triggered_at, _triggered, _auto_stop, n_values):
            # buffers[0] is channel A's overview buffer; copy out n_values samples.
            adc_values.extend(buffers[0][0:n_values])

        callback = CALLBACK(get_overview_buffers)

        while len(adc_values) < self.gather_values:
            ps2000.ps2000_get_streaming_last_values(
                self.device.handle,
                callback
            )

        self.end_time = time_ns()

        return adc_to_mv(adc_values, self.potential_range)
# Example run: gather 6M samples from a physically connected PicoScope,
# then plot voltage against elapsed wall-clock time.
stream = StreamingDevice(6_000_000)
values = stream.gather()
stream.close()

print('Values gathered: {}'.format(len(values)))

fig, ax = plt.subplots()
# Scale the capture duration to a readable unit for the x axis label.
interval, units = determine_time_unit(stream.end_time - stream.start_time)
ax.set_xlabel('time/{}'.format(units))
ax.set_ylabel('voltage/mV')
ax.plot(np.linspace(0, interval, len(values)), values)
plt.show()
| 2.515625 | 3 |
tests/test_history.py | l1kw1d/stashboard | 761 | 12767755 | <filename>tests/test_history.py
from datetime import datetime
from datetime import date
from datetime import timedelta
from base import TestbedTest
from models import Event
from models import Service
from models import Status
class HistoryTest(TestbedTest):
    """Tests for Service.history(): ordering, month boundaries and events.

    Improvements over the original: the deprecated ``assertEquals`` alias is
    replaced by ``assertEqual``, and the repeated Event-creation / history
    lookup boilerplate is factored into the ``_add_down_event`` and
    ``_history`` helpers.
    """

    def setUp(self):
        super(HistoryTest, self).setUp()
        Status.load_defaults()
        self.service = Service(slug="account", name="Account",
                               description="The BEST SERVICE")
        self.service.put()

    def _add_down_event(self, when):
        """Store a 'down' Event for the test service starting at *when*."""
        down = Status.get_by_slug("down")
        Event(status=down, service=self.service, start=when,
              message="HEY").put()

    def _history(self, days, start):
        """Fetch *days* of 'up' history for the service from *start*."""
        up = Status.get_by_slug("up")
        return self.service.history(days, up, start=start)

    def test_history_order(self):
        history = self._history(5, date(2011, 4, 13))
        self.assertEqual(len(history), 5)

        history_days = [h["day"] for h in history]
        expected = [
            date(2011, 4, 12),
            date(2011, 4, 11),
            date(2011, 4, 10),
            date(2011, 4, 9),
            date(2011, 4, 8),
        ]
        self.assertEqual(history_days, expected)

    def test_history_order_early_month(self):
        history = self._history(5, date(2011, 4, 2))

        history_days = [h["day"] for h in history]
        expected = [
            date(2011, 4, 1),
            date(2011, 3, 31),
            date(2011, 3, 30),
            date(2011, 3, 29),
            date(2011, 3, 28),
        ]
        self.assertEqual(history_days, expected)

        for h in history:
            self.assertFalse(h["information"])

    def test_history_order_late_month(self):
        history = self._history(5, date(2011, 4, 5))

        history_days = [h["day"] for h in history]
        expected = [
            date(2011, 4, 4),
            date(2011, 4, 3),
            date(2011, 4, 2),
            date(2011, 4, 1),
            date(2011, 3, 31),
        ]
        self.assertEqual(history_days, expected)

    def test_history_no_errors_boundary(self):
        # An event exactly on the start date must not show in the history.
        self._add_down_event(datetime(2011, 4, 5))
        history = self._history(5, date(2011, 4, 5))
        self.assertEqual(history[0]["information"], False)

    def test_history_one_error(self):
        self._add_down_event(datetime(2011, 4, 4, 12))
        history = self._history(5, date(2011, 4, 5))
        self.assertEqual(history[0]["information"], True)
        self.assertEqual(history[0]["name"], "information")

    def test_history_one_error_boundary(self):
        # Event on the oldest day still inside the 5-day window.
        self._add_down_event(datetime(2011, 3, 31))
        history = self._history(5, date(2011, 4, 5))
        self.assertEqual(history[4]["information"], True)
        self.assertEqual(history[4]["name"], "information")

    def test_history_count(self):
        history = self._history(10, date(2011, 4, 5))
        self.assertEqual(len(history), 10)

    def test_history_current_status(self):
        self._add_down_event(datetime(2011, 4, 4, 12, 51))
        history, = self._history(1, date(2011, 4, 5))
        self.assertEqual(history["information"], True)
| 2.84375 | 3 |
pycozmo/protocol_utils.py | gimait/pycozmo | 1 | 12767756 | <filename>pycozmo/protocol_utils.py
import struct
# Cache of compiled struct.Struct objects, keyed by (format char, repeat count).
_struct_cache = dict()


def _get_struct(fmt, length):
    """Return a cached little-endian Struct for *length* items of *fmt*.

    Compiling struct formats is comparatively expensive, so each
    (fmt, length) pair is compiled once and reused.
    """
    key = (fmt, length)
    cached = _struct_cache.get(key)
    if cached is None:
        cached = struct.Struct("<{0}{1}".format(length, fmt))
        _struct_cache[key] = cached
    return cached
def validate_float(name, value):
    """Coerce *value* to float, raising ValueError on failure.

    :param name: field name used in the error message.
    :param value: value to coerce.
    :return: the value as a float.
    :raises ValueError: if the value cannot be converted.
    """
    try:
        value = float(value)
    except (TypeError, ValueError):
        # float(None) / float(object()) raise TypeError, not ValueError;
        # catch both so callers always see the documented ValueError.
        raise ValueError("{name} must be float. Got {type}.".format(name=name, type=type(value).__name__))
    return value
def validate_bool(name, value):
    """Coerce *value* to bool, raising ValueError on failure.

    :param name: field name used in the error message.
    :param value: value to coerce.
    :return: the value as a bool.
    :raises ValueError: if conversion fails.
    """
    try:
        value = bool(value)
    except (TypeError, ValueError):
        # bool() itself never raises ValueError; a failure can only come
        # from a custom __bool__/__len__, which typically raises TypeError.
        # Catch both so callers always see the documented ValueError.
        raise ValueError("{name} must be bool. Got {type}.".format(name=name, type=type(value).__name__))
    return value
def validate_integer(name, value, minimum, maximum):
    """Coerce *value* to int and check it lies in [minimum, maximum].

    :param name: field name used in the error messages.
    :param value: value to coerce.
    :param minimum: inclusive lower bound.
    :param maximum: inclusive upper bound.
    :return: the value as an int.
    :raises ValueError: if conversion fails or the value is out of range.
    """
    try:
        value = int(value)
    except (TypeError, ValueError):
        # int(None) raises TypeError, not ValueError; catch both so callers
        # always see the documented ValueError.
        raise ValueError("{name} must be an integer. Got a {type}.".format(name=name, type=type(value).__name__))
    if value < minimum or value > maximum:
        raise ValueError("{name} must be between {minimum} and {maximum}. Got {value}.".format(
            name=name, minimum=minimum, maximum=maximum, value=value))
    return value
def validate_object(name, value, expected_type):
    """Check that *value* is an instance of *expected_type* and return it.

    :raises ValueError: if the value has the wrong type (ValueError rather
        than TypeError, matching the other validators in this module).
    """
    if isinstance(value, expected_type):
        return value
    raise ValueError("{name} must be a {expected_type}. Got a {value_type}.".format(
        name=name, expected_type=expected_type.__name__, value_type=type(value).__name__))
def validate_farray(name, value, length, element_validation):
    """Validate a fixed-length sequence and each of its elements.

    :param name: field name used in the error messages.
    :param value: sequence to validate.
    :param length: exact length the sequence must have.
    :param element_validation: callable(name_tuple, element) applied to
        each element; its results form the returned list.
    :return: list of validated elements.
    :raises ValueError: if the value is not a sequence or has wrong length.
    """
    try:
        value = tuple(value)
    except (TypeError, ValueError):
        # tuple(non_iterable) raises TypeError, not ValueError; catch both
        # so callers always see the documented ValueError.
        raise ValueError("{name} must be a sequence. Got a {type}.".format(name=name, type=type(value).__name__))
    if len(value) != length:
        raise ValueError(("{name} must be a sequence of length {expected_length}. "
                          "Got a sequence of length {value_length}.").format(
                         name=name, expected_length=length, value_length=len(value)))
    return [element_validation((name, i), element) for i, element in enumerate(value)]
def validate_varray(name, value, maximum_length, element_validation):
    """Validate a bounded-length sequence and each of its elements.

    :param name: field name used in the error messages.
    :param value: sequence to validate.
    :param maximum_length: inclusive upper bound on the sequence length.
    :param element_validation: callable(name_tuple, element) applied to
        each element; its results form the returned list.
    :return: list of validated elements.
    :raises ValueError: if the value is not a sequence or is too long.
    """
    try:
        value = tuple(value)
    except (TypeError, ValueError):
        # tuple(non_iterable) raises TypeError, not ValueError; catch both
        # so callers always see the documented ValueError.
        raise ValueError("{name} must be a sequence. Got a {type}.".format(name=name, type=type(value).__name__))
    if len(value) > maximum_length:
        raise ValueError(("{name} must be a sequence with length less than or equal to {maximum_length}. "
                          "Got a sequence of length {value_length}.").format(
                         name=name, maximum_length=maximum_length, value_length=len(value)))
    return [element_validation((name, i), element) for i, element in enumerate(value)]
def validate_string(name, value, maximum_length):
    """Check that *value* is at most *maximum_length* characters long.

    :return: the (unchanged) string.
    :raises ValueError: if the string is too long.
    """
    if len(value) <= maximum_length:
        return value
    raise ValueError(("{name} must be a string with less than or equal to {maximum_length}. "
                      "Got a string of length {value_length}.").format(
                     name=name, maximum_length=maximum_length, value_length=len(value)))
def get_size(fmt):
    """ Figures out the size of a value with the given format.

    :param fmt: struct format character (e.g. 'B', 'H', 'f').
    :return: size in bytes of one value of that format.
    """
    return _get_struct(fmt, 1).size

def get_farray_size(fmt, length):
    """ Figures out the size of a fixed array with given format.

    :return: size in bytes of *length* consecutive values of *fmt*.
    """
    return _get_struct(fmt, length).size

def get_varray_size(value, length_format, data_format):
    """ Figures out the size of a variable-length array with given format.

    The total is the length prefix plus the packed elements of *value*.
    """
    return _get_struct(length_format, 1).size + _get_struct(data_format, len(value)).size

def get_string_size(value, length_format):
    """ Figures out the size of a string with given length format.

    The string is measured after UTF-8 encoding, matching how
    BinaryWriter.write_string serializes it.
    """
    buf = value.encode('utf_8')
    return _get_struct(length_format, 1).size + _get_struct('s', len(buf)).size
def get_object_size(value):
    """ Figures out the size of a given object (its len(), in bytes). """
    return len(value)

def get_object_farray_size(value, length):
    """ Figures out the size of a given fixed-length object sequence.

    :raises ValueError: if the sequence does not have exactly *length* items.
    """
    if len(value) != length:
        raise ValueError("The given fixed-length sequence has the wrong length.")
    return sum(get_object_size(item) for item in value) if value else 0
class BinaryReader(object):
    """ Used to read in a stream of binary data, keeping track of the current position.

    All multi-byte values are decoded little-endian (see _get_struct).

    Bug fix: seek_set/seek_cur previously constructed a ValueError without
    raising it, so out-of-range offsets were silently accepted (seek_set
    even applied them). Both now raise.
    """

    def __init__(self, buffer: bytes, offset: int = 0):
        self._buffer = buffer
        self._index = offset

    @property
    def buffer(self):
        """ The underlying byte buffer. """
        return self._buffer

    def __len__(self):
        return len(self._buffer)

    def seek_set(self, offset: int):
        """ Moves the current position to the given absolute offset.

        :raises ValueError: if the offset falls outside the buffer.
        """
        if offset < 0 or offset > len(self._buffer):
            raise ValueError("Invalid offset.")
        self._index = offset

    def seek_cur(self, offset: int):
        """ Moves the current position by the given relative offset.

        :raises ValueError: if the resulting position falls outside the buffer.
        """
        offset += self._index
        if offset < 0 or offset > len(self._buffer):
            raise ValueError("Invalid offset.")
        self._index = offset

    def tell(self):
        """ Returns the current stream position as an offset within the buffer. """
        return self._index

    def read(self, fmt):
        """ Reads in a single value of the given format. """
        return self.read_farray(fmt, 1)[0]

    def read_farray(self, fmt, length):
        """ Reads in a fixed-length array of the given format and length.

        :raises IndexError: if the buffer holds fewer bytes than required.
        """
        reader = _get_struct(fmt, length)
        if self._index + reader.size > len(self._buffer):
            raise IndexError('Buffer not large enough to read serialized message. Received {0} bytes.'.format(
                len(self._buffer)))
        result = reader.unpack_from(self._buffer, self._index)
        self._index += reader.size
        return result

    def read_varray(self, data_format, length_format):
        """ Reads in a variable-length array with the given length format and data format. """
        length = self.read(length_format)
        return self.read_farray(data_format, length)

    def read_string(self, length_format):
        """ Reads in a variable-length string with the given length format. """
        length = self.read(length_format)
        bs = self.read_farray('s', length)[0]
        return bs.decode('utf_8')

    def read_string_farray(self, string_length_format, array_length):
        """ Reads in a fixed-length array of variable-length strings with the given length format. """
        return [self.read_string(string_length_format) for _ in range(array_length)]

    def read_string_varray(self, string_length_format, array_length_format):
        """ Reads in a variable-length array of variable-length strings with the given length format. """
        array_length = self.read(array_length_format)
        return [self.read_string(string_length_format) for _ in range(array_length)]

    def read_object(self, from__reader_method):
        """ Reads in an object according to the given method. """
        return from__reader_method(self)

    def read_object_farray(self, from__reader_method, length):
        """ Reads in a fixed-length object sequence according to the given method. """
        return [from__reader_method(self) for _ in range(length)]

    def read_object_varray(self, from__reader_method, length_format):
        """ Reads in a variable-length object sequence according to the given method. """
        length = self.read(length_format)
        return [from__reader_method(self) for _ in range(length)]
class BinaryWriter(object):
    """ Used to write out a stream of binary data.

    Chunks are accumulated in a list and joined on dumps(), which keeps
    repeated writes linear instead of quadratic.
    """

    def __init__(self):
        self._buffer = []

    def clear(self):
        """ Discards everything written so far. """
        del self._buffer[:]

    def dumps(self) -> bytes:
        """ Returns everything written so far as one bytes object. """
        return b"".join(self._buffer)

    def write_bytes(self, value: bytes):
        """ Writes out a byte sequence. """
        self._buffer.append(value)

    def write(self, value, fmt):
        """ Writes out a single value of the given format. """
        self.write_farray((value,), fmt, 1)

    def write_farray(self, value, fmt, length):
        """ Writes out a fixed-length array of the given format and length. """
        self._buffer.append(_get_struct(fmt, length).pack(*value))

    def write_varray(self, value, data_format, length_format):
        """ Writes out a variable-length array with the given length format and data format. """
        count = len(value)
        self.write(count, length_format)
        self.write_farray(value, data_format, count)

    def write_string(self, value, length_format):
        """ Writes out a variable-length string with the given length format. """
        encoded = value.encode('utf_8')
        self.write(len(encoded), length_format)
        self.write_farray((encoded,), 's', len(encoded))

    def write_string_farray(self, value, string_length_format, array_length):
        """ Writes out a fixed-length array of variable-length strings with the given length format. """
        if len(value) != array_length:
            raise ValueError('The given fixed-length sequence has the wrong length.')
        for item in value:
            self.write_string(item, string_length_format)

    def write_string_varray(self, value, string_length_format, array_length_format):
        """ Writes out a variable-length array of variable-length strings with the given length format. """
        self.write(len(value), array_length_format)
        for item in value:
            self.write_string(item, string_length_format)

    def write_object(self, value):
        """ Writes out an object that supports a to_writer() method. """
        value.to_writer(self)

    def write_object_farray(self, value, length):
        """ Writes out a fixed-length object sequence that supports a to_writer() method. """
        if len(value) != length:
            raise ValueError('The given fixed-length sequence has the wrong length.')
        for item in value:
            item.to_writer(self)

    def write_object_varray(self, value, length_format):
        """ Writes out a variable-length object sequence that supports a to_writer() method. """
        self.write(len(value), length_format)
        for item in value:
            item.to_writer(self)
| 2.625 | 3 |
modelzoo/apps.py | SuperElastix/ElastixModelZooWebsite | 1 | 12767757 | <filename>modelzoo/apps.py
from django.apps import AppConfig
class ModelzooConfig(AppConfig):
    """Django application configuration for the ``modelzoo`` app."""
    name = 'modelzoo'
| 1.328125 | 1 |
TimeSeriesTools/TS_statistics/ts_statistics.py | Psicowired87/TimeSeriesTools | 1 | 12767758 | <gh_stars>1-10
"""
This module contains the functions which computes interesting statistics. They
can be applied to continious time-series for obtaining important statistics for
the study of causality or prediction.
TODO
----
Pass to the individuals a list of values.
values_info or samevals for all.
"""
from itertools import product
import numpy as np
from TimeSeriesTools.Transformation.value_discretization import \
threshold_binning_builder, discretize_with_thresholds
def build_ngram_from_arrays(post, pres, L):
    """Build an n-gram representation from post/pre arrays.

    Not implemented: the original body returned an undefined name ``Y`` and
    would always fail with NameError if called.  Raising
    NotImplementedError makes the stub explicit for callers.
    """
    raise NotImplementedError("build_ngram_from_arrays is not implemented.")
def prob_ngram_x(X, L, bins=None, samevals=True, normalize=True):
    """Function to compute the joints probability of a process and
    the states of the times before.

    Parameters
    ----------
    X: array_like, shape (N, M)
        the dynamics.
    L: int
        previous times to watch.
    bins:
        information of how to discretize.
    samevals: boolean, list or array
        all the signals of the system have the same states available.
    normalize: boolean
        to normalize the counting in order to get probabilities.

    Returns
    -------
    probs: array_like
        the joint probabilities. Probabilities of each combinations of states.
    discretizor: array_like
        the discretizor array.
    values: array_like
        the values of the probs considered.

    TODO
    ----
    Correct values.

    """
    ## 0. Formating inputs and preparing needed variables
    n = X.shape[1]
    ## 1. Discretization
    discretizor = threshold_binning_builder(X, bins)
    X = discretize_with_thresholds(X, discretizor)
    # needed variables
    # Normalize `samevals` into a per-signal list of admissible state values.
    if type(samevals) == bool:
        if samevals:
            samevals = [np.unique(X) for i in range(n)]
        else:
            samevals = [np.unique(X[:, i]) for i in range(n)]
    else:
        if type(samevals) == np.ndarray:
            samevals = [samevals for i in range(n)]
        elif type(samevals) == list:
            pass
    ## 2. Compute probability
    # Each signal is paired with itself; auto=False means only its own
    # lagged states enter the n-gram (see prob_ngram_ind).
    probs = [[] for i in range(n)]
    for i in range(n):
        probs[i], _ = prob_ngram_ind(X[:, i], X[:, i], L, False,
                                     samevals[i], normalize)
    # Format output
    probs = np.array(probs)
    values = samevals
    return probs, discretizor, values
def prob_ngram_xy(X, L, bins=None, auto=True, samevals=True, normalize=True):
    """Function to compute the joints probability of a process and
    the states of the times before between each one of the pairs of variables
    possible.

    Parameters
    ----------
    X: array_like, shape (N, M)
        the dynamics.
    L: int
        previous times to watch.
    bins:
        information of how to discretize.
    auto: boolean
        if we compute the auto influence.
    samevals: boolean or list or array
        all the signals of the system have the same states available.
    normalize: boolean
        to normalize the counting in order to get probabilities.

    Returns
    -------
    probs: array_like
        the joint probabilities. Probabilities of each combinations of states.
    discretizor: array_like
        the discretizor array.
    values: array_like
        the values of the probs considered.

    TODO
    ----
    Correct values.

    """
    ## 0. Formating inputs and preparing needed variables
    n = X.shape[1]
    # All ordered (i, j) pairs, including i == j.
    pairs = product(range(n), range(n))
    ## 1. Discretization
    discretizor = threshold_binning_builder(X, bins)
    X = discretize_with_thresholds(X, discretizor)
    # needed variables
    # Normalize `samevals` into a per-signal list of admissible state values.
    if type(samevals) == bool:
        if samevals:
            samevals = [np.unique(X) for i in range(n)]
        else:
            samevals = [np.unique(X[:, i]) for i in range(n)]
    else:
        if type(samevals) == np.ndarray:
            samevals = [samevals for i in range(n)]
        elif type(samevals) == list:
            pass
    ## 2. Compute probability
    probs = [[[] for j in range(n)] for i in range(n)]
    for p in pairs:
        p0, p1 = p[0], p[1]
        probs[p0][p1], _ = prob_ngram_ind(X[:, p0], X[:, p1], L, auto,
                                          [samevals[p0], samevals[p1]],
                                          normalize)
    # Format output
    probs = np.array(probs)
    values = samevals
    return probs, discretizor, values
def prob_ngram_ind(x, y, L, auto=True, samevals=True, normalize=True):
    """Function to compute the joints probability of a process and
    the states of the times before. We assume they are discretized.

    Parameters
    ----------
    X: array_like, shape (N, M)
        the dynamics.
    L: int
        previous times to watch.
    auto: boolean
        if we compute the auto influence.
    samevals: boolean or array or list
        all the signals of the system have the same states available. If array
        we pass the actual values.
    normalize: boolean
        to normalize the counting in order to get probabilities.

    Returns
    -------
    probs: array_like
        the joint probabilities. Probabilities of each combinations of states.
    values: array_like
        the values of the probs considered.

    """
    ## 0. Compute needed variables
    assert x.shape[0] == y.shape[0]
    nt = x.shape[0]
    # Resolve the admissible state values for x and y from `samevals`.
    if type(samevals) == bool:
        if samevals:
            xvalues = np.unique(np.hstack([x, y]))
            yvalues = xvalues
            values = xvalues
        else:
            xvalues = np.unique(x)
            yvalues = np.unique(y)
            values = [xvalues, yvalues]
    else:
        if type(samevals) == np.ndarray:
            xvalues = samevals
            yvalues = samevals
            values = [xvalues, yvalues]
        elif type(samevals) == list:
            values = samevals
            xvalues = np.array(values[0])
            yvalues = np.array(values[1])
    ## 1. Formatting arrays as index
    # Replace each state value with its index; entries matching no value
    # stay inf, which would break the counting below.
    # NOTE(review): aux is a float array, so the indices fed into `probs`
    # later are floats -- this relies on legacy NumPy accepting float
    # indices; newer NumPy raises IndexError.  Confirm the pinned NumPy
    # version before reusing this code.
    aux = np.ones(x.shape)*np.inf
    for i in range(xvalues.shape[0]):
        aux[x == xvalues[i]] = i
    x = aux[:]
    aux = np.ones(y.shape)*np.inf
    for i in range(yvalues.shape[0]):
        aux[y == yvalues[i]] = i
    y = aux[:]
    ## 2. Building lags matrices
    # xt is the "present" of x; xv/yv stack the L lagged states column-wise.
    xt = x[:nt-L].reshape((nt-L, 1))
    if auto:
        # Computing dependant times
        xv = np.vstack([x[l:nt-L+l] for l in range(1, L+1)]).T
        yv = np.vstack([y[l:nt-L+l] for l in range(1, L+1)]).T
        # Aggregating
        Xv = np.hstack([xt, xv, yv])
    else:
        # Compute dependant
        yv = np.vstack([y[l:nt-L+l] for l in range(1, L+1)]).T
        # Aggregating
        Xv = np.hstack([xt, yv])
    ## 3. Counting statistics
    # The joint tensor has one axis for the present of x plus L axes per
    # lagged signal included.
    dim = [xvalues.shape[0]]
    if auto:
        dim = dim + L*[xvalues.shape[0]]
    dim = dim + L*[yvalues.shape[0]]
    dim = tuple(dim)
    probs = np.zeros(dim)
    for i in range(Xv.shape[0]):
        probs[tuple(Xv[i, :])] = probs[tuple(Xv[i, :])] + 1
    if normalize:
        probs /= np.sum(probs)
    return probs, values
def prob_xy(X, bins=0, maxl=0, samevals=True):
    """Wrapper for prob_xy_ind. It computes the probability for all a matrix of
    dynamics.

    Parameters
    ----------
    X: array_like, shape (N, Melements)
        the signals of the system.
    bins: int
        number of bins to study the statistics.
    maxl: int
        the max lag times to compute.
    samevals: boolean or array or list
        all the signals of the system have the same states available. If array
        we pass the actual values.

    Returns
    -------
    probs: array_like, shape (Nelements, Nelements, maxl, n_bins, n_bins)
        the probability for each pair of elements, timelag and range of values.
    discretizor: array_like
        the discretizor array.
    values: array_like
        the values of the probs considered.

    """
    ## 0. Formating inputs and preparing needed variables
    n = X.shape[1]
    # All ordered (i, j) pairs, including i == j.
    pairs = product(range(n), range(n))
    ## 1. Discretization
    discretizor = threshold_binning_builder(X, bins)
    X = discretize_with_thresholds(X, discretizor)
    # needed variables
    # Normalize `samevals` into a per-signal list of admissible state values.
    if type(samevals) == bool:
        if samevals:
            samevals = [np.unique(X) for i in range(n)]
        else:
            samevals = [np.unique(X[:, i]) for i in range(n)]
    else:
        if type(samevals) == np.ndarray:
            samevals = [samevals for i in range(n)]
        elif type(samevals) == list:
            pass
    ## 2. Compute probability
    # One (n_bins, n_bins) joint table per pair and per time lag 0..maxl.
    probs = [[[[] for k in range(maxl+1)] for j in range(n)] for i in range(n)]
    for p in pairs:
        for tlag in range(maxl+1):
            p0, p1 = p[0], p[1]
            # probs[p0, p1, tlag], _, _ = prob_xy_ind(X[:, p0], X[:, p1],
            #                                         samevals, 0, tlag)
            probs[p0][p1][tlag], _, _ = prob_xy_ind(X[:, p0], X[:, p1],
                                                    samevals, 0, tlag)
    # Format output
    probs = np.array(probs)
    values = samevals
    return probs, discretizor, values
def prob_xy_ind(x, y, samevals=True, bins=0, timelag=0, normalize=True):
    """Probability of the signals to have some specific range of values.

    Parameters
    ----------
    x: array_like, shape (Nt,)
        the signal of one element.
    y: array_like, shape (Nt,)
        the signal of one element.
    bins: int, array_like, tuple
        binning information.
    timelag: int
        the time lag considered between the first time serie with the second.

    Returns
    -------
    probs: array_like, shape (n_bins, n_bins)
        the probability of being in each possible combinations of regimes.
    bins_edges: array_like, shape (n_bins+1,)
        the edges of the bins.

    TODO
    ----
    When there is actually discretized

    """
    ## 1. Preparing discretizor
    # NOTE(review): values1/values2 are computed here but never used.
    values1 = np.unique(x)
    values2 = np.unique(y)
    # Descriptions of situations
    # situation1: no binning requested; situation2: per-signal bin specs.
    situation1 = bins is None or bins == 0
    situation2 = type(bins) in [list, tuple, np.ndarray]
    situation2 = situation2 and np.array(bins).shape[0] == 2
    # Building discretizor
    if situation1:
        bins = [None, None]
    elif situation2:
        bins = bins
    else:
        bins = [bins, bins]
    discretizor = []
    discretizor.append(threshold_binning_builder(x, bins[0]))
    discretizor.append(threshold_binning_builder(y, bins[1]))
    ## 2. Discretization
    # Discretization
    x = discretize_with_thresholds(x, discretizor[0])
    y = discretize_with_thresholds(y, discretizor[1])
    # Needed vars (from discretized arrays)
    # Resolve the admissible state values for x and y from `samevals`.
    if type(samevals) == bool:
        if samevals:
            xvalues = np.unique(np.hstack([x, y]))
            yvalues = xvalues
            values = xvalues
        else:
            xvalues = np.unique(x)
            yvalues = np.unique(y)
            values = [xvalues, yvalues]
    else:
        if type(samevals) == np.ndarray:
            xvalues = samevals
            yvalues = samevals
            values = [xvalues, yvalues]
        elif type(samevals) == list:
            values = samevals
            xvalues = values[0]
            yvalues = values[1]
    ## 3. Preparing lag
    # Align the two series so x[t] is compared with y[t - timelag].
    if timelag > 0:
        x, y = x[timelag:], y[:y.shape[0]-timelag]
    elif timelag < 0:
        x, y = x[:x.shape[0]+timelag], y[-timelag:]
    ## 4. Computing probs
    X = np.vstack([x, y]).T
    probs = compute_joint_probs(X, values, normalize)
    return probs, discretizor, values
def prob_x(X, n_bins=0, individually=True, normalize=True):
    """Study the probability of the signal to be in some range of values.

    Parameters
    ----------
    X: array_like, shape (N, Melements)
        the time series.
    n_bins: int
        the number of bins selected.
    individually: boolean
        compute the probabilities for each variable or the global
        probabilities.
    normalize: boolean
        return the counts normalized as probabilities or just the counts.

    Returns
    -------
    probs: array_like, shape (n_bins, M) or shape (n_bins,)
        the density of each bins for all the signals globally or individually.
    discretizor: array_like, shape (N, Melements, n_bins+1)
        the discretizor matrix.
    values: array_like, shape (M,)
        the code of the values of each regime in the dynamics.

    TODO
    ----
    ...
    """
    ## 0. Needed variables
    # Flatten to one global signal if not computed per variable; then make
    # sure X is 2-D (columns = signals).
    X = X.reshape(-1) if not individually else X
    X = X.reshape((X.shape[0], 1)) if len(X.shape) == 1 else X
    ## 1. Ensure discretization
    # n_bins == 0 means the data is assumed already discrete.
    if np.all(n_bins == 0) or not np.any(n_bins):
        values = np.unique(X)
        n_bins = values.shape[0]
        discretizor = None
    else:
        discretizor = threshold_binning_builder(X, n_bins)
        X = discretize_with_thresholds(X, discretizor)
        values = np.unique(X)
    ## 2. Compute probabilities
    # Count occurrences of each state per column.
    probs = np.zeros((values.shape[0], X.shape[1]))
    for i in range(values.shape[0]):
        val = values[i]
        probs[i, :] = np.sum(X == val, axis=0)
    ## 3. Normalization
    if normalize:
        probs = np.divide(probs.astype(float), np.sum(probs, axis=0))
    return probs, discretizor, values
def compute_joint_probs(Y_t, values, normalize=True):
    """Function used to compute joint probability from a group of stochastic
    processes represented by Y_t, with different possible states given by
    values. We assume discretization.

    Parameters
    ----------
    Y_t: array_like, shape (n_t, n_vars)
        different discrete stochastic processes.
    values: list or numpy.ndarray
        the values of each stochastic variable can take.
    normalize: boolean
        if we return a normalized array.

    Returns
    -------
    probs: array_like
        the joint probability of being in each possible state of the product
        space.

    """
    ## 0. Format variables and build needed
    Y_t = Y_t if len(Y_t.shape) == 2 else Y_t.reshape((Y_t.shape[0], 1))
    n_vars = Y_t.shape[1]
    n_t = Y_t.shape[0]
    # Build values.  isinstance checks avoid the ambiguous `values == []`
    # comparison the original performed against numpy arrays.
    if isinstance(values, np.ndarray):
        values = [values for i in range(n_vars)]
    elif not len(values):
        values = [np.unique(Y_t[:, i]) for i in range(n_vars)]
    values = [np.asarray(vals) for vals in values]
    ## 1. Transform states to integer indexes
    # Bug fix: the original compared `Y_t[:, i] == j` (the loop index)
    # instead of `values[i][j]` (the state value), so any state set other
    # than {0, 1, ...} left inf placeholders and crashed the indexing below.
    # An integer dtype is also required for array indexing.
    indexes = np.zeros(Y_t.shape, dtype=int)
    for i in range(n_vars):
        for j in range(values[i].shape[0]):
            indexes[Y_t[:, i] == values[i][j], i] = j
    ## 2. Building probs matrix
    dim = tuple([vals.shape[0] for vals in values])
    probs = np.zeros(dim)
    for i in range(n_t):
        idxs = tuple(indexes[i, :])
        probs[idxs] = probs[idxs] + 1
    ## 3. Normalizing
    if normalize:
        probs /= np.sum(probs)
    return probs
| 2.703125 | 3 |
ocrd_butler/celery_utils.py | stweil/ocrd_butler | 6 | 12767759 | <filename>ocrd_butler/celery_utils.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Utils for celery."""
def init_celery(celery, app):
    """Bind a Celery instance to a web application.

    Copies the app configuration into Celery, forces JSON serialization,
    and wraps task execution so that every task body runs inside the app
    context (``app`` presumably is a Flask application -- it exposes
    ``app_context``; confirm with the caller).
    """
    celery.conf.update(app.config)
    celery.conf.update(
        task_serializer='json',
        accept_content=['json'],  # Ignore other content
        result_serializer='json',
        timezone='Europe/Berlin',
        enable_utc=True
    )
    # Keep a reference to the original Task class before replacing it.
    TaskBase = celery.Task
    class ContextTask(TaskBase):
        # Run each task inside the application context so app-bound
        # resources (config, extensions) are available in task bodies.
        abstract = True
        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)
    celery.Task = ContextTask
| 2.109375 | 2 |
gender_recognition/apps.py | JasonZhangHub/django_gender_app | 0 | 12767760 | <gh_stars>0
from django.apps import AppConfig
class GenderRecognitionConfig(AppConfig):
    """Django application configuration for the ``gender_recognition`` app."""
    name = 'gender_recognition'
| 1.320313 | 1 |
src/mysite/urls.py | kierrez/movie-website | 1 | 12767761 | <reponame>kierrez/movie-website
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
from django.views.generic import TemplateView
# Project-level route table: admin, app includes and robots.txt.
# NOTE(review): both titles.urls and shared.urls are mounted at the bare
# '^' prefix -- confirm shared routes are still reachable behind titles.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include('titles.urls')),
    url(r'^api/', include('api.urls')),
    url(r'^accounts/', include('accounts.urls')),
    url(r'^importer/', include('importer.urls')),
    url(r'^', include('shared.urls')),
    url(r'^robots.txt', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),
]
if settings.DEBUG:
    # Development only: serve static/media files and mount the debug toolbar.
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    # Imported here so the dependency is only required when DEBUG is on.
    import debug_toolbar
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ]
| 2.109375 | 2 |
appengine/cr-buildbucket/main.py | allaparthi/monorail | 2 | 12767762 | <reponame>allaparthi/monorail<gh_stars>1-10
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from components import endpoints_webapp2
from components import ereporter2
from components import utils
import gae_ts_mon
import webapp2
import handlers
import metrics
import swarming
def create_frontend_app():  # pragma: no cover
    """Returns WSGI app for frontend.

    Wires the frontend routes into a webapp2 application (debug mode on
    the local dev server) and attaches ts_mon metrics reporting.
    """
    app = webapp2.WSGIApplication(
        handlers.get_frontend_routes(), debug=utils.is_local_dev_server()
    )
    gae_ts_mon.initialize(app)
    return app
def create_backend_app():  # pragma: no cover
    """Returns WSGI app for backend.

    Combines handler and swarming backend routes, then registers the
    global buildbucket metrics; the 'backend' cron module flushes them.
    """
    routes = handlers.get_backend_routes() + swarming.get_backend_routes()
    app = webapp2.WSGIApplication(routes, debug=utils.is_local_dev_server())
    gae_ts_mon.initialize(app, cron_module='backend')
    gae_ts_mon.register_global_metrics(metrics.GLOBAL_METRICS)
    gae_ts_mon.register_global_metrics_callback(
        'buildbucket_global', metrics.update_global_metrics
    )
    return app
def initialize():  # pragma: no cover
    """Bootstraps the global state and creates WSGI applications.

    Returns a (frontend_app, backend_app) tuple consumed by app.yaml
    entry points; also installs the ereporter2 exception formatter.
    """
    ereporter2.register_formatter()
    return create_frontend_app(), create_backend_app()
| 1.96875 | 2 |
feed/urls.py | kubapi/hater | 0 | 12767763 | <reponame>kubapi/hater
from django.urls import path
from . import views
# Namespace used for reversing URLs, e.g. reverse('feed:index').
app_name = 'feed'
# Route table for the feed app; Polish slugs map onto English view names.
urlpatterns = [
    path('', views.index, name='index'),
    path('kontakt/', views.contact, name='contact'),
    path('o-projekcie/', views.about, name='about'),
    path('ranking/', views.ranking, name='ranking'),
    path('architekt/', views.architect, name='architect'),
    path("rejestracja/", views.register_view, name="register"),
    path("logowanie/", views.login_view, name="login"),
    path("wylogowywanie/", views.logout_view, name="logout"),
    path("akceptuj/", views.accept_choice, name="accept_choice"),
    path("odrzuc/", views.reject_choice, name="reject_choice"),
] | 1.828125 | 2 |
solutions/811.py | tdakhran/leetcode | 0 | 12767764 | from collections import defaultdict
from typing import List
class Solution:
    def subdomainVisits(self, cpdomains: List[str]) -> List[str]:
        """Count visits for every domain and each of its parent domains.

        Each input entry is "<count> <domain>"; the count is credited to
        the full domain and to every suffix (e.g. "leetcode.com", "com").
        Returns entries formatted as "<total> <domain>".
        """
        tally = defaultdict(int)
        for entry in cpdomains:
            count_text, _, full_domain = entry.partition(' ')
            hits = int(count_text)
            labels = full_domain.split('.')
            # Credit the full domain first, then each shorter suffix.
            for start in range(len(labels)):
                tally['.'.join(labels[start:])] += hits
        return ['{} {}'.format(total, domain) for domain, total in tally.items()]
| 3.234375 | 3 |
screenshotter.py | rand0musername/bigmapmaker | 3 | 12767765 | import math
from io import BytesIO
import os
from time import sleep
from PIL import Image
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from geo_utils import get_meters_per_px, get_distance, get_latlng_inc_for_px_inc
class Screenshotter:
    """Grabs a grid of Google Maps screenshot tiles covering a lat/lng box.

    Drives a headless Chrome via selenium; tiles are saved to `out` as
    tile_(row,col).png, with rows walking latitude downward and columns
    walking longitude upward.
    """

    def __init__(self, start, end, zoom, out, add_transit, tile_size_px):
        """start/end are (lat, lng) corners; `out` is the output directory."""
        self.tile_size_px = tile_size_px
        self.start_lat, self.start_lng = start
        self.end_lat, self.end_lng = end
        self.zoom = zoom
        self.out = out
        self.add_transit = add_transit
        # Creates the driver and sets viewport size.
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        self.driver = webdriver.Chrome(chrome_options=chrome_options)
        # Compensate for window chrome so the *inner* viewport is exactly
        # tile_size_px wide; the extra 200px of height is cropped later
        # (the top 100px hold Google Maps UI).
        window_size = self.driver.execute_script("""
            return [window.outerWidth - window.innerWidth + arguments[0],
              window.outerHeight - window.innerHeight + arguments[1]];
            """, self.tile_size_px, self.tile_size_px+200)
        self.driver.set_window_size(*window_size)

    # Builds the maps url for given params.
    def build_url(self, lat, lng, zoom, add_transit):
        url = 'https://www.google.com/maps/@{},{},{}z'.format(lat, lng, zoom)
        if add_transit:
            # Undocumented data blob that toggles the transit layer —
            # NOTE(review): relies on Google Maps URL internals; may break.
            url += '/data=!5m1!1e2'
        url += '?hl=en'
        return url

    # Builds a tile filename for given params.
    def build_filename(self, row, col):
        filename = 'tile_({:03d},{:03d}).png'.format(row, col)
        filename = os.path.join(self.out, filename)
        return filename

    # Generates (lat, lng) pairs that correspond to tile centres. We are doing this as a
    # separate step to know how many tiles there are before actually saving them.
    def generate_pairs(self):
        # (Y = row = lat decreasing, X = col = lng increasing)
        # TODO: Translate start and end so that they are exactly in the corners of the image.
        # Returned as a column-major list: pairs[col][row] -> (lat, lng).
        # NOTE(review): lng_inc is computed once at start_lat and reused for
        # every column, so column spacing is only exact near the start
        # latitude — confirm acceptable for the covered area.
        pairs = []
        curr_lng = self.start_lng
        lng_inc = get_latlng_inc_for_px_inc(self.start_lat, self.zoom, self.tile_size_px)[1]
        while True:
            # Initialize a new column.
            curr_col = []
            pairs.append(curr_col)
            curr_lat = self.start_lat
            while True:
                # Save the current (lat, lng) pair.
                curr_col.append((curr_lat, curr_lng))
                # Check if the next row is out of bounds.
                if curr_lat <= self.end_lat:
                    break
                # Go to the next row.
                lat_inc = get_latlng_inc_for_px_inc(curr_lat, self.zoom, self.tile_size_px)[0]
                curr_lat -= lat_inc
            # Check if the next column is out of bounds.
            if curr_lng >= self.end_lng:
                break
            # Go to the next column.
            curr_lng += lng_inc
        return pairs

    # Main screenshotter method that saves all tiles specified by input parameters.
    def fetch_tiles(self):
        """Fetch and save every tile; already-saved tiles are skipped, so the
        run is resumable after interruption."""
        print('[screenshotter] Starting the screenshotting process.')
        # Create the output directory if it doesn't exist.
        if not os.path.exists(self.out):
            os.makedirs(self.out)
        # Generate all (lat, lng) pairs.
        pairs = self.generate_pairs()
        nb_cols, nb_rows = len(pairs), len(pairs[0])
        nb_tiles = nb_cols * nb_rows
        print('[screenshotter] Done generating pairs. There will be {} tiles in total ({} x {}).'
              .format(nb_tiles, nb_rows, nb_cols))
        tiles_fetched = 0
        for col in range(nb_cols):
            for row in range(nb_rows):
                # Skip fetching if the tile is already present in the directory.
                filename = self.build_filename(row, col)
                if os.path.exists(filename):
                    print("[screenshotter] Tile {}/{}: ({},{}) already exists in the output dir, skipping."
                          .format(tiles_fetched+1, nb_tiles, row, col), end='\r')
                else:
                    # Fetch the tile, crop UI, and save.
                    latlng = pairs[col][row]
                    url = self.build_url(latlng[0], latlng[1], self.zoom, self.add_transit)
                    print("[screenshotter] Fetching tile {}/{}: ({},{}) from url {}"
                          .format(tiles_fetched+1, nb_tiles, row, col, url), end='\r')
                    self.driver.get(url)
                    png = self.driver.get_screenshot_as_png()
                    img = Image.open(BytesIO(png))
                    # Drop the 100px Maps UI strip at the top of the viewport.
                    img = img.crop((0, 100, self.tile_size_px, self.tile_size_px + 100))
                    img.save(filename)
                    # Small delay to avoid hammering Google Maps.
                    sleep(0.1)
                tiles_fetched += 1
        print("\n[screenshotter] Done fetching tiles.")
| 2.671875 | 3 |
src/utils/lookups.py | PieInTheSky-Inc/pss-statistics | 12 | 12767766 | from typing import Dict as _Dict
# ---------- Constants ----------
# Suffixes used when abbreviating large numbers: the key is how many
# groups of three digits were stripped (0 -> none, 1 -> thousands 'k',
# 2 -> millions 'm', 3 -> billions 'g').
REDUCE_TOKENS_LOOKUP: _Dict[int, str] = {
    0: '',
    1: 'k',
    2: 'm',
    3: 'g'
} | 2.8125 | 3 |
PrimeTheory/sieve_of_eratosthenes.py | kbrezinski/Candidacy-Prep | 0 | 12767767 | <filename>PrimeTheory/sieve_of_eratosthenes.py
from typing import List
from math import sqrt
def primes_less_than(n: int) -> List[int]:
    """Return all primes strictly below ``n`` via the sieve of Eratosthenes.

    Fix: the original sieved with ``range(2, int(sqrt(n)))``, which
    excludes the square root itself — e.g. primes_less_than(122) wrongly
    reported 121 (= 11 * 11) as prime. The bound must be inclusive.
    """
    if n <= 2:
        return []
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    # Sieve with every prime i up to and including floor(sqrt(n)).
    for i in range(2, int(sqrt(n)) + 1):
        # check if it hasn't been flagged yet
        if is_prime[i]:
            # Composites below i*i were already crossed out by smaller primes.
            for x in range(i * i, n, i):
                is_prime[x] = False
    # return the primes that were flagged as True
    return [i for i in range(n) if is_prime[i]]
if __name__ == "__main__":
    # Quick benchmark: count primes below one billion and report wall time.
    import time
    tic = time.perf_counter()
    print(len(primes_less_than(10**9)))
    print(f"Done in {time.perf_counter() - tic:.2f} [s]")
| 3.84375 | 4 |
2015/13_seatings.py | pchudzik/adventofcode | 0 | 12767768 | import re
import itertools
"""
--- Day 13: Knights of the Dinner Table ---
In years past, the holiday feast with your family hasn't gone so well. Not everyone gets along! This year, you resolve,
will be different. You're going to find the optimal seating arrangement and avoid all those awkward conversations.
You start by writing up a list of everyone invited and the amount their happiness would increase or decrease if they
were to find themselves sitting next to each other person. You have a circular table that will be just big enough to fit
everyone comfortably, and so each person will have exactly two neighbors.
For example, suppose you have only four attendees planned, and you calculate their potential happiness as follows:
Alice would gain 54 happiness units by sitting next to Bob.
Alice would lose 79 happiness units by sitting next to Carol.
Alice would lose 2 happiness units by sitting next to David.
Bob would gain 83 happiness units by sitting next to Alice.
Bob would lose 7 happiness units by sitting next to Carol.
Bob would lose 63 happiness units by sitting next to David.
Carol would lose 62 happiness units by sitting next to Alice.
Carol would gain 60 happiness units by sitting next to Bob.
Carol would gain 55 happiness units by sitting next to David.
David would gain 46 happiness units by sitting next to Alice.
David would lose 7 happiness units by sitting next to Bob.
David would gain 41 happiness units by sitting next to Carol.
Then, if you seat Alice next to David, Alice would lose 2 happiness units (because David talks so much), but David would
gain 46 happiness units (because Alice is such a good listener), for a total change of 44.
If you continue around the table, you could then seat Bob next to Alice (Bob gains 83, Alice gains 54). Finally, seat
Carol, who sits next to Bob (Carol gains 60, Bob loses 7) and David (Carol gains 55, David gains 41). The arrangement
looks like this:
+41 +46
+55 David -2
Carol Alice
+60 Bob +54
-7 +83
After trying every other seating arrangement in this hypothetical scenario, you find that this one is the most optimal,
with a total change in happiness of 330.
What is the total change in happiness for the optimal seating arrangement of the actual guest list?
Your puzzle answer was 618.
--- Part Two ---
In all the commotion, you realize that you forgot to seat yourself. At this point, you're pretty apathetic toward the
whole thing, and your happiness wouldn't really go up or down regardless of who you sit next to. You assume everyone
else would be just as ambivalent about sitting next to you, too.
So, add yourself to the list, and give all happiness relationships that involve you a score of 0.
What is the total change in happiness for the optimal seating arrangement that actually includes yourself?
Your puzzle answer was 601.
"""
def parse_seatings(attendees):
    """Parse puzzle input lines into a nested happiness lookup.

    Each line reads "<A> would gain/lose <N> happiness units by sitting
    next to <B>."; the result maps A -> {B: signed happiness delta}.
    """
    lookup = {}
    for line in attendees:
        person = line.split(" ")[0]
        neighbour = line.replace(".", "").split(" ")[-1].strip()
        amount = int(re.match(r".*?(\d+).*", line).group(1))
        if "gain" not in line:
            amount = -amount
        lookup.setdefault(person, {})[neighbour] = amount
    return lookup
def include_me(attendees, happines):
    """Return a new happiness table that also seats an apathetic "me".

    Every existing attendee gets a "me" entry worth `happines`, and "me"
    is indifferent (0) toward everyone else.

    Fix: the original used a shallow ``dict.copy()`` and then mutated the
    *shared* inner dicts, silently adding "me" entries to the caller's
    table as well. Inner dicts are now copied too, so the input is left
    untouched.
    """
    extended = {name: dict(prefs) for name, prefs in attendees.items()}
    for name in extended:
        extended[name]["me"] = happines
    extended["me"] = {name: 0 for name in extended}
    return extended
def count_happiness(attendees, order):
    """Total happiness for a circular seating: sum both directions of
    every adjacent pair, including the wrap-around last/first pair."""
    seats = list(order)
    return sum(
        attendees[a][b] + attendees[b][a]
        for a, b in zip(seats, seats[1:] + seats[:1])
    )
def find_seatings_with_happiness(attendees):
    """Lazily produce every (seating order, total happiness) pair over
    all permutations of the attendees."""
    return (
        (order, count_happiness(attendees, order))
        for order in itertools.permutations(attendees.keys())
    )
def happines_change(attendees):
    """Return the (order, happiness) pair with the highest total happiness."""
    best = None
    for candidate in find_seatings_with_happiness(attendees):
        # Strict '>' keeps the first of equally-happy arrangements,
        # matching max()'s tie-breaking.
        if best is None or candidate[1] > best[1]:
            best = candidate
    return best
if __name__ == "__main__":
    # Part 1: best seating for the puzzle input; part 2: same with "me" added
    # (everyone, including me, is indifferent toward the new neighbour).
    with open("13_seatings.txt") as file:
        attendees_list = parse_seatings(file.readlines())
    includeing_me = include_me(attendees_list, 0)
    print("Best order will be: ", happines_change(attendees_list))
    print("Best order with me included will be: ", happines_change(includeing_me))
| 4.03125 | 4 |
avatar_sgg/dataset/ade20k.py | rafiberlin/clp-sose21-pm-vision | 0 | 12767769 | from avatar_sgg.config.util import get_config
import collections
import pandas as pd
import string
import json
import random
import torch
import torch.utils.data as data
import os
import sng_parser
import numpy as np
def get_ade20k_caption_annotations(path_prefix=None):
    """
    Precondition: checkout the https://github.com/clp-research/image-description-sequences under the location
    of the ade20k_dir directory

    Joins captions.csv and sequences.csv (tab-separated) on image_id,
    concatenates the d1..d5 description sentences into "merged_sequences"
    (forcing sentence-final punctuation), and groups all captions per image.

    :param path_prefix: if given, image keys are `<path_prefix>/<image_path>`
        instead of `file://<ade20k_dir>/images/<image_path>`.
    :return: a dictionary containing the paths to the images as keys. Each image has a dictionary with a "caption" key
    (list of captions), a "category" key and a "merged_sequences" key.
    """
    conf = get_config()["ade20k"]
    ade20k_dir = conf["root_dir"]
    ade20k_caption_dir = conf["caption_dir"]
    captions_file = os.path.join(ade20k_caption_dir, "captions.csv")
    sequences_file = os.path.join(ade20k_caption_dir, "sequences.csv")
    captions_df = pd.read_csv(captions_file, sep="\t", header=0)
    sequences_df = pd.read_csv(sequences_file, sep="\t", header=0)
    # Ensure each description ends with punctuation before concatenation.
    sequences_df["d1"] = sequences_df["d1"].map(lambda a: a if a[-1] in string.punctuation else a + ". ")
    sequences_df["d2"] = sequences_df["d2"].map(lambda a: a if a[-1] in string.punctuation else a + ". ")
    sequences_df["d3"] = sequences_df["d3"].map(lambda a: a if a[-1] in string.punctuation else a + ". ")
    sequences_df["d4"] = sequences_df["d4"].map(lambda a: a if a[-1] in string.punctuation else a + ". ")
    sequences_df["d5"] = sequences_df["d5"].map(lambda a: a if a[-1] in string.punctuation else a + ". ")
    sequences_df["merged_sequences"] = sequences_df[["d1", "d2", "d3", "d4", "d5"]].agg(lambda x: ''.join(x.values),
                                                                                        axis=1).T
    sequences_fram = sequences_df[["image_id", "image_path", "image_cat", "merged_sequences"]]
    captions_df = pd.merge(captions_df, sequences_fram, how='inner', left_on=['image_id'], right_on=['image_id'])
    if path_prefix is None:
        print("Using Real Image Paths as Key.")
        captions_df["image_path"] = captions_df["image_path"].map(
            lambda a: os.path.join("file://", ade20k_dir, "images", a))
    else:
        captions_df["image_path"] = captions_df["image_path"].map(
            lambda a: os.path.join(path_prefix, a))
    # NOTE(review): drop() without inplace/assignment has no effect here.
    captions_df.drop(["Unnamed: 0"], axis=1)
    captions_list = [{"image_id": row["image_id"], "id": row["caption_id"], "caption": row["caption"],
                      "image_path": row["image_path"], "image_cat": row["image_cat"],
                      "merged_sequences": row["merged_sequences"]} for i, row in captions_df.iterrows()]
    # { id: list(captions_df[captions_df["image_id"] == id ]["caption"]) for id in ids }
    # Group all captions together having the same image ID.
    image_path_to_caption = collections.defaultdict(dict)
    for val in captions_list:
        caption = val['caption']
        category = val['image_cat']
        image_path = val["image_path"]
        merged_sequences = val["merged_sequences"]
        image_path_to_caption[image_path]["category"] = category
        image_path_to_caption[image_path]["merged_sequences"] = merged_sequences
        if "caption" not in image_path_to_caption[image_path].keys():
            image_path_to_caption[image_path]["caption"] = [caption]
        else:
            image_path_to_caption[image_path]["caption"].append(caption)
    return image_path_to_caption
def get_ade20k_split(test_proportion: int = 15, test_size: int = 10, path_prefix=None):
    """
    Returns train, dev and test split (seeded shuffle, so the split is
    reproducible across runs).
    Dev holds the first `test_size` shuffled images.
    TODO: probably better to use cross validation for the splits
    NOTE(review): `test_idx` is an absolute index into `keys` even though
    `size` excludes the dev slice, so the test split is
    keys[test_size:test_idx] — confirm the intended proportion.
    :param test_proportion: percentage (exclusive 0..100) used for the test split
    :return: (train, dev, test) dicts keyed by image path
    """
    assert test_proportion > 0 and test_proportion < 100
    captions = get_ade20k_caption_annotations(path_prefix)
    # Make the split consistent
    random.seed(1)
    keys = list(captions.keys())
    random.shuffle(keys)
    start_idx = test_size
    dev = {k: captions[k] for k in keys[:test_size]}
    size = len(keys[start_idx:])
    test_idx = int(test_proportion * size / 100)
    test = {k: captions[k] for k in keys[start_idx:test_idx]}
    train = {k: captions[k] for k in keys[test_idx:]}
    return train, dev, test
def get_categories(split):
    """Return {position: category} over a split's entries, or {} when the
    first entry carries no "category" key."""
    first = list(split)[0]
    if "category" not in split[first]:
        return {}
    return {pos: entry["category"] for pos, entry in enumerate(split.values())}
def group_entry_per_category(category):
    """Invert an {entry: category} mapping into {category: [entries]}.

    Fix: the original built the lookup but never returned it, so every
    call evaluated to None.
    """
    category_to_entry_lookup = collections.defaultdict(list)
    for entry, cat in category.items():
        category_to_entry_lookup[cat].append(entry)
    return category_to_entry_lookup
def generate_text_graph(self, captions):
    # NOTE(review): this looks like a dead leftover — it takes `self` at
    # module level, is immediately shadowed by the full
    # generate_text_graph() defined below, and the `else` branch asserts
    # on the still-None `raw_graphs`, so it can never succeed for
    # unexpected input types. Confirm and remove.
    raw_graphs = None
    if type(captions) is list:
        raw_graphs = [sng_parser.parse(cap) for cap in captions]
    elif type(captions) is str:
        raw_graphs = [sng_parser.parse(captions)]
    else:
        assert raw_graphs is not None
def output_split_list_with_new_prefix(split, old, new, file_path):
    """Rewrite every key of `split`, swapping prefix `old` for `new`,
    and dump the resulting list as JSON.

    :param split: dict whose keys are paths containing `old`
    :param old: old prefix
    :param new: new prefix
    :param file_path: where to write the file
    """
    cut = len(old)
    rewritten = [new + key[key.find(old) + cut:] for key in split]
    with open(file_path, 'w') as outfile:
        json.dump(rewritten, outfile)
    print("Saved", file_path)
def generate_text_graph(split, output_path, caption_number=None):
    """Parse captions into encoded scene graphs and cache them as JSON.

    If `output_path` already exists, the cached file is loaded instead.
    Entities/relations are filtered against the caption-graph vocabularies
    from the config's capgraphs file and encoded as 1-based ids; the
    entity-relation incidence matrix is stored under 'text_graph'.

    :param split: dict of entries with "caption" (list or str) and "category"
    :param caption_number: if given, use only that caption index per entry
    :return: {key: {'txt': ..., 'text_graph': ..., 'category': ...}}
    """
    if not os.path.isfile(output_path):
        text_graphs = {}
        conf = get_config()["scene_graph"]
        cap_graph_file = conf["capgraphs_file"]
        cap_graph = json.load(open(cap_graph_file))
        # NOTE(review): building the vocab from a set makes the id
        # assignment order-dependent across processes — confirm the cached
        # file is always produced and consumed by the same run/config.
        txt_rel_vocab = list(set(cap_graph['cap_predicate'].keys()))
        txt_rel2id = {key: i + 1 for i, key in enumerate(txt_rel_vocab)}
        txt_obj_vocab = list(set(cap_graph['cap_category'].keys()))
        txt_obj2id = {key: i + 1 for i, key in enumerate(txt_obj_vocab)}
        # generate union object vocabulary
        txt_obj_vocab = list(set(cap_graph['cap_category'].keys()))
        for k in split.keys():
            if caption_number is not None:
                captions = split[k]["caption"][caption_number]
            else:
                captions = split[k]["caption"]
            if type(captions) is list:
                raw_graphs = [sng_parser.parse(cap) for cap in captions]
            elif type(captions) is str:
                raw_graphs = [sng_parser.parse(captions)]
            else:
                assert raw_graphs is not None
            # Keep only in-vocabulary entities/relations; unknown entities
            # become the placeholder 'none'.
            cleaned_graphs = []
            for i, g in enumerate(raw_graphs):
                entities = g["entities"]
                relations = g["relations"]
                filtered_entities = [e["lemma_head"] if e["lemma_head"] in txt_obj_vocab else 'none' for e in
                                     entities]
                filtered_relations = [[r["subject"], r["object"], r["lemma_relation"]] for r in relations if
                                      r["lemma_relation"] in txt_rel_vocab]
                extracted_graph = {'entities': filtered_entities, 'relations': filtered_relations}
                cleaned_graphs.append(extracted_graph)
            # Encode names as ids; relations reference the encoded entity ids.
            encode_txt = {'entities': [], 'relations': []}
            for item in cleaned_graphs:
                entities = [txt_obj2id[e] for e in item['entities']]
                relations = [[entities[r[0]], entities[r[1]], txt_rel2id[r[2]]] for r in item['relations']]
                encode_txt['entities'] = encode_txt['entities'] + entities
                encode_txt['relations'] = encode_txt['relations'] + relations
            # Build the entity x relation incidence matrix (1 when the
            # entity id participates in the relation triple).
            entities = encode_txt['entities']
            relations = encode_txt['relations']
            if len(relations) == 0:
                txt_graph = np.zeros((len(entities), 1))
            else:
                txt_graph = np.zeros((len(entities), len(relations)))
            text_graph = []
            for i, es in enumerate(entities):
                for j, rs in enumerate(relations):
                    if es in rs:
                        txt_graph[i, j] = 1
                    else:
                        txt_graph[i, j] = 0
            text_graph.append(txt_graph.tolist())
            text_graphs[k] = {
                'txt': encode_txt,
                'text_graph': text_graph,
                'category': split[k]["category"]}  # needed later to perform the category based recall
        with open(output_path, 'w') as outfile:
            print("Saving Text Graphs under:", output_path)
            json.dump(text_graphs, outfile)
    else:
        print("Loading:", output_path)
        text_graphs = json.load(open(output_path))
    return text_graphs
def get_preprocessed_text_text_graphs_for_test():
    """
    This function returns the captions of the ADE20K test sets, as graph. They are not merged
    and are available as tuple in the "entry" key.

    Entries whose first or second caption graph has fewer than 2 entities
    or no relations are skipped entirely.
    :return: {key: {"entry": (graph_1, graph_2), "category": ...}}
    """
    conf = get_config()
    _, _, test = get_ade20k_split(path_prefix="images")
    # Caption 0 and caption 1 are parsed into two independent graph files.
    txt_graphs_1 = generate_text_graph(test, conf["scene_graph"]["ade20k_text_graph_1"], 0)
    txt_graphs_2 = generate_text_graph(test, conf["scene_graph"]["ade20k_text_graph_2"], 1)
    txt_keys = list(txt_graphs_1.keys())
    txt_graphs = {}
    for k in txt_keys:
        item = txt_graphs_1[k]
        item2 = txt_graphs_2[k]
        if len(item["txt"]['entities']) < 2 \
                or len(item2["txt"]["entities"]) < 2 \
                or len(item["txt"]['relations']) < 1 \
                or len(item2["txt"]['relations']) < 1:
            print("no relationship detected, skipping:", k)
            continue
        else:
            txt_graphs[k] = {"entry": (item, item2), "category": item["category"]}
    return txt_graphs
def get_preprocessed_image_text_graphs_for_test():
    """
    Returns a dictionary (key identifies an image), of dictionaries of this form:
     { 'img': encode_txt,
    'image_graph': text_graph,
    'txt': encode_txt,
    'text_graph': text_graph}

    Image graphs are loaded from the precomputed test file; text graphs
    are generated (or loaded) from the test split captions. Entries with
    fewer than 2 entities or no relations on either side are dropped
    from both dicts.
    """
    conf = get_config()
    _, _, test = get_ade20k_split(path_prefix="images")
    img_graphs = json.load(open(conf["scene_graph"]["ade20k_image_sg_test"]))
    txt_graphs = generate_text_graph(test, conf["scene_graph"]["ade20k_text_sg_test"])
    txt_keys = list(txt_graphs.keys())
    # Every precomputed image graph must have a matching text graph.
    for k in list(img_graphs.keys()):
        assert k in txt_keys
    for k in txt_keys:
        item = img_graphs[k]
        if len(item["img"]['entities']) < 2 \
                or len(txt_graphs[k]["txt"]['entities']) < 2 \
                or len(item["img"]['relations']) < 1 \
                or len(txt_graphs[k]["txt"]['relations']) < 1:
            print("no relationship detected, skipping:", k)
            del(img_graphs[k])
            del (txt_graphs[k])
            continue
        else:
            # Merge the text-graph fields into the image-graph entry.
            item.update(txt_graphs[k])
    return img_graphs
def get_preprocessed_image_graphs_for_map_world():
    """Load the preprocessed ADE20K image graphs used by the map-world setup.

    Fix: the original used ``json.load(open(...))``, leaving the file
    handle open until garbage collection; a ``with`` block closes it
    deterministically.
    """
    conf = get_config()
    path = conf["scene_graph"]["ade20k_map_world_preprocessed_img_graph"]
    with open(path) as handle:
        return json.load(handle)
if __name__ == "__main__":
    # Smoke test: build the splits, report their sizes, and run the
    # image/text graph preprocessing end to end.
    print("Start")
    conf = get_config()
    train, dev, test = get_ade20k_split(path_prefix="images")
    print(f"Train Split: {len(train)}")
    print(f"Dev Split: {len(dev)}")
    print(f"Test Split: {len(test)}")
    # output_split_list_with_new_prefix(test, "/media/rafi/Samsung_T5/_DATASETS/ADE20K/",
    #                                   "/data/ImageCorpora/ADE20K_2016_07_26/",
    #                                   get_config()["output_dir"] + "/ade20k_caption_test.json")
    graph = get_preprocessed_image_text_graphs_for_test()
    print("Done")
| 2.359375 | 2 |
taskbar.py | sczzr/visual-window | 0 | 12767770 | <gh_stars>0
import os
import sys
import win32con
import win32gui
def resource_path(relative_path):
    """Resolve an icon path both when frozen (PyInstaller sets
    sys._MEIPASS) and when running from source (./icon directory)."""
    if hasattr(sys, '_MEIPASS'):
        base = sys._MEIPASS
    else:
        base = os.path.abspath("./icon")
    return os.path.join(base, relative_path)
class SysTrayIcon(object):
    """Minimal Windows system-tray icon built on the win32gui API.

    Registers a hidden window class whose message map handles destroy
    and tray-notification messages, then shows/updates the tray icon
    via Shell_NotifyIcon.
    """
    QUIT = "QUIT"
    ID = 1234
    def __init__(self, icon):
        """`icon` is a filename resolved later through resource_path()."""
        self.hwnd = None
        self.notify_id = None
        self.icon = icon
        # Route window messages to our handlers; WM_USER+20 is the
        # callback message id passed to Shell_NotifyIcon below.
        message_map = {
            win32con.WM_DESTROY: self.destroy,
            win32con.WM_USER + 20: self.notify
        }
        wc = win32gui.WNDCLASS()
        wc.hInstance = win32gui.GetModuleHandle(None)
        wc.lpszClassName = "PythonTaskbar"
        wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
        wc.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
        wc.hbrBackground = win32con.COLOR_WINDOW
        wc.lpfnWndProc = message_map
        self.classAtom = win32gui.RegisterClass(wc)
    def active(self):
        """Create the (invisible) host window and show the tray icon."""
        h_inst = win32gui.GetModuleHandle(None)
        self.hwnd = win32gui.CreateWindow(self.classAtom,
                                          "Taskbar",
                                          win32con.WS_OVERLAPPED | win32con.WS_SYSMENU,
                                          0, 0,
                                          win32con.CW_USEDEFAULT,
                                          win32con.CW_USEDEFAULT,
                                          0, 0, h_inst, None)
        win32gui.UpdateWindow(self.hwnd)
        self.notify_id = None
        self.refresh(icon=self.icon)
    def refresh(self, icon, time=200):
        """Add or update the tray icon image.

        NOTE(review): the `time` parameter is unused — confirm and drop.
        """
        h_inst = win32gui.GetModuleHandle(None)
        icon = resource_path(icon)
        try:
            hicon = win32gui.LoadImage(h_inst, icon, win32con.IMAGE_ICON, 0, 0,
                                       win32con.LR_LOADFROMFILE | win32con.IMAGE_ICON)
        except BaseException as e:
            # Fall back to the stock application icon if the file fails to load.
            hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
        if self.notify_id:
            message = win32gui.NIM_MODIFY
        else:
            message = win32gui.NIM_ADD
        self.notify_id = (self.hwnd, 0,
                          win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP | win32gui.NIF_INFO,
                          win32con.WM_USER + 20, hicon)
        win32gui.Shell_NotifyIcon(message, self.notify_id)
    def notify(self, hwnd, msg, wparam, lparam):
        """Tray event callback (clicks, balloon events); intentionally a no-op."""
        pass
    def destroy(self):
        """Remove the tray icon and end the message loop."""
        nid = (self.hwnd, 0)
        win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, nid)
        win32gui.PostQuitMessage(0)
| 2.265625 | 2 |
notebooks/thunderdome/eth2spec/utils/bls.py | casparschwa/beaconrunner | 11 | 12767771 | <gh_stars>10-100
from py_ecc.bls import G2ProofOfPossession as py_ecc_bls
from py_ecc.bls.g2_primatives import signature_to_G2 as _signature_to_G2
import milagro_bls_binding as milagro_bls # noqa: F401 for BLS switching option
# Flag to make BLS active or not. Used for testing, do not ignore BLS in production unless you know what you are doing.
bls_active = True

# To change bls implementation, default to PyECC for correctness. Milagro is a good faster alternative.
bls = milagro_bls

# Placeholder values returned by the decorated helpers when BLS is disabled.
# Sizes follow BLS12-381 compressed serialization: 96-byte G2 signatures,
# 48-byte G1 public keys.
STUB_SIGNATURE = b'\x11' * 96
STUB_PUBKEY = b'\x22' * 48
# Compressed encodings of the point at infinity (0xc0 flag byte).
Z1_PUBKEY = b'\xc0' + b'\x00' * 47
Z2_SIGNATURE = b'\xc0' + b'\x00' * 95
STUB_COORDINATES = _signature_to_G2(Z2_SIGNATURE)
def use_milagro():
    """
    Shortcut to use Milagro as BLS library (rebinds the module-level `bls`).
    """
    global bls
    bls = milagro_bls
def use_py_ecc():
    """
    Shortcut to use Py-ecc as BLS library (rebinds the module-level `bls`).
    """
    global bls
    bls = py_ecc_bls
def only_with_bls(alt_return=None):
    """
    Decorator factory to make a function only run when BLS is active. Otherwise return the default.
    """
    def runner(fn):
        def entry(*args, **kw):
            # `bls_active` is read at call time, so toggling it later works.
            return fn(*args, **kw) if bls_active else alt_return
        return entry
    return runner
@only_with_bls(alt_return=True)
def Verify(PK, message, signature):
    """Verify a BLS signature; return False on any verification failure.

    Fix: the original's ``finally: return result`` also swallowed
    BaseExceptions (e.g. KeyboardInterrupt) escaping the ``try`` — and
    hit an unbound ``result`` in that case. Only Exception is caught now.
    """
    try:
        return bls.Verify(PK, message, signature)
    except Exception:
        return False
@only_with_bls(alt_return=True)
def AggregateVerify(pubkeys, messages, signature):
    """Verify an aggregate signature over distinct messages; False on failure.

    Fix: replaced ``finally: return result``, which suppressed
    BaseExceptions and referenced an unbound ``result`` when one escaped.
    """
    try:
        return bls.AggregateVerify(list(pubkeys), list(messages), signature)
    except Exception:
        return False
@only_with_bls(alt_return=True)
def FastAggregateVerify(pubkeys, message, signature):
    """Verify an aggregate signature over a single message; False on failure.

    Fix: replaced ``finally: return result``, which suppressed
    BaseExceptions and referenced an unbound ``result`` when one escaped.
    """
    try:
        return bls.FastAggregateVerify(list(pubkeys), message, signature)
    except Exception:
        return False
@only_with_bls(alt_return=STUB_SIGNATURE)
def Aggregate(signatures):
    # Thin passthrough to the active BLS backend's signature aggregation.
    return bls.Aggregate(signatures)
@only_with_bls(alt_return=STUB_SIGNATURE)
def Sign(SK, message):
    """Sign `message` with secret key `SK`.

    py_ecc takes the key as-is; milagro expects 32 big-endian bytes.
    """
    if bls is py_ecc_bls:
        return bls.Sign(SK, message)
    return bls.Sign(SK.to_bytes(32, 'big'), message)
@only_with_bls(alt_return=STUB_COORDINATES)
def signature_to_G2(signature):
    # Decode a 96-byte compressed signature into py_ecc G2 coordinates.
    return _signature_to_G2(signature)
@only_with_bls(alt_return=STUB_PUBKEY)
def AggregatePKs(pubkeys):
    # Uses the backend's private _AggregatePKs; both backends expose it.
    return bls._AggregatePKs(list(pubkeys))
@only_with_bls(alt_return=STUB_SIGNATURE)
def SkToPk(SK):
    """Derive the public key for secret key `SK`.

    py_ecc takes the key as-is; milagro expects 32 big-endian bytes.
    """
    if bls is py_ecc_bls:
        return bls.SkToPk(SK)
    return bls.SkToPk(SK.to_bytes(32, 'big'))
| 2.390625 | 2 |
Python_Fundamentals/04_Lambda_Functions_And_Exercise/task_dictionaries/a_key_key_value_value.py | Dochko0/Python | 0 | 12767772 | key = input().strip()
value = input().strip()
count = int(input())
result = ''
for entry in range(count):
keys, values = input().split(' => ')
if key in keys:
result += f'{keys}:\n'
if value in values:
all_values = '\n'.join([f'-{v}' for v in values.split(';') if value in v])
result += f'{all_values}\n'
print(result) | 3.59375 | 4 |
Incubator.py | Decamark/CR2Incubator | 0 | 12767773 | <gh_stars>0
# -*- coding: utf-8 -*-
import sys
from struct import pack, unpack
def iprint(s, level):
    """Print `s` indented by `level` two-space steps."""
    print("{}{}".format("  " * level, s))
class Incubator:
    """Extracts the embedded JPEG previews from a Canon CR2 raw file.

    CR2 files are TIFF-based: little-endian IFDs whose 12-byte entries are
    (tag: uint16, type: uint16, count: uint32, value-or-offset: uint32).

    Fix: ``make_loseless_jpg`` referenced ``offset_1st_ifd3`` without
    ``self.``, raising NameError at runtime.
    """

    def open_raw(self, path):
        """Read the whole raw file into memory."""
        with open(path, 'rb') as raw_f:
            self.all_bytes = raw_f.read()

    def save_jpg(self, path):
        """Write the extracted JPEG bytes built by make_*_jpg()."""
        with open(path, 'wb') as jpg_f:
            jpg_f.write(self.final_jpg_bytes)

    def prepare(self):
        """Locate the IFD offsets needed later.

        Offsets come from the TIFF header: IFD0 pointer at byte 4 and the
        CR2-specific IFD3 pointer at byte 12; +2 skips each IFD's entry
        count. 0x8769 is the standard Exif SubIFD pointer tag.
        """
        self.offset_ifd0 = unpack('<I', self.all_bytes[4:8])[0]
        self.offset_1st_ifd0 = self.offset_ifd0 + 2
        self.offset_ifd3 = unpack('<I', self.all_bytes[12:16])[0]
        self.offset_1st_ifd3 = self.offset_ifd3 + 2
        self.offset_subifd = self.find_tag_call(self.offset_1st_ifd0, 0x8769, self.get_value)
        self.offset_1st_subifd = self.offset_subifd + 2

    def find_tag_call(self, curr_ptr, tag_id_to_find, callback):
        """Scan 12-byte IFD entries from `curr_ptr` until `tag_id_to_find`,
        then return callback(tag_id, tag_type, value_num, value).

        NOTE(review): loops forever (or crashes) when the tag is absent —
        consider bounding the scan by the IFD's entry count.
        """
        while True:
            tag_id = unpack('<H', self.all_bytes[curr_ptr:curr_ptr + 2])[0]
            tag_type = unpack('<H', self.all_bytes[curr_ptr + 2:curr_ptr + 4])[0]
            value_num = unpack('<I', self.all_bytes[curr_ptr + 4:curr_ptr + 8])[0]
            value = unpack('<I', self.all_bytes[curr_ptr + 8:curr_ptr + 12])[0]
            if tag_id == tag_id_to_find:
                return callback(tag_id, tag_type, value_num, value)
            curr_ptr = curr_ptr + 12

    @staticmethod
    def get_size(tag_id, tag_type, size, value):
        """Callback: report and return the entry's count field."""
        iprint('Size is: 0x{:X}'.format(size), 1)
        return size

    @staticmethod
    def get_value(tag_id, tag_type, size, value):
        """Callback: report and return the entry's value/offset field."""
        iprint('Value is: 0x{:X}'.format(value), 1)
        return value

    # Extract datetime
    def get_datetime(self):
        """Return the Exif DateTimeOriginal (tag 0x9003) as a string."""
        offset_datetime = self.find_tag_call(self.offset_1st_subifd, 0x9003, self.get_value)
        size_datetime = self.find_tag_call(self.offset_1st_subifd, 0x9003, self.get_size)
        self.datetime = self.all_bytes[offset_datetime:offset_datetime + size_datetime]
        iprint('DateTime is: {}'.format(self.datetime.decode('utf-8')), 1)
        return self.datetime.decode('utf-8')

    # Exif APP1 marker (Taken date is embedded)
    # (Note that pre-existing marker is untouched)
    def make_exif(self):
        """Build a minimal Exif APP1 segment carrying orientation and the
        taken-date, to be spliced right after the JPEG SOI marker."""
        exif = \
            [0xFF, 0xE1] + \
            list(pack('>H', 60 + len(self.datetime))) + \
            [0x45, 0x78, 0x69, 0x66, 0x00, 0x00] + \
            [0x49, 0x49, 0x2A, 0x00, 0x08, 0x00, 0x00, 0x00] + \
            [0x02, 0x00] + \
            [0x12, 0x01, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00] + list(pack('<I', self.orientation)) + \
            [0x69, 0x87, 0x04, 0x00, 0x01, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00] + \
            [0x00, 0x00, 0x00, 0x00] + \
            [0x01, 0x00] + \
            [0x03, 0x90, 0x02, 0x00, 0x14, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00] + \
            list(self.datetime)
        return bytearray(exif)

    # Extract loosy JPEG in IFD0
    def make_loosy_jpg(self):
        """Assemble the lossy preview JPEG from IFD0 (StripOffsets 0x0111,
        StripByteCounts 0x0117) with a fresh Exif segment inserted."""
        offset_raw = self.find_tag_call(self.offset_1st_ifd0, 0x0111, self.get_value)
        size_raw = self.find_tag_call(self.offset_1st_ifd0, 0x0117, self.get_value)
        jpg_bytes = self.all_bytes[offset_raw:offset_raw + size_raw]
        self.orientation = self.find_tag_call(self.offset_1st_ifd0, 0x0112, self.get_value)
        exif = self.make_exif()
        self.final_jpg_bytes = jpg_bytes[:2] + exif + jpg_bytes[2:]

    # Extract loseless JPEG in IFD3
    # (Note that this image is too large to save without compressing. Don't recommand)
    def make_loseless_jpg(self):
        """Assemble the lossless JPEG from IFD3, same layout as above."""
        # Fix: these two lookups previously used the bare name
        # `offset_1st_ifd3`, which does not exist (NameError).
        offset_raw = self.find_tag_call(self.offset_1st_ifd3, 0x0111, self.get_value)
        size_raw = self.find_tag_call(self.offset_1st_ifd3, 0x0117, self.get_value)
        jpg_bytes = self.all_bytes[offset_raw:offset_raw + size_raw]
        self.orientation = self.find_tag_call(self.offset_1st_ifd0, 0x0112, self.get_value)
        exif = self.make_exif()
        self.final_jpg_bytes = jpg_bytes[:2] + exif + jpg_bytes[2:]
| 2.578125 | 3 |
crazyflie-clients-python/src/cfclient/ui/dialogs/logconfigdialogue.py | manikamakam/swarm | 0 | 12767774 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This dialogue is used to configure different log configurations that is used to
enable logging of data from the Crazyflie. These can then be used in different
views in the UI.
"""
import logging
import cfclient
from PyQt5 import Qt, QtWidgets, uic
from PyQt5.QtCore import * # noqa
from PyQt5.QtWidgets import * # noqa
from PyQt5.Qt import * # noqa
from cflib.crazyflie.log import LogConfig
__author__ = 'Bitcraze AB'
__all__ = ['LogConfigDialogue']

logger = logging.getLogger(__name__)

# Load the Qt Designer .ui file; yields the generated form class and the
# widget base class it extends.
(logconfig_widget_class, connect_widget_base_class) = (
    uic.loadUiType(cfclient.module_path + '/ui/dialogs/logconfigdialogue.ui'))

# Column indices shared by the log/var tree widgets.
NAME_FIELD = 0
ID_FIELD = 1
PTYPE_FIELD = 2
CTYPE_FIELD = 3
class LogConfigDialogue(QtWidgets.QWidget, logconfig_widget_class):
    """Dialogue for building, loading and saving Crazyflie log configurations.

    The full log table-of-contents is shown in ``logTree``; variables moved
    into ``varTree`` make up the configuration being edited.  A progress bar
    tracks the 26-byte log packet payload limit, and the Save button is only
    enabled for a non-empty, size-valid selection with a positive period.
    """

    def __init__(self, helper, *args):
        super(LogConfigDialogue, self).__init__(*args)
        self.setupUi(self)
        self.helper = helper
        self.logTree.setHeaderLabels(['Name', 'ID', 'Unpack', 'Storage'])
        self.varTree.setHeaderLabels(['Name', 'ID', 'Unpack', 'Storage'])
        self.addButton.clicked.connect(lambda: self.moveNode(self.logTree,
                                                             self.varTree))
        self.removeButton.clicked.connect(lambda: self.moveNode(self.varTree,
                                                                self.logTree))
        self.cancelButton.clicked.connect(self.close)
        self.loadButton.clicked.connect(self.loadConfig)
        self.saveButton.clicked.connect(self.saveConfig)
        self.loggingPeriod.textChanged.connect(self.periodChanged)
        # A log packet payload can hold at most 26 bytes of variable data.
        self.packetSize.setMaximum(26)
        self.currentSize = 0
        self.packetSize.setValue(0)
        self.period = 0

    def decodeSize(self, s):
        """Return the storage size in bytes for a C-type name string
        (e.g. 'uint16_t' -> 2, 'float' -> 4, 'FP16' -> 2)."""
        size = 0
        if ("16" in s):
            size = 2
        if ("float" in s):
            size = 4
        if ("8" in s):
            size = 1
        if ("FP16" in s):
            size = 2
        if ("32" in s):
            size = 4
        return size

    def sortTrees(self):
        """Sort both trees alphabetically: group nodes and their variables."""
        self.varTree.invisibleRootItem().sortChildren(NAME_FIELD,
                                                      Qt.AscendingOrder)
        for node in self.getNodeChildren(self.varTree.invisibleRootItem()):
            node.sortChildren(NAME_FIELD, Qt.AscendingOrder)
        self.logTree.invisibleRootItem().sortChildren(NAME_FIELD,
                                                      Qt.AscendingOrder)
        for node in self.getNodeChildren(self.logTree.invisibleRootItem()):
            node.sortChildren(NAME_FIELD, Qt.AscendingOrder)

    def getNodeChildren(self, treeNode):
        """Return the direct children of a QTreeWidgetItem as a list."""
        children = []
        for i in range(treeNode.childCount()):
            children.append(treeNode.child(i))
        return children

    def updatePacketSizeBar(self):
        """Recompute the selected payload size and refresh the progress bar.

        When the selection exceeds the 26-byte limit, the bar switches to a
        percent-of-limit display so the overflow is visible.
        """
        self.currentSize = 0
        for node in self.getNodeChildren(self.varTree.invisibleRootItem()):
            for leaf in self.getNodeChildren(node):
                self.currentSize = (self.currentSize +
                                    self.decodeSize(leaf.text(CTYPE_FIELD)))
        if self.currentSize > 26:
            # NOTE(review): these int-typed Qt setters receive float values
            # (true division in Python 3); newer PyQt5 releases may require
            # wrapping in int(...) -- verify against the PyQt version in use.
            self.packetSize.setMaximum(self.currentSize / 26.0 * 100.0)
            self.packetSize.setFormat("%v%")
            self.packetSize.setValue(self.currentSize / 26.0 * 100.0)
        else:
            self.packetSize.setMaximum(26)
            self.packetSize.setFormat("%p%")
            self.packetSize.setValue(self.currentSize)

    def addNewVar(self, logTreeItem, target):
        """Clone a variable item into `target`, creating its group node
        there first if it does not exist yet."""
        parentName = logTreeItem.parent().text(NAME_FIELD)
        varParent = target.findItems(parentName, Qt.MatchExactly, NAME_FIELD)
        item = logTreeItem.clone()
        if (len(varParent) == 0):
            newParent = QtWidgets.QTreeWidgetItem()
            newParent.setData(0, Qt.DisplayRole, parentName)
            newParent.addChild(item)
            target.addTopLevelItem(newParent)
            target.expandItem(newParent)
        else:
            parent = varParent[0]
            parent.addChild(item)

    def moveNodeItem(self, source, target, item):
        """Move `item` (a whole group or one variable) from one tree to the
        other, then refresh the size bar, sorting and Save-button state."""
        if (item.parent() is None):
            # A whole group: move each child, then drop the group node.
            children = self.getNodeChildren(item)
            for c in children:
                self.addNewVar(c, target)
            source.takeTopLevelItem(source.indexOfTopLevelItem(item))
        elif (item.parent().childCount() > 1):
            self.addNewVar(item, target)
            item.parent().removeChild(item)
        else:
            # Last variable of a group: remove the now-empty group as well.
            self.addNewVar(item, target)
            # item.parent().removeChild(item)
            source.takeTopLevelItem(source.indexOfTopLevelItem(item.parent()))
        self.updatePacketSizeBar()
        self.sortTrees()
        self.checkAndEnableSaveButton()

    def checkAndEnableSaveButton(self):
        """Enable Save only when the selection is non-empty, fits in one
        packet (<= 26 bytes) and the logging period is positive."""
        if self.currentSize > 0 and self.period > 0 and self.currentSize <= 26:
            self.saveButton.setEnabled(True)
        else:
            self.saveButton.setEnabled(False)

    def moveNode(self, source, target):
        """Move the currently selected item between the two trees."""
        self.moveNodeItem(source, target, source.currentItem())

    def moveNodeByName(self, source, target, parentName, itemName):
        """Move the variable `parentName`.`itemName` between trees.

        Returns True if the variable was found and moved, False otherwise.
        """
        parents = source.findItems(parentName, Qt.MatchExactly, NAME_FIELD)
        node = None
        if (len(parents) > 0):
            parent = parents[0]
            for n in range(parent.childCount()):
                if (parent.child(n).text(NAME_FIELD) == itemName):
                    node = parent.child(n)
                    break
        if (node is not None):
            self.moveNodeItem(source, target, node)
            return True
        return False

    def showEvent(self, event):
        """Refresh the TOC and the saved-config list when the dialogue opens."""
        self.updateToc()
        self.populateDropDown()
        toc = self.helper.cf.log.toc
        if (len(list(toc.toc.keys())) > 0):
            self.configNameCombo.setEnabled(True)
        else:
            self.configNameCombo.setEnabled(False)

    def resetTrees(self):
        """Clear the selection tree and reload the TOC tree."""
        self.varTree.clear()
        self.updateToc()

    def periodChanged(self, value):
        """Slot for the period text field; non-integer input resets to 0
        (which in turn disables the Save button)."""
        try:
            self.period = int(value)
            self.checkAndEnableSaveButton()
        except Exception:
            self.period = 0

    def showErrorPopup(self, caption, message):
        """Show a non-modal error message box."""
        self.box = QMessageBox()  # noqa
        self.box.setWindowTitle(caption)
        self.box.setText(message)
        # self.box.setButtonText(1, "Ok")
        self.box.setWindowFlags(Qt.Dialog | Qt.MSWindowsFixedSizeDialogHint)
        self.box.show()

    def updateToc(self):
        """Rebuild logTree from the Crazyflie's current log TOC."""
        self.logTree.clear()
        toc = self.helper.cf.log.toc
        for group in list(toc.toc.keys()):
            groupItem = QtWidgets.QTreeWidgetItem()
            groupItem.setData(NAME_FIELD, Qt.DisplayRole, group)
            for param in list(toc.toc[group].keys()):
                item = QtWidgets.QTreeWidgetItem()
                item.setData(NAME_FIELD, Qt.DisplayRole, param)
                item.setData(ID_FIELD, Qt.DisplayRole,
                             toc.toc[group][param].ident)
                item.setData(PTYPE_FIELD, Qt.DisplayRole,
                             toc.toc[group][param].pytype)
                item.setData(CTYPE_FIELD, Qt.DisplayRole,
                             toc.toc[group][param].ctype)
                groupItem.addChild(item)
            self.logTree.addTopLevelItem(groupItem)
            self.logTree.expandItem(groupItem)
        self.sortTrees()

    def populateDropDown(self):
        """Fill the config combo box with the saved configurations."""
        self.configNameCombo.clear()
        toc = self.helper.logConfigReader.getLogConfigs()
        for d in toc:
            self.configNameCombo.addItem(d.name)
        if (len(toc) > 0):
            self.loadButton.setEnabled(True)

    def loadConfig(self):
        """Load the configuration selected in the combo box into the trees."""
        cText = self.configNameCombo.currentText()
        config = None
        for d in self.helper.logConfigReader.getLogConfigs():
            if (d.name == cText):
                config = d
        if (config is None):
            logger.warning("Could not load config")
        else:
            self.resetTrees()
            self.loggingPeriod.setText("%d" % config.period_in_ms)
            self.period = config.period_in_ms
            for v in config.variables:
                if (v.is_toc_variable()):
                    parts = v.name.split(".")
                    varParent = parts[0]
                    varName = parts[1]
                    if self.moveNodeByName(
                            self.logTree, self.varTree, varParent,
                            varName) is False:
                        logger.warning("Could not find node %s.%s!!",
                                       varParent, varName)
                else:
                    logger.warning("Error: Mem vars not supported!")

    def saveConfig(self):
        """Persist the current selection as a log configuration and
        register it with the Crazyflie log framework."""
        updatedConfig = self.createConfigFromSelection()
        try:
            self.helper.logConfigReader.saveLogConfigFile(updatedConfig)
            self.close()
        except Exception as e:
            self.showErrorPopup("Error when saving file", "Error: %s" % e)
        self.helper.cf.log.add_config(updatedConfig)

    def createConfigFromSelection(self):
        """Build a LogConfig from the variables currently in varTree."""
        logconfig = LogConfig(str(self.configNameCombo.currentText()),
                              self.period)
        for node in self.getNodeChildren(self.varTree.invisibleRootItem()):
            parentName = node.text(NAME_FIELD)
            for leaf in self.getNodeChildren(node):
                varName = leaf.text(NAME_FIELD)
                varType = str(leaf.text(CTYPE_FIELD))
                completeName = "%s.%s" % (parentName, varName)
                logconfig.add_variable(completeName, varType)
        return logconfig
| 1.515625 | 2 |
pun/__main__.py | Unviray/pun | 2 | 12767775 | import sys
from .cli import main
# Entry point for ``python -m pun``: run the CLI and propagate its integer
# exit status to the shell.
sys.exit(main())
| 1.109375 | 1 |
xp-hdf5_parse1.py | johnwilldonicely/LDAS | 2 | 12767776 | <gh_stars>1-10
#!/usr/local/bin/python3
# script for reading HDF5 output from MultiChannelSystems recordings - combined timestamps and waveforms for detected events
# EXAMPLES
# NOTE!!! - the above pathmight be incorrect depending on the machine
# THEREFORE, consider running each command as follows: python3 xp-hdf5_parse1.py ...
# xp-hdf5_parse1.py count infile.h5 - get the max-channel-number (number of channels minus one)
# xp-hdf5_parse1.py --recordin 0 export 15 infile.h5 - export timestamps and waveforms for channel 15
# xp-hdf5_parse1.py metadata infile.h5 - print metadata to screen
# xp-hdf5_parse1.py shape infile.h5 - shape of the waveform matrix (samplesperspike,spikes)
# xp-hdf5_parse1.py --recording 0 attributes - recording-0 attributes - duration, recording-ID etc
# xp-hdf5_parse1.py attributes - file attributes - date, program-version, etc
# xp-hdf5_parse.py -h - displays the help information
import h5py
import argparse
from pathlib import Path
import numpy as np
def get(args):
    """Export the detected-event timestamps for one channel.

    Writes the TimeStampEntity_<entity_no> dataset of the selected
    recording/stream to "<basename>.<entity_no>.i64.timestamp.dat" as raw
    int64 values.
    """
    h5 = h5py.File(args.filename, 'r')
    prefix = Path(args.filename).name
    path = 'Data/Recording_{recording}/{type}Stream/Stream_{stream}'
    timestamp_grp = h5[path.format(recording=args.recording, stream=args.stream, type="TimeStamp")]
    # dataset[()] reads the whole dataset (the .value accessor was removed
    # in h5py 3.x).
    timestamp = timestamp_grp['TimeStampEntity_{}'.format(args.entity_no)][()].astype(np.int64)
    # Only timestamps are exported here; the original also read the segment
    # data into a local that was never used -- that dead read is dropped.
    timestamp.tofile("{}.{}.i64.timestamp.dat".format(prefix, args.entity_no))
def wave(args):
    """Export the event waveforms for one channel as raw float32.

    Writes the SegmentData_<entity_no> dataset, transposed so that one
    spike waveform follows another, to "<basename>.<entity_no>.f32.dat".
    """
    h5 = h5py.File(args.filename, 'r')
    prefix = Path(args.filename).name
    path = 'Data/Recording_{recording}/{type}Stream/Stream_{stream}'
    data_grp = h5[path.format(recording=args.recording, stream=args.stream, type="Segment")]
    # dataset[()] reads the whole dataset (the .value accessor was removed
    # in h5py 3.x).  The original also read the timestamps into a local
    # that was never used; that dead read is dropped.
    data = data_grp['SegmentData_{}'.format(args.entity_no)][()].astype(np.float32)
    # Transpose so waveforms are contiguous in the output file.
    data = data.T
    data.tofile("{}.{}.f32.dat".format(prefix, args.entity_no))
def count(args):
    """Print the number of timestamp entities (channels) in the stream."""
    h5 = h5py.File(args.filename, 'r')
    recording = args.recording if args.recording is not None else 0
    path = 'Data/Recording_{recording}/{type}Stream/Stream_{stream}'
    group = h5[path.format(recording=recording, stream=args.stream, type="TimeStamp")]
    print(len(group))
def display_structure(h5):
    """Recursively collect the group/dataset hierarchy of an HDF5 node.

    Returns a list in which a leaf entry is its name (string) and a
    non-empty group is a {name: children} dict.  Leaf objects without a
    .keys() method simply contribute their name.
    """
    output = []
    try:
        for k in list(h5.keys()):
            children = display_structure(h5[k])
            if len(children) > 0:
                o = {k: children}
            else:
                o = k
            output.append(o)
    except Exception:
        # Leaf datasets raise when .keys() is accessed.  Fix: the original
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        pass
    return output
def shape(args):
    """Print the (samples_per_spike, spikes) shape of waveform entity 0."""
    h5 = h5py.File(args.filename, 'r')
    recording = 0 if args.recording is None else args.recording
    path = 'Data/Recording_{recording}/{type}Stream/Stream_{stream}'
    data_grp = h5[path.format(recording=recording, stream=args.stream, type="Segment")]
    # dataset[()] reads the whole dataset (the .value accessor was removed
    # in h5py 3.x).
    data = data_grp['SegmentData_{}'.format(0)][()].astype(np.float32)
    print(data.shape)
def display_structure_cmd(args):
    """CLI wrapper: print the hierarchy of the given HDF5 file."""
    structure = display_structure(h5py.File(args.filename, 'r'))
    print(structure)
import sys
def get_meta(args):
    """Dump per-channel SourceInfoChannel metadata as tab-separated text.

    Writes a header row of dtype field names followed by one row per
    channel.  Output goes to --output if given, otherwise stdout.
    """
    h5 = h5py.File(args.filename, 'r')
    path = 'Data/Recording_{recording}/{type}Stream/Stream_{stream}/SourceInfoChannel'
    recording = 0 if args.recording is None else args.recording
    info_dataset = h5[path.format(recording=recording, stream=args.stream, type="Segment")]

    def decode(x):
        # HDF5 string cells arrive as bytes; everything else is stringified.
        if isinstance(x, bytes):
            return x.decode()
        return str(x)

    if args.output is None:
        f = sys.stdout
        needs_close = False
    else:
        f = open(args.output, "w+")
        needs_close = True
    try:
        f.write("\t".join(info_dataset.dtype.names))
        f.write("\n")
        for x in info_dataset:
            f.write("\t".join(map(decode, x)))
            f.write("\n")
    finally:
        # Fix: the original never closed the file when --output was used.
        if needs_close:
            f.close()
def get_attr(args):
    """Dump HDF5 attributes as key<TAB>value lines.

    With --recording N the attributes of Data/Recording_N are dumped,
    otherwise those of the top-level "Data" group; --path overrides both.
    Output goes to --output if given, otherwise stdout.
    """
    h5 = h5py.File(args.filename, 'r')
    if args.recording is None:
        path = "Data"
    else:
        path = 'Data/Recording_{recording}/'.format(recording=args.recording)
    if args.path is not None:
        path = args.path
    attrs = h5[path].attrs

    def decode(x):
        if isinstance(x, bytes):
            return x.decode()
        return str(x)

    if args.output is None:
        f = sys.stdout
        needs_close = False
    else:
        f = open(args.output, "w+")
        needs_close = True
    try:
        for k, v in attrs.items():
            f.write(k + "\t" + decode(v) + "\n")
    finally:
        # Fix: the original never closed the file when --output was used.
        if needs_close:
            f.close()
# COMMAND LINE INTERFACE
parser = argparse.ArgumentParser(description='Read Spike data file')
parser.add_argument('--recording', type=int, help='The recording number', default=None)
parser.add_argument('--stream', type=int, help="The stream number", default=0)
subparser = parser.add_subparsers()
# NOTE(review): 'filename' is a main-parser positional added after the
# subparsers action; argparse handles positionals that follow subparsers
# poorly -- verify that "prog <command> <filename>" really binds
# args.filename as the usage examples at the top of this file assume.
parser.add_argument('filename', type=str)
get_parser = subparser.add_parser("export", help="Export the timestamp data into seperate dat files")
get_parser.add_argument("entity_no", type=int)
get_parser.set_defaults(func=get)
get_parser = subparser.add_parser("wave", help="Export the waveform data into seperate dat files")
get_parser.add_argument("entity_no", type=int)
get_parser.set_defaults(func=wave)
count_parser = subparser.add_parser("count", help="Count the number of events to stdout")
count_parser.set_defaults(func=count)
structure_parser = subparser.add_parser("display", help="Display the hdf5 structure")
structure_parser.set_defaults(func=display_structure_cmd)
structure_parser = subparser.add_parser("shape", help="Display the shape of the waveform matrix")
structure_parser.set_defaults(func=shape)
meta_parser = subparser.add_parser("metadata", help="Export the metadata into a tab-separated file")
meta_parser.add_argument("--output", type=str, default=None, help="An output file. If not specified printed to stdout")
meta_parser.set_defaults(func=get_meta)
attr_parser = subparser.add_parser("attributes", help="Export the attributes into a tab-separated file")
attr_parser.add_argument("--output", type=str, default=None, help="An output file. If not specified printed to stdout")
attr_parser.add_argument("--path", type=str, default=None, help="The HDF5 path to dump (overrides others)")
attr_parser.set_defaults(func=get_attr)
if __name__ == "__main__":
    args = parser.parse_args()
    args.func(args)
| 2.671875 | 3 |
src/detext/layers/multi_layer_perceptron.py | StarWang/detext | 1,229 | 12767777 | <gh_stars>1000+
from typing import List
import tensorflow as tf
class MultiLayerPerceptron(tf.keras.layers.Layer):
    """A stack of dense layers applied in sequence (an MLP)."""

    def __init__(self, num_hidden: List[int], activations: List, prefix: str = ''):
        """Initializes the layer.

        :param num_hidden: list of hidden layer sizes (0 means "skip layer")
        :param activations: list of activations, one per hidden layer
        :param prefix: prefix for the generated layer names
        """
        super(MultiLayerPerceptron, self).__init__()
        assert len(num_hidden) == len(activations), "num hidden and activations must contain the same number of elements"
        self.mlp = []
        for idx, (units, act) in enumerate(zip(num_hidden, activations)):
            if units == 0:
                # A zero-sized layer is treated as absent.
                continue
            self.mlp.append(
                tf.keras.layers.Dense(units=units, use_bias=True, activation=act,
                                      name=f'{prefix}hidden_projection_{str(idx)}'))

    def call(self, inputs, **kwargs):
        """Applies the dense stack to ``inputs``.

        :return: tensor of shape inputs.shape[:-1] + [num_hidden[-1]]
        """
        out = inputs
        for dense in self.mlp:
            out = dense(out)
        return out
| 3.15625 | 3 |
BackPropogationMLR.py | 1MT3J45/ml-imbalanced-bigdata | 0 | 12767778 | <filename>BackPropogationMLR.py
# MULTILINEAR REGRESSION
# IMPORT LIBRARIES
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# IMPORTING DATASET
def fit_data(data):
    """Fit a multiple linear regression on `data` (last column = target).

    Returns the full feature matrix X, the target vector y and the
    predictions for a held-out 20% test split.
    """
    dataset = data  # pd.read_csv("")
    X = dataset.iloc[:, :-1].values
    y = dataset.iloc[:, -1].values
    print("FIRST X ___\n", X.view())
    print("FIRST y ___\n", y.view())
    # Hold out 20% of the rows for testing (fixed seed for reproducibility).
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
    # Fit ordinary least squares on the training split.
    from sklearn.linear_model import LinearRegression
    model = LinearRegression()
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    return X, y, predictions
def summary(X, y):
    """Fit an OLS model on all columns of X and print its summary table.

    Returns the fitted statsmodels results object so callers can inspect
    p-values for backward elimination (backward compatible: the original
    implicitly returned None).
    """
    # Fix: statsmodels.formula.api no longer re-exports OLS (removed in
    # statsmodels 0.11); the array interface lives in statsmodels.api.
    import statsmodels.api as sm
    # NOTE(review): no constant column is appended (the np.append line was
    # commented out upstream), so the model is fit without an intercept.
    X_opt = X[:, range(0, X.shape[1])]
    regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()
    print(regressor_OLS.summary())
    return regressor_OLS
| 3.53125 | 4 |
vindauga/types/rect.py | gabbpuy/vindauga | 5 | 12767779 | # -*- coding: utf-8 -*-
from .point import Point
class Rect:
    """
    A screen rectangular area.

    `Rect` is used to hold two coordinates on the screen, which usually specify
    the upper left corner and the lower right corner of views. Sometimes the
    second coordinate specify the size (extension) of the view. The two
    coordinates are named `topLeft` and `bottomRight`.
    """
    __slots__ = ('topLeft', 'bottomRight')

    def __init__(self, ax, ay, bx, by) -> None:
        # (ax, ay) becomes topLeft and (bx, by) becomes bottomRight.
        self.topLeft = Point(ax, ay)
        self.bottomRight = Point(bx, by)

    @property
    def width(self):
        # Horizontal extent; negative when the rect is inverted (empty).
        return self.bottomRight.x - self.topLeft.x

    @property
    def height(self):
        # Vertical extent; negative when the rect is inverted (empty).
        return self.bottomRight.y - self.topLeft.y

    def move(self, deltaX, deltaY) -> None:
        """
        Moves the rectangle to a new position.

        The two parameters are added to the two old coordinates as delta
        values. Both parameters can be negative or positive.

        :param deltaX: Change in x
        :param deltaY: Change in y
        """
        self.topLeft += Point(deltaX, deltaY)
        self.bottomRight += Point(deltaX, deltaY)

    def grow(self, deltaX, deltaY) -> None:
        """
        Enlarges the rectangle by a specified value.

        Changes the size of the calling rectangle by subtracting `deltaX` from
        `topLeft.x`, adding `deltaX` to `bottomRight.x`, subtracting `deltaY` from
        `topLeft.y`, and adding `deltaY` to `bottomRight.y`.

        The left side is left-moved by `deltaX` units and the right side is
        right-moved by `deltaX` units. In a similar way the upper side is
        upper-moved by `deltaY` units and the bottom side is bottom-moved by `deltaY`
        units.

        :param deltaX: X distance
        :param deltaY: Y distance
        """
        self.topLeft -= Point(deltaX, deltaY)
        self.bottomRight += Point(deltaX, deltaY)

    def intersect(self, r: 'Rect') -> None:
        """
        Calculates the intersection between this rectangle and the parameter
        rectangle, in place.

        The resulting rectangle is the largest rectangle which contains both
        part of this rectangle and part of the parameter rectangle.

        :param r: Intersection rectangle
        """
        self.topLeft.x = max(self.topLeft.x, r.topLeft.x)
        self.topLeft.y = max(self.topLeft.y, r.topLeft.y)
        self.bottomRight.x = min(self.bottomRight.x, r.bottomRight.x)
        self.bottomRight.y = min(self.bottomRight.y, r.bottomRight.y)

    def union(self, r: 'Rect') -> None:
        """
        Calculates the union between this rectangle and the `r` parameter
        rectangle, in place.

        The resulting rectangle is the smallest rectangle which contains both
        this rectangle and the `r` rectangle.

        :param r: Union rectangle
        """
        self.topLeft.x = min(self.topLeft.x, r.topLeft.x)
        self.topLeft.y = min(self.topLeft.y, r.topLeft.y)
        self.bottomRight.x = max(self.bottomRight.x, r.bottomRight.x)
        self.bottomRight.y = max(self.bottomRight.y, r.bottomRight.y)

    def __contains__(self, point: Point) -> bool:
        """
        Is a Point `in` this `Rect`

        :param point: Point to check
        :return: True if the point is within the bounds of this rectangle
        """
        # Half-open bounds: the bottom-right edge itself is excluded.
        return self.topLeft.x <= point.x < self.bottomRight.x and self.topLeft.y <= point.y < self.bottomRight.y

    # Method-call alias for the `in` operator.
    contains = __contains__

    def __eq__(self, other: 'Rect') -> bool:
        return self.topLeft == other.topLeft and self.bottomRight == other.bottomRight

    def __ne__(self, other: 'Rect') -> bool:
        return self.topLeft != other.topLeft or self.bottomRight != other.bottomRight

    def __repr__(self) -> str:
        return "({0.topLeft.x}, {0.topLeft.y}, {0.bottomRight.x}, {0.bottomRight.y})".format(self)

    def isEmpty(self) -> bool:
        """
        Checks if the rectangle is empty, i.e. if the first coordinate is
        greater than the second one.

        Empty means that `(topLeft.x >= bottomRight.x or topLeft.y >= bottomRight.y)`.

        :return: True if this rectangle is empty
        """
        return self.topLeft.x >= self.bottomRight.x or self.topLeft.y >= self.bottomRight.y

    def copy(self) -> 'Rect':
        """
        Copy to a new `Rect`

        :return: A new `Rect` object
        """
        return Rect(self.topLeft.x, self.topLeft.y, self.bottomRight.x, self.bottomRight.y)
| 4.375 | 4 |
solutions/numbers/factorial.py | TheAtomicGoose/karan | 0 | 12767780 | <gh_stars>0
"""
Written by <NAME>
Finds the factorial of a number.
"""
def factorial(num):
    """Return num! for a non-negative integer num.

    Fixes: the original returned 0 for factorial(0) (0! is 1) and returned
    negative inputs unchanged; negatives now raise ValueError.  The
    iterative form also avoids hitting the recursion limit for large num.
    """
    if num < 0:
        raise ValueError("factorial is undefined for negative numbers")
    result = 1
    for i in range(2, num + 1):
        result *= i
    return result
print(factorial(10)) | 3.953125 | 4 |
example_project/server/django_server/ajax_client/urls.py | ghuntley/simpleapi | 1 | 12767781 | # -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
urlpatterns = patterns('',
(r'^$', direct_to_template, {'template': 'ajax_client/index.html'}),
) | 1.328125 | 1 |
py/lukai/proto/github/com/luk_ai/lukai/protobuf/aggregatorpb/aggregator_pb2_grpc.py | d4l3k/lukai | 23 | 12767782 | <reponame>d4l3k/lukai
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from github.com.luk_ai.lukai.protobuf.aggregatorpb import aggregator_pb2 as github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2
class AggregatorStub(object):
    """Generated gRPC client stub for the aggregatorpb.Aggregator service.

    Generated by the gRPC protocol compiler -- do not hand-edit the logic;
    regenerate from the .proto instead.
    """
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        self.GetWork = channel.unary_stream(
            '/aggregatorpb.Aggregator/GetWork',
            request_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.GetWorkRequest.SerializeToString,
            response_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.GetWorkResponse.FromString,
        )
        self.ReportWork = channel.stream_unary(
            '/aggregatorpb.Aggregator/ReportWork',
            request_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ReportWorkRequest.SerializeToString,
            response_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ReportWorkResponse.FromString,
        )
        self.Notify = channel.unary_unary(
            '/aggregatorpb.Aggregator/Notify',
            request_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.NotifyRequest.SerializeToString,
            response_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.NotifyResponse.FromString,
        )
        self.CancelModelTraining = channel.unary_unary(
            '/aggregatorpb.Aggregator/CancelModelTraining',
            request_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.CancelModelTrainingRequest.SerializeToString,
            response_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.CancelModelTrainingResponse.FromString,
        )
class AggregatorServicer(object):
    """Generated service base class for aggregatorpb.Aggregator.

    Subclass and override the RPC handlers; register with
    add_AggregatorServicer_to_server.  Generated code -- regenerate from
    the .proto rather than hand-editing.
    """
    # missing associated documentation comment in .proto file
    pass

    def GetWork(self, request, context):
        """GetWork sends work to clients to process.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ReportWork(self, request_iterator, context):
        """ReportWork is used to report the trained model/work to the server.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Notify(self, request, context):
        """Internal RPCs.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CancelModelTraining(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_AggregatorServicer_to_server(servicer, server):
    """Register an AggregatorServicer's handlers on a grpc.Server (generated)."""
    rpc_method_handlers = {
        'GetWork': grpc.unary_stream_rpc_method_handler(
            servicer.GetWork,
            request_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.GetWorkRequest.FromString,
            response_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.GetWorkResponse.SerializeToString,
        ),
        'ReportWork': grpc.stream_unary_rpc_method_handler(
            servicer.ReportWork,
            request_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ReportWorkRequest.FromString,
            response_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ReportWorkResponse.SerializeToString,
        ),
        'Notify': grpc.unary_unary_rpc_method_handler(
            servicer.Notify,
            request_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.NotifyRequest.FromString,
            response_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.NotifyResponse.SerializeToString,
        ),
        'CancelModelTraining': grpc.unary_unary_rpc_method_handler(
            servicer.CancelModelTraining,
            request_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.CancelModelTrainingRequest.FromString,
            response_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.CancelModelTrainingResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'aggregatorpb.Aggregator', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
class EdgeStub(object):
    """Generated gRPC client stub for the aggregatorpb.Edge service.

    Generated by the gRPC protocol compiler -- do not hand-edit the logic;
    regenerate from the .proto instead.
    """
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        self.ProdModel = channel.unary_unary(
            '/aggregatorpb.Edge/ProdModel',
            request_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ProdModelRequest.SerializeToString,
            response_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ProdModelResponse.FromString,
        )
        self.FindWork = channel.unary_unary(
            '/aggregatorpb.Edge/FindWork',
            request_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.FindWorkRequest.SerializeToString,
            response_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.FindWorkResponse.FromString,
        )
        self.ModelURL = channel.unary_unary(
            '/aggregatorpb.Edge/ModelURL',
            request_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ModelURLRequest.SerializeToString,
            response_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ModelURLResponse.FromString,
        )
        self.ReportError = channel.unary_unary(
            '/aggregatorpb.Edge/ReportError',
            request_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ReportErrorRequest.SerializeToString,
            response_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ReportErrorResponse.FromString,
        )
class EdgeServicer(object):
    """Generated service base class for aggregatorpb.Edge.

    Subclass and override the RPC handlers; register with
    add_EdgeServicer_to_server.  Generated code -- regenerate from the
    .proto rather than hand-editing.
    """
    # missing associated documentation comment in .proto file
    pass

    def ProdModel(self, request, context):
        """ProdModel returns the current production model.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def FindWork(self, request, context):
        """FindWork returns an address of the aggregator that the client should
        request work from.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ModelURL(self, request, context):
        """ModelURL returns a URL that can be used to download the model. For billing
        purposes, hitting this endpoint will count as one download of the model.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ReportError(self, request, context):
        """ReportError reports an error to the server so the developers can later view
        them.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_EdgeServicer_to_server(servicer, server):
    """Register an EdgeServicer's handlers on a grpc.Server (generated)."""
    rpc_method_handlers = {
        'ProdModel': grpc.unary_unary_rpc_method_handler(
            servicer.ProdModel,
            request_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ProdModelRequest.FromString,
            response_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ProdModelResponse.SerializeToString,
        ),
        'FindWork': grpc.unary_unary_rpc_method_handler(
            servicer.FindWork,
            request_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.FindWorkRequest.FromString,
            response_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.FindWorkResponse.SerializeToString,
        ),
        'ModelURL': grpc.unary_unary_rpc_method_handler(
            servicer.ModelURL,
            request_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ModelURLRequest.FromString,
            response_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ModelURLResponse.SerializeToString,
        ),
        'ReportError': grpc.unary_unary_rpc_method_handler(
            servicer.ReportError,
            request_deserializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ReportErrorRequest.FromString,
            response_serializer=github_dot_com_dot_luk__ai_dot_lukai_dot_protobuf_dot_aggregatorpb_dot_aggregator__pb2.ReportErrorResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'aggregatorpb.Edge', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| 1.515625 | 2 |
hw1/Resources Given/autograder/auto_grader.py | Shubham8037/Rutgers-CS314-Spring19 | 1 | 12767783 | <reponame>Shubham8037/Rutgers-CS314-Spring19
#!/usr/bin/env python3
import os
import sys
import time
import signal
import subprocess
basepath = 'hw1'
problems = ['matmult', 'bst', 'rpn', 'dfa']
# When True, each submission is rebuilt (make / gcc) before grading.
buildRequired = False
problemsInSeparateDirectories = False
test_cases_directory = os.getcwd() + '/testcases/'
maxPossibleScore = 0  # calculated automatically
# if a program has single-liner input and output, we put all test
# cases in single file. Otherwise, we have a file for test and
# associated file with results
formats = {'matmult': 'file',
           'bst': 'file',
           'rpn': 'file',
           'dfa': 'file'}
# value for each test case for each problem
weight = {'matmult': 5.0,
          'bst': 5.0,
          'rpn': 5.0,
          'dfa': 5.0}
# only used to determine when to use floating point comparisons
valueTypes = {'matmult': 'float',
              'bst': 'string',
              'rpn': 'float',
              'dfa': 'string'}
class ExperimentError(Exception):
    """Raised when a graded command exits non-zero.

    Carries the failing command and its captured output; very long output
    is truncated to head + tail around a marker.
    """

    def __init__(self, command, output):
        super().__init__(command)
        self.command = command
        limit = 10000
        if len(output) > limit:
            # Fix: the original used limit/2, which is a float under
            # Python 3 and raised TypeError when used as a slice index.
            half = limit // 2
            self.output = output[:half] \
                + '\n\n...TRUNCATED...\n\n' \
                + output[-half:]
        else:
            self.output = output

    def __str__(self):
        return 'ExperimentError:' + repr(self.command)
def run_command(command_string, inputFile=None, max_lines=0, throw_exception=True, user_program=False):
    """Run a command and return its captured stdout as a string.

    command_string  -- argv list passed to subprocess.Popen (no shell).
    inputFile       -- optional path whose contents become the child's stdin.
    max_lines       -- if non-zero, keep only the last max_lines lines.
    throw_exception -- raise ExperimentError on a non-zero exit status.
    user_program    -- student code may hang: poll for ~5 seconds, then
                       SIGKILL and return ''.
    """
    if inputFile:
        with open(inputFile) as f:
            obj = subprocess.Popen(command_string, stdin=f, stdout=subprocess.PIPE)
    else:
        obj = subprocess.Popen(command_string, stdout=subprocess.PIPE)
    output = ''
    maxSleep = 20
    if user_program:  # program may have an infinite loop
        while maxSleep > 0:
            time.sleep(0.25)
            maxSleep -= 1
            # Fix: Popen.poll() returns None while the child is running
            # (never -1), so the original `!= -1` broke out immediately
            # and the timeout never worked.
            if obj.poll() is not None:
                break
        if maxSleep == 0 and obj.poll() is None:
            os.kill(obj.pid, signal.SIGKILL)
            # Fix: command_string is a list; concatenating it with a str
            # raised TypeError on this path.
            print('{} taking longer than expected. Killed.'.format(command_string))
            return ''
    for line in obj.stdout:
        output += line.decode()
    exit_status = obj.wait()
    if max_lines != 0:
        lines = output.split('\n')
        # Fix: string.join() does not exist in Python 3; use str.join.
        output = '\n'.join(lines[-max_lines:])
    if throw_exception and exit_status != 0:
        raise ExperimentError(command_string, output)
    return output
def compare_string_file(correctFile, actualString, floats=False):
    """Compare expected output (a file) against actual output (a string).

    Lines are compared pairwise via compare_string; missing or extra
    lines are reported and count as a failure.  Returns True on a full
    match.
    """
    actualLines = actualString.split('\n')
    matched = True
    lineNo = 0
    with open(correctFile) as fd:
        for expected in fd:
            if lineNo < len(actualLines):
                if not compare_string(expected, actualLines[lineNo], floats):
                    matched = False
            elif len(expected.strip()) > 0:
                print('Output missing: ' + expected.strip())
                matched = False
            lineNo += 1
    # Anything left over in the actual output (other than blanks) is extra.
    while lineNo < len(actualLines):
        if len(actualLines[lineNo].strip()) == 0:
            lineNo += 1
            continue
        print('Extra output: ' + actualLines[lineNo])
        lineNo += 1
        matched = False
    return matched
def compare_floats(f1, f2, s1, s2):
    """Element-wise float comparison with a 1e-6 absolute tolerance.

    s1/s2 are the original text lines, printed as a diff on mismatch.
    Returns True only when both lists match element for element.
    """
    if len(f1) != len(f2):
        print('Expected: ' + s1)
        print('Observed: ' + s2)
        return False
    for expected, observed in zip(f1, f2):
        if abs(expected - observed) >= 1e-6:
            print('Expected: ' + s1)
            print('Observed: ' + s2)
            return False
    return True
# A bit of a hack, but some problems that have float values
# may also need to output error strings on some inputs
def looksLikeFloats(s2):
    """Return True if every whitespace-separated token parses as a float."""
    try:
        for token in s2.split():
            float(token)
        return True
    except ValueError:
        return False
def compare_string(s1, s2, floats=False):
    """Compare expected line *s1* against observed line *s2*.

    Both sides are whitespace-stripped first.  When *floats* is set and the
    observed line looks numeric, compare as float lists with tolerance.
    A case-only mismatch gets a dedicated message but still scores zero.
    """
    expected = s1.strip()
    observed = s2.strip()
    if floats and looksLikeFloats(observed):
        return compare_floats(
            [float(x) for x in expected.split()],
            [float(x) for x in observed.split()],
            expected, observed)
    if expected == observed:
        return True
    if expected.lower() == observed.lower():
        print('%s and %s are in different case. Please print your output in correct case.' % (expected, observed))
    else:
        print('Expected: ' + expected)
        print('Observed: ' + observed)
    return False
def make_executable(dirname):
    """Build the project in the current directory.

    Prefers the submitted Makefile; otherwise falls back to compiling all
    C sources directly with gcc (and docks the student a message).
    """
    if os.path.isfile('Makefile') or os.path.isfile('makefile'):
        run_command(['make', 'clean'])
        run_command(['make'])
    else:
        print('No Makefile found in ' + dirname)
        print('Please submit a Makefile to receive full grade.')
        # subprocess does not expand shell wildcards, so the original
        # passed the literal strings '*.c'/'*.h' to gcc.  Expand them here;
        # headers are pulled in by the preprocessor and need not be listed.
        import glob
        sources = sorted(glob.glob('*.c'))
        run_command(['gcc', '-Wall', '-Werror', '-fsanitize=address',
                     '-o', dirname] + sources)
def buildProject(dirname):
    """Build *dirname*'s executable when building is enabled.

    Returns True when no build is required or the executable was produced,
    False otherwise (after printing the current module-level ``score``).
    """
    if not buildRequired:  # module-level flag set by the grader config
        return True
    try:
        make_executable(dirname)
    except Exception:
        # Narrowed from a bare ``except:`` so Ctrl-C still aborts grading.
        print('An exception occured trying to build ' + dirname)
        print('Score is %d\n' % score)
        return False
    if not os.path.isfile(dirname):
        print('Executable %s missing. Please check the compilation output.' % dirname)
        print('Score is %d\n' % score)
        return False
    return True
def gradeFileInput(dirname):
    """Grade problem *dirname* whose test cases are whole input files.

    Scans ``test_cases_directory + dirname`` for ``test*`` files, runs the
    student's program once per test (stdin fed from the file) and compares
    stdout against the matching ``result*`` file.  Mutates the module-level
    ``scores`` and ``maxPossibleScore``.
    """
    print('Grading ' + dirname)
    prevdir = os.getcwd()
    if problemsInSeparateDirectories:
        os.chdir(dirname)
    score = 0
    if not buildProject(dirname):
        # NOTE(review): chdir(prevdir) runs even when we never chdir'd above;
        # harmless because prevdir is then already the current directory.
        os.chdir(prevdir)
        return
    test_dir = test_cases_directory + dirname + '/'
    for testfile in sorted(os.listdir(test_dir)):
        if os.path.isdir(testfile) or not testfile.startswith('test'):
            continue
        # resultNN pairs with testNN: swap the 'test' prefix for 'result'.
        resultfile = test_dir + 'result' + testfile[4:len(testfile)]
        if not os.path.isfile(resultfile):
            print('Found a test file %s, but no associated result file.' % testfile)
            continue
        try:
            global maxPossibleScore
            maxPossibleScore += weight[dirname]
            # command_str = [dirname, test_dir + testfile]
            command_str = ['python3', dirname + '.py']
            inputFile = test_dir + testfile
            ret = run_command(command_str, user_program=True, inputFile=inputFile)
            floats = True if valueTypes[dirname] == 'float' else False
            if compare_string_file(resultfile, ret, floats):
                score += weight[dirname]
            else:
                print('The output is not correct for input file ' + testfile)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt —
            # consider narrowing to ``except Exception``.
            print('An exception occurred while executing %s %s' % (dirname, testfile))
    print('Score is %d\n' % score)
    scores[dirname] = score
    if problemsInSeparateDirectories:
        os.chdir(prevdir)
def gradeLineInput(dirname):
    """Grade problem *dirname* whose test cases are single command lines.

    Reads alternating input/expected-output line pairs from
    ``test_cases_directory + dirname + '/test.txt'`` and runs the student
    executable once per pair.  Mutates the module-level ``scores`` and
    ``maxPossibleScore``.
    """
    # Fix: the original assigned maxPossibleScore without declaring it
    # global, which raises UnboundLocalError on the first test case.
    global maxPossibleScore
    print('Grading ' + dirname)
    prevdir = os.getcwd()
    if problemsInSeparateDirectories:
        os.chdir(dirname)
    score = 0
    if not buildProject(dirname):
        if problemsInSeparateDirectories:
            os.chdir(prevdir)
        return
    test_file = test_cases_directory + dirname + '/test.txt'
    if not os.path.isfile(test_file):
        print('Expecting the test cases in test.txt. Not found.')
        print('Score is %d\n' % score)
        if problemsInSeparateDirectories:
            os.chdir(prevdir)
        return
    else:
        print('')
    with open(test_file) as fd:
        for line in fd:
            maxPossibleScore += weight[dirname]
            inputline = line
            outputline = next(fd)  # expected output is the following line
            try:
                command_str = [dirname, inputline]
                ret = run_command(command_str, user_program=True)
                floats = True if valueTypes[dirname] == 'float' else False
                if compare_string(outputline, ret, floats):
                    score += weight[dirname]
                else:
                    print('The output is not correct for input ' + inputline)
            except Exception:  # narrowed from a bare except
                print('An exception occured trying to execute ' + ' '.join(command_str))
    print('Score is %d\n' % score)
    scores[dirname] = score
    if problemsInSeparateDirectories:
        os.chdir(prevdir)
def global_grade(dirname):
    """Grade every configured problem, dispatching on its input format
    ('line' or 'file'); skips problems whose directory is missing."""
    for problem in problems:
        if problemsInSeparateDirectories and not os.path.isdir(os.path.join(problem)):
            continue
        fmt = formats[problem] if problem in formats else None
        if fmt == 'line':
            gradeLineInput(problem)
        elif fmt == 'file':
            gradeFileInput(problem)
        else:
            print('Error: no format specified for problem ' + problem)
def main():
    """Entry point: grade either an unpacked submission directory or a
    ``<basepath>.tar`` archive (when the first CLI argument ends in 'tar'),
    then print the per-problem and total scores."""
    global scores
    scores = {}
    for p in problems:
        scores[p] = 0
    tarmode = False
    if len(sys.argv) > 1:
        if sys.argv[1].strip().endswith('tar'):
            tarmode = True
    if not tarmode:
        # Grade the already-unpacked directory named ``basepath``.
        if not os.path.isdir(basepath):
            print(basepath + ' is not present in this directory.')
            sys.exit(1)
        else:
            print('Grading the content of ' + basepath)
            os.chdir(basepath)
            global_grade(basepath)
    else:
        # Unpack <basepath>.tar into a scratch directory and grade that.
        prevdir = os.getcwd()
        if not os.path.exists(basepath + '.tar'):
            print('Expecting %s.tar in current directory (%s)' % (basepath, prevdir))
            print('Please make sure you created %s.tar in the right directory' % basepath)
            sys.exit(1)
        if os.path.exists('obj_temp'):
            print('Deleting the directory obj_temp.')
            run_command(['rm', '-rf', 'obj_temp'])
        run_command(['mkdir', 'obj_temp'])
        os.chdir('obj_temp')
        run_command(['tar', '-xvf', '../%s.tar' % basepath])
        if os.path.isdir(basepath):
            os.chdir(basepath)
            global_grade(basepath)
        else:
            print('There is no directory named %s in %s.tar.' % (basepath, basepath))
            print('Please check your tar file.')
        os.chdir(prevdir)
    # Summarize: ``maxPossibleScore`` is only read here, so no global
    # declaration is needed.
    totalScore = 0.0
    print('You scored ')
    for p in problems:
        totalScore += scores[p]
        print('%s: %d' % (p, scores[p]))
    print('Your total score = %d / %d' % (totalScore, maxPossibleScore))
# Script entry point.
if __name__ == '__main__':
    main()
| 2.609375 | 3 |
packaging/setup/plugins/ovirt-engine-common/dockerc/core.py | leongold/ovirt-engine | 0 | 12767784 | <filename>packaging/setup/plugins/ovirt-engine-common/dockerc/core.py<gh_stars>0
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""dockerc plugin."""
import gettext
from otopi import plugin
from otopi import util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.dockerc import constants as odockerccons
from ovirt_setup_lib import dialog
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
    """dockerc plugin.

    Seeds dockerc-related environment defaults and, during customization,
    makes sure the docker daemon is running (offering to start it) before
    setup continues.
    """

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)

    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        # Seed environment keys with safe defaults so later stages can rely
        # on their presence without KeyError.
        self.environment.setdefault(
            odockerccons.RemoveEnv.REMOVE_DCLIST,
            None
        )
        self.environment.setdefault(
            odockerccons.ConfigEnv.DOCKERC_DAEMON,
            None
        )
        self.environment.setdefault(
            odockerccons.ConfigEnv.DOCKERC_NEEDED,
            False
        )
        self.environment.setdefault(
            odockerccons.ConfigEnv.DOCKERC_CTAG,
            odockerccons.Const.DEFAULT_CTAG
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_SETUP,
    )
    def _setup(self):
        # Register this module so its attributes participate in setup
        # environment serialization.
        self.environment[
            osetupcons.CoreEnv.SETUP_ATTRS_MODULES
        ].append(odockerccons)

    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_PRODUCT_OPTIONS,
        ),
        after=(
            osetupcons.Stages.DIALOG_TITLES_S_PRODUCT_OPTIONS,
            odockerccons.Stages.DOCKERC_CUSTOMIZE,
            odockerccons.Stages.REMOVE_CUSTOMIZATION_DOCKERC,
        ),
        # Only runs when docker containers are needed (Cinder or Glance)
        # and we are not in developer mode.
        condition=lambda self: (
            self.environment[
                odockerccons.ConfigEnv.DOCKERC_NEEDED
            ] and
            (
                self.environment[
                    odockerccons.ConfigEnv.DOCKERC_CINDER
                ] or
                self.environment[
                    odockerccons.ConfigEnv.DOCKERC_GLANCE
                ]
            ) and
            not self.environment[
                osetupcons.CoreEnv.DEVELOPER_MODE
            ]
        ),
    )
    def _customization(self):
        # NOTE(review): DOCKER_SERVICE_NANE looks like a typo for
        # ...SERVICE_NAME, but the constant is defined in odockerccons and
        # must be referenced as spelled there.
        if self.services.status(
            name=odockerccons.Const.DOCKER_SERVICE_NANE,
        ):
            self.logger.info(_('Found a running docker daemon'))
        else:
            self.logger.info(_('Unable to find an active docker daemon'))
            if self.environment[
                odockerccons.ConfigEnv.DOCKERC_DAEMON
            ] is None:
                # Ask the user (interactive) whether to start docker now.
                self.environment[
                    odockerccons.ConfigEnv.DOCKERC_DAEMON
                ] = dialog.queryBoolean(
                    dialog=self.dialog,
                    name='OVESETUP_CONFIG_DOCKERC_DAEMON',
                    note=_(
                        'To continue with this setup the docker daemon '
                        'should be active.\n'
                        'Would you like to start it and continue with Setup? '
                        '(@VALUES@) [@DEFAULT@]: '
                    ),
                    prompt=True,
                    default=True,
                )
            if self.environment[
                odockerccons.ConfigEnv.DOCKERC_DAEMON
            ]:
                self.logger.info(_('Starting Docker'))
                self.services.state(
                    name=odockerccons.Const.DOCKER_SERVICE_NANE,
                    state=True,
                )
            else:
                # User refused: docker is mandatory for this setup path.
                raise RuntimeError(
                    _('Docker daemon is required to complete this setup')
                )
# vim: expandtab tabstop=4 shiftwidth=4
| 1.757813 | 2 |
promoterz/evaluationPool.py | Jeket/japonicus | 0 | 12767785 | <filename>promoterz/evaluationPool.py
#!/bin/python
import time
import random
import itertools
from multiprocessing import Pool, Process, Pipe, TimeoutError
from multiprocessing.pool import ThreadPool
def showIndividue(evaldata):
    """Format one evaluation record for display.

    evaldata is ((bestProfit, sharpe), numberOfTrades); all three are
    printed with three decimal places.  (PEP 8: a def instead of a lambda
    bound to a name.)
    """
    return "~ bP: %.3f\tS: %.3f\tnbT:%.3f" % (
        evaldata[0][0], evaldata[0][1], evaldata[1]
    )
class EvaluationPool():
    """Distributes fitness evaluations of GA individuals across a set of
    evaluation hosts (``Urls``), one multiprocessing Pool per host, and
    load-balances future batches by each host's measured speed."""

    def __init__(self, EvaluationTool, Urls, poolsize, individual_info):
        # EvaluationTool: callable(datasetSpec, dateRange, individual, url)
        # returning a stats dict with 'relativeProfit', 'sharpe', 'trades'.
        self.EvaluationTool = EvaluationTool
        self.Urls = Urls
        self.lasttimes = [0 for x in Urls]        # wall time of last batch, per host
        self.lasttimesperind = [0 for x in Urls]  # average time per individual, per host
        self.poolsizes = [poolsize for x in Urls]
        self.individual_info = individual_info    # verbose per-individual printing flag

    def ejectURL(self, Index):
        """Drop host *Index* (e.g. after a timeout) and its bookkeeping."""
        self.Urls.pop(Index)
        self.lasttimes.pop(Index)
        self.lasttimesperind.pop(Index)
        self.poolsizes.pop(Index)

    def evaluateBackend(self, datasetSpecification, DateRange, I, inds):
        """Evaluate *inds* on host *I* over every date range; returns
        (fitnesses, elapsed_seconds)."""
        stime = time.time()
        dateInds = list(itertools.product(DateRange, inds))
        # print(list(dateInds))
        Q = [
            (datasetSpecification, dateRange, Ind, self.Urls[I])
            for dateRange, Ind in dateInds
        ]
        P = Pool(self.poolsizes[I])
        fitnesses = P.starmap(self.EvaluationTool, Q)
        P.close()
        P.join()
        delta_time = time.time() - stime
        return fitnesses, delta_time

    def evaluatePopulation(self, locale):
        """Evaluate every individual in *locale* lacking a valid fitness.

        Work is split across hosts proportionally to past speed; hosts that
        time out are re-run locally and then ejected.  Returns
        (number_evaluated, average_trades).
        """
        individues_to_simulate = [
            ind for ind in locale.population if not ind.fitness.valid
        ]
        props = self.distributeIndividuals(individues_to_simulate)
        args = [
            [
                locale.World.EnvironmentParameters[0].specifications,
                [locale.DateRange],
                I,
                props[I],
            ]
            for I in range(len(self.Urls))
        ]
        # One thread per host so remote batches run concurrently.
        pool = ThreadPool(len(self.Urls))
        results = []
        for A in args:
            results.append(pool.apply_async(self.evaluateBackend, A))
        pool.close()
        TimedOut = []
        for A in range(len(results)):
            try:
                # Allow 3x the historical per-individual time (12s default).
                perindTime = 3 * self.lasttimesperind[A] if self.lasttimesperind[
                    A
                ] else 12
                timeout = perindTime * len(
                    props[A]
                ) if A else None  # no timeout for local machine;
                results[A] = results[A].get(timeout=timeout)
            except TimeoutError:  # Timeout: remote machine is dead, et al
                print("Machine timeouts!")
                # NOTE(review): this looks wrong — args[A][1] is the
                # DateRange list, not the URL index; args[A][2] = 0 was
                # probably intended to force local evaluation.  Confirm
                # before relying on this fallback path.
                args[A][1] = 0  # Set to evaluate @ local machine
                results[A] = self.evaluateBackend(* args[A])
                TimedOut.append(A)
        pool.join()
        TotalNumberOfTrades = 0
        for PoolIndex in range(len(results)):
            # Assign fitness values back onto the corresponding individuals.
            for i, fit in zip(range(len(results[PoolIndex][0])), results[PoolIndex][0]):
                if self.individual_info:
                    print(showIndividue(fit))
                props[PoolIndex][i].fitness.values = (
                    fit['relativeProfit'], fit['sharpe']
                )
                TotalNumberOfTrades += fit['trades']
            # Update the speed statistics used by distributeIndividuals.
            self.lasttimes[PoolIndex] = results[PoolIndex][1]
            L = len(props[PoolIndex])
            self.lasttimesperind[PoolIndex] = self.lasttimes[PoolIndex] / L if L else 5
        F = [x.fitness.valid for x in individues_to_simulate]
        assert (all(F))
        for T in TimedOut:
            self.ejectURL(T)
        N = len(individues_to_simulate)
        averageTrades = TotalNumberOfTrades / max(1, N)
        return N, averageTrades

    def distributeIndividuals(self, tosimulation):
        """Partition *tosimulation* across hosts, proportionally to each
        host's measured speed (1/last batch time); falls back to an even
        split when no timings exist yet."""
        nb_simulate = len(tosimulation)
        sumtimes = sum(self.lasttimes)
        # stdtime = sum(self.lasttimes)/len(self.lasttimes)
        std = nb_simulate / len(self.Urls)
        # stdTPI = sum(self.lasttimesperind)/len(self.lasttimesperind)
        #print(stdTPI)
        if sumtimes:
            vels = [1 / x for x in self.lasttimes]
            constant = nb_simulate / sum(vels)
            proportions = [max(1, x * constant) for x in vels]
        else:
            proportions = [std for x in self.Urls]
        proportions = [int(round(x)) for x in proportions]
        # NOTE(review): pC is unused.
        pC = lambda x: random.randrange(0, len(x))
        pB = lambda x: x.index(min(x))
        pM = lambda x: x.index(max(x))
        # Nudge the rounded proportions until they sum exactly to the
        # number of individuals to simulate.
        while sum(proportions) < nb_simulate:
            proportions[pB(proportions)] += 1
            print('+')
        while sum(proportions) > nb_simulate:
            proportions[pM(proportions)] -= 1
            print('-')
        print(proportions)
        assert (sum(proportions) == nb_simulate)
        distribution = []
        L = 0
        for P in proportions:
            distribution.append(tosimulation[L: L + P])
            L = L + P
        return distribution
examples/litescope_client.py | TrustedThings/litepuf | 0 | 12767786 | from itertools import product
from sys import stdout
import json
import ctypes
from litex.tools.litex_client import RemoteClient
from litescope.software.driver.analyzer import LiteScopeAnalyzerDriver
# Connect to the LiteX SoC bridge described by the generated CSR map.
wb = RemoteClient(csr_csv="test/csr.csv")
wb.open()

# Arm the on-chip logic analyzer: no subsampling, group 0, triggered on the
# rising edge of the PUF reset signal.
analyzer = LiteScopeAnalyzerDriver(wb.regs, "analyzer", debug=True, config_csv="test/analyzer.csv")
analyzer.configure_subsampler(1)  ## increase this to "skip" cycles, e.g. subsample
analyzer.configure_group(0)
# trigger conditions will depend upon each other in sequence
analyzer.add_rising_edge_trigger("puf_reset")
analyzer.run(offset=8, length=512)  ### CHANGE THIS TO MATCH DEPTH offset=32 by default

# Sweep every (cell0, cell1) oscillator-set pairing and sample the PUF
# comparator output ten times per pairing.
for i, j in product(range(2), repeat=2):
    wb.regs.teropuf_reset.write(1)  # enable reset
    wb.regs.teropuf_cell0_select.write(i)
    wb.regs.teropuf_cell1_select.write(j)
    wb.regs.teropuf_reset.write(0)  # disable reset
    print(f'Comparator from set {i} and {j}:')
    for _ in range(10):
        print(wb.regs.teropuf_bit_value.read())
        # Re-interpret the raw CSR value as a signed 32-bit integer.
        print(ctypes.c_int32(wb.regs.teropuf_bit_value.read()).value)

# Dump the captured waveform and release the bridge.
analyzer.wait_done()
analyzer.upload()
analyzer.save("test/dump.vcd")

wb.close()
| 2.171875 | 2 |
qcli/bsp2svg/api.py | LaudateCorpus1/quake-cli-tools | 42 | 12767787 | <filename>qcli/bsp2svg/api.py<gh_stars>10-100
from collections import namedtuple
from functools import lru_cache
from vgio.quake import bsp
def dot(v0, v1):
    """Dot product of the first three components of v0 and v1."""
    return sum(v0[i] * v1[i] for i in range(3))
def cross(v0, v1):
    """Right-handed cross product of two 3-vectors, returned as a tuple."""
    a1, a2, a3 = v0[0], v0[1], v0[2]
    b1, b2, b3 = v1[0], v1[1], v1[2]
    return (a2 * b3 - a3 * b2,
            a3 * b1 - a1 * b3,
            a1 * b2 - a2 * b1)
def subtract(v0, v1):
    """Component-wise difference v0 - v1 over the first three components."""
    return tuple(v0[i] - v1[i] for i in range(3))
__all__ = ['Bsp']
class Bsp(object):
    """Simplified, immutable view of a Quake BSP file: a list of Models,
    each holding processed Faces/Edges/Vertexes."""

    __slots__ = (
        'models'
    )

    def __init__(self, models):
        self.models = models

    @staticmethod
    def open(file):
        """Parse *file* with vgio's bsp reader and convert every bsp model
        into this module's lightweight Model/Face/Vertex objects."""
        bsp_file = bsp.Bsp.open(file)
        # NOTE(review): the file is closed before its members are read —
        # vgio appears to keep the parsed lumps in memory; confirm.
        bsp_file.close()

        def get_models():
            return [process_model(m) for m in bsp_file.models]

        def process_model(bsp_model):
            faces = get_faces(bsp_model)
            return Model(faces)

        def get_faces(bsp_model):
            # A model references a contiguous run of faces.
            start = bsp_model.first_face
            stop = start + bsp_model.number_of_faces
            face_range = range(start, stop)
            return [process_face(f) for f in face_range]

        @lru_cache(maxsize=None)
        def process_face(face_index):
            edges = get_edges(face_index)
            vertexes = get_vertexes(face_index)
            # NOTE(review): UV extraction is disabled here; get_uvs exists
            # below but is not called.
            uvs = []#get_uvs(face_index)
            plane = get_plane(face_index)
            texture_name = get_texture_name(face_index)
            return Face(vertexes, edges, uvs, plane, texture_name)

        @lru_cache(maxsize=None)
        def get_edges(face_index):
            bsp_face = bsp_file.faces[face_index]
            start = bsp_face.first_edge
            stop = start + bsp_face.number_of_edges
            es = bsp_file.surf_edges[start:stop]
            result = []
            for e in es:
                # Negative surf_edge indices mean the edge is traversed in
                # the reverse direction.
                v = bsp_file.edges[abs(e)].vertexes
                if e < 0:
                    v = list(reversed(v))
                v0 = process_vertex(v[0])
                v1 = process_vertex(v[1])
                result.append(Edge(v0, v1))
            return result

        @lru_cache(maxsize=None)
        def get_vertexes(face_index):
            # First vertex of each directed edge, in winding order.
            edges = get_edges(face_index)
            return [e.vertex_0 for e in edges]

        @lru_cache(maxsize=None)
        def process_vertex(index):
            # Caching makes repeated indices map to identical Vertex
            # objects, so identity-based set() dedup works downstream.
            bsp_vertex = bsp_file.vertexes[index]
            return Vertex(*bsp_vertex[:])

        @lru_cache(maxsize=None)
        def get_texture_name(face_index):
            # Any missing link in face -> texture_info -> miptexture
            # resolves to the empty string.
            if face_index == -1:
                return ''
            bsp_face = bsp_file.faces[face_index]
            if bsp_face.texture_info == -1:
                return ''
            tex_info = bsp_file.texture_infos[bsp_face.texture_info]
            miptex = bsp_file.miptextures[tex_info.miptexture_number]
            if not miptex:
                return ''
            return miptex.name

        @lru_cache(maxsize=None)
        def get_uvs(face_index):
            """Project each face vertex onto the texture's s/t axes,
            normalized by the miptexture size (t axis flipped)."""
            bsp_face = bsp_file.faces[face_index]
            vertexes = get_vertexes(face_index)
            texture_info = bsp_file.texture_infos[bsp_face.texture_info]
            miptex = bsp_file.miptextures[texture_info.miptexture_number]
            s = texture_info.s
            ds = texture_info.s_offset
            t = texture_info.t
            dt = texture_info.t_offset
            w = miptex.width
            h = miptex.height
            uvs = []
            for v in vertexes:
                v = v[:]
                uv = (dot(v, s) + ds) / w, -(dot(v, t) + dt) / h
                uvs.append(uv)
            return uvs

        @lru_cache(maxsize=None)
        def get_plane(face_index):
            bsp_face = bsp_file.faces[face_index]
            return bsp_file.planes[bsp_face.plane_number]

        models = get_models()
        result = Bsp(models)
        return result
class Model(object):
    """A BSP model: a list of Face objects plus lazily-computed unique
    vertex and edge collections.

    The original cached ``vertexes``/``edges`` with ``@lru_cache`` on
    instance methods, which keys the cache on ``self`` and keeps every
    Model alive for the interpreter's lifetime (flake8-bugbear B019).
    Slot-backed lazy caches give the same compute-once behavior without
    the leak.
    """

    __slots__ = (
        'faces',
        '_vertexes',  # lazily computed cache for the vertexes property
        '_edges',     # lazily computed cache for the edges property
    )

    def __init__(self, faces):
        self.faces = faces
        self._vertexes = None
        self._edges = None

    @property
    def vertexes(self):
        """Unique vertexes across all faces (computed once, then cached)."""
        if self._vertexes is None:
            self._vertexes = list({v for f in self.faces for v in f.vertexes})
        return self._vertexes

    @property
    def edges(self):
        """Unique edges across all faces (computed once, then cached)."""
        if self._edges is None:
            self._edges = list({e for f in self.faces for e in f.edges})
        return self._edges
class Face(object):
    """A polygon face: its vertex/edge/uv lists plus the containing plane
    and the name of the texture applied to it."""

    __slots__ = ('vertexes', 'edges', 'uvs', 'plane', 'texture_name')

    def __init__(self, vertexes, edges, uvs, plane, texture_name):
        self.plane = plane
        self.texture_name = texture_name
        self.vertexes = vertexes
        self.edges = edges
        self.uvs = uvs
Edge = namedtuple('Edge', ['vertex_0', 'vertex_1'])
class Vertex(object):
    """A 3-D vertex supporting attribute and indexed access
    (v[0] == v.x, v[1] == v.y, v[2] == v.z; slices return lists)."""

    __slots__ = ('x', 'y', 'z')

    def __init__(self, x, y, z):
        self.x, self.y, self.z = x, y, z

    def __getitem__(self, item):
        coords = [self.x, self.y, self.z]
        return coords[item]
directory_client_core/helpers.py | fericsepi/directory-client-core | 0 | 12767788 | from functools import wraps
import json
import logging
from urllib.parse import urlencode
import requests
from requests.exceptions import HTTPError, RequestException
from w3lib.url import canonicalize_url
from django.conf import settings
from directory_client_core.cache_control import ETagCacheControl
logger = logging.getLogger(__name__)

# User-facing log messages emitted by the fallback-cache decorator below.
MESSAGE_CACHE_HIT = 'Fallback cache hit. Using cached content.'
MESSAGE_CACHE_MISS = 'Fallback cache miss. Cannot use any content.'
MESSAGE_NOT_FOUND = 'Resource not found.'
class ThrottlingFilter(logging.Filter):
    """
    Filters out records that have been seen within the past <period of time>
    thereby reducing noise.

    How this works:
        - with `cache.add` the entry is stored only if the key is not yet
          present in the cache
        - cache.add returns True if the entry is stored, otherwise False
        - these cache entries expire after <period of time>.

    Therefore `filter` returns True if the key hasn't been seen in the past
    <period of time>, and False if it has. The logger takes this to mean
    "don't log this"
    """

    def __init__(self, cache):
        # Fix: call the base initializer so logging.Filter's attributes
        # (e.g. ``name``) are set up.
        super().__init__()
        self.cache = cache
        self.timeout_in_seconds = getattr(
            settings,
            'DIRECTORY_CLIENT_CORE_CACHE_LOG_THROTTLING_SECONDS',
            None
        ) or 60 * 60 * 24  # default 24 hours

    def create_cache_key(self, record):  # fix: parameter was misspelled 'sef'
        """Dedup key combining the formatted message and the record's url."""
        return f'noise-{record.getMessage()}-{record.url}'

    def filter(self, record):
        """Return True (log it) only the first time a key is seen within
        the throttling window."""
        key = self.create_cache_key(record)
        return self.cache.add(key, '', timeout=self.timeout_in_seconds)
class PopulateResponseMixin:
    """Mixin providing an alternate constructor that clones another
    response's pickle state into a fresh instance of this class."""

    @classmethod
    def from_response(cls, raw_response):
        clone = cls()
        clone.__setstate__(raw_response.__getstate__())
        return clone
class LiveResponse(PopulateResponseMixin, requests.Response):
    """Marker subclass: a response served directly from the remote API."""
    pass
class FailureResponse(PopulateResponseMixin, requests.Response):
    """Marker subclass: a non-OK upstream response with no cached fallback."""
    pass
class CacheResponse(requests.Response):
    """Response reconstructed from previously cached body content."""

    @classmethod
    def from_cached_content(cls, cached_content):
        # Synthesize a successful response around the cached payload; 200 is
        # correct because the bytes were stored from a successful response.
        response = cls()
        response.status_code = 200
        response._content = cached_content
        return response
def fallback(cache):
    """
    Caches content retrieved by the client, thus allowing the cached
    content to be used later if the live content cannot be retrieved.

    Returns a decorator for client GET methods.  The wrapped method gains
    ETag-based conditional requests plus a cache fallback for network
    failures and non-OK upstream responses; throttled error logging avoids
    repeating the same message within the configured window.
    """
    log_filter = ThrottlingFilter(cache=cache)
    logger.filters = []
    logger.addFilter(log_filter)

    def get_cache_control(cached_content):
        # Build an If-None-Match header from the etag stored alongside the
        # cached payload, enabling 304 responses from upstream.
        if cached_content:
            parsed = json.loads(cached_content.decode())
            if 'etag' in parsed:
                return ETagCacheControl(f'"{parsed["etag"]}"')

    def closure(func):
        @wraps(func)
        def wrapper(client, url, params={}, *args, **kwargs):
            # Canonical URL (with query string) keys the cache entry.
            cache_key = canonicalize_url(url + '?' + urlencode(params))
            cached_content = cache.get(cache_key, {})
            try:
                response = func(
                    client,
                    url=url,
                    params=params,
                    cache_control=get_cache_control(cached_content),
                    *args,
                    **kwargs,
                )
            except RequestException:
                # Failed to create the request e.g., the remote server is down,
                # perhaps a timeout occurred, or even connection closed by
                # remote, etc.
                if cached_content:
                    logger.error(MESSAGE_CACHE_HIT, extra={'url': url})
                    return CacheResponse.from_cached_content(cached_content)
                else:
                    raise
            else:
                log_context = {'status_code': response.status_code, 'url': url}
                if response.status_code == 404:
                    logger.error(MESSAGE_NOT_FOUND, extra=log_context)
                    return LiveResponse.from_response(response)
                elif response.status_code == 304:
                    # Upstream confirms our cached copy is still current.
                    return CacheResponse.from_cached_content(cached_content)
                elif not response.ok:
                    # Successfully requested the content, but the response is
                    # not OK (e.g., 500, 403, etc)
                    if cached_content:
                        logger.error(MESSAGE_CACHE_HIT, extra=log_context)
                        return CacheResponse.from_cached_content(cached_content)
                    else:
                        logger.exception(MESSAGE_CACHE_MISS, extra=log_context)
                        return FailureResponse.from_response(response)
                else:
                    # Fresh success: refresh the cache and return it live.
                    cache.set(
                        cache_key,
                        response.content,
                        settings.DIRECTORY_CLIENT_CORE_CACHE_EXPIRE_SECONDS
                    )
                    return LiveResponse.from_response(response)
            # Defensive guard: every branch above returns or raises.
            raise NotImplementedError('unreachable')
        return wrapper
    return closure
| 2.171875 | 2 |
PythonExercicios/ex033.py | marcoantonio97/Curso-de-Python | 0 | 12767789 | <gh_stars>0
# Read three numbers and report the largest and the smallest.
n1 = float(input('Primeiro número: '))
n2 = float(input('Segundo número: '))
n3 = float(input('Terceiro número: '))
# The builtins handle ties correctly; the original chain of strict
# comparisons missed the extreme when two values were equal (e.g. with
# inputs 5, 3, 3 it reported 5 as the smallest).
maior = max(n1, n2, n3)
menor = min(n1, n2, n3)
print('{} é o maior'.format(maior))
print('{} é o menor'.format(menor))
| 3.953125 | 4 |
demo/quadruped.py | jviereck/pinocchio_bullet | 0 | 12767790 | #///////////////////////////////////////////////////////////////////////////////
#// BSD 3-Clause License
#//
#// Copyright (C) 2018-2019, New York University , Max Planck Gesellschaft
#// Copyright note valid unless otherwise stated in individual files.
#// All rights reserved.
#///////////////////////////////////////////////////////////////////////////////
# brief Example for using the PinBulletWrapper for a quadruped robot.
from __future__ import print_function
import os
import rospkg
import numpy as np
import time
import robot_properties_solo
from robot_properties_solo.config import SoloConfig
import pybullet as p
import pinocchio as se3
from pinocchio.utils import zero
from py_pinocchio_bullet.wrapper import PinBulletWrapper
class QuadrupedRobot(PinBulletWrapper):
    """PyBullet simulation of the Solo quadruped wired to a Pinocchio model
    through PinBulletWrapper."""

    def __init__(self, physicsClient=None):
        # NOTE(review): a caller-provided physicsClient is never stored;
        # only the None branch assigns self.physicsClient.  Confirm intent.
        if physicsClient is None:
            self.physicsClient = p.connect(p.DIRECT)
            p.setGravity(0, 0, -9.81)
            p.setPhysicsEngineParameter(fixedTimeStep=1.0/1000.0, numSubSteps=1)

        # Load the plain.
        plain_urdf = (rospkg.RosPack().get_path("robot_properties_solo") +
            "/urdf/plane_with_restitution.urdf")
        self.planeId = p.loadURDF(plain_urdf)

        # Load the robot
        robotStartPos = [0., 0, 0.40]
        robotStartOrientation = p.getQuaternionFromEuler([0, 0, 0])

        self.urdf_path = SoloConfig.urdf_path
        self.robotId = p.loadURDF(self.urdf_path, robotStartPos,
            robotStartOrientation, flags=p.URDF_USE_INERTIA_FROM_FILE,
            useFixedBase=False)
        p.getBasePositionAndOrientation(self.robotId)

        # Create the robot wrapper in pinocchio.
        # NOTE(review): package_dirs is computed but unused.
        package_dirs = [os.path.dirname(os.path.dirname(self.urdf_path)) + '/urdf']
        self.pin_robot = SoloConfig.buildRobotWrapper()

        # Query all the joints.
        num_joints = p.getNumJoints(self.robotId)
        for ji in range(num_joints):
            # Light damping plus friction/restitution tuning per joint.
            p.changeDynamics(self.robotId, ji, linearDamping=.04,
                angularDamping=0.04, restitution=0.0, lateralFriction=0.5)

        self.base_link_name = "base_link"
        self.joint_names = ['FL_HFE', 'FL_KFE', 'FR_HFE', 'FR_KFE', 'HL_HFE',
                            'HL_KFE', 'HR_HFE', 'HR_KFE']
        controlled_joints = ['FL_HFE', 'FL_KFE', 'FR_HFE', 'FR_KFE', 'HL_HFE',
                             'HL_KFE', 'HR_HFE', 'HR_KFE']

        # Creates the wrapper by calling the super.__init__.
        super(QuadrupedRobot, self).__init__(self.robotId, self.pin_robot,
            controlled_joints,
            ['FL_ANKLE', 'FR_ANKLE', 'HL_ANKLE', 'HR_ANKLE']
        )
if __name__ == "__main__":
np.set_printoptions(precision=2, suppress=True)
# Setup pybullet for the quadruped and a wrapper to pinocchio.
quad = QuadrupedRobot()
# Get the current state and modify the joints to have the legs
# bend inwards.
q, dq = quad.get_state()
q[7] = q[9] = 0.8
q[11] = q[13] = -0.8
q[8] = q[10] = -1.6
q[12] = q[14] = 1.6
# Take the initial joint states as desired state.
q_des = q[7:].copy()
# Update the simulation state to the new initial configuration.
quad.reset_state(q, dq)
# Run the simulator for 2000 steps = 2 seconds.
for i in range(2000):
# Get the current state (position and velocity)
q, dq = quad.get_state()
active_contact_frames, contact_forces = quad.get_force()
# Alternative, if you want to use properties from the pinocchio robot
# like the jacobian or similar, you can also get the state and update
# the pinocchio internals with one call:
#
# q, dq = quad.get_state_update_pinocchio()
if i % 100 == 0:
print('Forces:', active_contact_frames, contact_forces)
# Compute the command torques at the joints. The torque
# vector only takes the actuated joints (excluding the base)
tau = 5. * (q_des - q[7:]) - 0.1 * dq[6:]
# Send the commands to the robot.
quad.send_joint_command(tau)
# Step the simulator and sleep.
p.stepSimulation()
time.sleep(0.001)
# Print the final active force frames and the forces
force_frames, forces = quad.get_force()
print("Active force_frames:", force_frames)
print("Corresponding forces:", forces)
| 2.359375 | 2 |
src/axom/sidre/interface/c_fortran/genfsidresplicer.py | bmhan12/axom | 86 | 12767791 | # Copyright (c) 2017-2021, Lawrence Livermore National Security, LLC and
# other Axom Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: (BSD-3-Clause)
#
# Routines to generate splicers for wrappers.
# Used to generate several variations of a routine for Fortran.
# Similar to templates in C++.
#
from __future__ import print_function
import sys
# types to use for generic routines
# types to use for generic routines
# Each entry: (name suffix used in generated procedures,
#              Fortran type declaration, Sidre type-id constant).
types = (
    ( 'int', 'integer(C_INT)', 'SIDRE_INT_ID'),
    ( 'long', 'integer(C_LONG)', 'SIDRE_LONG_ID'),
    ( 'float', 'real(C_FLOAT)', 'SIDRE_FLOAT_ID'),
    ( 'double', 'real(C_DOUBLE)', 'SIDRE_DOUBLE_ID'),
)

# maximum number of dimensions of generic routines
maxdims = 4
def XXnum_metabuffers():
    """Total number of generated variants: one per type for each rank,
    plus one scalar variant per type."""
    return (maxdims + 1) * len(types)  # include scalars
######################################################################
def group_get_scalar(d):
    """Create methods on Group to get a scalar.

    *d* supplies the template fields: ``typename`` (suffix used in the
    generated procedure name) and ``f_type`` (Fortran declaration of the
    value argument).  Returns the generated Fortran source as a string.
    """
    return """
! Generated by genfsidresplicer.py
subroutine group_get_scalar_{typename}(grp, name, value)
    use iso_c_binding
    class(SidreGroup), intent(IN) :: grp
    character(*), intent(IN) :: name
    {f_type}, intent(OUT) :: value
    integer(C_INT) :: lname
    type(SIDRE_SHROUD_view_capsule) view
    type(C_PTR) viewptr
    lname = len_trim(name)
    viewptr = c_group_get_view_from_name_bufferify(grp%cxxmem, name, lname, view)
    value = c_view_get_data_{typename}(view)
end subroutine group_get_scalar_{typename}""".format(**d)
def group_set_scalar(d):
    """Create methods on Group to set a scalar.

    *d* supplies ``typename`` and ``f_type`` template fields; returns the
    generated Fortran source as a string.
    """
    return """
! Generated by genfsidresplicer.py
subroutine group_set_scalar_{typename}(grp, name, value)
    use iso_c_binding
    class(SidreGroup), intent(IN) :: grp
    character(*), intent(IN) :: name
    {f_type}, intent(IN) :: value
    integer(C_INT) :: lname
    type(SIDRE_SHROUD_view_capsule) view
    type(C_PTR) viewptr
    lname = len_trim(name)
    viewptr = c_group_get_view_from_name_bufferify(grp%cxxmem, name, lname, view)
    call c_view_set_scalar_{typename}(view, value)
end subroutine group_set_scalar_{typename}""".format(**d)
def group_create_array_view(d):
    """Generate Group%create_array_view for one (type, rank) combination.

    Template fields in *d*: typename, nd (dimension suffix), f_type,
    shape (dummy-argument shape), rank, sidre_type, lower_bound.
    """
    # typename - part of function name
    # nd - number of dimensions
    # f_type - fortran type
    # shape - :,:, to match nd
    if d['rank'] == 0:
        # Scalars are described as a 1-element extent.
        extents_decl = 'extents(1)'
        extents_asgn = 'extents(1) = 1_SIDRE_IndexType'
    else:
        extents_decl = 'extents(%d)' % d['rank']
        extents_asgn = 'extents = shape(value, kind=SIDRE_IndexType)'

    return """
! Generated by genfsidresplicer.py
function group_create_array_view_{typename}{nd}(grp, name, value) result(rv)
    use iso_c_binding
    implicit none
    class(SidreGroup), intent(IN) :: grp
    character(*), intent(IN) :: name
    {f_type}, target, intent(IN) :: value{shape}
    integer(C_INT) :: lname
    type(SidreView) :: rv
    integer(SIDRE_IndexType) :: {extents_decl}
    integer(C_INT), parameter :: type = {sidre_type}
    type(C_PTR) addr, viewptr
    lname = len_trim(name)
#ifdef USE_C_LOC_WITH_ASSUMED_SHAPE
    addr = c_loc(value)
#else
    call SIDRE_C_LOC(value{lower_bound}, addr)
#endif
    {extents_asgn}
    viewptr = c_group_create_view_external_bufferify( &
        grp%cxxmem, name, lname, addr, rv%cxxmem)
    call c_view_apply_type_shape(rv%cxxmem, type, {rank}, extents)
end function group_create_array_view_{typename}{nd}""".format(
        extents_decl=extents_decl,
        extents_asgn=extents_asgn, **d)
def group_set_array_data_ptr(d):
    """
    call view%set_external_data_ptr
    hide c_loc call and add target attribute

    Generates a Group method for one (type, rank) combination; template
    fields in *d* are as in group_create_array_view.
    """
    # XXX - should this check the type/shape of value against the view?
    # typename - part of function name
    # nd - number of dimensions
    # f_type - fortran type
    # shape - :,:, to match nd
    if d['rank'] == 0:
        extents_decl = 'extents(1)'
        extents_asgn = 'extents(1) = 1_SIDRE_IndexType'
    else:
        extents_decl = 'extents(%d)' % d['rank']
        extents_asgn = 'extents = shape(value, kind=SIDRE_IndexType)'

    return """
! Generated by genfsidresplicer.py
! This function does nothing if view name does not exist in group.
subroutine group_set_array_data_ptr_{typename}{nd}(grp, name, value)
    use iso_c_binding
    implicit none
    class(SidreGroup), intent(IN) :: grp
    character(len=*), intent(IN) :: name
    {f_type}, target, intent(IN) :: value{shape}
    integer(C_INT) :: lname
    type(SIDRE_SHROUD_view_capsule) view
!    integer(SIDRE_IndexType) :: {extents_decl}
!    integer(C_INT), parameter :: type = {sidre_type}
    type(C_PTR) addr, viewptr
    lname = len_trim(name)
!    {extents_asgn}
    viewptr = c_group_get_view_from_name_bufferify(grp%cxxmem, name, lname, view)
    if (c_associated(view%addr)) then
#ifdef USE_C_LOC_WITH_ASSUMED_SHAPE
        addr = c_loc(value)
#else
        call SIDRE_C_LOC(value{lower_bound}, addr)
#endif
        call c_view_set_external_data_ptr_only(view, addr)
!        call c_view_apply_type_shape(rv%cxxmem, type, {rank}, extents)
    endif
end subroutine group_set_array_data_ptr_{typename}{nd}""".format(
        extents_decl=extents_decl,
        extents_asgn=extents_asgn, **d)
def view_set_array_data_ptr(d):
    """
    call view%set_external_data_ptr
    hide c_loc call and add target attribute

    Generates a View method for one (type, rank) combination; template
    fields in *d* are as in group_create_array_view.
    """
    # XXX - should this check the type/shape of value against the view?
    # typename - part of function name
    # nd - number of dimensions
    # f_type - fortran type
    # shape - :,:, to match nd
    if d['rank'] == 0:
        extents_decl = 'extents(1)'
        extents_asgn = 'extents(1) = 1_SIDRE_IndexType'
    else:
        extents_decl = 'extents(%d)' % d['rank']
        extents_asgn = 'extents = shape(value, kind=SIDRE_IndexType)'

    return """
! Generated by genfsidresplicer.py
subroutine view_set_array_data_ptr_{typename}{nd}(view, value)
    use iso_c_binding
    implicit none
    class(SidreView), intent(IN) :: view
    {f_type}, target, intent(IN) :: value{shape}
!    integer(SIDRE_IndexType) :: {extents_decl}
!    integer(C_INT), parameter :: type = {sidre_type}
    type(C_PTR) addr
!    lname = len_trim(name)
!    {extents_asgn}
#ifdef USE_C_LOC_WITH_ASSUMED_SHAPE
    addr = c_loc(value)
#else
    call SIDRE_C_LOC(value{lower_bound}, addr)
#endif
    call c_view_set_external_data_ptr_only(view%cxxmem, addr)
!    call c_view_apply_type_shape(rv%cxxmem, type, {rank}, extents)
end subroutine view_set_array_data_ptr_{typename}{nd}""".format(
        extents_decl=extents_decl,
        extents_asgn=extents_asgn, **d)
def print_get_data(d):
    """Generate View%get_data for one (type, rank) combination.

    The scalar (rank 0) variant maps the view's pointer to a scalar; the
    array variant also queries the view's shape.  Both apply the view's
    element offset via emulated pointer arithmetic.
    """
    # typename - part of function name
    # nd - number of dimensions
    # f_type - fortran type
    # shape - :,:, to match nd
    if d['rank'] == 0:
        return """
! Generated by genfsidresplicer.py
subroutine view_get_data_{typename}{nd}{suffix}(view, value)
    use iso_c_binding
    implicit none
    class(SidreView), intent(IN) :: view
    {f_type}, pointer, intent(OUT) :: value{shape}
    {f_type}, pointer :: tmp(:)
    type(C_PTR) cptr
    integer(SIDRE_IndexType) :: offset
    cptr = view%get_void_ptr()
    if (c_associated(cptr)) then
        offset = view%get_offset()
        if (offset > 0) then
            call c_f_pointer(cptr, tmp, [offset+1]) ! +1 to convert 0-based offset to 1-based index
            cptr = c_loc(tmp(offset+1)) ! Emulates pointer arithmetic
        endif
        call c_f_pointer(cptr, value)
    else
        nullify(value)
    endif
end subroutine view_get_data_{typename}{nd}{suffix}""".format(**d)
    else:
        return """
! Generated by genfsidresplicer.py
subroutine view_get_data_{typename}{nd}{suffix}(view, value)
    use iso_c_binding
    implicit none
    class(SidreView), intent(IN) :: view
    {f_type}, pointer, intent(OUT) :: value{shape}
    {f_type}, pointer :: tmp(:)
    type(C_PTR) cptr
    integer rank
    integer(SIDRE_IndexType) extents({rank})
    integer(SIDRE_IndexType) :: offset
    cptr = view%get_void_ptr()
    if (c_associated(cptr)) then
        offset = view%get_offset()
        if (offset > 0) then
            call c_f_pointer(cptr, tmp, [offset+1]) ! +1 to convert 0-based offset to 1-based index
            cptr = c_loc(tmp(offset+1)) ! Emulates pointer arithmetic
        endif
        rank = view%get_shape({rank}, extents)
        call c_f_pointer(cptr, value, extents)
    else
        nullify(value)
    endif
end subroutine view_get_data_{typename}{nd}{suffix}""".format(**d)
class AddMethods(object):
    """Build the source lines that attach generic methods to a derived type.

    Expands every registered method over all types and ranks:

        procedure :: {stem}_{typename}{nd}{suffix} => {wrap_class}_{stem}_{typename}{nd}{suffix}
        generic :: {stem} => &
            gen1, &
            genn
    """
    def __init__(self, wrap_class):
        self.wrap_class = wrap_class
        self.lines = []
        self.methods = []

    @staticmethod
    def type_bound_procedure_part(d):
        # One "procedure :: ..." declaration for a single type/rank combo.
        fmt = 'procedure :: {stem}_{typename}{nd}{suffix} => {wrap_class}_{stem}_{typename}{nd}{suffix}'
        return fmt.format(**d)

    @staticmethod
    def type_bound_procedure_generic(d):
        # The specific-procedure name referenced by the generic interface.
        return '{stem}_{typename}{nd}{suffix}'.format(**d)

    def add_method(self, stem, fcn, scalar=False, **kwargs):
        """Register a generic method; expanded later by gen_type_bound/gen_body."""
        self.methods.append((stem, fcn, scalar, kwargs))

    def gen_type_bound(self):
        """Return the type-bound-procedure section for all registered methods."""
        out = []
        for stem, fcn, scalar, kwargs in self.methods:
            fmt_args = dict(
                wrap_class=self.wrap_class,
                stem=stem,
            )
            fmt_args.update(kwargs)
            specifics = []
            foreach_type(out, AddMethods.type_bound_procedure_part, scalar=scalar, **fmt_args)
            foreach_type(specifics, AddMethods.type_bound_procedure_generic, scalar=scalar, **fmt_args)
            out.append('generic :: {stem} => &'.format(stem=stem))
            out.extend(' ' + name + ', &' for name in specifics[:-1])
            out.append(' ' + specifics[-1])
        return out

    def gen_body(self):
        """Return the implementation bodies for all registered methods."""
        out = []
        for stem, fcn, scalar, kwargs in self.methods:
            foreach_type(out, fcn, scalar=scalar, **kwargs)
        return out
def foreach_type(lines, fcn, scalar=False, **kwargs):
    """Invoke *fcn* once per (type, rank) combination, appending to *lines*.

    kwargs - additional values merged into the format dictionary.
    When *scalar* is true the rank-0 variant gets no name suffix,
    otherwise it is suffixed with ``_scalar``.
    """
    # Pre-built pieces reused for every rank: assumed-shape specs and the
    # matching lower-bound expressions.
    shape = [':'] * (maxdims + 1)
    lbound = ['lbound(value,%d)' % (i + 1) for i in range(maxdims + 1)]
    d = dict(
        suffix=''  # suffix of function name
    )
    d.update(kwargs)
    indx = 0
    for typename, f_type, sidre_type in types:
        d['typename'] = typename
        d['f_type'] = f_type
        d['sidre_type'] = sidre_type
        # rank-0 (scalar pointer) variant
        d['index'] = indx
        indx += 1
        d['rank'] = 0
        d['shape'] = ''
        d['lower_bound'] = ''
        d['nd'] = '' if scalar else '_scalar'
        lines.append(fcn(d))
        # rank 1 .. maxdims variants
        for rank in range(1, maxdims + 1):
            d['index'] = indx
            indx += 1
            d['rank'] = rank
            d['nd'] = '_%dd' % rank
            d['shape'] = '(' + ','.join(shape[:rank]) + ')'
            d['lower_bound'] = '(' + ','.join(lbound[:rank]) + ')'
            lines.append(fcn(d))
#----------------------------------------------------------------------
def group_string():
    """Text for functions with get and set strings for a group.

    get_string => grp->getView(name)->getString()
    set_string => grp->getView(name)->setString()

    The returned text is pasted verbatim into the generated Fortran module.
    """
    return """
subroutine group_get_string(grp, name, value)
use iso_c_binding
class(SidreGroup), intent(IN) :: grp
character(*), intent(IN) :: name
character(*), intent(OUT) :: value
integer(C_INT) :: lname
type(SIDRE_SHROUD_view_capsule) view
type(C_PTR) viewptr
lname = len_trim(name)
viewptr = c_group_get_view_from_name_bufferify(grp%cxxmem, name, lname, view)
call c_view_get_string_bufferify(view, value, len(value, kind=C_INT))
end subroutine group_get_string
subroutine group_set_string(grp, name, value)
use iso_c_binding
class(SidreGroup), intent(IN) :: grp
character(*), intent(IN) :: name
character(*), intent(IN) :: value
integer(C_INT) :: lname
type(SIDRE_SHROUD_view_capsule) view
type(C_PTR) viewptr
lname = len_trim(name)
viewptr = c_group_get_view_from_name_bufferify(grp%cxxmem, name, lname, view)
call c_view_set_string_bufferify(view, value, len_trim(value, kind=C_INT))
end subroutine group_set_string
"""
#----------------------------------------------------------------------
def gen_fortran():
    """Generate splicers used by Shroud.

    Writes the type-bound-procedure declarations and additional functions
    for the Group and View classes to stdout, wrapped in splicer markers
    so Shroud can paste them into the generated wrappers.
    """
    print('! Generated by genfsidresplicer.py')
    # Group: per-type/rank generics plus the hand-written string helpers.
    t = AddMethods('group')
    t.add_method('get_scalar', group_get_scalar, True)
    t.add_method('set_scalar', group_set_scalar, True)
    t.add_method('create_array_view', group_create_array_view)
    t.add_method('set_array_data_ptr', group_set_array_data_ptr)
    print('! splicer begin class.Group.type_bound_procedure_part')
    for line in t.gen_type_bound():
        print(line)
    print('procedure :: get_string => group_get_string')
    print('procedure :: set_string => group_set_string')
    print('! splicer end class.Group.type_bound_procedure_part')
    print()
    print('------------------------------------------------------------')
    print()
    print('! splicer begin class.Group.additional_functions')
    for line in t.gen_body():
        print(line)
    print(group_string())
    print('! splicer end class.Group.additional_functions')
    # View: data accessors and external-pointer setters.
    t = AddMethods('view')
    t.add_method('get_data', print_get_data, suffix='_ptr')
    t.add_method('set_array_data_ptr', view_set_array_data_ptr)
    print('! splicer begin class.View.type_bound_procedure_part')
    for line in t.gen_type_bound():
        print(line)
    print('! splicer end class.View.type_bound_procedure_part')
    print()
    print('------------------------------------------------------------')
    print()
    print('! splicer begin class.View.additional_functions')
    for line in t.gen_body():
        print(line)
    print('! splicer end class.View.additional_functions')
######################################################################
if __name__ == '__main__':
    # Single positional argument selects the mode: 'fortran' emits the
    # Shroud splicers, 'test' exercises the allocatable generator.
    try:
        cmd = sys.argv[1]
    except IndexError:
        raise RuntimeError("Missing command line argument")
    if cmd == 'fortran':
        # fortran splicers
        gen_fortran()
    elif cmd == 'test':
        # NOTE(review): AllocateAllocatable is defined elsewhere in this
        # file; here it is simply driven with print as the sink.
        AllocateAllocatable(print)
    else:
        raise RuntimeError("Unknown command")
| 1.929688 | 2 |
leveesim/util.py | jdossgollin/leveesim | 0 | 12767792 | <gh_stars>0
"""
Useful functions
These generally fall into three categories:
* those for working with stan / pystan
* those for managing the file system
* those for building the GEV distribution. Although scipy provides a good
implementation of the GEV distribution, it's not compatible with numba, and
so in order to use numba I have to implement my own.
"""
from glob import glob
from hashlib import md5
import pickle
import os
from typing import Any, Tuple
from numba import jit
import numpy as np
from pystan import StanModel
# Package root (the directory containing this module) and its data folder.
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = os.path.join(ROOT_DIR, "data")
# Cache directory lives under the *current working directory*, not the package.
CACHE_DIR = os.path.abspath(os.path.join(os.getcwd(), "leveesim-cache"))
def clean_cache() -> None:
    """
    Clean up the cached files

    Removes every file in the cache directory and then the directory
    itself. Best effort: on the first OS error (e.g. the directory does
    not exist) the remaining cleanup is silently skipped.

    :returns: None
    """
    try:
        for cached_file in glob(os.path.join(CACHE_DIR, "*")):
            os.remove(cached_file)
        os.rmdir(CACHE_DIR)
    except OSError:
        pass
def ensure_folder(dirname: str) -> None:
    """
    Ensure that a folder exists, and if not create it (including parents).

    Uses ``os.makedirs(..., exist_ok=True)`` instead of check-then-create,
    which removes the race where another process creates the directory
    between the ``isdir`` check and the ``makedirs`` call.

    :param dirname: the path whose existence to ensure
    :returns: None
    """
    os.makedirs(dirname, exist_ok=True)
def hash_string(input_string: str) -> str:
    """
    Create a unique (and short) hex MD5 hash for a given string.

    This string might be the code of a stan model or the parameters
    of some parameter object.

    :param input_string: the string to hash
    :returns: the hashed string
    """
    digest = md5(input_string.encode("ascii"))
    return digest.hexdigest()
def safe_pkl_dump(obj: Any, fname: str) -> None:
    """
    Dump an object to a pickle file.

    If a file with the same name exists, it is overwritten.
    If the parent directory does not exist, it is created first.

    :param obj: The object to be saved to file
    :param fname: The full filename, including path
    :returns: None
    """
    # Create the parent directory if needed before writing.
    ensure_folder(os.path.dirname(fname))
    with open(fname, "wb") as out_file:
        pickle.dump(obj, out_file)
def compile_model(
    filename: str, model_name: str = "anon_model", cache_dir: str = CACHE_DIR
) -> StanModel:
    """Compile a stan model only if it hasn't already been compiled.

    Compiled models are cached on disk, keyed on a hash of the model code,
    so repeated script runs skip the (slow) compilation step.

    :param filename: path to the .stan source file
    :param model_name: name given to the compiled model
    :param cache_dir: directory holding cached compiled models
    :returns: the compiled StanModel
    """
    ensure_folder(cache_dir)
    with open(filename) as file:
        model_code = file.read()
    # Reuse the module's hash helper instead of duplicating the md5 logic.
    code_hash = hash_string(model_code)
    cache_fn = os.path.join(cache_dir, f"cached-{model_name}-{code_hash}.pkl")
    try:
        # Close the cache file promptly instead of leaking the handle
        # (the original called pickle.load(open(...)) without closing).
        with open(cache_fn, "rb") as cached:
            smodel = pickle.load(cached)
    except Exception:
        # Cache miss or unreadable/stale cache entry: recompile and re-cache.
        # (Exception, not a bare except, so KeyboardInterrupt still escapes.)
        smodel = StanModel(model_code=model_code, model_name=model_name)
        safe_pkl_dump(obj=smodel, fname=cache_fn)
    return smodel
@jit
def qgev(p: float, loc: float, scale: float, shape: float) -> float:
    """
    Quantile function (inverse CDF) of a GEV distribution with the
    given location, scale, and shape parameters.
    """
    assert p >= 0.0
    assert p <= 1.0
    neg_log_p = -1 * np.log(p)
    if shape == 0.0:
        # Gumbel limit of the GEV family.
        return loc - scale * np.log(neg_log_p)
    return loc + scale * (np.power(neg_log_p, -1 * shape) - 1) / shape
@jit
def random_gev(loc: float, scale: float, shape: float) -> float:
    """
    Sample a single random GEV value via inverse-CDF (probability
    integral transform) sampling.
    """
    u = np.random.uniform(0, 1)
    return qgev(p=u, loc=loc, scale=scale, shape=shape)
@jit
def gev_cdf(x: float, loc: float, scale: float, shape: float) -> float:
    """
    CDF of a GEV distribution

    Clips probability to zero (Frechet case) or one (Weibull case)
    outside the accepted support.
    """
    # NOTE(review): if shape is NaN none of the branches below assigns
    # ``prob`` -- confirm callers never pass a NaN shape.
    if shape == 0.0:
        # Gumbel case: support is the whole real line.
        t_of_x = np.exp(-(x - loc) / scale)
        prob = np.exp(-t_of_x)
    elif shape > 0.0:
        # Frechet-type: support is bounded below at loc - scale/shape.
        lower_bound = loc - scale / shape
        if x > lower_bound:
            t_of_x = (1 + shape * ((x - loc) / scale)) ** (-1 / shape)
            prob = np.exp(-t_of_x)
        else:
            prob = 0.0
    elif shape < 0.0:
        # Weibull-type: support is bounded above at loc - scale/shape.
        upper_bound = loc - scale / shape
        if x < upper_bound:
            t_of_x = (1 + shape * ((x - loc) / scale)) ** (-1 / shape)
            prob = np.exp(-t_of_x)
        else:
            prob = 1.0
    return prob
| 2.921875 | 3 |
src/github/db/repositories/Repositories.py | ytyaru/GitHub.Repo.Create.201701281344 | 0 | 12767793 | <filename>src/github/db/repositories/Repositories.py<gh_stars>0
#!python3
#encoding:utf-8
import dataset
import requests
import urllib.parse
import datetime
import time
import pytz
import json
import github.api.Pagenation
import github.db.repositories.Languages
class Repositories:
    """Mirror of the authenticated user's GitHub repositories in a local SQLite DB.

    Fetches repository metadata through the GitHub API (paginated) and keeps
    the local ``Repositories``/``Counts`` tables up to date.
    """
    def __init__(self, db_path_repo, req_param):
        self.req = req_param
        self.db_path_repo = db_path_repo
        self.db_repo = dataset.connect('sqlite:///' + db_path_repo)
        self.page = github.api.Pagenation.Pagenation(req_param)
        self.lang = github.db.repositories.Languages.Languages(db_path_repo, req_param)

    def upload(self, name, description=None, homepage=None):
        """Create a repository on GitHub and record it locally.

        NOTE(review): ``self.api`` is never assigned anywhere in this
        class -- confirm where the API client object is supposed to
        come from before relying on this method.
        """
        res = self.api.create(name, description, homepage)
        self.__insert(res)
        self.lang.fill()

    def update_local_db(self):
        """Insert any repositories created since the newest local one.

        :returns: the list of newly inserted repository dicts (oldest first).
        """
        diff_repos = self.__get_repos_diff()
        self.db_repo.begin()
        for r in diff_repos:
            self.__insert(r)
        self.db_repo.commit()
        self.lang.update_local_db()
        return diff_repos

    def __insert(self, r):
        """Insert one GitHub repository record plus its counter row."""
        # Bug fix: ``now`` was referenced here but only defined inside
        # update_local_db(); compute the check timestamp locally instead.
        now = datetime.datetime.now(pytz.utc)
        self.db_repo['Repositories'].insert(dict(
            IdOnGitHub=r['id'],
            Name=r['name'],
            Description=r['description'],
            Homepage=r['homepage'],
            CreatedAt=r['created_at'],
            PushedAt=r['pushed_at'],
            UpdatedAt=r['updated_at'],
            CheckedAt="{0:%Y-%m-%dT%H:%M:%SZ}".format(now)))
        # Bug fix: was ``self.db`` (undefined attribute); the connection
        # is held in ``self.db_repo``.
        repo = self.db_repo['Repositories'].find_one(IdOnGitHub=r['id'])
        self.db_repo['Counts'].insert(dict(
            RepositoryId=repo['Id'],
            Forks=r['forks_count'],
            Stargazers=r['stargazers_count'],
            Watchers=r['watchers_count'],
            Issues=r['open_issues_count']))

    def __get_repos_diff(self):
        """Fetch repos newer than the newest locally-stored one, oldest first."""
        since = self.__get_since_repo_id()
        print(since)
        method = 'GET'
        endpoint = 'user/repos'
        params = self.req.get(method, endpoint)
        # Newest-first ordering so the scan can stop at the known repo id.
        params['params'] = {"type": "all", "sort": "created", "direction": "desc", "per_page": 100}
        r = requests.get(urllib.parse.urljoin("https://api.github.com", endpoint), **params)
        if since is None:
            # Empty local DB: every page of repositories is new.
            res = self.page.pagenate(r, r.json())
        else:
            res = self.__pagenate(r, r.json(), since)
        res.reverse()
        return res

    def __pagenate(self, r, res, since, start=0):
        """Return only the repositories newer than the given repository id.

        (Docstring translated from the original Japanese.)
        @param r     requests response object for the current page.
        @param res   accumulated json list of repositories so far.
        @param since github repository id to stop at.
        @param start index in *res* to resume scanning from.
        """
        print("since={0}".format(since))
        count = 0
        for repo in res[start:]:
            print("{0} {1}".format(repo['id'], repo['name']))
            if not(since == repo['id']):
                count = count + 1
            else:
                break
        start += count
        # 'since' not found yet: everything scanned so far is new, and the
        # next page may contain more candidates.
        # Bug fix: the original tested ``count == len(res)`` (wrong after the
        # first recursion) and recursed with ``start + 1`` (skipping the first
        # item of each newly fetched page). Testing ``start == len(res)`` and
        # resuming at ``start`` covers every element exactly once.
        if start == len(res):
            print("num {0}".format(len(res)))
            print(r.links)
            if "next" in r.links.keys():
                print(r.links["next"]["url"])
                params = self.req.update_otp()
                if "params" in params:
                    del params["params"]
                r2 = requests.get(r.links["next"]["url"], **params)
                # Parse the page once instead of twice.
                new_items = r2.json()
                res += new_items
                print(" num {0}".format(len(new_items)))
                print("sum num {0}".format(len(res)))
                time.sleep(2)
                return self.__pagenate(r2, res, since, start)
            else:
                print("all num {0}".format(len(res)))
                print("len(res[:start]) {0}".format(len(res[:start])))
                return res[:start]
        else:
            print("all num {0}".format(len(res)))
            print("len(res[:start]) {0}".format(len(res[:start])))
            return res[:start]

    def __get_since_repo_id(self):
        """Return the GitHub id of the most recently created local repo, or None."""
        repo = self.db_repo['Repositories'].find_one(order_by='-CreatedAt')
        print(repo)
        if repo is None:
            return None
        else:
            return repo['IdOnGitHub']
| 2.484375 | 2 |
setup.py | sthysel/flatspace | 1 | 12767794 | # -*- encoding: utf-8 -*-
from glob import glob
from os.path import basename, splitext
from setuptools import find_packages, setup
# The long description shown on PyPI comes straight from the README.
with open('README.md', 'r') as f:
    long_description = f.read()

setup(
    name='flatspace',
    license='GPLv2',
    version='0.0.1',
    description='Space is flat here',
    long_description=long_description,
    long_description_content_type='text/markdown',
    # Installs a ``flatspace`` console command bound to flatspace.cli:cli.
    entry_points={
        'console_scripts': [
            'flatspace=flatspace.cli:cli',
        ],
    },
    install_requires=[],
    url='',
    classifiers=[
        # NOTE(review): 'License :: BHP' is not a standard trove classifier
        # and disagrees with license='GPLv2' above -- confirm intent.
        'License :: BHP',
        'Development Status :: 4 - Beta',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 3',
        'Topic :: Utilities',
    ],
    keywords=[],
    extras_require={},
    setup_requires=[],
    # src-layout: packages and single-file modules both live under ./src.
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    package_data={
        '': ['config/*.yml'],
    },
)
| 1.765625 | 2 |
api/app.py | Tibblist/react-flask-docker-sample | 0 | 12767795 | from flask import request, jsonify, Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# Connection string for the postgres service at host "db" (presumably the
# docker-compose service name -- confirm).
# NOTE(review): credentials are hard-coded; consider environment variables.
db_string = "postgres://postgres:example@db:5432/postgres"
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = db_string
# SQLAlchemy ORM plus migration support via Flask-Migrate.
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class ListModel(db.Model):
    """ORM model for one row of the ``list`` table."""
    __tablename__ = 'list'
    # Auto-incrementing primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Free-form item name (unbounded string).
    name = db.Column(db.String())

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return f"<List {self.name}>"
@app.route('/items/list', methods=['GET'])
def list_items():
    """Return all list items, ordered by id, as a JSON array."""
    rows = ListModel.query.order_by(ListModel.id).all()
    results = [{"id": row.id, "name": row.name} for row in rows]
    print(results, flush=True)
    return jsonify(results)
@app.route('/items/create', methods=['POST'])
def create_item():
    """Insert a new item with an empty name and report success."""
    item = ListModel(name="")
    db.session.add(item)
    db.session.commit()
    return {"message": f"Item has been created successfully."}
@app.route('/items/update', methods=["POST"])
def update_item():
    """Rename an existing item from a JSON payload ({"id": ..., "name": ...})."""
    # Guard clause: reject non-JSON payloads up front.
    if not request.is_json:
        return {"error": "The request payload is not in JSON format"}
    payload = request.get_json()
    print(payload, flush=True)
    item = ListModel.query.get_or_404(payload["id"])
    item.name = payload["name"]
    db.session.add(item)
    db.session.commit()
    return {"message": f"Item {item.name} has been updated successfully."}
@app.route("/items/delete", methods=["DELETE"])
def delete_item():
    """Delete an item identified by a JSON payload ({"id": ...})."""
    # Guard clause: reject non-JSON payloads up front.
    if not request.is_json:
        return {"error": "The request payload is not in JSON format"}
    payload = request.get_json()
    item = ListModel.query.get_or_404(payload["id"])
    db.session.delete(item)
    db.session.commit()
    return {"message": f"Item {item.name} has been deleted successfully."}
if __name__ == "__main__":
    # Run the Flask development server when executed directly.
    app.run()
tests/test_mixins.py | gBobCodes/django-rest-framework-proxy-gateway | 0 | 12767796 | from rest_framework import status
from rest_framework.response import Response
from rest_framework.test import APITestCase
from rest_framework_proxy.views import ProxyView
from rest_framework_proxy_gateway.mixins import (
BlockDelete,
BlockGet,
BlockOptions,
BlockPatch,
BlockPost,
BlockPut,
)
class MixinTests(APITestCase):
    """Test the Block* mixin classes.

    Each Block<Method> mixin must make the corresponding HTTP method on a
    ProxyView respond with 405 Method Not Allowed.  The original tests
    mixed ``self.assertEqual`` with bare ``assert`` statements (which are
    stripped under ``python -O``); all checks now consistently use the
    unittest assertion via a shared helper.
    """

    def assert_method_not_allowed(self, response):
        """Shared check: the blocked call must return 405 Method Not Allowed."""
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_block_delete(self):
        """Verify the BlockDelete mixin does block the DELETE method."""
        class TestProxyView(BlockDelete, ProxyView):
            source = 'api/test'
        self.assert_method_not_allowed(TestProxyView().delete(request=None))

    def test_block_get(self):
        """Verify the BlockGet mixin does block the GET method."""
        class TestProxyView(BlockGet, ProxyView):
            source = 'api/test'
        self.assert_method_not_allowed(TestProxyView().get(request=None))

    def test_block_options(self):
        """Verify the BlockOptions mixin does block the OPTIONS method."""
        class TestProxyView(BlockOptions, ProxyView):
            source = 'api/test'
        self.assert_method_not_allowed(TestProxyView().options(request=None))

    def test_block_patch(self):
        """Verify the BlockPatch mixin does block the PATCH method."""
        class TestProxyView(BlockPatch, ProxyView):
            source = 'api/test'
        self.assert_method_not_allowed(TestProxyView().patch(request=None))

    def test_block_post(self):
        """Verify the BlockPost mixin does block the POST method."""
        class TestProxyView(BlockPost, ProxyView):
            source = 'api/test'
        self.assert_method_not_allowed(TestProxyView().post(request=None))

    def test_block_put(self):
        """Verify the BlockPut mixin does block the PUT method."""
        class TestProxyView(BlockPut, ProxyView):
            source = 'api/test'
        self.assert_method_not_allowed(TestProxyView().put(request=None))

    def test_block_delete_patch_put(self):
        """Verify that combined mixins block all of their methods."""
        class TestProxyView(BlockDelete, BlockPatch, BlockPut, ProxyView):
            source = 'api/test'
        view = TestProxyView()
        self.assert_method_not_allowed(view.delete(request=None))
        self.assert_method_not_allowed(view.patch(request=None))
        self.assert_method_not_allowed(view.put(request=None))
| 2.40625 | 2 |
Leetcode/0201-0250/0238-product-of-array-except-self.py | MiKueen/Data-Structures-and-Algorithms | 0 | 12767797 | '''
Author : MiKueen
Level : Medium
Problem Statement : Product of Array Except Self
Given an array nums of n integers where n > 1, return an array output such that output[i] is equal to the product of all the elements of nums except nums[i].
Example:
Input: [1,2,3,4]
Output: [24,12,8,6]
Constraint: It's guaranteed that the product of the elements of any prefix or suffix of the array (including the whole array) fits in a 32 bit integer.
Note: Please solve it without division and in O(n).
Follow up:
Could you solve it with constant space complexity? (The output array does not count as extra space for the purpose of space complexity analysis.)
'''
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Return output where output[i] is the product of all nums except nums[i].

        Single pass maintaining a running prefix product (from the left)
        and a running suffix product (from the right), written into the
        result from both ends simultaneously.

        Time Complexity - O(n)
        Space Complexity - O(1) (the output array is not counted)
        """
        n = len(nums)
        answer = [1] * n
        prefix = suffix = 1
        for front in range(n):
            back = -(front + 1)
            answer[front] *= prefix
            answer[back] *= suffix
            prefix *= nums[front]
            suffix *= nums[back]
        return answer
| 3.578125 | 4 |
src/Database/digikey_web_scraping.py | hong-yh/datasheet-scrubber | 13 | 12767798 | <filename>src/Database/digikey_web_scraping.py
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
import pandas as pd
import time
import shutil
import re
import os
import glob
#WEB SCRAPING FOR DIGIKEY WEBSITE USING SELENIUM LIBRARY and CHROME DRIVER
#Replace with another category
category = 'integrated-circuits-ics'
#Replace with path where you want to download csv files
download_folder = "/Users/zinebbenameur/Desktop/Desktop - MacBook Pro/Fasoc/csv_tables"
#files containing the links to urls
links_urls = "/Users/zinebbenameur/Desktop/Desktop - MacBook Pro/Fasoc/links_and_type.csv"
#Read urls (two unnamed columns: colA = page URL, colB = sub-category label)
df = pd.read_csv(links_urls, usecols=[0,1], names=['colA', 'colB'], header=None)
#url_extender = r"?FV=ffe00300&quantity=0&ColumnSort=1&page=" + str(1) + "&pageSize=500"
urls = df['colA']
sub_category = df['colB']
print("URLS", urls)
#print("sub", sub_category)
# Configure Chrome so downloads land in download_folder automatically.
chromeOptions = webdriver.ChromeOptions()
prefs = {"download.default_directory" : download_folder}
chromeOptions.add_experimental_option("prefs",prefs)
#Replace with path to chrome driver
chromedriver = "/usr/local/bin/chromedriver"
driver = webdriver.Chrome(executable_path=chromedriver, options=chromeOptions)
#Iterate through all urls from the csv file
for url in urls:
    print("I enter the for loop")
    #add extension to the URL in order to scrap pages with 500 elements
    url_extender = r"?FV=ffe00300&quantity=0&ColumnSort=1&page=" + str(1) + "&pageSize=500"
    print("I will scrap this url", url)
    driver.get(url+ url_extender)
    driver.maximize_window()
    # Read the "current / total" pager text to learn how many pages exist.
    current_page = WebDriverWait(driver, 200).until(ec.visibility_of_element_located((By.CLASS_NAME, "current-page")))
    print("CURRENT_PAGE", current_page.text)
    nb_pages = (current_page.text).partition("/")[2]
    print("nb_pages", nb_pages)
    for i in range(1, int(nb_pages)+1):
        url_extender = r"?FV=ffe00300&quantity=0&ColumnSort=1&page=" + str(i) + "&pageSize=500"
        #Extract subcategory from url (segment after the category path component)
        start = category +'/'
        end = '/'
        subcategory = url[url.find(start)+len(start):url.rfind(end)]
        print("subcategory", subcategory)
        driver.get(url+ url_extender)
        driver.maximize_window()
        try:
            # wait for Fastrack item to appear, then click it
            fastrack = WebDriverWait(driver, 300).until(ec.visibility_of_element_located((By.CLASS_NAME, "download-table")))
            WebDriverWait(driver, 300)
            driver.execute_script("arguments[0].scrollIntoView()", fastrack)
            #click on the download button
            fastrack.click()
            WebDriverWait(driver, 300)
            # Fixed wait for the CSV download to finish before renaming it.
            time.sleep(15)
            #Replace with path where you downloaded files
            Initial_path = download_folder
            list_of_files = glob.glob(download_folder +'/*.csv') # * means all if need specific format then *.csv
            #search the latest file downloaded
            latest_file = max(list_of_files, key=os.path.getctime)
            # filename = max([Initial_path + "\\" + f for f in os.listdir(Initial_path)],key=os.path.getctime)
            #rename with more appropriate name
            shutil.move(latest_file,os.path.join(Initial_path,subcategory + "_" + str(i) + ".csv"))
            print("file renamed")
        except Exception:
            # NOTE(review): silently swallows any failure for this page
            # (missing button, timeout, empty folder) -- consider logging.
            pass
    print('Deleting the URL from the file', url)
    #we erase the link from the file in order to make sure we did not miss any link during the scraping process
    df.drop(df.index[0], inplace = True)
    df.to_csv(links_urls, index=False, header=None)
| 3.203125 | 3 |
filestack/utils/intelligent_ingestion.py | yousefiparsa/filestack-python | 1 | 12767799 | import os
import sys
import mimetypes
import hashlib
import logging
import time
from multiprocessing import Queue, Process
from base64 import b64encode
from collections import deque, OrderedDict
import requests
from filestack.config import HEADERS
from filestack.utils.utils import store_params
# Module-level logger writing to stdout; ERROR by default.
log = logging.getLogger(__name__)
log.setLevel(logging.ERROR)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter("%(asctime)s - %(processName)s[%(process)d] - %(levelname)s - %(message)s"))
log.addHandler(handler)

UPLOAD_HOST = 'https://upload.filestackapi.com'
MB = 1024 ** 2
# Initial part/chunk sizes; chunk size shrinks when uploads keep failing.
DEFAULT_PART_SIZE = 8 * MB
DEFAULT_CHUNK_SIZE = 8 * MB
# Worker-pool sizes for uploader and committer processes.
NUM_OF_UPLOADERS = 4
NUM_OF_COMMITTERS = 2
# Maximum backend-requested retry delay (seconds) before giving up on a chunk.
MAX_DELAY = 4
class ResponseNotOk(Exception):
    """Raised when an HTTP response has a non-OK (>= 400) status code."""
    pass
class S3UploadException(Exception):
    """Raised when the PUT of chunk bytes to S3 fails at the network level."""
    pass
class UploadManager(object):
    """Orchestrates a multipart intelligent-ingestion upload.

    Splits the file into parts, each part into chunks, feeds chunk jobs to
    uploader worker processes via ``upload_q``, asks committer workers to
    commit finished parts via ``commit_q``, and reacts to worker results
    arriving on ``response_q``.  Failed chunks are retried with a backend
    supplied delay, or re-split into smaller chunks.
    """
    def __init__(self, apikey, filepath, storage, params, security, upload_q, commit_q, response_q):
        self.chunk_size = DEFAULT_CHUNK_SIZE
        self.apikey = apikey
        self.filepath = filepath
        self.storage = storage
        self.params = params
        self.security = security
        self.upload_q = upload_q
        self.commit_q = commit_q
        self.response_q = response_q
        self.filename = os.path.split(filepath)[1]
        self.filesize = os.path.getsize(filepath)
        self.mimetype = mimetypes.guess_type(filepath)[0]
        self.start_response = None
        # part number -> {'seek', 'size', 'currently_processed', 'chunks'(deque)}
        self.parts = OrderedDict()
        # Number of chunk jobs currently held by uploader workers.
        self._currently_processed = 0

    def run(self):
        """Start the multipart upload and drive it to completion."""
        self._multipart_start()
        self._create_parts()
        self._manage_upload_process()

    def _multipart_start(self):
        """Call /multipart/start and remember the returned upload descriptor."""
        data = {
            'apikey': self.apikey,
            'filename': self.filename,
            'mimetype': self.mimetype,
            'size': self.filesize,
            'store_location': self.storage,
            'multipart': True
        }
        if self.params:
            data.update(store_params(self.params))
        if self.security:
            data.update({
                'policy': self.security['policy'],
                'signature': self.security['signature']
            })
        response = requests.post(
            UPLOAD_HOST + '/multipart/start',
            data=data,
            files={'file': (self.filename, '', None)},
            params=self.params,
            headers=HEADERS
        )
        self.start_response = response.json()

    def _multipart_complete(self):
        """Call /multipart/complete, retrying until the backend returns 200."""
        response_code = 0
        data = {
            'apikey': self.apikey,
            'uri': self.start_response['uri'],
            'region': self.start_response['region'],
            'upload_id': self.start_response['upload_id'],
            'filename': self.filename,
            'size': self.filesize,
            'mimetype': self.mimetype,
            'multipart': True,
            'store_location': self.storage
        }
        if self.params:
            data.update(store_params(self.params))
        while response_code != 200:
            log.info('Waiting for complete')
            response = requests.post(
                UPLOAD_HOST + '/multipart/complete',
                data=data,
                files={'file': (self.filename, '', None)},
                params=self.params,
                headers=HEADERS
            )
            if not response.ok:
                log.error('Unexpected backend response: %s', response.content)
                raise Exception(response.content)
            response_code = response.status_code
        log.info('Got response %s, %s', response, response.content)
        # Hand the final backend response to whoever drains response_q.
        self.response_q.put(response)

    def _create_parts(self):
        """Split the file into fixed-size parts, each pre-split into chunks."""
        for index, seek_point in enumerate(
                self._get_byte_ranges(self.filesize, DEFAULT_PART_SIZE)):
            chunks = deque()
            # appendleft so chunks pop() in ascending-offset order.
            for ch in self._get_byte_ranges(seek_point['size'], self.chunk_size):
                chunks.appendleft({'offset': ch['seek'], 'size': ch['size']})
            self.parts[index + 1] = {
                'seek': seek_point['seek'],
                'size': seek_point['size'],
                'currently_processed': 0,
                'chunks': chunks
            }

    def _split_chunk(self, chunk):
        """Split one chunk into sub-chunks of at most self.chunk_size bytes."""
        return [
            {'offset': ch['seek'], 'size': ch['size']}
            for ch in self._get_byte_ranges(chunk['size'], self.chunk_size, start=chunk['offset'])
        ]

    def _get_next_chunk(self):
        """Return (part_number, chunk) for the next pending chunk, or (None, None)."""
        for part_num in self.parts:
            if self.parts[part_num]['chunks']:
                return part_num, self.parts[part_num]['chunks'].pop()
        return None, None

    def _feed_uploaders(self):
        """Dispatch pending chunks until all uploader slots are busy."""
        while self._currently_processed < NUM_OF_UPLOADERS:
            part_num, chunk = self._get_next_chunk()
            if not chunk:
                break
            if chunk['size'] > self.chunk_size:
                # Chunk is larger than the (possibly shrunken) current chunk
                # size: submit the first slice, requeue the rest.
                smaller_chunks = self._split_chunk(chunk)
                chunk, rest = smaller_chunks[0], smaller_chunks[1:]
                for c in reversed(rest):
                    self.parts[part_num]['chunks'].append(c)
            self._submit_upload_job(part_num, chunk)

    def _manage_upload_process(self):
        """Main event loop: dispatch chunk jobs and react to worker results."""
        self._feed_uploaders()
        while self.parts:
            response = self.response_q.get(block=True)
            log.info('Got response %s', response)
            if response['worker'] == 'uploader':
                self.parts[response['part']]['currently_processed'] -= 1
                self._currently_processed -= 1
                old_chunk = response['chunk']
                if not response['success']:
                    log.warning('Failed response received %s', response)
                    if response['delay']:
                        # Uploader got an HTTP error (status >= 400):
                        # resubmit with the requested delay unless the
                        # maximum back-off has been exceeded.
                        if response['delay'] > MAX_DELAY:
                            log.error('Max delay exceeded for chunk %s', old_chunk)
                            return
                        self._submit_upload_job(response['part'], old_chunk, delay=response['delay'])
                        continue
                    if old_chunk['size'] <= self.chunk_size:
                        # Bug fix: halve with floor division so chunk sizes
                        # stay ints (a float size would break f.seek()/f.read()
                        # in the uploader workers).
                        log.info(
                            'Failed to upload %s bytes. Changing chunk size from %s to %s bytes',
                            old_chunk['size'], self.chunk_size, self.chunk_size // 2
                        )
                        self.chunk_size //= 2
                        if self.chunk_size < 32 * 1024:
                            log.error('Minimal chunk size failed')
                            return
                    # Re-split the failed chunk at the (new) chunk size and requeue.
                    new_chunks = self._split_chunk(old_chunk)
                    for new_chunk in reversed(new_chunks):
                        self.parts[response['part']]['chunks'].append(new_chunk)
                    self._feed_uploaders()
                    continue
                if not self.parts[response['part']]['chunks'] and self.parts[response['part']]['currently_processed'] == 0:
                    # All chunks of this part uploaded: ask a committer to commit it.
                    log.info('No more chunks for part %s, time to commit', response['part'])
                    self.commit_q.put({
                        'apikey': self.apikey,
                        'uri': self.start_response['uri'],
                        'region': self.start_response['region'],
                        'upload_id': self.start_response['upload_id'],
                        'size': self.filesize,
                        'part': response['part'],
                        'store_location': self.storage,
                        'filename': self.filename,
                    })
                self._feed_uploaders()
            elif response['worker'] == 'committer':
                log.info('Got commit done message %s', response)
                log.info('Removing part %s', response['part'])
                self.parts.pop(response['part'])
        # All parts committed and removed: finalize only when nothing is pending.
        if self._get_next_chunk()[1] is None:
            self._multipart_complete()

    def _submit_upload_job(self, part_num, chunk, delay=0):
        """Put one chunk-upload job on the queue and track it as in-flight."""
        self.upload_q.put({
            'chunk': chunk,
            'apikey': self.apikey,
            'store_location': self.storage,
            'part': part_num,
            'seek': self.parts[part_num]['seek'],
            'offset': chunk['offset'],
            'size': chunk['size'],
            'filepath': self.filepath,
            'filename': self.filename,
            'filesize': self.filesize,
            'uri': self.start_response['uri'],
            'region': self.start_response['region'],
            'upload_id': self.start_response['upload_id'],
            'delay': delay
        })
        self.parts[part_num]['currently_processed'] += 1
        self._currently_processed += 1

    @staticmethod
    def _get_byte_ranges(filesize, part_size, start=0, bytes_to_read=None):
        """Split *bytes_to_read* bytes (default: *filesize*) starting at *start*
        into consecutive {'seek', 'size'} ranges of at most *part_size* bytes."""
        if bytes_to_read is None:
            bytes_to_read = filesize
        ranges = []
        pos = start
        while bytes_to_read > 0:
            point = {'seek': pos}
            if bytes_to_read > part_size:
                size = part_size
                bytes_to_read -= part_size
                pos += part_size
            else:
                size = bytes_to_read
                bytes_to_read = 0
            point['size'] = size
            ranges.append(point)
        return ranges
def manage_upload(apikey, filepath, storage, params, security, upload_q, commit_q, response_q):
    """Entry point for the manager process: build an UploadManager and run it."""
    UploadManager(
        apikey, filepath, storage, params, security, upload_q, commit_q, response_q
    ).run()
def consume_upload_job(upload_q, response_q):
    """Worker-process loop: upload one file chunk per job from *upload_q*.

    For every job it asks the backend for a presigned S3 target, PUTs the
    chunk bytes to S3, and reports success/failure (plus a suggested retry
    delay) back to the manager on *response_q*.  A literal ``'die'`` job
    terminates the loop.
    """
    log.info('Uploader ready')
    while True:
        job = upload_q.get(block=True)
        if job == 'die':
            break  # we need a way to stop it in tests (other than terminate())
        log.info(
            'Uploader got chunk %s for part %s',
            job['chunk'], job['part']
        )
        log.debug('Job details: %s', job)
        delay = job.get('delay', 0)
        time.sleep(delay)
        # NOTE(review): this message is logged *after* the sleep already
        # happened, so it describes the past, not the future.
        log.info('Uploader waiting for %s seconds', delay)
        # Read only this chunk's bytes: part start ('seek') + chunk offset.
        with open(job['filepath'], 'rb') as f:
            f.seek(job['seek'] + job['offset'])
            chunk = f.read(job['size'])
        success = True
        try:
            # Ask the backend for the presigned upload target for the chunk.
            backend_resp = requests.post(
                UPLOAD_HOST + '/multipart/upload',
                data={
                    'apikey': job['apikey'],
                    'part': job['part'],
                    'size': job['size'],
                    # Content-MD5-style base64 digest; S3 verifies it.
                    'md5': b64encode(hashlib.md5(chunk).digest()).strip(),
                    'uri': job['uri'],
                    'region': job['region'],
                    'upload_id': job['upload_id'],
                    'store_location': job['store_location'],
                    'multipart': True,
                    'offset': job['offset']
                },
                files={'file': (job['filename'], '', None)},
                headers=HEADERS
            )
            if not backend_resp.ok:
                raise ResponseNotOk('Incorrect backend response %s', backend_resp)
            backend_data = backend_resp.json()
            try:
                # Push the raw bytes straight to S3 with the returned headers.
                s3_resp = requests.put(
                    backend_data['url'],
                    headers=backend_data['headers'],
                    data=chunk
                )
            except Exception as e:
                log.warning('Upload to S3 failed %s', e)
                raise S3UploadException(str(e))
            if not s3_resp.ok:
                raise ResponseNotOk('Incorrect S3 response %s', s3_resp)
        except ResponseNotOk:
            # Back off geometrically (factor 1.3), starting at 1 second.
            delay = delay * 1.3 or 1
            success = False
        except S3UploadException:
            delay = 0
            success = False
        except Exception as e:
            delay = 0
            log.error('Request to backend failed %s', e)
            success = False
        # Always report back so the manager can retry or commit the part.
        response_q.put({
            'worker': 'uploader',
            'chunk': job['chunk'],
            'part': job['part'],
            'offset': job['offset'],
            'size': job['size'],
            'success': success,
            'delay': delay
        })
        log.info(
            'Uploader finished chunk %s for part %s. Success: %s',
            job['chunk'], job['part'], success
        )
def commit_part(commit_q, response_q):
    """Worker-process loop: commit one finished part per job from *commit_q*.

    Notifies the backend that every chunk of the part was uploaded, then
    acks the manager on *response_q*.  A literal ``'die'`` job terminates
    the loop.
    """
    log.info('Committer ready')
    while True:
        job = commit_q.get(block=True)
        if job == 'die':
            break  # we need a way to stop it in tests (other than terminate())
        log.info('Committer got job for part %s', job['part'])
        log.debug('Job details: %s', job)
        # NOTE(review): the response status is never checked here, so a
        # failed commit is still reported as success below.
        requests.post(
            UPLOAD_HOST + '/multipart/commit',
            data={
                'apikey': job['apikey'],
                'uri': job['uri'],
                'region': job['region'],
                'upload_id': job['upload_id'],
                'size': job['size'],
                'part': job['part'],
                'store_location': job['store_location']
            },
            files={'file': (job['filename'], '', None)},
            headers=HEADERS
        )
        response_q.put({
            'worker': 'committer',
            'success': True,
            'part': job['part']
        })
        log.info('Commit job done')
def upload(apikey, filepath, storage, params=None, security=None):
    """Multipart-upload *filepath* using one manager process plus pools of
    uploader and committer worker processes.

    Returns the final response the manager put on the queue, or raises
    ``Exception('Upload aborted')`` when nothing usable arrives within one
    second of the manager exiting.
    """
    upload_q = Queue()
    commit_q = Queue()
    response_q = Queue()
    manager_proc = Process(
        target=manage_upload,
        name='manager',
        args=(apikey, filepath, storage, params, security, upload_q, commit_q, response_q)
    )
    side_processes = [
        Process(
            target=consume_upload_job,
            name='uploader',
            args=(upload_q, response_q)
        ) for _ in range(NUM_OF_UPLOADERS)
    ]
    for _ in range(NUM_OF_COMMITTERS):
        side_processes.append(
            Process(
                target=commit_part,
                name='committer',
                args=(commit_q, response_q)
            )
        )
    # Start workers first, then the manager; block until the manager is
    # done and kill the (queue-blocked) workers afterwards.
    for proc in side_processes:
        proc.start()
    manager_proc.start()
    manager_proc.join()
    for proc in side_processes:
        proc.terminate()
    try:
        final_response = response_q.get(block=True, timeout=1)
        # Anything other than a real HTTP response means the upload failed.
        if not isinstance(final_response, requests.Response):
            raise Exception()
        return final_response
    except Exception:
        raise Exception('Upload aborted')
| 2.140625 | 2 |
examples/iris-scatterplot.py | chofchof/probml | 0 | 12767800 | #https://seaborn.pydata.org/generated/seaborn.pairplot.html
"""Pairwise scatterplot of the Iris dataset, colored by species.

Saves the figure to figures/iris-scatterplot.pdf.
See https://seaborn.pydata.org/generated/seaborn.pairplot.html
"""
import matplotlib.pyplot as plt
import os
import seaborn as sns

sns.set(style="ticks", color_codes=True)
iris = sns.load_dataset("iris")
grid = sns.pairplot(iris, hue="species")
plt.savefig(os.path.join('figures', 'iris-scatterplot.pdf'))
zenqueue/client/http/common.py | zacharyvoase/zenqueue | 6 | 12767801 | # -*- coding: utf-8 -*-
import urllib
from urlobject import URLObject
from zenqueue import json
from zenqueue.client.common import AbstractQueueClient
class HTTPQueueClient(AbstractQueueClient):
    """Queue client that talks to a ZenQueue server over HTTP."""

    log_name = 'zenq.client.http'

    def __init__(self, host='127.0.0.1', port=3080):
        # The parent constructor sets up logging.
        super(HTTPQueueClient, self).__init__()
        self.host = host
        self.port = port

    def send(self, url, data=''):
        """Transport hook: subclasses must POST *data* to *url*."""
        raise NotImplementedError

    def action(self, action, args, kwargs):
        """Serialize an RPC-style *action* call, send it, decode the reply."""
        # It's really pathetic, but it's still debugging output.
        self.log.debug('Action %r called with %d args', action,
                       len(args) + len(kwargs))
        quoted = urllib.quote(action)
        url = URLObject(host=self.host).with_port(self.port).with_path('/%s/' % quoted)
        reply = self.send(url, data=json.dumps([args, kwargs]))
        return self.handle_response(reply)
python3/pracmln/utils/latexmath2png.py | seba90/pracmln | 123 | 12767802 | #!/usr/bin/python2.5
# Until Python 2.6
from dnutils import logs
from pracmln.utils import locs
"""
Converts LaTeX math to png images.
Run latexmath2png.py --help for usage instructions.
"""
"""
Author:
<NAME> <<EMAIL>>
URL: http://www.kamilkisiel.net
Revision History:
2007/04/20 - Initial version
TODO:
- Make handling of bad input more graceful?
---
Some ideas borrowed from Kjell Fauske's article at http://fauskes.net/nb/htmleqII/
Licensed under the MIT License:
Copyright (c) 2007 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import os
import tempfile
from PIL import Image
import base64
logger = logs.getlogger(__name__, logs.DEBUG)
# Default packages to use when generating output
default_packages = [
'amsmath',
'amsthm',
'amssymb',
'bm'
]
def __build_preamble(packages, declarations):
    """Build the LaTeX preamble: document class, one ``\\usepackage`` per
    entry of *packages*, each *declarations* string verbatim, an empty
    page style, and ``\\begin{document}``.
    """
    # Fix: the original literals contained invalid escape sequences
    # ('\d', '\p') that warn on modern Python; the escaped backslashes
    # below emit byte-for-byte identical text.
    preamble = '\\documentclass{article}\n'
    for package in packages:
        preamble += '\\usepackage{{{}}}\n'.format(package)
    for declaration in declarations:
        preamble += '{}\n'.format(declaration)
    preamble += '\\pagestyle{empty}\n\\begin{document}\n'
    return preamble
def __write_output(infile, outdir, workdir='.', filename='', size=1, svg=True):
    """Compile *infile* (a .tex file) to a DVI in *workdir*, then convert
    it to ``<outdir>/<filename>.svg`` (dvisvgm) or ``.png`` (dvipng,
    scaled by *size*).  Raises if either external tool exits non-zero;
    .aux/.dvi/.log by-products are removed even on failure.

    NOTE(review): paths are interpolated into ``os.system`` shell
    commands -- do not pass untrusted file names.
    """
    try:
        # Generate the DVI file. NOTE: no output in stdout, as it is piped into /dev/null!
        latexcmd = 'latex -halt-on-error -output-directory {} {} >/dev/null'.format(workdir, infile)
        rc = os.system(latexcmd)
        # Something bad happened, abort
        if rc != 0:
            raise Exception('latex error')
        # Convert the DVI file to the requested image format.
        dvifile = infile.replace('.tex', '.dvi')
        outfilename = os.path.join(outdir, filename)
        if svg:
            dvicmd = "dvisvgm -v 0 -o {}.svg --no-fonts {}".format(outfilename, dvifile)
        else:
            dvicmd = "dvipng -q* -T tight -x {} -z 9 -bg Transparent -o {}.png {} >/dev/null".format(size * 1000, outfilename, dvifile)
        rc = os.system(dvicmd)
        if rc != 0:
            # Fix: the original formatted 'dvisvgm error' into '{} error',
            # yielding the message "dvisvgm error error".
            raise Exception('{} error'.format('dvisvgm' if svg else 'dvipng'))
    finally:
        # Cleanup temporaries.  'tmp_path' replaces a local that shadowed
        # the imported 'tempfile' module in the original.
        basefile = infile.replace('.tex', '')
        for ext in ('.aux', '.dvi', '.log'):
            tmp_path = basefile + ext
            if os.path.exists(tmp_path):
                os.remove(tmp_path)
def math2png(content, outdir, packages=default_packages, declarations=[], filename='', size=1, svg=True):
    """
    Generate png/svg images from $$...$$ style math environment equations.

    Parameters:
        content - A string containing latex math environment formulas
        outdir - Output directory for PNG images
        packages - Optional list of packages to include in the LaTeX preamble
        declarations - Optional list of declarations to add to the LaTeX preamble
        filename - Optional filename for output files
        size - Scale factor for output
        svg - Render SVG (True) or PNG (False)

    Returns a ``(file content, width/height ratio)`` tuple: raw SVG markup
    with ratio 1, or base64-encoded PNG bytes with the measured ratio.
    On any rendering failure a bundled default image is used instead.
    """
    outfilename = '/tmp/default.tex'
    # Build the LaTeX source in the system temp directory.
    workdir = tempfile.gettempdir()
    fd, texfile = tempfile.mkstemp('.tex', 'eq', workdir, True)
    try:
        # Escape raw '$' so only our own $$...$$ wrapper opens math mode.
        content = content.replace('$', r'\$')
        # Create the TeX document and save to tempfile ('\\end' fixes an
        # invalid '\e' escape; the emitted text is unchanged).
        fileContent = '{}$${}$$\n\\end{{document}}'.format(__build_preamble(packages, declarations), content)
        with os.fdopen(fd, 'w+') as f:
            f.write(fileContent)
        __write_output(texfile, outdir, workdir=workdir, filename=filename, size=size, svg=svg)
        outfilename = os.path.join(outdir, '{}.{}'.format(filename, 'svg' if svg else 'png'))
    except Exception:  # was a bare 'except:'; keep the best-effort fallback
        logger.error('Unable to create image. A reason you encounter '
                     'this error might be that you are either missing latex '
                     'packages for generating .dvi files or {} for '
                     'generating the {} image from the .dvi file.'.format('dvisvgm' if svg else 'dvipng', 'svg' if svg else 'png'))
        outfilename = os.path.join(locs.etc, 'default.{}'.format('svg' if svg else 'png'))
    finally:
        if svg:
            with open(outfilename, 'r') as outfile:
                filecontent = outfile.read()
            ratio = 1
        else:
            # Determine the image size to report the aspect ratio.
            im = Image.open(outfilename)
            width, height = im.size
            ratio = float(width) / float(height)
            # Fix: read in binary mode (base64.b64encode needs bytes) and
            # close the handle -- the original leaked a text-mode file.
            with open(outfilename, 'rb') as png:
                filecontent = base64.b64encode(png.read())
        # Cleanup: delete temporary files unless we fell back to the
        # bundled default image.
        if os.path.exists(texfile) and locs.etc not in outfilename:
            os.remove(texfile)
        if os.path.exists(outfilename) and locs.etc not in outfilename:
            os.remove(outfilename)
    return filecontent, ratio
# URI / beecrowd 1009 - Salary with Bonus: fixed salary plus a 15%
# commission on the month's total sales.
name = str(input())              # salesperson name (read, unused in output)
fixed_salary = float(input())    # fixed monthly salary
sales_total = float(input())     # total value sold in the month
payout = fixed_salary + sales_total * 0.15
print(f'TOTAL = R$ {payout:.2f}')
main/cogs/beta.py | avizum/Groot | 0 | 12767804 | from utils._type import *
import discord
from discord.ext import commands
class Beta(commands.Cog):
    """
    A cog with commands available to only the beta-testers
    """
    def __init__(self, bot):
        # Bot instance; exposes .config, .owner and guild lookups.
        self.bot = bot
    def cog_check(self, ctx: customContext):
        # Cog-wide gate: the invoker must be a member of the support
        # server and be either the bot owner or hold the beta-tester
        # role (hard-coded role id below).
        member: discord.Member = self.bot.get_guild(int(self.bot.config["SUPPORT_SERVER"])).get_member(ctx.author.id)
        if member is None:
            return False
        check = ctx.author == self.bot.owner or discord.utils.get(member.roles, id=823951076193337384)
        # NOTE(review): 'check' may be a Role object rather than a bool;
        # discord.py treats any truthy value as passing.
        return check
def setup(bot):
    # discord.py extension entry point: register the Beta cog.
    bot.add_cog(Beta(bot))
com/kakao/cafe/menu/smoothie/berryBerrySmoothie.py | roseonghwan/kakao-cafe | 2 | 12767805 | <gh_stars>1-10
from com.kakao.cafe.menu.smoothie.smoothie import Smoothie
class BerryBerrySmoothie(Smoothie):
    """A mixed-berry smoothie: one berry serving, 5000 won, blended ice."""

    def __init__(self):
        super().__init__()
        self.name = 'BerryBerrySmoothie'
        self.__mixedBerry = 1    # berry servings blended in
        self.__price = 5000      # price in won
        self.__groundIce = 400   # grams of ground ice

    def getName(self) -> str:
        return self.name

    def setName(self, name: str) -> None:
        self.name = name

    def getPrice(self) -> int:
        return self.__price

    def setPrice(self, price: int) -> None:
        self.__price = price

    def getGroundIce(self) -> int:
        return self.__groundIce

    def setGroundIce(self, groundIce: int) -> None:
        self.__groundIce = groundIce

    def isIced(self) -> bool:
        # Smoothies are always served iced; record that and report it.
        self._Iced = True
        return self._Iced

    def setIced(self) -> None:
        # Intentionally a no-op: a smoothie's iced state is fixed.
        pass

    def getMixedBerry(self) -> int:
        return self.__mixedBerry

    def setMixedBerry(self, mixedBerry: int) -> None:
        self.__mixedBerry = mixedBerry

    def addBerry(self, amount: int) -> None:
        """Blend in *amount* extra berry servings at 500 won apiece."""
        self.setMixedBerry(self.getMixedBerry() + amount)
        self.setPrice(self.getPrice() + 500 * amount)
| 2.921875 | 3 |
api.py | richrosenthal/Hackathon-SteelHacks-_Creddy_Website | 0 | 12767806 | <reponame>richrosenthal/Hackathon-SteelHacks-_Creddy_Website
import ads
import json
ADS_DEV_KEY = '<KEY>'
def main():
    """Query NASA ADS for 'supernova' papers sorted by citation count,
    print each title, and dump {title: citation_count} to papers.json.
    """
    ads.config.token = ADS_DEV_KEY
    papers = ads.SearchQuery(q="supernova", sort='citation_count', fl=['title', 'citation_count'])
    foundPapers = {}
    for paper in papers:
        print(paper.title)
        foundPapers[str(paper.title)] = str(paper.citation_count)
    # Fix: the original opened papers.json for writing *before* the query
    # as well, truncating the file and never using that handle.
    with open('papers.json', 'w') as f:
        json.dump(foundPapers, f)
if __name__ == "__main__":
    main()
| 2.4375 | 2 |
bebotPlatform/settings.py | ElitosGon/bebotPlatform | 0 | 12767807 | """
Django settings for bebotPlatform project.
Generated by 'django-admin startproject' using Django 2.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.abspath(os.path.join(BASE_DIR, '..'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is hard-coded in the repository; move it
# to an environment variable before deploying.
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1','172.16.31.10']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'django.contrib.sites',
    'webPlatform',
    'vote',
    'actstream',
    'notifications',
]
SITE_ID = 1
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'bebotPlatform.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.i18n',
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'bebotPlatform.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# NOTE(review): database credentials are committed in plain text.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'bebotDB',
        'USER': 'bebot',
        'PASSWORD': '<PASSWORD>',
        'HOST': 'localhost',
        'PORT': '',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
TIME_ZONE = 'America/Santiago'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]
# File handler
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Language
LANGUAGE_CODE = 'es'
LANGUAGES = [
    ('es', _('Spanish'))
]
LOCALE_PATH = (os.path.join(BASE_DIR,'locale'))
# Email setting
# NOTE(review): SMTP credentials are hard-coded; port 25 with TLS is
# unusual for Gmail (587 is the documented STARTTLS port) -- confirm.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_HOST_PASSWORD = '<PASSWORD>'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
SMTP_ENABLED = True
EMAIL_HOST_MEDGO = '<EMAIL>'
TEMPLATED_EMAIL_TEMPLATE_DIR = 'templated_email/' #use '' for top level template dir, ensure there is a trailing slash
TEMPLATED_EMAIL_FILE_EXTENSION = 'email'
# Images Avatar
DJANGORESIZED_DEFAULT_KEEP_META = True
DJANGORESIZED_DEFAULT_FORCE_FORMAT = 'JPEG'
# Google
# NOTE(review): reCAPTCHA secret key committed to source control.
GOOGLE_RECAPTCHA_SECRET_KEY = '6LfuJEAUAAAAAJdnw0LxAKSlMbhEeYt8ijfoUNyl'
# ACTSTREAM
ACTSTREAM_SETTINGS = {
    'FETCH_RELATIONS': True,
    'USE_PREFETCH': True,
    'USE_JSONFIELD': True,
    'GFK_FETCH_DEPTH': 1,
}
# Notification
NOTIFICATIONS_SOFT_DELETE=True
| 1.789063 | 2 |
users/migrations/0010_user_cerbere_login.py | MTES-MCT/apilos | 0 | 12767808 | # Generated by Django 3.2.13 on 2022-05-17 14:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.13: adds the nullable 'cerbere_login'
    # CharField (Cerbere SSO identifier) to the User model.
    dependencies = [
        ("users", "0009_alter_user_filtre_departements"),
    ]
    operations = [
        migrations.AddField(
            model_name="user",
            name="cerbere_login",
            field=models.CharField(max_length=255, null=True),
        ),
    ]
| 1.578125 | 2 |
examples/Garch_Example.py | zli69/finance_ml | 0 | 12767809 | #https://machinelearningmastery.com/develop-arch-and-garch-models-for-time-series-forecasting-in-python/
# example of ARCH model
"""ARCH and GARCH volatility-forecasting demo on synthetic data with
linearly increasing variance.  For each model the true variance of the
held-out tail is plotted against the forecast variance.
"""
from random import gauss, seed
from matplotlib import pyplot
from arch import arch_model

# Reproducible synthetic series: noise whose std grows linearly.
seed(1)
series = [gauss(0, i * 0.01) for i in range(0, 100)]
true_variance = [i * 0.01 for i in range(0, 100)]

# Hold out the last 10 points for forecasting.
n_test = 10
train, test = series[:-n_test], series[-n_test:]


def _fit_and_plot(volatility_model):
    """Fit the model, forecast the test horizon, and plot actual vs
    forecast variance."""
    fitted = volatility_model.fit()
    forecast = fitted.forecast(horizon=n_test)
    pyplot.plot(true_variance[-n_test:])
    pyplot.plot(forecast.variance.values[-1, :])
    pyplot.show()


# ARCH(15), then GARCH(15, 15), both with a zero mean model.
_fit_and_plot(arch_model(train, mean='Zero', vol='ARCH', p=15))
_fit_and_plot(arch_model(train, mean='Zero', vol='GARCH', p=15, q=15))
src/main.py | shanytc/BlastML | 4 | 12767810 | <reponame>shanytc/BlastML<filename>src/main.py<gh_stars>1-10
from BlastML.blastml import CFG, BlastML
# /Users/i337936/Desktop/
# /home/ubuntu/projects/
def main():
    """Build a BlastML configuration and run the selected pipeline.

    The CFG below captures the full project layout, image shape, model
    persistence flags, hyper-parameters, augmentation, YOLO/darknet and
    GAN settings.  The commented sections are usage examples for the
    CNN, YOLO and GAN entry points; only the SRGAN training line is
    active.
    """
    # Configurations for BlastML
    cfg = CFG(
        project={
            'project_name': 'shanynet',
            'root': '/Users/i337936/Desktop/',
            'project_folder': 'final_project/',
            'dataset': 'dataset/',
            'train': 'train/',
            'inference': 'inference/',
            'validation': 'validation/',
            'model': 'model/',
        },
        image={
            'width': 128,
            'height': 128,
            'channels': 3
        },
        model={
            'load_model_embeddings': False,  # strip dropouts and fc layers
            'enable_saving': True,
            'save_model': True,
            'save_weights': True,
            'save_history': True,
            'save_bottleneck_features': True,
            'reset_learn_phase': True
        },
        hyper_params={
            'batch': 16,
            'epochs': 3000,
            'classes': 5,
            'class_mode': 'sparse',
            'shuffle': False,
            'optimizer': 'adam',
            'loss_function': 'sparse_categorical_crossentropy',
            'compile_metrics': ['accuracy']
        },
        multithreading={
            'enable_multithreading': True,
            'threads': 5
        },
        augmentation={
            'train': {
                'enable': True,
                'featurewise_center': False,
                'samplewise_center': False,
                'featurewise_std_normalization': False,
                'samplewise_std_normalization': False,
                'rotation_range': 0,
                'width_shift_range': 0.0,
                'height_shift_range': 0.0,
                'brightness_range': None,
                'shear_range': 0.2,
                'zoom_range': 0.2,
                'channel_shift_range': 0.0,
                'fill_mode': 'nearest',
                'horizontal_flip': True,
                'vertical_flip': True,
                'rescale': 1./255
            },
            'validation': {
                'enable': True,
                'featurewise_center': False,
                'samplewise_center': False,
                'featurewise_std_normalization': False,
                'samplewise_std_normalization': False,
                'rotation_range': 0,
                'width_shift_range': 0.0,
                'height_shift_range': 0.0,
                'brightness_range': None,
                'shear_range': 0.2,
                'zoom_range': 0.2,
                'channel_shift_range': 0.0,
                'fill_mode': 'nearest',
                'horizontal_flip': True,
                'vertical_flip': True,
                'rescale': 1./255
            }
        },
        object_detection={
            'yolo': {
                'cfg': 'model/darknet/yolov3-tiny-2c.cfg',
                'weights': 'model/darknet/yolov3-tiny-2c_final.weights',
                'training_data': 'model/darknet/data/train.txt',
                'class_names': 'model/darknet/data/classes.txt',
                'anchors': 'model/darknet/data/anchors.txt',
                'log': 'model/darknet/data/log',
                'rectlabel_csv': 'model/darknet/data/annotations.csv',
                'bboxes_font': 'model/darknet/data/Arial.ttf',
                "score": 0.3,
                "iou": 0.45,
                "model_image_size": (416, 416),
                "gpu_num": 1,
                'enable_saving': True,
                'save_model': True,
                'save_weights': True,
                'clusters': 6,
                'auto_estimate_anchors': True,
                'draw_bboxes': True,
                'exclude_infer_classes': ['guns', 'humans', 'knifes'],
                'enable_transfer_learning': True,
                'transfer_learning_epoch_ratio': [300, 150]
            }
        },
        gan={
            'dcgan': {
                "save_images_interval": 10,
                "random_noise_dimension": 100,
                "optimizer": {
                    "type": 'adam',
                    "learning_rate": 0.0002,
                    "beta_1": 0.5
                }
            },
            'srgan': {
                "train_test_ratio": 0.8,
                "downscale_factor": 4
            }
        })

    # Create a BlastML instance
    net = BlastML(cfg=cfg)

    ##################
    #  CNN Examples  #
    ##################

    # Create new project from dataset/ folder (contains only classes and their images)
    # net.create_project()

    # compile, train and evaluate a simple cnn instance
    # net.simple().compile().train().evaluate().infer()

    # compile, train and evaluate a vgg16 instances
    # net.vgg16().compile().train().evaluate()

    # compile, train and evaluate a resnet18 instance
    # net.resnet18().compile().train().evaluate()

    # Create, compile, train and evaluate a custom CNN instance
    # net.create()\
    #     .add_2d(filters=32, kernel=(3, 3), activation="relu", padding='same', input_shape=(net.config.get_width(), net.config.get_height(), net.config.get_channels()))\
    #     .add_2d(filters=32, kernel=(3, 3), activation='relu')\
    #     .add_max_pooling()\
    #     .add_dropout()\
    #     .add_basic_block()\
    #     .add_basic_block()\
    #     .add_flatten()\
    #     .add_dense(size=512, activation='relu', name="layer_features")\
    #     .add_dense(size=cfg.get_num_classes(), activation='softmax', name="layer_classes")\
    #     .show_model_summary()\
    #     .compile()\
    #     .train()\
    #     .evaluate()

    # Load model
    # net.load_model()

    # plot (save as images) history
    # net.plot_history()

    # Infer images
    # cfg.threads = 1  # better to use 1 thread, but you can change it.
    # res = net.infer()
    # print(res)  # show embeddings/classification results

    ############################
    #      (GAN) Examples      #
    ############################
    net.gan().srgan().train()
    # net.gan().dcgan().train()

    ############################
    #  YOLO/Darknet Examples   #
    ############################

    # convert DarkNet model+weights to Keras model+weights
    # net.yolo().export_to_keras()

    # Calculate YOLOv3 anchors (this is done automatically) and save them to anchors.txt (check config)
    # net.yolo().generate_anchors()

    # Convert RectLabel csv export file to YOLOv3 format used in this BlastML implementation
    # net.yolo().rectLabel_to_YOLOv3()

    # train yolo model using DarkNet with model/data
    # net.yolo().create().compile().train()

    # infer yolo model
    # net.yolo().load_model().infer()

    # convert yolo model to protobuf
    # net.yolo().load_model().export_to_pb()

    # infer yolo model with webcame
    # net.yolo().load_model().infer_webcam()


# Fix: guard the entry point so importing this module no longer starts a
# training run as a side effect.
if __name__ == "__main__":
    main()
readme.py | mungojelly/Fifth-Dimensional-Chaos-Matrix | 0 | 12767811 | <filename>readme.py<gh_stars>0
#!/usr/bin/python
import re
import random
# Matches "[first/second]" alternative brackets:
#   \[        literal opening bracket
#   ([^/]*)   group 1: text up to the slash
#   /         the separator slash
#   ([^\]]*)  group 2: text up to the closing bracket
#   \]        literal closing bracket
alternatives_bracket = re.compile(r'\[([^/]*)/([^\]]*)\]')

def choose_one(match_object):
    """Return group 1 or group 2 of the match, picked at random."""
    return match_object.group(random.choice((1, 2)))

def process_brackets(text_to_process):
    """Replace every [a/b] bracket in the text with a random alternative."""
    return alternatives_bracket.sub(choose_one, text_to_process)
# Emit the CGI response: the HTML template below is run through
# process_brackets so each [a/b] alternative is resolved at random,
# producing a slightly different page on every load.  (Python 2 'print'.)
# NOTE(review): CGI requires a blank line after the Content-type header;
# confirm it was not lost from this template.
print process_brackets("""Content-type: text/html
<html>
<head>
<title>[The /]Fifth Dimensional Chaos Matrix[, The/]</title>
</head>
<body>
<p>[Hello/Welcome][!/!!][!/]</p>
<p>[This is/You have reached] [the/a] [homepage/webpage] for
[the /]Fifth Dimensional Chaos Matrix, [a Discordian/an Erisian]
[programming/software] project[./!]</p>
<p>The [founder/originator] of this [project/program] is
[<NAME>/<NAME> the Lesser Mungojelly],
who [can be reached/you can reach] at
<a href="mailto:<EMAIL>"><EMAIL></a>[./!]</p>
<p>[The /]Fifth Dimensional Chaos Matrix is named [after/following] the
<a href="http://jubal.westnet.com/hyperdiscordia/hodge_podge_transformer.gif">diagram</a>
in the <a href="http://jubal.westnet.com/hyperdiscordia/hodge_podge.html">Hodge-Podge</a>
[section/part] of the
<a href="http://en.wikipedia.org/wiki/Principia_Discordia">Principia Discordia</a>[./!]</p>
<p>The [intention/aim] of [this project/FDCM] [is not/isn't] to [make/create] something
[useful/practical][./!] [It is/It's] an exp[eriment/loration][./!]</p>
<p>[FDCM/Fifth Dimensional Chaos Matrix] [mak/creat]es chaos [from/out of] order and
order [from/out of] chaos[./!]</p>
<p>[You're free/Feel free] to [contribute/write] [parts/components] for [FDCM/Fifth Dimensional
Chaos Matrix][./!] ([Which/That] [should/ought to] be [fairly/relatively] easy, since
[parts/components] of the [FDCM/Fifth Dimensional Chaos Matrix] don't [even /]necessarily
[need/have] to do [anything/something] [coherent/predictable][./!])</p>
<p>This [page/description] [changes/transforms] [each/every] time [it's/it is]
[viewed/reloaded][./!] Its <a
href="http://github.com/mungojelly/Fifth-Dimensional-Chaos-Matrix/blob/master/readme.py">
[code/source]</a> [is included/can be found] [at/with] [its/the]
<a href="http://github.com/mungojelly/Fifth-Dimensional-Chaos-Matrix">GitHub project</a>[./!]
</p>
<p>[Thanks/Thank you] for your [interest/attention][./!]</p>
<p><3[!/][!/]</p>
</body>
</html>
""")
| 3.53125 | 4 |
RTplzrunBlog/Divide and Conquer/1780.py | lkc263/Algorithm_Study_Python | 0 | 12767812 | import sys
# Fast line reader for competitive-programming input (BOJ 1780).
read = sys.stdin.readline
# Board size N and the N x N grid of -1 / 0 / 1 cells.
n = int(read())
board = [list(map(int, read().split())) for _ in range(n)]
# Counts of uniform sub-squares: array[0] -> -1, array[1] -> 0, array[2] -> 1.
array = [0] * 3
def dfs(x, y, n):
    """Tally uniform sub-squares of the global ``board`` (BOJ 1780).

    Examines the n-by-n square whose top-left corner is (x, y).  If every
    cell equals the top-left cell, the matching slot of the global
    ``array`` is incremented (-1 -> index 0, 0 -> 1, 1 -> 2); otherwise
    the square is split into nine (n//3)-sized squares and each is
    processed recursively.
    """
    global array
    first = board[x][y]
    for row in range(x, x + n):
        for col in range(y, y + n):
            if board[row][col] != first:
                # Mixed square: recurse into the 3x3 grid of sub-squares.
                third = n // 3
                for dr in range(3):
                    for dc in range(3):
                        dfs(x + dr * third, y + dc * third, third)
                return
    # Uniform square: count it under its value (-1/0/1 -> index 0/1/2).
    array[first + 1] += 1
# Solve from the full board, then print the three counts (-1, 0, 1 papers),
# one per line.
dfs(0, 0, n)
print("\n".join(map(str, array)))
| 3.328125 | 3 |
more/transaction/tests/test_transaction.py | morepath/more.transaction | 3 | 12767813 | <reponame>morepath/more.transaction
import morepath
from transaction import TransactionManager
from transaction.interfaces import TransientError
from more.transaction import TransactionApp
from more.transaction.main import transaction_tween_factory, default_commit_veto
from webtest import TestApp as Client
import pytest
def test_multiple_path_variables():
    # A view raising a retryable error once must be retried, with the
    # path variables re-resolved on the second attempt.
    # NOTE(review): 'Conflict' must exist at module scope (a
    # TransientError subclass) -- it is not defined in this chunk.
    class TestApp(TransactionApp):
        attempts = 0
    @TestApp.path("/{type}/{id}")
    class Document:
        def __init__(self, type, id):
            self.type = type
            self.id = id
    @TestApp.view(model=Document)
    def view_document(self, request):
        TestApp.attempts += 1
        # on the first attempt raise a conflict error
        if TestApp.attempts == 1:
            raise Conflict
        return "ok"
    @TestApp.setting(section="transaction", name="attempts")
    def get_retry_attempts():
        return 2
    client = Client(TestApp())
    response = client.get("/document/1")
    assert response.text == "ok"
    assert TestApp.attempts == 2
def test_reset_unconsumed_path():
    # After a retry the unconsumed path must be reset, so routing matches
    # /foo/bar again instead of accidentally resolving /bar/foo.
    class TestApp(TransactionApp):
        attempts = 0
    @TestApp.path("/foo/bar")
    class Foo:
        pass
    @TestApp.view(model=Foo)
    def view_foo(self, request):
        TestApp.attempts += 1
        # on the first attempt raise a conflict error
        if TestApp.attempts == 1:
            raise Conflict
        return "ok"
    # if the unconsumed path is reset wrongly, it'll accidentally pick
    # up this model instead of Foo
    @TestApp.path("/bar/foo")
    class Bar:
        pass
    @TestApp.view(model=Bar)
    def view_bar(self, request):
        return "error"
    @TestApp.setting(section="transaction", name="attempts")
    def get_retry_attempts():
        return 2
    client = Client(TestApp())
    response = client.get("/foo/bar")
    assert response.text == "ok"
    assert TestApp.attempts == 2
def test_reset_app():
    # A retry must restart routing from the root app so mounted
    # sub-applications are traversed again.
    class RootApp(TransactionApp):
        attempts = 0
    class TestApp(morepath.App):
        pass
    @RootApp.mount(app=TestApp, path="/mount")
    def mount_testapp():
        return TestApp()
    @TestApp.path("/sub")
    class Foo:
        pass
    @TestApp.view(model=Foo)
    def view_foo(self, request):
        RootApp.attempts += 1
        # on the first attempt raise a conflict error
        if RootApp.attempts == 1:
            raise Conflict
        return "ok"
    @RootApp.setting(section="transaction", name="attempts")
    def get_retry_attempts():
        return 2
    client = Client(RootApp())
    response = client.get("/mount/sub")
    assert response.text == "ok"
    assert RootApp.attempts == 2
def test_handler_exception():
    # A non-retryable handler exception propagates and aborts the txn.
    def handler(request):
        raise NotImplementedError
    txn = DummyTransaction()
    publish = transaction_tween_factory(DummyApp(), handler, txn)
    with pytest.raises(NotImplementedError):
        publish(DummyRequest())
    assert txn.began
    assert txn.aborted
    assert not txn.committed
def test_handler_retryable_exception():
    # With attempts=3 and a retryable transaction, the tween retries
    # until the handler finally succeeds on the third call; the request
    # body is made seekable before every attempt.
    from transaction.interfaces import TransientError
    class Conflict(TransientError):
        pass
    count = []
    response = DummyResponse()
    app = DummyApp()
    app.settings.transaction.attempts = 3
    def handler(request, count=count):
        count.append(True)
        if len(count) == 3:
            return response
        raise Conflict
    txn = DummyTransaction(retryable=True)
    publish = transaction_tween_factory(app, handler, txn)
    request = DummyRequest()
    result = publish(request)
    assert txn.began
    assert txn.committed == 1
    assert txn.aborted == 2
    assert request.made_seekable == 3
    assert result is response
def test_handler_retryable_exception_defaults_to_1():
    """With the default of a single attempt, a retryable error is not
    retried and simply propagates to the caller."""
    # Fix: define the retryable error locally (mirroring
    # test_handler_retryable_exception) instead of relying on a
    # module-level Conflict that is not visibly defined, and drop the
    # unused 'count' accumulator the original carried as dead code.
    class Conflict(TransientError):
        pass
    def handler(request):
        raise Conflict
    publish = transaction_tween_factory(DummyApp(), handler, DummyTransaction())
    with pytest.raises(Conflict):
        publish(DummyRequest())
def test_handler_isdoomed():
    # A doomed transaction is aborted even when the handler succeeds.
    txn = DummyTransaction(doomed=True)
    def handler(request):
        return
    publish = transaction_tween_factory(DummyApp(), handler, txn)
    publish(DummyRequest())
    assert txn.began
    assert txn.aborted
    assert not txn.committed
def test_handler_notes():
    # The tween annotates the transaction with the request path, and no
    # user name for an anonymous request.
    txn = DummyTransaction()
    def handler(request):
        return DummyResponse()
    publish = transaction_tween_factory(DummyApp(), handler, txn)
    publish(DummyRequest())
    assert txn._note == "/"
    assert txn.username is None
def test_identity():
    # An authenticated request records ':<userid>' on the transaction.
    txn = DummyTransaction()
    request = DummyRequest()
    request.identity = morepath.Identity("foo")
    def handler(request):
        return DummyResponse()
    publish = transaction_tween_factory(DummyApp(), handler, txn)
    publish(request)
    assert txn.username == ":foo"
def test_500_without_commit_veto():
    # With no commit veto configured, even a 500 response is committed.
    response = DummyResponse()
    response.status = "500 Bad Request"
    def handler(request):
        return response
    txn = DummyTransaction()
    publish = transaction_tween_factory(DummyApp(), handler, txn)
    result = publish(DummyRequest())
    assert result is response
    assert txn.began
    assert not txn.aborted
    assert txn.committed
def test_500_with_default_commit_veto():
    # The default commit veto aborts the transaction on 5xx responses.
    app = DummyApp()
    app.settings.transaction.commit_veto = default_commit_veto
    response = DummyResponse()
    response.status = "500 Bad Request"
    def handler(request):
        return response
    txn = DummyTransaction()
    publish = transaction_tween_factory(app, handler, txn)
    result = publish(DummyRequest())
    assert result is response
    assert txn.began
    assert txn.aborted
    assert not txn.committed
def test_null_commit_veto():
    # An explicit None veto behaves like no veto: a 500 still commits.
    response = DummyResponse()
    response.status = "500 Bad Request"
    def handler(request):
        return response
    app = DummyApp()
    app.settings.transaction.commit_veto = None
    txn = DummyTransaction()
    publish = transaction_tween_factory(app, handler, txn)
    result = publish(DummyRequest())
    assert result is response
    assert txn.began
    assert not txn.aborted
    assert txn.committed
def test_commit_veto_true():
    """A veto returning True forces an abort."""
    app = DummyApp()
    app.settings.transaction.commit_veto = lambda request, response: True
    resp = DummyResponse()
    txn = DummyTransaction()
    tween = transaction_tween_factory(app, lambda request: resp, txn)
    assert tween(DummyRequest()) is resp
    assert txn.began
    assert txn.aborted
    assert not txn.committed


def test_commit_veto_false():
    """A veto returning False lets the commit proceed."""
    app = DummyApp()
    app.settings.transaction.commit_veto = lambda request, response: False
    resp = DummyResponse()
    txn = DummyTransaction()
    tween = transaction_tween_factory(app, lambda request: resp, txn)
    assert tween(DummyRequest()) is resp
    assert txn.began
    assert not txn.aborted
    assert txn.committed


def test_commitonly():
    """The plain success path begins and commits, with no abort."""
    resp = DummyResponse()
    txn = DummyTransaction()
    tween = transaction_tween_factory(DummyApp(), lambda request: resp, txn)
    assert tween(DummyRequest()) is resp
    assert txn.began
    assert not txn.aborted
    assert txn.committed
class DummySettingsSectionContainer:
    # Minimal stand-in for morepath's settings container; only the
    # ``transaction`` section is read by the tween under test.
    def __init__(self):
        self.transaction = DummyTransactionSettingSection()
class DummyTransactionSettingSection:
    # Default transaction settings: a single attempt and no commit veto.
    def __init__(self):
        self.attempts = 1
        self.commit_veto = None
class DummyApp:
    # Fake application exposing just the ``settings`` attribute that
    # transaction_tween_factory reads.
    def __init__(self):
        self.settings = DummySettingsSectionContainer()
class DummyTransaction(TransactionManager):
    """Fake transaction manager that records begin/commit/abort activity.

    The class-level attributes are falsy defaults; ``__init__`` shadows the
    three counters with per-instance integers so tests can assert exact call
    counts (``committed == 1`` etc.) while plain truthiness checks still work.
    """
    began = False
    committed = False
    aborted = False
    _resources = []
    username = None
    def __init__(self, doomed=False, retryable=False):
        self.doomed = doomed
        # Integer counters shadow the class-level booleans above.
        self.began = 0
        self.committed = 0
        self.aborted = 0
        self.retryable = retryable
        self.active = False
    @property
    def manager(self):
        # The tween treats this object as its own transaction manager.
        return self
    def _retryable(self, t, v):
        # Only report retryability while a transaction is active; implicitly
        # returns None (falsy) otherwise.
        if self.active:
            return self.retryable
    def get(self):
        return self
    def setUser(self, name, path="/"):
        self.username = f"{path}:{name}"
    def isDoomed(self):
        return self.doomed
    def begin(self):
        self.began += 1
        self.active = True
        return self
    def commit(self):
        self.committed += 1
    def abort(self):
        self.active = False
        self.aborted += 1
    def note(self, value):
        # The tween stores the request path here via txn.note(...).
        self._note = value
class DummyRequest:
    """Minimal request stub; counts how often the body is made seekable."""
    path = "/"
    identity = morepath.NO_IDENTITY
    def __init__(self):
        self.environ = {}
        self.made_seekable = 0
    def make_body_seekable(self):
        self.made_seekable += 1
    def reset(self):
        # Called by the tween between retry attempts.
        self.make_body_seekable()
    @property
    def path_info(self):
        return self.path
class DummyResponse:
    """Minimal response stub carrying only a status line and headers."""
    def __init__(self, status="200 OK", headers=None):
        self.status = status
        if headers is None:
            headers = {}
        self.headers = headers
class Conflict(TransientError):
    """Retryable (transient) error used to exercise the tween's retry logic."""
    pass
| 2.25 | 2 |
PluginMaster/pluginListener.py | swinflowcloud/mspf | 2 | 12767814 | <filename>PluginMaster/pluginListener.py
from watchdog.events import *
import logging
from . import globalVal
class FileEventHandler(FileSystemEventHandler):
    """Watchdog handler that logs every filesystem event to the
    plugin-master log file.

    The four ``on_*`` callbacks previously duplicated the same
    directory/file branching; they now share one helper so the log
    messages stay identical in a single place.
    """

    def __init__(self):
        # Configure the root logger once. filemode "w" truncates the log
        # file on start-up instead of appending to it (the default).
        logging.basicConfig(filename=globalVal.LOGS_DIR+'/plugin_master.log',
                            level=logging.DEBUG,
                            filemode="w",
                            format=globalVal.LOGS_FORMAT,
                            datefmt=globalVal.LOGS_DATE_FORMAT)
        FileSystemEventHandler.__init__(self)

    def _log_event(self, event, template):
        """Log *template* formatted with the event kind and source path."""
        kind = "directory" if event.is_directory else "file"
        logging.getLogger().info(template.format(kind, event.src_path))

    def on_moved(self, event):
        # Moves carry both a source and a destination path.
        kind = "directory" if event.is_directory else "file"
        logging.getLogger().info(
            "{0} moved from {1} to {2}".format(kind, event.src_path, event.dest_path))

    def on_created(self, event):
        self._log_event(event, "{0} created:{1}")

    def on_deleted(self, event):
        self._log_event(event, "{0} deleted:{1}")

    def on_modified(self, event):
        self._log_event(event, "{0} modified:{1}")
| 2.234375 | 2 |
server/domain/usecase/create_room.py | jwilyandi19/tic-tac-royale | 0 | 12767815 | import server.domain.model.tictactoe as tttMod
import server.domain.model.room as roomMod
import asyncio.locks as lockMod
class CreateRoom:
    """Use case: create a new tic-tac-toe room under mutual exclusion."""

    def __init__(self, ticTacToe: tttMod.TicTacRoyale, lock: lockMod.Lock):
        self.tictactoe = ticTacToe
        self.lock = lock

    def Create(self) -> roomMod.Room:
        """Create and return a new room, serialising access via the lock.

        The release sits in a ``finally`` block so the lock is not leaked
        when ``createRoom`` raises (previously an exception left the lock
        held forever).
        """
        # NOTE(review): ``asyncio.locks.Lock.acquire`` is a coroutine;
        # calling it without ``await`` does not actually acquire the lock.
        # Confirm whether a threading lock was intended here.
        self.lock.acquire()
        try:
            room = self.tictactoe.createRoom()
        finally:
            self.lock.release()
        return room
| 2.421875 | 2 |
lib/amqp/amqpadapter.py | Juniper/YAPT | 33 | 12767816 | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
# Copyright (c) 2018 Juniper Networks, Inc.
# All rights reserved.
# Use is subject to license terms.
#
# Author: cklewar
import abc
import sys
import threading
import pika
from pika import exceptions
from lib.logmsg import LogAmqp as logmsg
import lib.constants as c
from lib.tools import Tools
class AMQPBlockingServerAdapter(threading.Thread):
    """Threaded AMQP consumer bound to one queue over a blocking pika
    connection (Python 2 code base -- note the print statements).

    Terminates the whole process via sys.exit() if the broker is
    unreachable at construction time.
    """
    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
        """
        :param group:
        :param target:
        :type target: Processor
        :param name:
        :param args: (exchange, type, routing_key, queue)
        :param kwargs:
        """
        super(AMQPBlockingServerAdapter, self).__init__(group=group, target=target, name=name, args=args, kwargs=kwargs)
        self._exchange = args[0]
        self._type = args[1]
        self._routing_key = args[2]
        self._queue = args[3]
        self._logger = c.logger
        try:
            # One blocking connection/channel per server thread; prefetch of 1
            # gives fair dispatch across competing consumers.
            self._connection = pika.BlockingConnection(
                pika.ConnectionParameters(host=c.conf.AMQP.Host, port=c.conf.AMQP.Port,
                                          credentials=pika.PlainCredentials(c.conf.AMQP.User,
                                                                            Tools.get_password(
                                                                                c.YAPT_PASSWORD_TYPE_AMQP))))
            self._channel = self._connection.channel()
            self._channel.queue_declare(queue=self._queue, durable=False)
            self._channel.basic_qos(prefetch_count=1)
            self._channel.basic_consume(self.receive_message, queue=self._routing_key)
        except exceptions.ConnectionClosed as err:
            print Tools.create_log_msg('AMQP', None, logmsg.AMQP_BUS_NOK.format(err))
            self._logger.info(Tools.create_log_msg('AMQP', None, logmsg.AMQP_BUS_NOK.format(err)))
            sys.exit()
        Tools.create_amqp_startup_log(exchange=self._exchange, type=self._type,
                                      routing_key=self._routing_key,
                                      host=c.conf.AMQP.Host,
                                      channel=self._channel)
    def run(self):
        # Blocks this thread in pika's consume loop until the channel closes.
        self._channel.start_consuming()
    def send_message_amqp(self, message, routing_key):
        """Publish *message* to *routing_key* on a fresh short-lived connection."""
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=c.conf.AMQP.Host, port=c.conf.AMQP.Port,
                                      credentials=pika.PlainCredentials(c.conf.AMQP.User,
                                                                        Tools.get_password(
                                                                            c.YAPT_PASSWORD_TYPE_AMQP))))
        channel = connection.channel()
        channel.queue_declare(queue=self._routing_key, durable=False)
        channel.basic_publish(exchange='',
                              routing_key=routing_key,
                              body=message,
                              properties=pika.BasicProperties(
                                  delivery_mode=1,
                              ))
        connection.close()
    @abc.abstractmethod
    def receive_message(self, ch, method, properties, body):
        # Subclasses implement the actual message handling.
        raise NotImplementedError()
class AMQPRpcServerAdapter(threading.Thread):
    """Threaded AMQP RPC server: consumes requests from one queue and lets
    subclasses answer via on_request. Terminates the process if the broker
    cannot be reached at construction time."""
    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
        """
        :param group:
        :param target:
        :type target: type: Processor
        :param name:
        :param args: (exchange, type, routing_key)
        :param kwargs:
        """
        super(AMQPRpcServerAdapter, self).__init__(group=group, target=target, name=name, args=args, kwargs=kwargs)
        self._exchange = args[0]
        self._type = args[1]
        self._routing_key = args[2]
        self._logger = c.logger
        try:
            self._connection = pika.BlockingConnection(pika.ConnectionParameters(
                host=c.conf.AMQP.Host, port=c.conf.AMQP.Port,
                credentials=pika.PlainCredentials(c.conf.AMQP.User,
                                                  Tools.get_password(
                                                      c.YAPT_PASSWORD_TYPE_AMQP))))
            self._channel = self._connection.channel()
            self._channel.queue_declare(queue=self._routing_key)
            # prefetch_count=1: hand out one RPC request at a time.
            self._channel.basic_qos(prefetch_count=1)
            self._channel.basic_consume(self.on_request, queue=self._routing_key)
        except (exceptions.ProbableAuthenticationError, exceptions.ConnectionClosed) as err:
            print Tools.create_log_msg('AMQP', None, logmsg.AMQP_BUS_NOK.format(err))
            self._logger.info(Tools.create_log_msg('AMQP', None, logmsg.AMQP_BUS_NOK.format(err)))
            sys.exit()
        Tools.create_amqp_startup_log(exchange=self._exchange, type=self._type,
                                      routing_key=self._routing_key,
                                      host=c.conf.AMQP.Host, channel=self._channel)
    def run(self):
        # Blocks in pika's consume loop for the lifetime of the thread.
        self._channel.start_consuming()
    @abc.abstractmethod
    def on_request(self, ch, method, props, body):
        # Subclasses handle the request and publish the reply.
        raise NotImplementedError()
class AMQPBlockingClientAdapter(object):
    """Fire-and-forget AMQP publisher; opens a fresh connection per message."""
    def __init__(self, exchange=None, routing_key=None, queue=None):
        self._exchange = exchange
        self._routing_key = routing_key
        self._queue = queue
        self._logger = c.logger
    def send_message_amqp(self, message):
        """Publish *message* to the configured routing key (non-persistent)."""
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=c.conf.AMQP.Host, port=c.conf.AMQP.Port,
                                      credentials=pika.PlainCredentials(c.conf.AMQP.User,
                                                                        Tools.get_password(
                                                                            c.YAPT_PASSWORD_TYPE_AMQP))))
        channel = connection.channel()
        channel.queue_declare(queue=self._queue, durable=False)
        channel.basic_publish(exchange='',
                              routing_key=self._routing_key,
                              body=message,
                              properties=pika.BasicProperties(
                                  delivery_mode=1,
                              ))
        connection.close()
class AMQPRpcClientAdapter(object):
    """Blocking AMQP RPC client base: declares an exclusive reply queue and
    leaves request sending / response correlation to subclasses."""
    def __init__(self, exchange=None, routing_key=None):
        self._exchange = exchange
        self._routing_key = routing_key
        self._logger = c.logger
        try:
            self._connection = pika.BlockingConnection(
                pika.ConnectionParameters(host=c.conf.AMQP.Host, port=c.conf.AMQP.Port,
                                          credentials=pika.PlainCredentials(c.conf.AMQP.User,
                                                                            Tools.get_password(
                                                                                c.YAPT_PASSWORD_TYPE_AMQP))))
            self._channel = self._connection.channel()
            # Exclusive, broker-named queue for RPC replies to this client only.
            self._result = self._channel.queue_declare(exclusive=True)
            self._callback_queue = self._result.method.queue
            self._channel.basic_consume(self.on_response, no_ack=True, queue=self._callback_queue)
            self._response = None
            self._corr_id = None
        except pika.exceptions.ConnectionClosed as err:
            print Tools.create_log_msg('AMQP', None, logmsg.AMQP_BUS_NOK.format(err))
            self._logger.info(Tools.create_log_msg('AMQP', None, logmsg.AMQP_BUS_NOK.format(err)))
    @abc.abstractmethod
    def call(self, data):
        # Subclasses send the request and wait for the correlated reply.
        raise NotImplementedError()
    @abc.abstractmethod
    def on_response(self, ch, method, props, body):
        raise NotImplementedError()
| 1.789063 | 2 |
Node3D/base/data/Math.py | ArnoChenFx/Node3D | 3 | 12767817 | import numpy as np
def clamp(value, min, max):
    """Clip *value* (scalar or array) into the closed interval [min, max]."""
    clipped = np.clip(value, min, max)
    return clipped
def lerp(a, b, fraction):
    """Linearly interpolate from *a* to *b*; *fraction* is clamped to [0, 1]."""
    t = clamp(fraction, 0, 1)
    return a * (1 - t) + b * t
def fit(value, omin, omax, nmin, nmax):
    """Remap *value* from the range [omin, omax] into [nmin, nmax]."""
    normalized = (value - omin) / (omax - omin)
    return normalized * (nmax - nmin) + nmin
def fit01(value, min, max):
    """Map *value* in [0, 1] onto [min, max]."""
    span = max - min
    return value * span + min
def fit10(value, min, max):
    """Map *value* in [0, 1] onto [min, max], reversed (0 -> max, 1 -> min)."""
    span = max - min
    return (1.0 - value) * span + min
def fit11(value, min, max):
    """Map *value* in [-1, 1] onto [min, max]."""
    # Inlined equivalent of fit(value, -1, 1, min, max).
    normalized = (value + 1) / 2
    return normalized * (max - min) + min
def fit_to_01(value, min, max):
    """Normalize *value* from [min, max] into [0, 1]."""
    span = max - min
    return (value - min) / span
def fit_11_to_01(value):
    """Map *value* from [-1, 1] into [0, 1]."""
    return 0.5 * (value + 1.0)
| 2.8125 | 3 |
chapter_6/scalability/scalability.py | mossblaser/phd_thesis_experiments | 1 | 12767818 | <gh_stars>1-10
import random
import math
import time
import logging
import argparse
import sys
from collections import defaultdict
from rig.place_and_route import place, allocate, route, Machine, Cores, SDRAM
from rig.netlist import Net
from rig.routing_table import routing_tree_to_tables, minimise_tables
from rig.routing_table.remove_default_routes import minimise as remove_default_routes
from select_algo import placer
# Parse arguments
parser = argparse.ArgumentParser(description="Scalability experiment")
parser.add_argument("--print-header", action="store_true", default=False)
parser.add_argument("--width", "-W", type=int, default=None)
parser.add_argument("--height", "-H", type=int, default=None)
parser.add_argument("--fan-out", "-f", type=int, default=4)
parser.add_argument("--distance", "-d", type=float, default=3.0)
parser.add_argument("--verbose", "-v", action="count", default=0)
parser.add_argument("--placer", "-p", type=placer, default=(place, "default"))
parser.add_argument("--seed", "-s", type=int, default=random.getrandbits(32))
args = parser.parse_args()
# With --print-header just emit the CSV column names and quit.
if args.print_header:
    print("placer,width,height,fan_out,distance_sd,runtime,placed_net_length,manual_net_length,placed_total_entries,manual_total_entries,placed_max_entries,manual_max_entries")
    sys.exit(0)
if args.verbose >= 2:
    logging.basicConfig(level=logging.DEBUG)
elif args.verbose >= 1:
    logging.basicConfig(level=logging.INFO)
random.seed(args.seed)
# Width/height default to each other; at least one must be given.
width = args.width
if width is None:
    width = args.height
if width is None:
    parser.error("--width or --height not specified")
height = args.height if args.height else width
fan_out = args.fan_out
distance_sd = args.distance
place, placer_name = args.placer
# Construct network
# One vertex per grid point; each vertex sources a net to fan_out targets
# drawn from a toroidally-wrapped Gaussian around its own position.
vertices = {(x, y): object() for x in range(width) for y in range(height)}
nets = [Net(v, [vertices[(int(x+random.gauss(0, distance_sd))%width,
                          int(y+random.gauss(0, distance_sd))%height)]
                for _ in range(fan_out)])
        for (x, y), v in vertices.items()]
vertices_resources = {v: {Cores: 1, SDRAM: 1024} for v in vertices.values()}
# 16 cores per chip means a 4x4 patch of vertices fits on one chip.
machine = Machine(int(math.ceil(width / 4.0)), int(math.ceil(height / 4.0)),
                  chip_resources={Cores:16, SDRAM:1024*1024})
#sys.stderr.write("Graph created...\n")
# Place-and-route (only the placement step is timed).
place_start = time.time()
placements = place(vertices_resources, nets, machine, [])
runtime = time.time() - place_start
#sys.stderr.write("Placed\n")
allocations = allocate(vertices_resources, nets, machine, [], placements)
#sys.stderr.write("Allocated\n")
routes = route(vertices_resources, nets, machine, [], placements, allocations)
#sys.stderr.write("Routed\n")
def count_table_entries(routes):
    """Count routing-table entries per chip, assuming default-route removal.

    A hop whose single outgoing direction equals its incoming direction can
    use the router's default route and needs no table entry.

    Returns a ``(total_entries, max_entries_on_any_chip)`` tuple.
    """
    entries_by_chip = defaultdict(int)
    for tree in routes.values():
        for incoming, chip, outgoing in tree.traverse():
            is_default = (len(outgoing) == 1 and
                          next(iter(outgoing)) == incoming)
            if not is_default:
                entries_by_chip[chip] += 1
    counts = entries_by_chip.values()
    return (sum(counts), max(counts))
# NB: Don't use actual routing table generator since the uncompressed tables
# require essentially the same amount of memory as the routing tree and this
# tips things over the edge for very large networks...
#routing_tables = routing_tree_to_tables(
#    routes,
#    {n: (i, 0xFFFFFFFF) for i, n in enumerate(routes)})
#sys.stderr.write("Tables generated\n")
#routing_tables = minimise_tables(routing_tables, None, [remove_default_routes])
#sys.stderr.write("Tables minimised\n")
#
## Return (total_entries, max_entries)
#return (sum(len(t) for t in routing_tables.values()),
#        max(len(t) for t in routing_tables.values()))
# Statistics for the algorithmic placement.
placed_net_length = sum(sum(1 for _ in route.traverse()) for route in routes.values())
#sys.stderr.write("Route length calculated\n")
placed_total_entries, placed_max_entries = count_table_entries(routes)
del routes
# 'Idealised' placement + routing
# Baseline: manually map each 4x4 vertex patch onto its own chip.
placements = {v: (x//4, y//4) for (x, y), v in vertices.items()}
allocations = allocate(vertices_resources, nets, machine, [], placements)
#sys.stderr.write("Idealised placement generated\n")
routes = route(vertices_resources, nets, machine, [], placements, allocations)
#sys.stderr.write("Idealised routing complete\n")
manual_net_length = sum(sum(1 for _ in route.traverse()) for route in routes.values())
manual_total_entries, manual_max_entries = count_table_entries(routes)
# Emit one CSV row matching the --print-header column order.
print(",".join(map(str, [
    placer_name,
    width, height,
    fan_out, distance_sd,
    runtime,
    placed_net_length, manual_net_length,
    placed_total_entries, manual_total_entries,
    placed_max_entries, manual_max_entries,
])))
poky-dunfell/bitbake/lib/bb/parse/parse_py/BBHandler.py | lacie-life/YoctoPi | 0 | 12767819 | """
class for handling .bb files
Reads a .bb file and obtains its metadata
"""
# Copyright (C) 2003, 2004 <NAME>
# Copyright (C) 2003, 2004 <NAME>
#
# SPDX-License-Identifier: GPL-2.0-only
#
import re, bb, os
import bb.build, bb.utils
from . import ConfHandler
from .. import resolve_file, ast, logger, ParseError
from .ConfHandler import include, init
# For compatibility
bb.deprecate_import(__name__, "bb.parse", ["vars_from_file"])
# Line classifiers used by feeder(); named groups are consumed by the ast handlers.
__func_start_regexp__ = re.compile(r"(((?P<py>python(?=(\s|\()))|(?P<fr>fakeroot(?=\s)))\s*)*(?P<func>[\w\.\-\+\{\}\$:]+)?\s*\(\s*\)\s*{$" )
__inherit_regexp__ = re.compile(r"inherit\s+(.+)" )
__export_func_regexp__ = re.compile(r"EXPORT_FUNCTIONS\s+(.+)" )
__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
__deltask_regexp__ = re.compile(r"deltask\s+(?P<func>\w+)(?P<ignores>.*)")
__addhandler_regexp__ = re.compile(r"addhandler\s+(.+)" )
__def_regexp__ = re.compile(r"def\s+(\w+).*:" )
__python_func_regexp__ = re.compile(r"(\s+.*)|(^$)|(^#)" )
__python_tab_regexp__ = re.compile(r" *\t")
# Mutable parser state shared between feeder() invocations; reset by handle().
__infunc__ = []        # [name, file, line, is_python, is_fakeroot] while inside a function body
__inpython__ = False   # name of the python "def" currently being collected, or False
__body__ = []          # accumulated lines of the current function body
__classname__ = ""     # class name when parsing a .bbclass file
cached_statements = {} # parsed-statement cache for .bbclass/.inc files
def supports(fn, d):
    """Return True if *fn* has an extension this parser understands."""
    ext = os.path.splitext(fn)[-1]
    return ext in (".bb", ".bbclass", ".inc")
def inherit(files, fn, lineno, d):
    """Process an 'inherit' directive: include each named class exactly once.

    Bare class names are resolved to classes/<name>.bbclass along BBPATH and
    every probed location is recorded as a parse dependency.
    """
    __inherit_cache = d.getVar('__inherit_cache', False) or []
    files = d.expand(files).split()
    for file in files:
        if not os.path.isabs(file) and not file.endswith(".bbclass"):
            file = os.path.join('classes', '%s.bbclass' % file)
        if not os.path.isabs(file):
            bbpath = d.getVar("BBPATH")
            abs_fn, attempts = bb.utils.which(bbpath, file, history=True)
            # Non-matching probe locations become dependencies so the cache
            # is invalidated if a higher-priority class appears there later.
            for af in attempts:
                if af != abs_fn:
                    bb.parse.mark_dependency(d, af)
            if abs_fn:
                file = abs_fn
        if not file in __inherit_cache:
            logger.debug(1, "Inheriting %s (from %s:%d)" % (file, fn, lineno))
            __inherit_cache.append( file )
            d.setVar('__inherit_cache', __inherit_cache)
            include(fn, file, lineno, d, "inherit")
            # Re-read the cache: the include may itself have inherited classes.
            __inherit_cache = d.getVar('__inherit_cache', False) or []
def get_statements(filename, absolute_filename, base_name):
    """Parse *absolute_filename* into an ast.StatementGroup, caching results
    for .bbclass/.inc files (which are shared across many recipes)."""
    global cached_statements
    try:
        return cached_statements[absolute_filename]
    except KeyError:
        with open(absolute_filename, 'r') as f:
            statements = ast.StatementGroup()
            lineno = 0
            while True:
                lineno = lineno + 1
                s = f.readline()
                if not s: break
                s = s.rstrip()
                feeder(lineno, s, filename, base_name, statements)
            if __inpython__:
                # add a blank line to close out any python definition
                feeder(lineno, "", filename, base_name, statements, eof=True)
        if filename.endswith(".bbclass") or filename.endswith(".inc"):
            cached_statements[absolute_filename] = statements
        return statements
def handle(fn, d, include):
    """Parse file *fn* into datastore *d*.

    *include* is 0 for a top-level recipe (finalization happens and a dict of
    datastores is returned) and non-zero for an included/inherited file.
    """
    global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__, __classname__
    __body__ = []
    __infunc__ = []
    __classname__ = ""
    __residue__ = []
    base_name = os.path.basename(fn)
    (root, ext) = os.path.splitext(base_name)
    init(d)
    if ext == ".bbclass":
        __classname__ = root
        __inherit_cache = d.getVar('__inherit_cache', False) or []
        if not fn in __inherit_cache:
            __inherit_cache.append(fn)
            d.setVar('__inherit_cache', __inherit_cache)
    if include != 0:
        oldfile = d.getVar('FILE', False)
    else:
        oldfile = None
    abs_fn = resolve_file(fn, d)
    # actual loading
    statements = get_statements(fn, abs_fn, base_name)
    # DONE WITH PARSING... time to evaluate
    if ext != ".bbclass" and abs_fn != oldfile:
        d.setVar('FILE', abs_fn)
    try:
        statements.eval(d)
    except bb.parse.SkipRecipe:
        d.setVar("__SKIPPED", True)
        if include == 0:
            return { "" : d }
    if __infunc__:
        raise ParseError("Shell function %s is never closed" % __infunc__[0], __infunc__[1], __infunc__[2])
    if __residue__:
        # Fix: the format arguments must be a tuple. Previously
        # `"... %s from %s" % __residue__` applied % to a list and raised a
        # TypeError instead of the intended ParseError whenever leftover
        # unparsed data existed.
        raise ParseError("Leftover unparsed (incomplete?) data %s from %s" % (__residue__, fn), fn)
    if ext != ".bbclass" and include == 0:
        return ast.multi_finalize(fn, d)
    if ext != ".bbclass" and oldfile and abs_fn != oldfile:
        d.setVar("FILE", oldfile)
    return d
def feeder(lineno, s, fn, root, statements, eof=False):
    """Consume one (rstripped) line of a .bb file, updating the shared parser
    state and appending completed statements to *statements*.

    State machine: lines inside a shell function accumulate until '}', lines
    inside a python 'def' accumulate until dedent/EOF, backslash continuations
    accumulate in __residue__, and everything else is matched against the
    directive regexes or handed to the conf parser.
    """
    global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, bb, __residue__, __classname__
    # Check tabs in python functions:
    # - def py_funcname(): covered by __inpython__
    # - python(): covered by '__anonymous' == __infunc__[0]
    # - python funcname(): covered by __infunc__[3]
    if __inpython__ or (__infunc__ and ('__anonymous' == __infunc__[0] or __infunc__[3])):
        tab = __python_tab_regexp__.match(s)
        if tab:
            bb.warn('python should use 4 spaces indentation, but found tabs in %s, line %s' % (root, lineno))
    # Inside a shell-style function body: collect lines until the closing '}'.
    if __infunc__:
        if s == '}':
            __body__.append('')
            ast.handleMethod(statements, fn, lineno, __infunc__[0], __body__, __infunc__[3], __infunc__[4])
            __infunc__ = []
            __body__ = []
        else:
            __body__.append(s)
        return
    # Inside a python 'def': collect indented/blank/comment lines until dedent.
    if __inpython__:
        m = __python_func_regexp__.match(s)
        if m and not eof:
            __body__.append(s)
            return
        else:
            ast.handlePythonMethod(statements, fn, lineno, __inpython__,
                                   root, __body__)
            __body__ = []
            __inpython__ = False
            if eof:
                return
    # Comments interleaved with a multiline continuation are fatal.
    if s and s[0] == '#':
        if len(__residue__) != 0 and __residue__[0][0] != "#":
            bb.fatal("There is a comment on line %s of file %s (%s) which is in the middle of a multiline expression.\nBitbake used to ignore these but no longer does so, please fix your metadata as errors are likely as a result of this change." % (lineno, fn, s))
    if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s or s[0] != "#"):
        bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
    # Backslash continuation: stash the partial line and wait for more.
    if s and s[-1] == '\\':
        __residue__.append(s[:-1])
        return
    s = "".join(__residue__) + s
    __residue__ = []
    # Skip empty lines
    if s == '':
        return
    # Skip comments
    if s[0] == '#':
        return
    # Classify the (now complete) logical line against each directive regex.
    m = __func_start_regexp__.match(s)
    if m:
        __infunc__ = [m.group("func") or "__anonymous", fn, lineno, m.group("py") is not None, m.group("fr") is not None]
        return
    m = __def_regexp__.match(s)
    if m:
        __body__.append(s)
        __inpython__ = m.group(1)
        return
    m = __export_func_regexp__.match(s)
    if m:
        ast.handleExportFuncs(statements, fn, lineno, m, __classname__)
        return
    m = __addtask_regexp__.match(s)
    if m:
        if len(m.group().split()) == 2:
            # Check and warn for "addtask task1 task2"
            m2 = re.match(r"addtask\s+(?P<func>\w+)(?P<ignores>.*)", s)
            if m2 and m2.group('ignores'):
                logger.warning('addtask ignored: "%s"' % m2.group('ignores'))
        # Check and warn for "addtask task1 before task2 before task3", the
        # similar to "after"
        taskexpression = s.split()
        for word in ('before', 'after'):
            if taskexpression.count(word) > 1:
                logger.warning("addtask contained multiple '%s' keywords, only one is supported" % word)
        ast.handleAddTask(statements, fn, lineno, m)
        return
    m = __deltask_regexp__.match(s)
    if m:
        # Check and warn "for deltask task1 task2"
        if m.group('ignores'):
            logger.warning('deltask ignored: "%s"' % m.group('ignores'))
        ast.handleDelTask(statements, fn, lineno, m)
        return
    m = __addhandler_regexp__.match(s)
    if m:
        ast.handleBBHandlers(statements, fn, lineno, m)
        return
    m = __inherit_regexp__.match(s)
    if m:
        ast.handleInherit(statements, fn, lineno, m)
        return
    # Anything else is variable-assignment syntax handled by the conf parser.
    return ConfHandler.feeder(lineno, s, fn, statements)
# Add us to the handlers list
from .. import handlers
handlers.append({'supports': supports, 'handle': handle, 'init': init})
del handlers  # keep the module namespace clean after registration
| 2.6875 | 3 |
tests/test_lib.py | ajberkley/minorminer | 0 | 12767820 | """
Unit tests over parameter combinations of the library.
TODO ADD MORE
"""
from __future__ import print_function
from minorminer import find_embedding as find_embedding_orig
from warnings import warn
import os
import sys
import time
# Given that this test is in the tests directory, the calibration data should be
# in a sub directory. Use the path of this source file to find the calibration
calibration_dir = os.path.join(os.path.dirname(
os.path.abspath(__file__)), "calibration")
def find_embedding(Q, A, return_overlap=False, **args):
    """Wrapper around minorminer's find_embedding that validates any reported
    success with check_embedding before returning it.

    Raises RuntimeError if a claimed-successful embedding fails validation;
    re-emits check_embedding's warning (if any) as a RuntimeWarning.
    """
    # Keep runs quiet and single-try so calibration statistics stay meaningful.
    args['verbose'] = 0
    args['tries'] = 1
    if return_overlap:
        emb, succ = find_embedding_orig(
            Q, A, return_overlap=return_overlap, **args)
        if not succ:
            return emb, succ
        elif check_embedding(Q, A, emb, **args):
            if check_embedding.warning:
                warn(check_embedding.warning, RuntimeWarning)
            return emb, succ
        else:
            raise RuntimeError(
                "bad embedding reported as success (%s)" % (check_embedding.errcode))
    else:
        emb = find_embedding_orig(Q, A, return_overlap=return_overlap, **args)
        if emb:
            if not check_embedding(Q, A, emb, **args):
                raise RuntimeError(
                    "bad embedding reported as success (%s)" % (check_embedding.errcode))
            elif check_embedding.warning:
                warn(check_embedding.warning, RuntimeWarning)
        return emb
def check_embedding(Q, A, emb, **args):
    """Verify that *emb* is a valid minor-embedding of problem Q into hardware A.

    On failure returns False and records the reason in the function attribute
    ``check_embedding.errcode``; non-fatal issues are stored in
    ``check_embedding.warning`` and True is still returned.
    """
    from networkx import Graph, is_connected
    check_embedding.warning = None
    Qg = Graph()
    Ag = Graph()
    Qg.add_edges_from(Q)
    Ag.add_edges_from(A)
    qubhits = 0
    footprint = set()
    var = {}
    for x in Qg:
        try:
            embx = emb[x]
        except KeyError:
            check_embedding.errcode = "missing chain"
            return False
        for q in embx:
            var[q] = x
        footprint.update(embx)
        qubhits += len(embx)
        # Each chain must induce a connected subgraph of the hardware graph.
        if not is_connected(Ag.subgraph(embx)):
            check_embedding.errcode = "broken chain for %s: (%s)" % (x, embx)
            return False
    # Chains may not share qubits.
    if len(footprint) != qubhits:
        check_embedding.errcode = "overlapped chains"
        return False
    # Qv is the problem-variable graph actually realised by hardware edges.
    Qv = Graph()
    for p, q in Ag.edges():
        try:
            Qv.add_edge(var[p], var[q])
        except KeyError:
            continue
    # Every problem edge must be representable by some hardware edge.
    for x, y in Qg.edges():
        if not Qv.has_edge(x, y):
            check_embedding.errcode = "missing edge"
            return False
    for x, chain in args.get("fixed_chains", {}).items():
        if set(chain) != set(emb[x]):
            check_embedding.errcode = "fixed chain mismatch"
            return False
    for x, domain in args.get("restrict_chains", {}).items():
        if not set(domain) >= set(emb[x]):
            # Restriction violations are only warnings, not failures.
            check_embedding.warning = "restrict chain mismatch"
    return True
def Path(n):
    """Edge list of the path graph on *n* vertices 0-1-...-(n-1)."""
    return [(v, v + 1) for v in range(n - 1)]
def Grid(n):
    """Edge list of an n-by-n grid graph with (row, col) vertex labels."""
    edges = []
    for a in range(n):
        for b in range(n - 1):
            edges.append(((a, b), (a, b + 1)))  # horizontal edge
            edges.append(((b, a), (b + 1, a)))  # vertical edge
    return edges
def Clique(n):
    """Edge list of the complete graph K_n, each edge ordered (u, v) with u > v."""
    return [(u, v) for u in range(1, n) for v in range(u)]
def Biclique(n):
    """Edge list of the complete bipartite graph K_{n,n} with parts
    {0..n-1} and {n..2n-1}."""
    left = range(n)
    right = range(n, 2 * n)
    return [(u, v) for u in left for v in right]
def Chimera(n, l=4):
    """Edge list of the Chimera C_{n,n,l} graph with (x, y, u, k) labels."""
    # Inter-cell couplers: link neighbouring unit cells along each axis.
    external = []
    for a in range(n):
        for b in range(n - 1):
            for k in range(l):
                external.append(((b, a, 0, k), (b + 1, a, 0, k)))
                external.append(((a, b, 1, k), (a, b + 1, 1, k)))
    # Intra-cell couplers: complete bipartite graph inside each unit cell.
    internal = [((x, y, 0, k), (x, y, 1, kk))
                for x in range(n) for y in range(n)
                for k in range(l) for kk in range(l)]
    return external + internal
def NAE3SAT(n):
    """Random 3-uniform intersection graph sized like a hard NAE3SAT instance.

    Uses a fixed seed so repeated calls yield the same graph.
    """
    import networkx
    from math import ceil
    from random import seed, randint
    seed(18293447845779813366)
    # Average of 100 draws around the ~4.2n clause/variable hardness threshold.
    c = int(ceil(sum(randint(1, ceil(n * 4.2)) for _ in range(100)) / 100.))
    return networkx.generators.k_random_intersection_graph(c, n, 3).edges()
def ChordalCycle(p):
    """Edge list of the chordal-cycle (expander) graph on Z_p, self-loops removed."""
    import networkx
    G = networkx.generators.chordal_cycle_graph(p)
    # NOTE(review): Graph.selfloop_edges was removed in networkx 2.4; this
    # assumes an older networkx -- confirm the pinned version.
    G.remove_edges_from(list(G.selfloop_edges()))
    return G.edges()
def GeometricGraph(n, pos=None):
    """Random geometric graph on *n* nodes with radius n**-1/3 in the unit square.

    If *pos* is supplied it provides the node positions; entries for isolated
    nodes are deleted from *pos* as a side effect (so callers can reuse it).
    """
    import networkx
    G = networkx.generators.geometric.random_geometric_graph(
        n, n**-.333, dim=2, pos=pos)
    if pos is not None:
        for g in G:
            if len(list(G[g])) == 0:
                del pos[g]
    return G.edges()
def CartesianProduct(n):
    """Edge list of the Cartesian product K_n x K_n (the rook's graph)."""
    import networkx
    K = networkx.generators.complete_graph(n)
    return networkx.product.cartesian_product(K, K).edges()
def GridChimeraEmbedding(n):
    """Hand-built embedding of an n-by-n grid into Chimera (chain length 2)."""
    # Pattern of qubit indices, periodic with period 4 in each direction.
    pattern = [[0, 2, 2, 0], [1, 3, 3, 1], [1, 3, 3, 1], [0, 2, 2, 0]]
    embedding = {}
    for row in range(n):
        for col in range(n):
            cell = (row // 2, col // 2)
            embedding[row, col] = [
                cell + (0, pattern[row % 4][col % 4]),
                cell + (1, pattern[col % 4][row % 4]),
            ]
    return embedding
def mask_wxw(n, w=2, l=4):
    """Partition an n-by-n Chimera's qubits into w-by-w blocks of unit cells,
    keyed by block coordinate."""
    mask = {}
    for X in range(0, n, w):
        for Y in range(0, n, w):
            qubits = [(x, y, u, k)
                      for x in range(X, X + w)
                      for y in range(Y, Y + w)
                      for u in (0, 1)
                      for k in range(l)]
            mask[X // w, Y // w] = qubits
    return mask
# Registry of (function, n, args, kwargs) for calibrate_all()/calibrate_new().
success_count_functions = []
def success_count(n, *a, **k):
    """Decorator factory for probabilistic tests.

    Using on-disk calibration data (success rate S/N for the wrapped test),
    computes a retry budget ``tts`` such that a correctly-functioning test
    fails all ``tts`` attempts with probability <= .01%. Uncalibrated tests
    raise RuntimeError when run.
    """
    from functools import wraps
    from math import log
    def count_successes(f):
        global success_count_functions
        success_count_functions.append([f, n, a, k])
        if os.path.exists(os.path.join(calibration_dir, f.__name__)):
            S, N = load_success_count_calibration(f)
            N += (S == N)  # avoid a zero failure probability (log of 0) below
            accept_prob = .0001  # .01% false negative rate
            tts = int(log(accept_prob * S / N, 1 - S / N) + 1)
            false_prob = (S / N) * (1 - S / N)**tts
            @wraps(f)
            def test_run():
                for i in range(tts):
                    if f(*a, **k):
                        break
                else:
                    assert False, "took %d tries without success, this should only happen %.02f%% of the time" % (
                        tts, false_prob * 100)
        else:
            def test_run():
                raise RuntimeError(
                    "%s is not calibrated -- run calibrate_all() or calibrate_new()" % (f.__name__))
        test_run.original = f
        return test_run
    return count_successes
def calibrate_success_count(f, n, a, k, directory=calibration_dir, M=None):
    """Run f(*a, **k) M*n times and store (successes, trials) in *directory*.

    The result file (named after ``f``) is later read by success_count to
    derive a retry budget with a bounded false-negative rate.
    """
    # time.clock was removed in Python 3.12 (deprecated since 3.3); fall
    # back to it only where perf_counter is unavailable (Python 2).
    try:
        timer = time.perf_counter
    except AttributeError:
        timer = time.clock
    succ = 0
    if M is None:
        M = 10000
    N = M * n
    print("calibrating %s, %d trials " % (f.__name__, N))
    t0 = timer()
    for i in range(N):
        if i % (N / 10) == 0:
            # Crude progress indicator: prints 0..9 as deciles complete.
            print("%d " % (10 * i // N), end='')
            sys.stdout.flush()
        succ += bool(f(*a, **k))
    print()
    dt = timer() - t0
    print("%s: %.04e per trial; success rate %.01f%% " %
          (f.__name__, dt / N, succ * 100. / N))
    # When writing to a non-standard directory, show the standard for comparison.
    if directory != calibration_dir and os.path.exists(os.path.join(calibration_dir, f.__name__)):
        olds, oldn = load_success_count_calibration(f)
        print("standard is %.01f%%" % (olds * 100. / oldn))
    else:
        print()
    with open(os.path.join(directory, f.__name__), "w") as cal_f:
        cal_f.write(repr((succ, float(N))))
def load_success_count_calibration(f, directory=calibration_dir):
    """Read back the (successes, trials) tuple stored for test function *f*.

    Uses ast.literal_eval instead of eval: the calibration file only ever
    holds a literal tuple, and this refuses to execute arbitrary code.
    """
    import ast  # local import keeps the module's import block untouched
    with open(os.path.join(directory, f.__name__)) as cal_f:
        return ast.literal_eval(cal_f.read())
def calibrate_all(directory=calibration_dir, M=None):
    """(Re)calibrate every registered success_count test, overwriting results."""
    global success_count_functions
    if not os.path.exists(directory):
        os.mkdir(directory)
    for f, n, a, k in success_count_functions:
        calibrate_success_count(f, n, a, k, directory=directory, M=M)
        print()
def calibrate_new(directory=calibration_dir, M=None):
    """Calibrate only those registered tests with no calibration file yet."""
    for f, n, a, k in success_count_functions:
        if os.path.exists(os.path.join(directory, f.__name__)):
            continue
        else:
            calibrate_success_count(f, n, a, k, directory=directory, M=M)
def success_perfect(n, *a, **k):
    """Decorator factory: the wrapped test must succeed on all *n* runs."""
    from functools import wraps

    def decorate(f):
        @wraps(f)
        def test_run():
            for _ in range(n):
                assert bool(f(*a, **k)), "test fail"
        test_run.original = f
        return test_run
    return decorate
def success_bounce(n, *a, **k):
    """Decorator factory: run the test *n* times, then always fail with the
    observed success count (used for manual recalibration)."""
    from functools import wraps

    def decorate(f):
        @wraps(f)
        def test_run():
            successes = sum(1 for _ in range(n) if f(*a, **k))
            assert False, "%d successes out of %d trials" % (successes, n)
        test_run.original = f
        return test_run
    return decorate
def check_args(prob, hard, initial_chains=None, fixed_chains=None, restrict_chains=None, skip_initialization=False):
    """Sanity-check hint arguments before they are handed to find_embedding.

    Asserts that both graphs are connected and that every hint
    (initial/fixed/restrict chains) references only existing nodes and is
    mutually consistent.
    """
    import networkx
    probg = networkx.Graph()
    probg.add_edges_from(prob)
    hardg = networkx.Graph()
    hardg.add_edges_from(hard)
    assert networkx.is_connected(hardg), "hardware graph not connected"
    assert networkx.is_connected(probg), "problem graph not connected"
    if fixed_chains is not None:
        for v, chain in fixed_chains.items():
            assert probg.has_node(
                v), "fixed_chains vars not contained in problem graph"
            for q in chain:
                assert hardg.has_node(
                    q), "fixed_chains chains not contained in hardware graph"
        # Fixed chains must not collide with the other hint dictionaries.
        if initial_chains is not None:
            for v in fixed_chains:
                assert v not in initial_chains, "fixed_chains chains overwrite initial chains"
        if restrict_chains is not None:
            for v in fixed_chains:
                assert v not in restrict_chains, "fixed_chains chains are restricted"
    if initial_chains is not None:
        for v, chain in initial_chains.items():
            assert probg.has_node(
                v), "initial vars not contained in problem graph"
            for q in chain:
                assert hardg.has_node(
                    q), "initial chains not contained in hardware graph"
        if skip_initialization:
            # Without the initialization pass, connected problem variables
            # must already have adjacent (or overlapping) initial chains.
            for u, v in probg.edges():
                edgelord = {z for q in initial_chains[v] for z in hardg.neighbors(
                    q)} | set(initial_chains[v])
                assert set(
                    initial_chains[u]) & edgelord, "%s and %s are connected as variables but not as initials" % (u, v)
    if restrict_chains is not None:
        fullset = set(hardg.nodes())
        for v, chain in restrict_chains.items():
            assert probg.has_node(
                v), "restricted vars not contained in problem graph"
            for q in chain:
                assert hardg.has_node(
                    q), "restricted chains not contained in hardware graph"
        # Every problem edge must remain realisable within the domains.
        for u, v in probg.edges():
            edgelord = {z for q in restrict_chains.get(v, fullset) for z in hardg.neighbors(
                q)} | set(restrict_chains.get(v, fullset))
            assert set(restrict_chains.get(
                u, fullset)) & edgelord, "%s and %s are connected as variables but not as domains" % (u, v)
# --- label-handling and small-graph embedding tests ------------------------
# The path tests embed an n-node path into itself, varying which side uses
# integer labels and which uses string labels, to exercise label translation.

@success_count(100, 5)
def test_path_label_00(n):
    # integer labels on both problem and target
    p = Path(n)
    return find_embedding(p, p)


@success_count(100, 5)
def test_path_label_01(n):
    # integer-labeled problem, string-labeled target
    p = Path(n)
    L = [str(i) for i in range(n)]
    Lp = [(L[x], L[y]) for x, y in p]
    return find_embedding(p, Lp)


@success_count(100, 5)
def test_path_label_10(n):
    # string-labeled problem, integer-labeled target
    p = Path(n)
    L = [str(i) for i in range(n)]
    Lp = [(L[x], L[y]) for x, y in p]
    return find_embedding(Lp, p)


@success_count(100, 5)
def test_path_label_11(n):
    # string labels on both sides
    p = Path(n)
    L = [str(i) for i in range(n)]
    Lp = [(L[x], L[y]) for x, y in p]
    return find_embedding(Lp, Lp)


@success_count(30, 3)
def test_grid_init_restrict(n):
    # Embed a 2n x 2n grid into Chimera(n) with both an initial-chain guess
    # (one random qubit per variable) and per-variable domain restrictions.
    # mask_wxw presumably maps (cell x, cell y) -> qubit list -- see its def.
    from random import choice
    chim = Chimera(n, l=4)
    mask = mask_wxw(n, 1, l=4)
    grid = Grid(2 * n)
    init = {(x, y): [choice(mask[x // 2, y // 2])]
            for x in range(2 * n) for y in range(2 * n)}
    doms = {(x, y): mask[x // 2, y // 2]
            for x in range(2 * n) for y in range(2 * n)}
    return find_embedding(grid, chim, initial_chains=init, restrict_chains=doms, skip_initialization=False)


@success_count(30, 3)
def test_grid_init(n):
    # Grid embedding with whole-cell initial chains (no domain restriction).
    from random import choice  # NOTE(review): `choice` is unused here
    chim = Chimera(n, l=4)
    mask = mask_wxw(n, 1, l=2)
    grid = Grid(2 * n)
    init = {(x, y): mask[x // 2, y // 2]
            for x in range(2 * n) for y in range(2 * n)}
    return find_embedding(grid, chim, initial_chains=init, skip_initialization=False)


@success_count(30, 15, 7)
def test_nae3sat(n, m):
    # random NAE3SAT instance into Chimera(m)
    from random import choice  # NOTE(review): `choice` is unused here
    chim = Chimera(m)
    prob = NAE3SAT(n)
    return find_embedding(prob, chim)


@success_count(30, 79, 6)
def test_expander(p, m):
    # chordal-cycle expander graph into Chimera(m)
    prob = ChordalCycle(p)
    chim = Chimera(m)
    return find_embedding(prob, chim)


@success_count(30, 5)
def test_cartesian(n):
    # Cartesian product graph into a Chimera with matching shore size
    prob = CartesianProduct(n)
    chim = Chimera(n, l=n)
    return find_embedding(prob, chim)


@success_count(30, 45, 6)
def test_geometric_nohint(n, m):
    # random geometric graph, no hints
    prob = GeometricGraph(n)
    chim = Chimera(m)
    return find_embedding(prob, chim)


@success_count(30, 55, 6)
def test_geometric_hint(n, m):
    # Random geometric graph whose node positions are chosen to line up with
    # Chimera cells; the corresponding two-qubit chains are used as hints.
    from random import randint
    pos = {}
    chains = {}
    for i in range(n):
        x = randint(0, m - 1)
        k1 = randint(0, 3)
        y = randint(0, m - 1)
        k2 = randint(0, 3)
        pos[i] = (4 * x + k2) / 4. / m, (4 * y + k1) / 4. / m
        chains[i] = (x, y, 0, k1), (x, y, 1, k2)
    prob = GeometricGraph(n, pos)
    chim = Chimera(m)
    return find_embedding(prob, chim, initial_chains={i: c for i, c in chains.items() if i in pos})
@success_count(30, 3)
def test_grid_restrict(n):
    # Domain-restricted grid embedding; arguments are sanity-checked first.
    chim = Chimera(n)
    mask = mask_wxw(n, 1)
    grid = Grid(2 * n)
    doms = {(x, y): mask[x // 2, y // 2]
            for x in range(2 * n) for y in range(2 * n)}
    check_args(grid, chim, restrict_chains=doms)
    return find_embedding(grid, chim, restrict_chains=doms)


@success_perfect(100, 4)
def test_grid_with_answer_fast(n):
    # Start from a known-perfect embedding; must always succeed
    # (success_perfect), with no chain-length polishing.
    chim = Chimera(n)
    mask = mask_wxw(n, 1)  # NOTE(review): `mask` is unused here
    grid = Grid(2 * n)
    init = GridChimeraEmbedding(2 * n)
    check_args(grid, chim, initial_chains=init, skip_initialization=True)
    return find_embedding(grid, chim, initial_chains=init, skip_initialization=True, chainlength_patience=0)


@success_perfect(100, 2)
def test_grid_with_answer_slow(n):
    # Same as above but with aggressive chain-length polishing.
    chim = Chimera(n)
    mask = mask_wxw(n, 1)  # NOTE(review): `mask` is unused here
    grid = Grid(2 * n)
    init = GridChimeraEmbedding(2 * n)
    check_args(grid, chim, initial_chains=init, skip_initialization=True)
    return find_embedding(grid, chim, initial_chains=init, skip_initialization=True, chainlength_patience=10)


@success_count(30, 5)
def test_grid_suspend(n):
    # Augment both graphs with per-cell anchor nodes and pin those anchors
    # with fixed_chains, forcing each grid cell near its Chimera cell.
    chim = Chimera(n)
    mask = mask_wxw(n, 1)
    grid = Grid(2 * n)
    suspg = [((x, y), (x // 2, y // 2, 0))
             for x in range(2 * n) for y in range(2 * n)]
    suspc = [((x, y, 0), m) for x in range(n)
             for y in range(n) for m in mask[x, y]]
    suspension = {(x, y, 0): [(x, y, 0)] for x in range(n) for y in range(n)}
    return find_embedding(grid + suspg, chim + suspc, fixed_chains=suspension, chainlength_patience=0)


@success_count(30, 5)
def test_grid_plant_suspend(n):
    # Like test_grid_suspend, but anchor edges are reversed and an initial
    # whole-cell chain guess is planted as well.
    chim = Chimera(n)
    mask = mask_wxw(n, 1)
    grid = Grid(2 * n)
    suspg = [((x, y), (x // 2, y // 2, 0))
             for x in range(2 * n) for y in range(2 * n)]
    suspc = [(m, (x, y, 0)) for x in range(n)
             for y in range(n) for m in mask[x, y]]
    suspension = {(x, y, 0): [(x, y, 0)] for x in range(n) for y in range(n)}
    init = {(x, y): mask[x // 2, y // 2]
            for x in range(2 * n) for y in range(2 * n)}
    return find_embedding(grid + suspg, chim + suspc, fixed_chains=suspension, initial_chains=init, chainlength_patience=0)


@success_count(30, 5)
def test_grid_suspend_chains(n):
    # Same intent as test_grid_suspend, expressed through the dedicated
    # suspend_chains keyword instead of hand-built anchor nodes.
    chim = Chimera(n)
    mask = mask_wxw(n, 1)
    grid = Grid(2 * n)
    suspension = {(x, y): [mask[x//2, y//2]]
                  for x in range(2*n) for y in range(2*n)}
    return find_embedding(grid, chim, suspend_chains=suspension, chainlength_patience=0)


@success_count(30, 5)
def test_grid_suspend_domain(n):
    # Anchored embedding combined with per-variable domain restrictions.
    chim = Chimera(n)
    mask = mask_wxw(n, 1)
    grid = Grid(2 * n)
    suspg = [((x, y), (x // 2, y // 2, 0))
             for x in range(2 * n) for y in range(2 * n)]
    suspc = [((x, y, 0), m) for x in range(n)
             for y in range(n) for m in mask[x, y]]
    suspension = {(x, y, 0): [(x, y, 0)] for x in range(n) for y in range(n)}
    doms = {(x, y): mask[x // 2, y // 2]
            for x in range(2 * n) for y in range(2 * n)}
    check_args(grid + suspg, chim + suspc, fixed_chains=suspension,
               skip_initialization=False, restrict_chains=doms)
    return find_embedding(grid + suspg, chim + suspc, fixed_chains=suspension, restrict_chains=doms, chainlength_patience=0)
@success_count(30, 5)
def test_grid_cheat_domain(n):
    # Use a known-perfect embedding as the domain restriction ("cheat").
    chim = Chimera(n)
    grid = Grid(2 * n)
    cheat = GridChimeraEmbedding(2 * n)
    return find_embedding(grid, chim, restrict_chains=cheat, chainlength_patience=0)


@success_count(30, 2)
def test_biclique_chimera(n):
    # complete bipartite K_{4n,4n} into Chimera(n) -- a natural fit
    chim = Chimera(n)
    kliq = Biclique(4 * n)
    return find_embedding(kliq, chim, chainlength_patience=0)


@success_count(30, 5)
def test_path_cheat_domain(n):
    # identity embedding of a path, forced through singleton domains
    P = Path(n)
    cheat = {p: [p] for p in range(n)}
    return find_embedding(P, P, restrict_chains=cheat, chainlength_patience=0)


@success_count(30, 6, 25)
def test_clique(n, k):
    # K_k into Chimera(n)
    chim = Chimera(n)
    cliq = Clique(k)
    return find_embedding(cliq, chim, chainlength_patience=0)


@success_perfect(20, 25, 25)
def test_clique_clique(n, k):
    # K_k into itself -- trivially embeddable, so must always succeed
    cliq = Clique(k)
    return find_embedding(cliq, cliq, chainlength_patience=0)


@success_perfect(3, 16)
def test_clique_large_nosegfault(n):
    # K_{4n+2} exceeds Chimera(n)'s clique capacity; with a short timeout we
    # expect failure (falsy result) rather than a crash.
    chim = Chimera(n)
    cliq = Clique(4 * n + 2)
    return not find_embedding(cliq, chim, chainlength_patience=0, timeout=1)


@success_count(30, 6, 25)
def test_clique_term(n, k):
    # Clique plus one extra "terminal" variable pinned to a fixed qubit.
    chim = Chimera(n)
    cliq = Clique(k)
    cterm = [((n // 2, n // 2, 0, 0), k)]
    kterm = [(0, k)]
    fix = {k: [k]}
    return find_embedding(cliq + kterm, chim + cterm, fixed_chains=fix, chainlength_patience=0)


@success_count(30, 8)
def test_grid_heal_A(n):
    # Break a few qubits out of the hardware graph, then hand the solver a
    # shifted near-perfect embedding with the broken qubits removed; it
    # should heal the damaged chains.
    from random import randint
    grid = Grid(2 * n)
    chim = Chimera(n + 2)
    breaks = {(x, x, x % 2, randint(0, 3)) for x in range(1, 4)}
    chim = [e for e in chim if not breaks.intersection(e)]
    emb = GridChimeraEmbedding(2 * n)
    i_emb = {}
    for v, chain in emb.items():
        # shift the perfect embedding by (+1, +1) into the larger Chimera
        remainder = {(x + 1, y + 1, u, k)
                     for x, y, u, k in chain}.difference(breaks)
        if remainder:
            i_emb[v] = remainder
    return find_embedding(grid, chim, initial_chains=i_emb, chainlength_patience=0)


@success_count(30, 4)
def test_grid_heal_B(n):
    # Model broken qubits by attaching a pinned dummy variable to each,
    # rather than deleting them from the hardware graph.
    from random import randint
    grid = Grid(2 * n)
    chim = Chimera(n + 2)
    breaks = {(x, x, x % 2, randint(0, 3)) for x in range(1, 4)}
    chim = [e for e in chim]  # copy; broken qubits stay in the graph here
    chimb = [(b, (b, None)) for b in breaks]
    gridb = [(b, (b, None)) for b in breaks]
    f_emb = {(b, None): [(b, None)] for b in breaks}
    # NOTE(review): unlike test_grid_heal_A, this initial embedding is not
    # shifted by (+1, +1) -- presumably intentional since it is only a hint;
    # confirm against test_grid_heal_A.
    emb = GridChimeraEmbedding(2 * n)
    return find_embedding(grid + gridb, chim + chimb, initial_chains=emb, fixed_chains=f_emb, chainlength_patience=0)
@success_perfect(1000, 3)
def test_fail_impossible(n):
    # A clique cannot embed into a path; the result must be falsy every time.
    Kn = Clique(n)  # we're gonna try to embed this here clique
    Pn = Path(n)  # into this here path, and it ain't gonna work
    return not find_embedding(Kn, Pn)


@success_perfect(1, 16, .1)
def test_fail_timeout(n, t):
    # A borderline-too-large clique with effectively unlimited effort knobs
    # but a tiny timeout: the run must stop and report failure.
    Kn = Clique(4 * n + 1)  # we're gonna try to embed this here clique
    # into this here chimera, and it might work but we'll time out
    Cn = Chimera(n)
    return not find_embedding(Kn, Cn, tries=1e6, max_no_improvement=1e6, inner_rounds=1e6, timeout=t, threads=4)


@success_count(30)
def test_chainlength_fast():
    # With light polishing, K16 into Chimera(4) should reach max chain <= 7.
    C = Chimera(4)
    K = Clique(16)
    e = find_embedding(K, C, tries=1, chainlength_patience=1)
    if not len(e):
        return False
    return max(len(c) for c in e.values()) <= 7


@success_count(30)
def test_chainlength_slow():
    # With heavy polishing, the same instance should reach max chain <= 6.
    C = Chimera(4)
    K = Clique(16)
    e = find_embedding(K, C, tries=1, chainlength_patience=10)
    if not len(e):
        return False
    return max(len(c) for c in e.values()) <= 6
def chainlength_diagnostic(n=100, old=False, chainlength_argument=0, verbose=0, m=8):
    """Embed K_{4m} into Chimera(m) `n` times and return the sorted list of
    maximum chain lengths (None for failed runs), using either this package
    (``old=False``) or the legacy dwave_sapi2 embedder (``old=True``).

    NOTE(review): sorting a mix of ints and None raises TypeError on
    Python 3 if any run fails; this helper appears to target the Python 2
    era dwave_sapi2 stack -- confirm before running on Python 3.
    """
    C = Chimera(m)
    K = Clique(4 * m)
    if old:
        from dwave_sapi2.embedding import find_embedding as find_embedding_dws2
        # sapi2 wants contiguous integer labels; relabel the Chimera nodes
        nodes = set(x for e in C for x in e)
        trans = {x: i for i, x in enumerate(nodes)}
        C = [(trans[x], trans[y]) for x, y in C]
        assert 0 <= chainlength_argument <= 1, "sapi2 only supports a chainlength argument of 0 or 1"
        embs = [find_embedding_dws2(
            K, C, tries=1, fast_embedding=chainlength_argument, verbose=verbose) for _ in range(n)]
    else:
        embs = [find_embedding_orig(
            K, C, tries=1, chainlength_patience=chainlength_argument, verbose=verbose).values() for _ in range(n)]
    return sorted(max(map(len, e)) if e else None for e in embs)
def chainlength_rundown(n=100, m=8):
    """Benchmark this package against the legacy sapi2 embedder on K_{4m}
    into Chimera(m), raising chainlength_patience until a trial takes longer
    than sapi2's slow embedding.
    """
    from dwave_sapi2.embedding import find_embedding as find_embedding_dws2
    C = Chimera(m)
    K = Clique(4 * m)
    # sapi2 wants contiguous integer labels; relabel the Chimera nodes
    nodes = set(x for e in C for x in e)
    trans = {x: i for i, x in enumerate(nodes)}
    C = [(trans[x], trans[y]) for x, y in C]

    def trial(f):
        # Run f() n times; print success count, chain-length stats and the
        # elapsed time, and return the elapsed time.
        # NOTE(review): time.clock() was removed in Python 3.8; this helper
        # targets the Python 2 era dwave_sapi2 stack -- switch to
        # time.perf_counter() if it is ever modernised.
        t0 = time.clock()
        stats = [f() for _ in range(n)]
        t = time.clock() - t0
        stats = filter(None, stats)  # drop failed (empty) embeddings
        stats = [max(map(len, e)) for e in stats]
        print("successes %d, best maxchain %d, avg maxchain %.02f, time %.02fs" % (
            len(stats), min(stats), sum(stats) / float(len(stats)), t))
        return t
    print("sapi fast embedding:", end='')
    trial(lambda: find_embedding_dws2(K, C, tries=1, fast_embedding=True))
    print("sapi slow embedding:", end='')
    basetime = trial(lambda: find_embedding_dws2(
        K, C, tries=1, fast_embedding=False))
    patience = 0
    while 1:
        print("minorminer, chainlength_patience %d:" % patience, end='')
        t = trial(lambda: find_embedding_orig(K, C, tries=1,
                                              chainlength_patience=patience).values())
        if t > basetime:
            break
        patience += 1
| 2.5 | 2 |
CadeMinhaPasta/teste.py | RafaelContact/pythonProject | 0 | 12767821 | nome = str(input('Digite seu nome : '))
print(f'Voce digitou {nome:^40}') | 3.015625 | 3 |
few_shot_learning/train_few_shot.py | gitmatti/few-shot-learning | 0 | 12767822 | """
Run few-shot learning on FashionProductImaes dataset using code from github
repo https://github.com/oscarknagg/few-shot under
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
reproducing results of
Snell et al Prototypical Networks. In places where substantial changes have
been made to the original code, this is marked with an ADAPTED/BEFORE comment
"""
import torch
from torch.optim import Adam
import torch.nn.parallel
from torch.utils.data import DataLoader
from torchvision import transforms, models
import warnings
import numpy as np
from typing import Callable, Tuple
from few_shot.models import get_few_shot_encoder
from few_shot.core import NShotTaskSampler, create_nshot_task_label
from few_shot.proto import proto_net_episode
from few_shot.train import fit
from few_shot.callbacks import *
from few_shot.utils import setup_dirs
from few_shot.metrics import categorical_accuracy
from few_shot_learning.datasets import FashionProductImages, \
FashionProductImagesSmall
from few_shot_learning.models import Identity
from config import DATA_PATH, PATH
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
def few_shot_training(
        datadir=DATA_PATH,
        dataset='fashion',
        num_input_channels=3,
        drop_lr_every=20,
        validation_episodes=200,
        evaluation_episodes=1000,
        episodes_per_epoch=100,
        n_epochs=80,
        small_dataset=False,
        n_train=1,
        n_test=1,
        k_train=30,
        k_test=5,
        q_train=5,
        q_test=1,
        distance='l2',
        pretrained=False,
        monitor_validation=False,
        n_val_classes=10,
        architecture='resnet18',
        gpu=None
):
    """Train a Prototypical Network for few-shot classification.

    Builds background (training), optional validation, and evaluation (test)
    episode loaders over the FashionProductImages dataset, constructs either
    the standard few-shot encoder or a pretrained torchvision backbone, and
    runs `fit` with `proto_net_episode`.

    # Arguments
        datadir: root directory of the dataset
        dataset: dataset name; only 'fashion' is supported
        num_input_channels: input channels of the non-pretrained encoder
        drop_lr_every: halve the learning rate every this many epochs
        validation_episodes/evaluation_episodes: episode counts for the
            validation/test loaders
        episodes_per_epoch: training episodes per epoch
        n_epochs: number of training epochs
        small_dataset: use the low-resolution dataset variant
        n_train/k_train/q_train: shots, ways, queries for training episodes
        n_test/k_test/q_test: shots, ways, queries for val/test episodes
        distance: distance metric used by proto_net_episode
        pretrained: use a pretrained torchvision `architecture` (CUDA only)
        monitor_validation: hold out `n_val_classes` background classes and
            evaluate on them after every epoch (also enables best-only
            checkpointing)
        n_val_classes: number of held-out validation classes
        architecture: torchvision model name used when `pretrained` is True
        gpu: explicit CUDA device index, or None for the default device

    # Raises
        ValueError: if `dataset` is not supported
    """
    setup_dirs()
    if dataset == 'fashion':
        dataset_class = FashionProductImagesSmall if small_dataset \
            else FashionProductImages
    else:
        # BUG FIX: `raise (ValueError, 'Unsupported dataset')` raises a
        # TypeError on Python 3 (a tuple is not an exception); raise properly.
        raise ValueError('Unsupported dataset')
    param_str = f'{dataset}_nt={n_train}_kt={k_train}_qt={q_train}_' \
                f'nv={n_test}_kv={k_test}_qv={q_test}_small={small_dataset}_' \
                f'pretrained={pretrained}_validate={monitor_validation}'
    print(param_str)
    ###################
    # Create datasets #
    ###################
    # ADAPTED: data transforms including augmentation
    resize = (80, 60) if small_dataset else (400, 300)
    background_transform = transforms.Compose([
        transforms.RandomResizedCrop(resize, scale=(0.8, 1.0)),
        # transforms.RandomGrayscale(),
        transforms.RandomPerspective(),
        transforms.RandomHorizontalFlip(),
        # transforms.Resize(resize),
        transforms.ToTensor(),
        # transforms.Normalize(mean=[0.485, 0.456, 0.406],
        #                      std=[0.229, 0.224, 0.225])
    ])
    evaluation_transform = transforms.Compose([
        transforms.Resize(resize),
        # transforms.CenterCrop(224),
        transforms.ToTensor(),
        # transforms.Normalize(mean=[0.485, 0.456, 0.406],
        #                      std=[0.229, 0.224, 0.225])
    ])
    if monitor_validation:
        if not n_val_classes >= k_test:
            n_val_classes = k_test
            print("Warning: `n_val_classes` < `k_test`. Take a larger number"
                  " of validation classes next time. Increased to `k_test`"
                  " classes")
        # class structure for background (training), validation (validation),
        # evaluation (test): take a random subset of background classes
        validation_classes = list(
            np.random.choice(dataset_class.background_classes, n_val_classes))
        background_classes = list(set(dataset_class.background_classes).difference(
            set(validation_classes)))
        # use keyword for evaluation classes
        evaluation_classes = 'evaluation'
        # Meta-validation set
        validation = dataset_class(datadir, split='all',
                                   classes=validation_classes,
                                   transform=evaluation_transform)
        # ADAPTED: in the original code, `episodes_per_epoch` was provided to
        # `NShotTaskSampler` instead of `validation_episodes`.
        validation_sampler = NShotTaskSampler(validation, validation_episodes,
                                              n_test, k_test, q_test)
        validation_taskloader = DataLoader(
            validation,
            batch_sampler=validation_sampler,
            num_workers=4
        )
    else:
        # use keyword for both background and evaluation classes
        background_classes = 'background'
        evaluation_classes = 'evaluation'
    # Meta-training set
    background = dataset_class(datadir, split='all',
                               classes=background_classes,
                               transform=background_transform)
    background_sampler = NShotTaskSampler(background, episodes_per_epoch,
                                          n_train, k_train, q_train)
    background_taskloader = DataLoader(
        background,
        batch_sampler=background_sampler,
        num_workers=4
    )
    # Meta-test set
    evaluation = dataset_class(datadir, split='all',
                               classes=evaluation_classes,
                               transform=evaluation_transform)
    # ADAPTED: in the original code, `episodes_per_epoch` was provided to
    # `NShotTaskSampler` instead of `evaluation_episodes`.
    evaluation_sampler = NShotTaskSampler(evaluation, evaluation_episodes,
                                          n_test, k_test, q_test)
    evaluation_taskloader = DataLoader(
        evaluation,
        batch_sampler=evaluation_sampler,
        num_workers=4
    )
    #########
    # Model #
    #########
    if torch.cuda.is_available():
        if gpu is not None:
            device = torch.device('cuda', gpu)
        else:
            device = torch.device('cuda')
        torch.backends.cudnn.benchmark = True
    else:
        device = torch.device('cpu')
    if not pretrained:
        model = get_few_shot_encoder(num_input_channels)
        # ADAPTED
        model.to(device)
        # BEFORE
        # model.to(device, dtype=torch.double)
    else:
        assert torch.cuda.is_available()
        model = models.__dict__[architecture](pretrained=True)
        # strip the classification head; the encoder output feeds prototypes
        model.fc = Identity()
        if gpu is not None:
            model = model.cuda(gpu)
        else:
            model = model.cuda()
        # TODO this is too risky: I'm not sure that this can work, since in
        # the few-shot github repo the batch axis is actually split into
        # support and query samples
        # model = torch.nn.DataParallel(model).cuda()

    def lr_schedule(epoch, lr):
        # Drop lr every 2000 episodes
        if epoch % drop_lr_every == 0:
            return lr / 2
        else:
            return lr
    ############
    # Training #
    ############
    print(f'Training Prototypical network on {dataset}...')
    optimiser = Adam(model.parameters(), lr=1e-3)
    loss_fn = torch.nn.NLLLoss().to(device)
    callbacks = [
        # ADAPTED: this is the test monitoring now - and is only done at the
        # end of training.
        EvaluateFewShot(
            eval_fn=proto_net_episode,
            num_tasks=evaluation_episodes,  # THIS IS NOT USED
            n_shot=n_test,
            k_way=k_test,
            q_queries=q_test,
            taskloader=evaluation_taskloader,
            prepare_batch=prepare_nshot_task(n_test, k_test, q_test, device=device),
            distance=distance,
            on_epoch_end=False,
            on_train_end=True,
            prefix='test_'
        )
    ]
    if monitor_validation:
        callbacks.append(
            # ADAPTED: this is the validation monitoring now - computed
            # after every epoch.
            EvaluateFewShot(
                eval_fn=proto_net_episode,
                num_tasks=evaluation_episodes,  # THIS IS NOT USED
                n_shot=n_test,
                k_way=k_test,
                q_queries=q_test,
                # BEFORE taskloader=evaluation_taskloader,
                taskloader=validation_taskloader,  # ADAPTED
                prepare_batch=prepare_nshot_task(n_test, k_test, q_test, device=device),
                distance=distance,
                on_epoch_end=True,  # ADAPTED
                on_train_end=False,  # ADAPTED
                prefix='val_'
            )
        )
    callbacks.extend([
        ModelCheckpoint(
            filepath=PATH + f'/models/proto_nets/{param_str}.pth',
            monitor=f'val_{n_test}-shot_{k_test}-way_acc',
            verbose=1,  # ADAPTED
            save_best_only=monitor_validation  # ADAPTED
        ),
        LearningRateScheduler(schedule=lr_schedule),
        CSVLogger(PATH + f'/logs/proto_nets/{param_str}.csv'),
    ])
    fit(
        model,
        optimiser,
        loss_fn,
        epochs=n_epochs,
        dataloader=background_taskloader,
        prepare_batch=prepare_nshot_task(n_train, k_train, q_train, device=device),
        callbacks=callbacks,
        metrics=['categorical_accuracy'],
        fit_function=proto_net_episode,
        fit_function_kwargs={'n_shot': n_train, 'k_way': k_train,
                             'q_queries': q_train, 'train': True,
                             'distance': distance},
    )
# ADAPTED: the original code used torch.double
def prepare_nshot_task(n: int, k: int, q: int, device=None) -> Callable:
    """Typical n-shot task preprocessing.
    # Arguments
        n: Number of samples for each class in the n-shot classification task
            (NOTE(review): not used inside the returned closure; kept for
            interface symmetry with the few-shot library -- confirm)
        k: Number of classes in the n-shot classification task
        q: Number of query samples for each class in the n-shot classification task
        device: torch.device that the batch and the generated labels are
            moved to (None means the tensors' current/default device)
    # Returns
        prepare_nshot_task_: A Callable that processes a few shot tasks with specified n, k and q
    """
    def prepare_nshot_task_(batch: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[
        torch.Tensor, torch.Tensor]:
        """Move the batch to `device` and create dummy 0..k-1 episode labels."""
        x, y = batch
        # BEFORE: x = x.double().cuda()
        x = x.to(device)  # ADAPTED
        # Create dummy 0-(num_classes - 1) label; the dataset's true labels
        # are discarded because episode labels are positional within a task.
        y = create_nshot_task_label(k, q).to(device)
        return x, y
    return prepare_nshot_task_
class EvaluateFewShot(Callback):
    """Evaluate a network on an n-shot, k-way classification tasks after every epoch.
    # Arguments
        eval_fn: Callable to perform few-shot classification. Examples include `proto_net_episode`,
            `matching_net_episode` and `meta_gradient_step` (MAML).
        num_tasks: int. Number of n-shot classification tasks to evaluate the model with.
            (NOTE(review): stored but not used; the task count comes from `taskloader`.)
        n_shot: int. Number of samples for each class in the n-shot classification tasks.
        k_way: int. Number of classes in the n-shot classification tasks.
        q_queries: int. Number query samples for each class in the n-shot classification tasks.
        task_loader: Instance of NShotWrapper class
        prepare_batch: function. The preprocessing function to apply to samples from the dataset.
        prefix: str. Prefix to identify dataset.
        on_epoch_end: bool. If True, run the evaluation after every epoch (ADAPTED).
        on_train_end: bool. If True, run the evaluation once when training finishes (ADAPTED).
    """

    def __init__(self,
                 eval_fn: Callable,
                 num_tasks: int,
                 n_shot: int,
                 k_way: int,
                 q_queries: int,
                 taskloader: torch.utils.data.DataLoader,
                 prepare_batch: Callable,
                 prefix: str = 'val_',
                 on_epoch_end: bool = True,
                 on_train_end: bool = False,
                 **kwargs):
        super(EvaluateFewShot, self).__init__()
        self.eval_fn = eval_fn
        self.num_tasks = num_tasks
        self.n_shot = n_shot
        self.k_way = k_way
        self.q_queries = q_queries
        self.taskloader = taskloader
        self.prepare_batch = prepare_batch
        self.prefix = prefix
        self.kwargs = kwargs
        # metric key under which the accuracy is reported in `logs`
        self.metric_name = f'{self.prefix}{self.n_shot}-shot_{self.k_way}-way_acc'
        # ADAPTED: flags selecting when the evaluation actually runs
        self._on_epoch_end = on_epoch_end
        self._on_train_end = on_train_end

    def on_train_begin(self, logs=None):
        # loss_fn and optimiser are injected by `fit` via self.params
        self.loss_fn = self.params['loss_fn']
        self.optimiser = self.params['optimiser']

    # ADAPTED
    def on_epoch_end(self, epoch, logs=None):
        if self._on_epoch_end:
            self._validate(epoch, logs=logs)

    # ADAPTED
    def on_train_end(self, epoch, logs=None):
        if self._on_train_end:
            self._validate(epoch, logs=logs)

    # ADAPTED
    def _validate(self, epoch, logs=None):
        """Run eval_fn over every task in the loader and write the
        sample-weighted average loss and accuracy into `logs`."""
        logs = logs or {}
        seen = 0
        totals = {'loss': 0, self.metric_name: 0}
        for batch_index, batch in enumerate(self.taskloader):
            x, y = self.prepare_batch(batch)
            loss, y_pred = self.eval_fn(
                self.model,
                self.optimiser,
                self.loss_fn,
                x,
                y,
                n_shot=self.n_shot,
                k_way=self.k_way,
                q_queries=self.q_queries,
                train=False,
                **self.kwargs
            )
            seen += y_pred.shape[0]
            # accumulate sample-weighted sums; divide by `seen` at the end
            totals['loss'] += loss.item() * y_pred.shape[0]
            totals[self.metric_name] += categorical_accuracy(y, y_pred) * \
                y_pred.shape[0]
        logs[self.prefix + 'loss'] = totals['loss'] / seen
        logs[self.metric_name] = totals[self.metric_name] / seen
class ModelCheckpoint(Callback):
    """Save the model after every epoch.
    `filepath` can contain named formatting options, which will be filled the value of `epoch` and keys in `logs`
    (passed in `on_epoch_end`).
    For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`, then the model checkpoints will be saved
    with the epoch number and the validation loss in the filename.
    # Arguments
        filepath: string, path to save the model file.
        monitor: quantity to monitor.
        verbose: verbosity mode, 0 or 1.
        save_best_only: if `save_best_only=True`,
            the latest best model according to
            the quantity monitored will not be overwritten.
        mode: one of {auto, min, max}.
            If `save_best_only=True`, the decision
            to overwrite the current save file is made
            based on either the maximization or the
            minimization of the monitored quantity. For `val_acc`,
            this should be `max`, for `val_loss` this should
            be `min`, etc. In `auto` mode, the direction is
            automatically inferred from the name of the monitored quantity.
        save_weights_only: if True, then only the model's weights will be
            saved (`model.save_weights(filepath)`), else the full model
            is saved (`model.save(filepath)`).
        period: Interval (number of epochs) between checkpoints.
    """

    def __init__(self, filepath, monitor='val_loss', verbose=0,
                 save_best_only=False, mode='auto', period=1):
        super(ModelCheckpoint, self).__init__()
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        self.save_best_only = save_best_only
        self.period = period
        self.epochs_since_last_save = 0
        if mode not in ['auto', 'min', 'max']:
            raise ValueError('Mode must be one of (auto, min, max).')
        if mode == 'min':
            self.monitor_op = np.less
            self.best = np.Inf
        elif mode == 'max':
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            # 'auto': infer the comparison direction from the metric name
            if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
                self.monitor_op = np.greater
                self.best = -np.Inf
            else:
                self.monitor_op = np.less
                # BUG FIX: this initialisation had been commented out, so in
                # 'auto' mode with a loss-like monitor `self.best` was never
                # set and `on_epoch_end` crashed with an AttributeError.
                self.best = np.Inf

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.epochs_since_last_save += 1
        if self.epochs_since_last_save >= self.period:
            self.epochs_since_last_save = 0
            filepath = self.filepath.format(epoch=epoch + 1, **logs)
            if self.save_best_only:
                current = logs.get(self.monitor)
                if current is None:
                    warnings.warn(
                        'Can save best model only with %s available, '
                        'skipping.' % (self.monitor), RuntimeWarning)
                else:
                    if self.monitor_op(current, self.best):
                        if self.verbose > 0:
                            print(
                                '\nEpoch %05d: %s improved from %0.5f to %0.5f,'
                                ' saving model to %s'
                                % (epoch + 1, self.monitor, self.best,
                                   current, filepath))
                        self.best = current
                        torch.save(self.model.state_dict(), filepath)
                    else:
                        if self.verbose > 0:
                            print(
                                '\nEpoch %05d: %s did not improve from %0.5f' %
                                (epoch + 1, self.monitor, self.best))
            else:
                # unconditional periodic checkpoint
                if self.verbose > 0:
                    print('\nEpoch %05d: saving model to %s' % (
                        epoch + 1, filepath))
                torch.save(self.model.state_dict(), filepath)
| 1.539063 | 2 |
test.py | lelloman/python-introcutter | 1 | 12767823 | <reponame>lelloman/python-introcutter<gh_stars>1-10
#!/usr/bin/python
"""
a simple test to check that every piece is in place
TMP_DIR and OUTPUT_DIR from conf.py are used in order to check that
writing there is ok
first a mock intro track is created, just some random tones put together,
then N_TEST_CASES tracks are created, they will contain the intro at different
positions while the rest of the file will be other random tones.
all the test tracks are then matched against the mock intro and the expected results
evaluated, the tracks are made of chunks that can be either the intro ('x') or some other
random tones ('-') so that they look like this:
track 0 x-------
track 1 -x------
track 2 --x-----
...
therefore match values should be something like this:
track index match v actual expected
0 0 46464 0.00 0.00
1 77 22631 7.15 7.28
2 156 24357 14.49 14.56
...
"""
from __future__ import print_function
import os
import wave
from struct import pack
from math import sin, pi
from random import randint
from os.path import isdir
import fingerprint
import conf
# something around 10 should be ok
N_TEST_CASES = 8
# the size of this is different from the one used for the fft
WINDOW_SIZE = randint(1980, 2020)
# convert test frames into seconds
k = WINDOW_SIZE / float(conf.AUDIO_FRAME_RATE)
# convert matched frame index into seconds
K = conf.AUDIO_WINDOW_SIZE / float(conf.AUDIO_FRAME_RATE)
INTRO_DURATION = 30 * 6 * k
NOISE_DURATION = 10 * 8 * k
CLEAN_UP = True
TEST_INTRO = os.path.join(conf.TMP_DIR, 'test_intro.wav')
TEST_TRACK = os.path.join(conf.OUTPUT_DIR, 'test_track{}.wav')
def write_wave_file(filename, frames):
    """Write a mono 16-bit PCM wave file from a list of sample windows.

    :param filename: path of the wave file to create
    :param frames: list of equally sized windows of int16 PCM samples
    """
    # little-endian signed shorts, one per sample in a window
    sample_fmt = '<{}h'.format(len(frames[0]))
    writer = wave.open(filename, 'w')
    writer.setparams((1, 2, conf.AUDIO_FRAME_RATE, 0, 'NONE', 'not compressed'))
    for window in frames:
        writer.writeframes(pack(sample_fmt, *window))
    writer.close()
class Voice(object):
    """
    given a frequency, it can generate frames of a
    sin wave keeping track of the phase
    """
    # peak amplitude of a mixed voice, 40% of the signed 16-bit range
    amplitude = (2 ** 15) * .4

    def __init__(self, freq):
        self.freq = freq
        # angular step per sample (assumes a 22050 Hz sample rate --
        # TODO confirm against conf.AUDIO_FRAME_RATE)
        self.k = (freq / 22050.) * 2 * pi
        self.fi = 0  # running sample counter, carried over between windows

    def get_window(self):
        """Return the next WINDOW_SIZE float samples in [-1, 1]."""
        return [sin(self.phase() * self.k) for _ in range(WINDOW_SIZE)]

    def phase(self):
        """Advance and return the sample counter."""
        self.fi += 1
        return self.fi
def combine_voices(voices):
    """
    Mix the next window of every voice into one amplified, clipped
    window of int PCM samples.

    :param voices: iterable of Voice objects to mix
    :return: a window of WINDOW_SIZE int samples
    """
    mixed = [0] * WINDOW_SIZE
    # clip just inside the signed 16-bit range
    ceiling = (2 ** 15) - 1000
    floor = -ceiling
    for voice in voices:
        for i, sample in enumerate(voice.get_window()):
            mixed[i] += sample
    for i in range(WINDOW_SIZE):
        amplified = mixed[i] * Voice.amplitude
        if amplified > ceiling:
            amplified = ceiling
        elif amplified < floor:
            amplified = floor
        mixed[i] = int(amplified)
    return mixed
def make_intro_track():
    """
    create a mock intro track: 30 random four-tone chords,
    each sustained for 6 consecutive windows
    :return: a list of pcm frames
    """
    frames = []
    for _ in range(30):
        # CLEANUP: the original wrapped this comprehension in an
        # immediately-invoked lambda for no effect; build the chord directly.
        voices = [Voice(55 + randint(0, 1200)) for _ in range(4)]
        frames.extend([combine_voices(voices) for _ in range(6)])
    return frames
def make_pseudo_noise_track():
    """
    create a non intro pcm track: 80 windows, each holding a fresh
    random two-tone chord (unlike the intro, chords are not sustained)
    :return: a list of pcm frames
    """
    frames = []
    for _ in range(10):
        for _ in range(8):
            chord = [Voice(55 + randint(0, 1200)) for _ in range(2)]
            frames.append(combine_voices(chord))
    return frames
def make_track(pattern, x=None):
    """
    create a pcm track, the format of the input should look like
    '--x----' where x is the intro and - is a non-intro window.
    if an intro x is passed it will be used, otherwise a random one
    is generated

    :param pattern: a string like '-x--' containing exactly one 'x'
    :param x: the intro track as a list of pcm frames, or None
    :return: the concatenated pcm frames of the whole track
    :raises ValueError: if the pattern does not contain exactly one 'x'
    """
    pattern = pattern.lower()
    if pattern.count('x') != 1:
        # IMPROVED: raise ValueError (a subclass of the Exception previously
        # raised, so existing handlers still work) for a bad argument.
        raise ValueError("there must be one x!")
    if x is None:
        x = make_intro_track()
    track = []
    for ele in pattern:
        track.extend(x if ele == 'x' else make_pseudo_noise_track())
    return track
def make_test_cases(n, intro):
    """
    create a list of test cases to test the script,
    each test case is a dictionary with keys:
        'i': the index of the element
        'track': the test track as a pcm frames list
        'pattern': a string like -x---
        'expected': the position of the intro in seconds
    :param n: number of test cases to create
    :param intro: the intro track to put in the test track
    :return: a list of test case dictionaries
    """
    patterns = [('-' * i) + 'x' + ('-' * ((N_TEST_CASES - 1) - i)) for i in range(n)]
    # CLEANUP: the original produced its progress output through a
    # `'dummy': print(...)` entry inside a dict comprehension; build the
    # cases in a plain loop instead (the always-None 'dummy' key is gone,
    # matching the documented dictionary layout above).
    test_cases = []
    for i, pattern in enumerate(patterns):
        print('make track {} of {}'.format(i + 1, N_TEST_CASES))
        test_cases.append({
            'track': make_track(pattern, intro),
            'pattern': pattern,
            'i': i,
            'expected': NOISE_DURATION * i
        })
    for i, test_case in enumerate(test_cases):
        print("writing wave file", i + 1, "of", len(test_cases), "...")
        write_wave_file(TEST_TRACK.format(i), test_case['track'])
    return test_cases
def run_test():
    """Build a mock intro plus N_TEST_CASES tracks containing it, then check
    that the fingerprint matcher locates the intro within one second of its
    known position in every track, printing a per-track report."""
    if not isdir(conf.TMP_DIR):
        raise Exception("TMP_DIR {} does not exist!".format(conf.TMP_DIR))
    if not isdir(conf.OUTPUT_DIR):
        raise Exception("OUTPUT_DIR {} does not exist!".format(conf.OUTPUT_DIR))
    intro = make_track('x')
    write_wave_file(TEST_INTRO, intro)
    fp = fingerprint.make_fingerprint(TEST_INTRO)
    test_cases = make_test_cases(N_TEST_CASES, intro)
    print("tracks:")
    print('\n'.join([t['pattern'] for t in test_cases]))
    print("intro duration", INTRO_DURATION)
    print("noise duration", NOISE_DURATION)
    print('track\tindex\tmatch v\t\tactual\t\texpected')
    for test in test_cases:
        match = fingerprint.find_fingerprint_in_file(fp, TEST_TRACK.format(test['i']))
        # K converts the matched fft-window index into seconds
        test['actual'] = K * match[0]
        print('{}\t{:9d}\t{}\t{:8.2f}\t{:8.2f}'.format(test['i'], match[0], match[1], test['actual'], test['expected']))
    if CLEAN_UP:
        os.remove(TEST_INTRO)
        for i in range(len(test_cases)):
            os.remove(TEST_TRACK.format(i))
    # tolerate up to one second of error between expected and actual position
    ok = True
    for test in test_cases:
        if abs(test['expected'] - test['actual']) > 1.:
            ok = False
            print("<!!!> expected value for track {} was {} but got {}".format(test['i'], test['expected'], test['actual']))
    print("OK!" if ok else "")
if __name__ == '__main__':
run_test()
| 3.40625 | 3 |
deep_banana_eater.py | jwergieluk/deep_banana_eater | 0 | 12767824 | <filename>deep_banana_eater.py
import math
import datetime
import os
import pandas
import matplotlib.pyplot as plt
import numpy
import random
from unityagents import UnityEnvironment
import torch
import torch.nn
import torch.optim
from collections import deque, namedtuple
import click
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ReplayBuffer:
    """Fixed-capacity FIFO store of experience tuples for DQN training."""

    def __init__(self, buffer_size):
        # deque(maxlen=...) silently evicts the oldest entry once full
        self.memory = deque(maxlen=buffer_size)
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])

    def add(self, state, action, reward, next_state, done):
        """Store a single (s, a, r, s', done) transition."""
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self, batch_size: int, device):
        """Draw a uniform random mini-batch and stack it into tensors on *device*."""
        batch = random.sample(self.memory, k=batch_size)

        def column(field):
            # stack one named field of every sampled experience row-wise
            return numpy.vstack([getattr(e, field) for e in batch])

        states = torch.from_numpy(column("state")).float().to(device)
        actions = torch.from_numpy(column("action")).long().to(device)
        rewards = torch.from_numpy(column("reward")).float().to(device)
        next_states = torch.from_numpy(column("next_state")).float().to(device)
        # bools -> uint8 -> float, matching the loss computation's dtype
        dones = torch.from_numpy(column("done").astype(numpy.uint8)).float().to(device)
        return states, actions, rewards, next_states, dones

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
class QNet(torch.nn.Module):
    """Feed-forward network mapping a state vector to one Q-value per action."""

    def __init__(self, input_dim: int, action_no):
        super().__init__()
        hidden = 96
        # Two hidden ReLU layers of width 96, linear output head (one unit per action).
        layers = [
            torch.nn.Linear(input_dim, hidden),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden, hidden),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden, action_no),
        ]
        self._net = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Return Q-value estimates for the state batch *x*."""
        return self._net(x)
class Agent0:
    """DQN agent: epsilon-greedy policy over a QNet, trained from a replay buffer."""
    LEARNING_RATE = 0.0005
    UPDATE_EVERY = 4  # gradient step every 4th call to learn()
    REPLAY_BUFFER_SIZE = 100_000
    BATCH_SIZE = 128
    GAMMA = 0.99  # discount factor for the TD target
    def __init__(self, state_space_dim: int, no_actions: int, device):
        self.state_space_dim = state_space_dim
        self.no_actions = no_actions
        self.device = device
        self.q_net = QNet(self.state_space_dim, self.no_actions)
        self.q_net.to(device)
        self.optimizer = torch.optim.Adam(self.q_net.parameters(), lr=self.LEARNING_RATE)
        self.loss = torch.nn.MSELoss()
        self._replay_buffer = ReplayBuffer(self.REPLAY_BUFFER_SIZE)
        self.t = 1  # counts the calls to the learn() method
    def load_weights(self, file_name: str):
        """ Loads the DQN weights from a file and sets the Agent to test mode """
        self.q_net.load_state_dict(torch.load(file_name))
        self.q_net.eval()
        # Large t drives epsilon() to exp(-10.8) ~= 2e-5, i.e. effectively greedy.
        self.t = 1800.0*300.0
    def save_weights(self, file_name: str):
        """ Save DQN weights to file """
        torch.save(self.q_net.state_dict(), file_name)
    def epsilon(self):
        """ Returns the probability of taking a random action during the training time """
        # Exponential decay in the number of learn() calls.
        return math.exp(-self.t*0.00002)
    def get_action(self, state):
        """ Produce an optimal action for a given state """
        # Epsilon-greedy: explore with probability epsilon(), else act greedily.
        if random.random() <= self.epsilon():
            return random.randint(0, self.no_actions-1)
        state = torch.from_numpy(state).float().unsqueeze(0).to(self.device)
        self.q_net.eval()
        with torch.no_grad():
            action_values = self.q_net(state)
        self.q_net.train()
        return numpy.argmax(action_values.cpu().detach().numpy())
    def learn(self, state, action, reward, next_state, done):
        """Store one transition and, every UPDATE_EVERY calls, run one DQN update step."""
        self.t += 1
        self._replay_buffer.add(state, action, reward, next_state, done)
        if self.t % self.UPDATE_EVERY != 0:
            return
        if len(self._replay_buffer) < self.BATCH_SIZE:
            return
        states, actions, rewards, next_states, dones = self._replay_buffer.sample(self.BATCH_SIZE, self.device)
        # Get max predicted Q values (for next states) from target model
        # NOTE(review): there is no separate target network -- q_net bootstraps
        # from its own (detached) predictions; the soft_update call below is
        # commented out.
        q_targets_next = self.q_net(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states; (1 - dones) zeroes the bootstrap
        # term for terminal transitions.
        q_targets = rewards + (self.GAMMA * q_targets_next * (1 - dones))
        # Get expected Q values from local model
        q_expected = self.q_net(states).gather(1, actions)
        loss_value = self.loss(q_expected, q_targets)
        self.optimizer.zero_grad()
        loss_value.backward()
        self.optimizer.step()
        # ------------------- update target network ------------------- #
        # self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target
        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class UnityEnvWrapper:
    """ This class provides gym-like wrapper around the unity environment """
    def __init__(self, env_file: str = 'Banana_Linux_NoVis/Banana.x86_64'):
        self._env = UnityEnvironment(file_name=env_file)
        # Single-brain environment: use the first (and only) brain.
        self._brain_name = self._env.brain_names[0]
        self._brain = self._env.brains[self._brain_name]
        # Reset once to discover the observation size from a sample state.
        env_info = self._env.reset(train_mode=True)[self._brain_name]
        state = env_info.vector_observations[0]
        self.state_space_dim = len(state)
        self.action_space_size = self._brain.vector_action_space_size
    def reset(self, train_mode: bool = False):
        """Reset the environment and return the initial observation vector."""
        env_info = self._env.reset(train_mode)[self._brain_name]
        state = env_info.vector_observations[0]
        return state
    def step(self, action):
        """Apply *action*; return (next_state, reward, done, info) gym-style (info is always None)."""
        env_info = self._env.step(action)[self._brain_name]  # send the action to the environment
        next_state = env_info.vector_observations[0]  # get the next state
        reward = env_info.rewards[0]  # get the reward
        done = env_info.local_done[0]  # see if episode has finished
        return next_state, reward, done, None
    def close(self):
        """Shut down the underlying Unity process."""
        self._env.close()
def train(max_episodes: int):
    """ Train the agent using a head-less environment and save the DQN weights when done """
    env = UnityEnvWrapper('Banana_Linux_NoVis/Banana.x86_64')
    agent = Agent0(env.state_space_dim, env.action_space_size, DEVICE)
    data = []
    scores = []
    # range(1, max_episodes) runs max_episodes - 1 episodes; the DataFrame
    # index below is built with the same range so lengths match.
    for episode in range(1, max_episodes):
        state = env.reset(train_mode=True)
        score = 0
        # Each episode is capped at 299 steps.
        for step in range(1, 300):
            action = agent.get_action(state)
            next_state, reward, done, _ = env.step(action)
            agent.learn(state, action, reward, next_state, done)
            score += reward
            state = next_state
            if done:
                break
        scores.append(score)
        # Rolling mean over (up to) the last 100 episodes.
        rolling_average_score = sum(scores[-100:])/min(episode, 100)
        data.append([score, rolling_average_score])
        print(f'Episode {episode}. Final score {score}. Average score (last 100 episodes) {rolling_average_score}.')
    # Save weights and score series
    now_str = datetime.datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')
    os.makedirs('runs', exist_ok=True)
    agent.save_weights(f'runs/weights-{now_str}.bin')
    # Plot average scores
    df = pandas.DataFrame(data=data, index=range(1, max_episodes), columns=['score', 'rolling_avg_score'])
    df.to_csv(f'runs/scores-{now_str}.csv')
    plt.figure(figsize=(8, 6), dpi=120)
    plt.tight_layout()
    df['rolling_avg_score'].plot(grid=True, colormap='cubehelix')
    plt.savefig(f'runs/scores-{now_str}.png')
def test(weights_file_name: str):
    """ Load DQN weights and run the agent """
    # Uses the visual (non-headless) build so the run can be watched.
    env = UnityEnvWrapper('Banana_Linux/Banana.x86_64')
    agent = Agent0(env.state_space_dim, env.action_space_size, DEVICE)
    agent.load_weights(weights_file_name)
    state = env.reset(train_mode=False)
    score = 0
    # Single greedy episode, capped at 299 steps.
    for step in range(1, 300):
        action = agent.get_action(state)
        next_state, reward, done, _ = env.step(action)
        print(f'Step {step}. Action {action}. Reward {reward}.')
        score += reward
        state = next_state
        if done:
            break
    print(f'Final score {score}.')
    env.close()
@click.group()
@click.version_option()
def cli():
    """ deep_banana_eater -- command line interface """
@cli.command('train')
@click.option('--max-episodes', type=click.INT, default=2000)
def train_command(max_episodes: int):
    """ Train the agent using a head-less environment and save the DQN weights when done """
    # Thin click wrapper delegating to train().
    train(max_episodes)
@cli.command('test')
@click.option('--load-weights-from', type=click.Path(dir_okay=False, file_okay=True, readable=True, exists=True))
def test_command(load_weights_from: str):
    """ Load DQN weights and run the agent """
    # Thin click wrapper delegating to test(); click validates the path exists.
    test(load_weights_from)
if __name__ == '__main__':
    cli()
| 2.578125 | 3 |
NEATClub/Parameters.py | Magody/IAProyecto_NEAT | 0 | 12767825 |
class Parameters:
    """Static window-size and sprite-file configuration for the game."""
    WINDOW_WIDTH = 500
    WINDOW_HEIGHT = 600
    BASE_HEIGHT = 100
    # Sprite asset file names; BIRD_IMAGES lists the animation frames.
    BASE_IMAGE = "base.png"
    BACKGROUND_IMAGE = "bg.png"
    BIRD_IMAGES = ["bird1.png", "bird2.png", "bird3.png"]
    PIPE_IMAGES = ["pipe.png"]
| 1.71875 | 2 |
stx/outputs/html/themes.py | bakasoft/stx | 1 | 12767826 | from typing import List
class HtmlTheme:
    """Holds the style/script asset lists an HTML renderer injects into a page."""

    def __init__(self):
        self.body_scripts: List[str] = []   # scripts emitted at the end of <body>
        self.head_scripts: List[str] = []   # scripts emitted inside <head>
        self.head_styles: List[str] = []    # stylesheets emitted inside <head>
class NullHtmlTheme(HtmlTheme):
    # No-op theme: contributes no styles or scripts (empty lists from the base class).
    pass
| 2.296875 | 2 |
aradhana/views.py | hehaichi/Aradhana-2017-Webapp | 0 | 12767827 | from django.shortcuts import render,redirect,get_object_or_404
from .models import Student, Event
# Create your views here.
def index(request):
    """Overview page: event list plus counts of events and students."""
    students = Student.objects.all()
    events = Event.objects.all().order_by('name','category')
    # NOTE(review): the 'students' key holds a count, not the queryset --
    # confirm the template expects a number under that name.
    context={'event_len':len(events),'students':len(students),'events':events}
    return render(request,'aradhana/details.html',context=context)
def events(request,eventID):
    """Detail page for one event: its registered students and their count."""
    event=get_object_or_404(Event,pk=eventID)
    students=event.student_set.all().order_by('name','school')
    total=len(students)
    context={'event':event,'students':students,'total':total}
    return render(request,'aradhana/event.html',context=context)
src/bitwardentools/common.py | kiorky/bitwardentools | 0 | 12767828 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import enum
import logging
import os
import re
from http.client import HTTPConnection
class VAULTIER_SECRET(enum.IntEnum):
    # Numeric type codes for the Vaultier secret kinds handled by this tool.
    secret = 200
    note = 100
    file = 300
def sanitize(st):
    """Return *st* unchanged; kept as a single hook point for future sanitization."""
    return st
def as_bool(value):
    """Coerce *value* to bool.

    Strings are true when they start with y/o/1/t (case-insensitive);
    anything else falls back to Python truthiness.
    """
    if not isinstance(value, str):
        return bool(value)
    return re.match("^(y|o|1|t)", value.lower()) is not None
CFG = os.environ.get("CONFIG", "/w/data/config.init")
EXPORT_DIR = os.environ.get("VAULTIER_EXPORT_DIR", "/w/data/export")
L = logging.getLogger("passwords")
LOGLEVEL = os.environ.get("LOGLEVEL", "info").upper()
REQUEST_DEBUG = as_bool(os.environ.get("REQUEST_DEBUG", ""))
def setup_logging(loglevel=LOGLEVEL):
    """Configure root logging at *loglevel*; enable HTTP wire debugging when REQUEST_DEBUG is set."""
    logging.basicConfig(level=getattr(logging, loglevel))
    if REQUEST_DEBUG:
        # Dump raw HTTP traffic from http.client and requests/urllib3.
        HTTPConnection.debuglevel = 1
        req_log = logging.getLogger("requests.packages.urllib3")
        req_log.setLevel(logging.DEBUG)
        req_log.propagate = True
# vim:set et sts=4 ts=4 tw=80:
| 2.28125 | 2 |
repositories/__init__.py | Simon1093/book-shop | 0 | 12767829 | from .pastebin_repository import PasteBinRepository
| 1.023438 | 1 |
deco/sinks/sink.py | mfojtak/decor | 1 | 12767830 | from abc import ABC, abstractmethod
class Sink(ABC):
    """Abstract iterable sink; subclasses supply __iter__ to produce items."""

    @abstractmethod
    def __iter__(self):
        """Yield the sink's items; must be implemented by subclasses."""

    def eval(self):
        """Drain the iterator completely, discarding items (run for side effects)."""
        for _ in self:
            pass
Sorting and pathfinding/dijkstra.py | LucasEmmes/pythonScripts | 0 | 12767831 | <filename>Sorting and pathfinding/dijkstra.py
# It make a node
class node:
    """Graph vertex carrying its weighted edges and Dijkstra bookkeeping."""

    def __init__(self, symbol):
        self.symbol = symbol
        self.edges = []                        # list of [neighbour, weight] pairs
        self.shortest_distance = float('inf')  # best known distance from the start
        self.shortest_path_via = None          # predecessor on the best known path

    # Register a weighted edge from this vertex to *node*.
    def add_edge(self, node, distance):
        self.edges.append([node, distance])

    # Relax every outgoing edge, updating neighbours that get closer via this vertex.
    def update_edges(self):
        for neighbour, weight in self.edges:
            candidate = self.shortest_distance + weight
            if candidate < neighbour.shortest_distance:
                neighbour.shortest_distance = candidate
                neighbour.shortest_path_via = self
def get_node(nodes, symbol):
    """
    Search "nodes" for the first node whose symbol equals "symbol".

    PARAMS:\n
        nodes (array): array of nodes to search from
        symbol (str): symbol to match

    RETURNS:\n
        node: the first match, or None when no node matches
    """
    return next((candidate for candidate in nodes if candidate.symbol == symbol), None)
def make_nodes(edge_data, *args):
    """
    Takes an array of edges and makes them into node objects.
    PARAMS:
        edge_data (arr): array of edges with format [start_node (str), end_node (str), distance (int)]
        *args (boolean): True if you want digraph, False if not (default is False,
            i.e. undirected: the reverse edge is added automatically)
        *args (array[str]): array of symbols to use for nodes that may not have edges and are not included in "edge_data"
    RETURNS:
        array: array of the nodes that it created
    """
    nodes = []
    # Decide if digraph or not
    if len(args) > 0:
        digraph = args[0]
    else:
        digraph = False
    # Fill in empty nodes
    if len(args) > 1:
        for symbol in args[1]:
            nodes.append(node(symbol))
    # Make edges into nodes and couple them
    for edge in edge_data:
        # Reuse an existing node for the symbol if one was already created.
        node1 = get_node(nodes, edge[0])
        node2 = get_node(nodes, edge[1])
        if node1 == None:
            node1 = node(edge[0])
        if node2 == None:
            node2 = node(edge[1])
        node1.add_edge(node2, edge[2])
        if not digraph: node2.add_edge(node1, edge[2]) # REMOVE THIS IF YOU WANT DIGRAPH 2/2
        if node1 not in nodes: nodes.append(node1)
        if node2 not in nodes: nodes.append(node2)
    return nodes
def get_path_array(node):
    """
    Takes an end node and gives you every node (in order) for the shortest path to it.

    PARAMS:
        node (node): end node
    RETURNS:
        array[nodes]: every node on the path, start first, *node* last
    """
    chain = []
    current = node
    # Walk the predecessor links back to the start, then reverse.
    while current is not None:
        chain.append(current)
        current = current.shortest_path_via
    chain.reverse()
    return chain
def dijkstra(nodes, start, end):
    """
    Finds the fastest way from "start" to "end" (usually what dijkstra does).
    PARAMS:
        nodes (array): array of nodes
        start (node): start of path
        end (node): end of path
    RETURNS
        array[node]: path of nodes from "start" to "end" (inclusive) if one is found
        None: if no path is found
    """
    queue = []
    path = []
    # Setup
    queue = nodes.copy()
    start.shortest_distance = 0
    queue.sort(key=lambda node: node.shortest_distance)
    # Exploration loop: repeatedly settle the closest unvisited node until
    # *end* is the closest one remaining.
    # NOTE(review): re-sorting the whole queue each iteration is O(n log n)
    # per step; a heapq would be faster with identical results.
    while queue[0] != end:
        node = queue[0]
        node.update_edges()
        path.append(queue.pop(0))
        queue.sort(key=lambda node: node.shortest_distance)
    # Test if there actually was a path found
    if end.shortest_distance == float('inf'):
        print("End has not been found")
        return None
    return get_path_array(end)
app.py | aslan1301/sql_alchemy | 0 | 12767832 | <reponame>aslan1301/sql_alchemy<filename>app.py
# Dependencies
from flask import Flask, jsonify
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
# Set up database: reflect the SQLite schema into mapped classes at import time.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
# Assign the classes to the matching tables
measurement = Base.classes.measurement
station = Base.classes.station
# Create app
app = Flask(__name__)
# Create the session
# NOTE(review): one module-level Session shared by all requests -- confirm
# this is acceptable for the app's concurrency model.
session = Session(engine)
| 2.484375 | 2 |
python/ml4ir/applications/ranking/tests/test_tfrecord_reader.py | ducouloa/ml4ir | 0 | 12767833 | import unittest
import tensorflow as tf
import logging
from ml4ir.base.data.tfrecord_reader import TFRecordSequenceExampleParser
from ml4ir.base.features.feature_config import FeatureConfig
from ml4ir.base.config.keys import TFRecordTypeKey
from ml4ir.base.io.local_io import LocalIO
from ml4ir.base.features.preprocessing import PreprocessingMap
DATASET_PATH = "ml4ir/applications/ranking/tests/data/tfrecord/train/file_0.tfrecord"
FEATURE_CONFIG_PATH = "ml4ir/applications/ranking/tests/data/configs/feature_config.yaml"
MAX_SEQUENCE_SIZE = 25
class SequenceExampleParserTest(unittest.TestCase):
    """
    Test class for ml4ir.base.data.tfrecord_reader.TFRecordSequenceExampleParser
    """
    def setUp(self):
        """Load one serialized SequenceExample and build a padded parser (max size 25)."""
        file_io = LocalIO()
        logger = logging.getLogger()
        self.dataset = tf.data.TFRecordDataset(DATASET_PATH)
        self.proto = next(iter(self.dataset))
        self.feature_config = FeatureConfig.get_instance(
            tfrecord_type=TFRecordTypeKey.SEQUENCE_EXAMPLE,
            feature_config_dict=file_io.read_yaml(FEATURE_CONFIG_PATH),
            logger=logger,
        )
        self.parser = TFRecordSequenceExampleParser(
            feature_config=self.feature_config,
            preprocessing_map=PreprocessingMap(),
            required_fields_only=False,
            pad_sequence=True,
            max_sequence_size=25,
        )
    def test_features_spec(self):
        """
        Test the feature specification constructed and used to parse the Example proto
        """
        features_spec = self.parser.features_spec
        # Spec is a (context_spec, sequence_spec) pair of dicts.
        assert isinstance(features_spec, tuple)
        assert isinstance(features_spec[0], dict)
        assert isinstance(features_spec[1], dict)
        # Check if the feature specification matches with the feature_config
        assert len(set(self.feature_config.get_context_features("name"))) == len(features_spec[0])
        assert len(set(self.feature_config.get_sequence_features("name"))) == len(features_spec[1])
        for feature in self.feature_config.get_context_features("name"):
            assert feature in features_spec[0]
        for feature in self.feature_config.get_sequence_features("name"):
            assert feature in features_spec[1]
    def test_extract_features_from_proto(self):
        """
        Test extraction of features from serialized proto
        """
        context_features, sequence_features = self.parser.extract_features_from_proto(self.proto)
        for feature in self.feature_config.get_context_features("name"):
            assert feature in context_features
            # Test that all features are sparse tensor
            assert isinstance(context_features[feature], tf.sparse.SparseTensor)
            # Exercise the dense conversion to check the sparse tensor is well formed.
            feature_tensor = tf.sparse.to_dense(tf.sparse.reset_shape(context_features[feature]))
            # Test the shape of each extracted feature
            assert context_features[feature].shape == (1,)
        for feature in self.feature_config.get_sequence_features("name"):
            assert feature in sequence_features
            # Test that all features are sparse tensor
            assert isinstance(sequence_features[feature], tf.sparse.SparseTensor)
            feature_tensor = tf.sparse.to_dense(tf.sparse.reset_shape(sequence_features[feature]))
            assert feature_tensor.shape == (2, 1)
        # Assert that there is no mask feature
        assert "mask" not in sequence_features
    def test_get_default_tensor(self):
        """
        Test the default tensor used for missing features
        """
        default_tensor = self.parser.get_default_tensor(
            self.feature_config.get_feature("query_text"), sequence_size=25
        )
        assert default_tensor.shape == (1,)
        default_tensor = self.parser.get_default_tensor(
            self.feature_config.get_feature("quality_score"), sequence_size=8
        )
        assert default_tensor.shape == (8, 1)
    def test_get_feature(self):
        """
        Test fetching feature tensor from extracted feature dictionary
        """
        # Checking context features
        feature_tensor = self.parser.get_feature(
            self.feature_config.get_feature("query_text"),
            extracted_features=({"query_text": tf.zeros((3, 4, 6))}, {}),
            sequence_size=10,
        )
        assert feature_tensor.shape == (3, 4, 6)
        # Check missing feature being replaced with default tensor
        feature_tensor = self.parser.get_feature(
            self.feature_config.get_feature("query_text"),
            extracted_features=({}, {}),
            sequence_size=10,
        )
        assert feature_tensor.shape == (1,)
        # Checking sequence features
        feature_tensor = self.parser.get_feature(
            self.feature_config.get_feature("quality_score"),
            extracted_features=({}, {"quality_score": tf.zeros((3, 4, 6))}),
            sequence_size=10,
        )
        assert feature_tensor.shape == (3, 4, 6)
        # Check missing feature being replaced with default tensor
        feature_tensor = self.parser.get_feature(
            self.feature_config.get_feature("quality_score"),
            extracted_features=({}, {}),
            sequence_size=10,
        )
        assert feature_tensor.shape == (10, 1)
    def test_generate_and_add_mask(self):
        """
        Test mask generation and addition
        """
        rank_tensor = tf.constant([[1], [2], [3], [4], [5]])
        indices = tf.where(tf.not_equal(rank_tensor, tf.constant(0)))
        values = tf.gather_nd(rank_tensor, indices)
        sparse_rank_tensor = tf.SparseTensor(indices, values, rank_tensor.shape)
        # Check when pad sequence is set to True
        features_dict, sequence_size = self.parser.generate_and_add_mask(
            ({}, {"rank": sparse_rank_tensor}), {}
        )
        assert "mask" in features_dict
        assert features_dict["mask"].shape == (25, 1)
        assert tf.reduce_sum(features_dict["mask"]).numpy() == 5
        assert sequence_size == 25
        # Check when pad sequence is set to False
        self.parser.pad_sequence = False
        features_dict, sequence_size = self.parser.generate_and_add_mask(
            ({}, {"rank": sparse_rank_tensor}), {}
        )
        assert "mask" in features_dict
        assert features_dict["mask"].shape == (5, 1)
        assert tf.reduce_sum(features_dict["mask"]).numpy() == 5
        assert sequence_size == 5
        self.parser.pad_sequence = True
    def test_parse_fn(self):
        """
        Test the Example parsing function
        """
        # Check tensor shapes when pad_sequence is True
        features, labels = self.parser.get_parse_fn()(self.proto)
        assert isinstance(features, dict)
        assert isinstance(labels, tf.Tensor)
        for feature in self.feature_config.get_all_features(key="node_name", include_label=False):
            assert feature in features
        assert features["mask"].shape == (25, 1)
        for feature in self.feature_config.get_context_features("node_name"):
            assert features[feature].shape == (1,)
        for feature in self.feature_config.get_sequence_features("node_name"):
            if feature != "clicked":
                assert features[feature].shape == (25, 1)
        assert labels.shape == (25, 1)
        # Check tensor shapes when pad_sequence is False
        self.parser.pad_sequence = False
        features, labels = self.parser.get_parse_fn()(self.proto)
        assert features["mask"].shape == (2, 1)
        for feature in self.feature_config.get_context_features("node_name"):
            assert features[feature].shape == (1,)
        for feature in self.feature_config.get_sequence_features("node_name"):
            if feature != "clicked":
                assert features[feature].shape == (2, 1)
        assert labels.shape == (2, 1)
        # FIX: restore the parser's pad flag. The original line was
        # "self.pad_sequence = True", which set an unused attribute on the
        # TestCase and left self.parser unpadded for subsequent tests.
        self.parser.pad_sequence = True
| 2.40625 | 2 |
CosmOrc/cospar.py | akvatol/CosmOrc | 1 | 12767834 | import re
import pandas as pd
from CosmOrc.setting import Setting
def chunkit(data: list or tuple = None, n: int = None):
    """
    Split *data* into n consecutive slices of (near-)equal length.

    Arguments
    ---------
    data: list or tuple
        The sequence to split into n parts
    n: int
        Number of sub-sequences to return (default: 2)

    Returns
    -------
    list: the sequence split into parts

    Example
    -------
    >>> l = [1, 2, 3, 4, 5, 6, 7, 8]
    >>> chunkit(l)
    [[1, 2, 3, 4], [5, 6, 7, 8]]
    >>> chunkit(l, n=4)
    [[1, 2], [3, 4], [5, 6], [7, 8]]
    """
    if not n:
        n = 2
    # Fractional step keeps the parts balanced when len(data) % n != 0;
    # slice boundaries are truncated to ints.
    step = len(data) / n
    chunks = []
    cursor = 0
    while cursor < len(data):
        chunks.append(data[int(cursor):int(cursor + step)])
        cursor += step
    return chunks
def read_data_cosmo(file_path: str = None) -> list:
    """Read a CosmoTherm *.tab file, keeping the lines that carry the
    calculation settings, the measurement units and the computed results.

    Arguments
    ---------
    file_path: str
        Path to the *.tab file

    Return
    ------
    data: list
        Two-dimensional list; len(data) equals the number of jobs in the
        source file, and each sub-list holds the lines of one job
    """
    with open(file_path, "r") as file:
        data = []
        for line in file:
            if line.split():
                # A "Setting" line opens a new job section.
                if "Setting" in line:
                    jobs_data = []
                    jobs_data.append(line)
                    data.append(jobs_data)
                # Keep unit headers and result rows; skip other "job" lines.
                # NOTE(review): a non-blank line before the first "Setting"
                # would hit an unbound jobs_data -- confirm input always
                # starts with a Settings line.
                elif "job" not in line or "Units" in line:
                    jobs_data.append(line)
    return data
def compound_nr(some_str: str):
    """Extract the compound index N from a substring like 'x(N)='; return None when absent."""
    match = re.search(r"x\(([\d]*)\)=", some_str)
    return match.group(1) if match else None
def setting_pars(settings_str: str):
    """Extract the calculation parameters from a 'Settings' line of a *.tab file.

    Arguments
    ---------
    settings_str: str
        A *.tab file line containing the keyword 'Settings'

    Return
    ------
    job_indx: int
        The job index parsed from the line
    settings_list: tuple
        Tuple of Setting objects describing the calculation conditions

    Example
    -------
    >>> setting_pars('Settings job 2 : T= 223.15 K ; x(1)= 0.1000;')
    (2, (T= 223.15 K, x(1)= 0.1 %))
    """
    settings_list = []
    job_indx, new_line = settings_str.split(":")
    job_indx = job_indx.split()[2]
    settings = new_line.split(";")
    for setting in settings:
        new_setting = None
        # 3 tokens: "name value unit" -- a complete setting record.
        if len(setting.split()) == 3:
            settings_list.append(Setting.from_record(setting))
        # 2 tokens: a unitless mole-fraction entry like "x(1)= 0.1" -> percent.
        elif len(setting.split()) == 2:
            new_setting = Setting.from_record(setting)
            new_setting.convert(name=compound_nr(new_setting.name), unit="%")
            settings_list.append(new_setting)
        # More tokens: several fraction entries fused together; split in pairs.
        elif len(setting.split()) > 3:
            # TODO: problem spot -- the n passed to chunkit may be fractional.
            for element in chunkit(setting.split(),
                                   n=len(setting.split()) / 2):
                new_setting = Setting.from_record(element)
                new_setting.convert(name=compound_nr(new_setting.name),
                                    unit="%")
                settings_list.append(new_setting)
    return int(job_indx), tuple(settings_list)
def columns_pars(head_str: str):
    """
    Parse a table header line and return the column names, excluding 'Compound'.

    Arguments
    ---------
    head_str: str
        The table header line

    Return
    ------
    tuple: the CosmoTherm column names with 'Compound' removed

    Example
    -------
    >>> columns_pars('Nr Compound H ln(gamma) pv Gsolv pvExp HpvExp GpvExp')
    ('Nr', 'H', 'ln(gamma)', 'pv', 'Gsolv', 'pvExp', 'HpvExp', 'GpvExp')
    """
    return tuple(word for word in head_str.split() if word != "Compound")
def data_pars(data: list or tuple):
    """
    Parse the data rows of one CosmoTherm table.

    Each row has the form "<Nr> <compound name> <values...>". The compound
    name (second token) is collected separately; the remaining tokens
    (Nr plus the values) form the parameter row.

    Arguments
    ---------
    data: list or tuple
        Lines holding the CosmoTherm calculation results

    Return
    ------
    tuple: (compound names, parameter rows)
    """
    compounds = []
    rows = []
    for record in data:
        tokens = record.split()
        compounds.append(tokens[1])
        rows.append(tokens[:1] + tokens[2:])
    return compounds, rows
class Job:
    """
    One CosmoTherm "job": its settings, units line and computed table.

    Arguments
    ---------
    job: list or tuple
        The raw lines of a single CosmoTherm job
        (job[0]: Settings line, job[1]: units, job[2]: header, job[3:]: data)

    Attributes
    ----------
    settings:
        The settings this calculation was run with
    units:
        Line with information about some measurement units
    parameters:
        The CosmoTherm calculation results
    """
    __slots__ = ("units", "settings", "compounds", "parameters", "columns",
                 "job_indx")
    def __init__(self, job: list or tuple):
        self.units = job[1]
        self.job_indx, self.settings = setting_pars(job[0])
        self.compounds, self.parameters = data_pars(job[3:])
        self.columns = columns_pars(job[2])
        # Kept mutable so settings_df() can append a default pressure entry.
        self.settings = list(self.settings)
    def full_df(self):
        """
        Return the complete data of this job as a DataFrame; helper to ease
        work with the Jobs class. Only works when the class was initialised
        correctly.

        Return
        ------
        pd.DataFrame(): dataframe with the data of one job;
        index -- a MultiIndex of the job number and the compound names,
        columns -- the parameter names,
        data -- the values of the COSMOtherm table
        """
        index = list(zip([self.job_indx] * len(self.compounds),
                         self.compounds))
        multiindex = pd.MultiIndex.from_tuples(index,
                                               names=["Job", "Compound"])
        return pd.DataFrame(data=self.parameters,
                            index=multiindex,
                            columns=self.columns)
    def small_df(self, columns: list or tuple):
        """
        Return full_df() restricted to the given columns; helper for the
        Jobs class.

        Arguments
        ---------
        columns: list or tuple
            The column names to keep
        """
        _small_df = self.full_df().loc[:, columns].copy()
        return _small_df
    def settings_df(self, detailed=None):
        """Return this job's settings as a one-column DataFrame.

        With ``detailed`` the cells hold the Setting objects themselves,
        otherwise just their values.
        NOTE(review): when no pressure setting ('p=') is present, a default
        1 atm Setting is appended to self.settings as a side effect.
        """
        columns = [self.job_indx]
        index = [x.name for x in self.settings]
        if 'p=' in index:
            pass
        else:
            index.append('p=')
            self.settings.append(Setting(name='p=', value=1, unit='atm'))
        if detailed:
            data = self.settings
        else:
            data = [x.value for x in self.settings]
        return pd.DataFrame(columns=columns, index=index, data=data)
class Jobs:
    """
    Holds the data of one COSMOtherm calculation.
    On construction takes path: str -- the path to a *.tab file -- reads the
    file and builds a Job object for every job found inside.

    Arguments
    ---------
    path: str
        Path to the *.tab file

    Methods
    -------
    full_df(csv: bool, invert: bool): df
    small_df(csv: bool, invert: bool): df
    settings_df(csv: bool): df
        for need spec df for calc
    """
    __slots__ = ("path", "data")
    def __init__(self, path: str):
        self.path = path
        self.data = [Job(i) for i in read_data_cosmo(path)]
    def full_df(self, invert=None):
        """Concatenate every job's table into one numeric DataFrame.

        'NA' cells become 0 and all values are coerced to numbers. With
        ``invert`` the (Job, Compound) index levels are swapped and sorted
        by compound.
        """
        df = pd.concat([job.full_df() for job in self.data], sort=True)
        df = df.applymap(lambda x: 0 if x == 'NA' else x)
        df = df.apply(pd.to_numeric)
        df.fillna(0, inplace=True)
        if invert:
            df.sort_index(axis=0, level=1, inplace=True)
            return df.swaplevel(i=-2, j=-1, axis=0)
        else:
            return df
    def small_df(self, columns: list or tuple = None, invert: bool = None):
        """Like full_df() but restricted to *columns*.

        Returns None (implicitly) when *columns* is empty or not given.
        """
        if columns:
            _small_df = self.full_df().loc[:, columns].copy()
            if invert:
                _small_df.sort_index(axis=0, level=1, inplace=True)
                return _small_df.swaplevel(i=-2, j=-1, axis=0)
            else:
                return _small_df
        else:
            pass
    def settings_df(self, detailed=None):
        """Concatenate the per-job settings tables side by side.

        With ``detailed`` the cells hold Setting objects instead of plain
        values; missing cells are filled with 0.
        """
        if detailed:
            df = pd.concat([job.settings_df(detailed=1) for job in self.data],
                           axis=1,
                           sort=True)
            df.fillna(0, inplace=True)
            return df
        else:
            df = pd.concat([job.settings_df() for job in self.data],
                           axis=1,
                           sort=True)
            df.fillna(0, inplace=True)
            return df
def main():
    """Batch-convert every *.tab file in `mypath` to CSV (values + settings)."""
    from os import listdir
    from os.path import isfile, join
    # NOTE(review): hard-coded user-specific path -- parameterise before reuse.
    mypath = '/home/anton/Documents/Scamt_projects/Adonin_project/COSMOthermProject/EA_scrf/'
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    files = [i for i in onlyfiles if i.endswith('tab')]
    for file in files:
        Jobs(mypath + file).small_df(
            invert=1, columns=('Gsolv', 'ln(gamma)',
                               'Nr')).T.to_csv(f'{mypath + file}.csv')
        Jobs(mypath +
             file).settings_df().T.to_csv(f'{mypath + file}_Settings.csv')
if __name__ == "__main__":
    main()
| 3.109375 | 3 |
alembic/versions/61298763f7e4_create_characters_table.py | chalupaul/twitch_dungeon | 0 | 12767835 | <filename>alembic/versions/61298763f7e4_create_characters_table.py
"""create characters table
Revision ID: 61298763f7e4
Revises:
Create Date: 2020-10-24 12:29:03.632815
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "61298763f7e4"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the `characters` table with name, level and ability-score columns."""
    op.create_table(
        "characters",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("name", sa.String),
        sa.Column("level", sa.Integer),
        sa.Column("STR", sa.Integer),
        sa.Column("DEX", sa.Integer),
        sa.Column("CON", sa.Integer),
        sa.Column("WIS", sa.Integer),
        sa.Column("INT", sa.Integer),
    )
def downgrade():
    # NOTE(review): downgrade does not drop the table, so this migration is
    # irreversible -- confirm whether op.drop_table("characters") was intended.
    pass
| 1.445313 | 1 |
actionnetwork_activist_sync/full_member_api_pull.py | afitts/actionnetwork_activist_sync | 0 | 12767836 | <gh_stars>0
import pandas as pd
import numpy as np
import requests
import datetime
from actionnetwork_activist_sync.actionnetwork import ActionNetwork
def get_all_people(api=None, people=None, url="https://actionnetwork.org/api/v2/people",i=0):
    """Follow the Action Network people collection's pagination and return all records.

    Recurses on the `next` link; `i` counts pages but is otherwise unused.
    NOTE(review): no HTTP error handling -- a failed request or non-JSON
    response will raise.
    """
    if not people:
        people = []
    data = requests.get(url=url, headers=api.headers)
    people += [d for d in data.json()['_embedded']['osdi:people']]
    if data.json().get('_links', {}).get('next', None):
        next_url = data.json().get('_links').get('next').get('href')
        i += 1
        return get_all_people(api=api, people=people, url=next_url,i=i)
    return people
def full_member_pull():
    """Export every member from each branch's Action Network list to CSV.

    Writes one full export CSV per branch and one combined, de-duplicated
    email-only CSV, all stamped with today's date.
    """
    # SECURITY(review): API keys are hard-coded in source -- rotate them and
    # move them to environment variables or a secrets store.
    branch_apis = {'DC': 'adc35ecc62127c367a0d4ff6d80d4aaf',
                   'MOCO': '35fafdb312f073b2477d7535fe10e531',
                   'PGC': 'dd0a692686682295025cc9d2f126cbf4',
                   'NOVA': '0054bd054c8338e26d45428a9cdec7f5'
                   }
    actionnetwork_dict = {}
    for branch in list(branch_apis.keys()):
        # NOTE(review): this membership test is always true (we iterate the
        # same dict's keys), so the else branch is unreachable.
        if branch in list(branch_apis.keys()):
            actionnetwork_dict[branch] = ActionNetwork(api_key=branch_apis[branch])
        else:
            print(f"Unknown Branch {branch}")
    email_dfs = []
    for api in actionnetwork_dict.values():
        all_people = get_all_people(api=api)
        email = []
        phone = []
        ad = []
        for person in all_people:
            for address in person['email_addresses']:
                email.append(address['address'])
            # NOTE(review): phone/address values are appended only for entries
            # flagged primary; a person without a primary entry leaves these
            # lists shorter than the DataFrame and the column assignment below
            # would fail -- confirm the API always supplies one per person.
            for number in person['phone_numbers']:
                if number['primary'] == True:
                    try:
                        phone.append(number['number'])
                    except:
                        phone.append(np.nan)
            for address in person['postal_addresses']:
                if address['primary'] == True:
                    try:
                        ad.append(address['address_lines'][0])
                    except:
                        ad.append(np.nan)
        df = pd.json_normalize(all_people)
        df['email_address'] = email
        df['phone_number'] = phone
        df['address'] = ad
        # Drop the nested/raw API columns that were flattened or replaced above.
        drop_cols = ['email_addresses','phone_numbers','postal_addresses','identifiers','created_date','modified_date',
                     'languages_spoken', '_links.self.href','_links.osdi:signatures.href',
                     '_links.osdi:submissions.href','_links.osdi:donations.href', '_links.osdi:taggings.href',
                     '_links.osdi:outreaches.href','_links.osdi:attendances.href']
        df.drop(drop_cols,axis=1,inplace=True)
        df.to_csv(f'ActionNetwork_full_member_export_{datetime.datetime.now().date()}.csv',index=False)
        email_dfs.append(df['email_address'])
    totdf = pd.concat(email_dfs)
    totdf.drop_duplicates().to_csv(f'ActionNetwork_full_member_email_export_{datetime.datetime.now().date()}.csv',
                                   index=False,header=False)
if __name__ == "__main__":
    full_member_pull()
| 2.4375 | 2 |
runRec.py | rocco-haro/Linear-Difference-Eq. | 0 | 12767837 | <reponame>rocco-haro/Linear-Difference-Eq.
# Linear Algebra Take Home Quiz #3
# Author: <NAME>
# University of Alaska, Anchorage
# Oct '17'
# https://github.com/rocco-haro/Linear-Difference-Eq..git
import numpy as np
class StateMtx(object):
    """Iterates a linear difference equation x_{k+1} = A * x_k.

    By default *A* is the hard-coded 3x3 state matrix from the original
    assignment; it can now be overridden with the keyword-only
    ``state_matrix`` argument (backward compatible with existing callers).
    """

    def __init__(self, *args, state_matrix=None):
        """
        :param args: ``args[0]`` is the initial state vector (a sequence of
            numbers).
        :param state_matrix: optional square array-like to use as *A* instead
            of the built-in default.
        """
        self.args = args
        # asmatrix(...).T turns the 1-D input into an N x 1 column vector so
        # it is a valid right operand for matrix multiplication with A.
        self.initParameters = np.asmatrix(self.args[0]).T
        if state_matrix is None:
            # Default state matrix (previously hard coded, now overridable).
            state_matrix = [[0.97, 0.05, 0.10], [0.00, 0.90, 0.05], [0.03, 0.05, 0.85]]
        self.stateMtx = np.asmatrix(state_matrix)

    def printClass(self):
        """Print the initial state vector and the state matrix."""
        print("Initial Parameters: ")
        print(self.initParameters)
        print("\nState Matrix: ")
        print(self.stateMtx)
        print("\n")

    def getNextK(self, currK):
        """Return x_{k+1} = A * x_k for the given column vector."""
        return self.stateMtx * currK

    def getKthValueAt(self, targetDay):
        """Return the state vector after *targetDay* applications of A."""
        currK = self.initParameters
        for _ in range(targetDay):
            currK = self.getNextK(currK)
        return currK
if __name__ == "__main__":
    # Worked example: iterate the default state matrix 30 steps forward from
    # the initial population split.
    initial_state = [295, 55, 150]
    model = StateMtx(initial_state)
    model.printClass()
    print(model.getKthValueAt(30))
| 3.90625 | 4 |
addresses/models.py | indoriyasboyz/E-commerce | 0 | 12767838 | from django.db import models
from billing.models import BillingProfile
# (stored value, human-readable label) choices for Address.address_type.
ADDRESS_TYPES = (
    ('billing', 'Billing'),
    ('shipping', 'Shipping'),
)
class Address(models.Model):
    """A postal address attached to a billing profile.

    One model stores both billing and shipping addresses; ``address_type``
    (see ``ADDRESS_TYPES``) distinguishes them.
    """

    billing_profile = models.ForeignKey(BillingProfile, null=True, blank=False, on_delete=models.SET_NULL)
    address_type = models.CharField(max_length=120, choices=ADDRESS_TYPES)
    address_line_1 = models.CharField(max_length=120)
    address_line_2 = models.CharField(max_length=120, null=True, blank=True)
    city = models.CharField(max_length=120)
    country = models.CharField(max_length=120, default='Turkey')
    state = models.CharField(max_length=120)
    postal_code = models.CharField(max_length=120)

    def __str__(self):
        """Render as '<billing profile> : <TYPE>' (e.g. for admin lists)."""
        return '{} : {}'.format(self.billing_profile, str(self.address_type).upper())

    def get_address(self):
        """Return the full address as one human-readable line."""
        second_line = self.address_line_2 or ''
        return (f"{self.address_line_1} {second_line} / {self.state}, "
                f"{self.city} {self.postal_code} {self.country}")
shgp/classification/demos/demo_pgpr_1D.py | GiovanniPasserello/SHGP | 0 | 12767839 | <gh_stars>0
import gpflow
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from shgp.data.dataset import PlatformDataset
from shgp.utilities.general import invlink
from shgp.utilities.train_pgpr import train_pgpr
# Fix the NumPy and TensorFlow seeds so demo runs are reproducible.
np.random.seed(42)
tf.random.set_seed(42)
"""
Demonstration of non-sparse PGPR on the 'platform' dataset.
We plot the datapoints and the predictive decision boundaries.
"""
def classification_demo():
    """Fit a PGPR model to the platform data and plot its predictions.

    Reads the module-level ``X``, ``Y`` and ``X_test`` arrays, so it must be
    called after they are initialised (see the ``__main__`` block).
    """
    # Fit the model.
    # NOTE(review): the three positional numbers are train_pgpr
    # hyperparameters -- confirm their meaning against its signature.
    model, final_elbo = train_pgpr(
        X, Y,
        10, 1000, 10,
        kernel_type=gpflow.kernels.Matern52
    )
    print(final_elbo)

    # Predictive latent mean on the dense test grid (variance unused here).
    f_mean, _ = model.predict_f(X_test)

    # Latent mean first, then its inverse-link ("squashed") probabilities.
    plt.plot(X_test, f_mean, "C0", lw=1)
    test_probs = invlink(f_mean)
    plt.plot(X_test, test_probs, "C1", lw=1)

    # Colour training points by whether the model classifies them correctly.
    f_train_mean, _ = model.predict_f(X)
    train_probs = invlink(f_train_mean)
    is_correct = train_probs.round() == Y
    plt.scatter(X[is_correct], Y[is_correct], c="g", s=40, marker='x', label='correct')
    plt.scatter(X[~is_correct], Y[~is_correct], c="r", s=40, marker='x', label='incorrect')

    # Figure metadata.
    plt.title("PGPR - Platform Dataset")
    plt.xlim((-2, 2))
    plt.ylim((-0.5, 1.5))

    plt.legend()
    plt.show()
if __name__ == '__main__':
    # Load data. X, Y and the dense plotting grid X_test are module globals
    # read by classification_demo().
    X, Y = PlatformDataset().load_data()
    X_test = np.linspace(-2, 2, 200).reshape(-1, 1)

    # Plot params
    plt.rcParams["figure.figsize"] = (8, 4)

    classification_demo()
| 2.3125 | 2 |
Adafruit_BMP/BMP280.py | DragonflyNet67/Adafruit_Python_BMP | 0 | 12767840 | # Author: <NAME> <<EMAIL>>
#
# Based on the Adafruit BMP280 Driver C++ driver and the BMP085 python lib.
# - https://github.com/adafruit/Adafruit_BMP280_Library
# - https://github.com/adafruit/Adafruit_Python_BMP
#
# Datasheet: https://www.adafruit.com/datasheets/BST-BMP280-DS001-11.pdf
from __future__ import division
import logging
# BMP280 default address.
# BMP280 default address.
BMP280_I2CADDR = 0x77
# Chip-id register; read in __init__ and expected to return 0x58.
BMP280_CHIPID = 0xD0

# BMP280 calibration (trimming) registers, read once at start-up.
BMP280_DIG_T1 = 0x88  # R   Unsigned Calibration data (16 bits)
BMP280_DIG_T2 = 0x8A  # R   Signed Calibration data (16 bits)
BMP280_DIG_T3 = 0x8C  # R   Signed Calibration data (16 bits)
BMP280_DIG_P1 = 0x8E  # R   Unsigned Calibration data (16 bits)
BMP280_DIG_P2 = 0x90  # R   Signed Calibration data (16 bits)
BMP280_DIG_P3 = 0x92  # R   Signed Calibration data (16 bits)
BMP280_DIG_P4 = 0x94  # R   Signed Calibration data (16 bits)
BMP280_DIG_P5 = 0x96  # R   Signed Calibration data (16 bits)
BMP280_DIG_P6 = 0x98  # R   Signed Calibration data (16 bits)
BMP280_DIG_P7 = 0x9A  # R   Signed Calibration data (16 bits)
BMP280_DIG_P8 = 0x9C  # R   Signed Calibration data (16 bits)
BMP280_DIG_P9 = 0x9E  # R   Signed Calibration data (16 bits)

# Control/configuration and measurement data registers.
BMP280_CONTROL = 0xF4
BMP280_RESET = 0xE0
BMP280_CONFIG = 0xF5
BMP280_PRESSUREDATA = 0xF7
BMP280_TEMPDATA = 0xFA
class BMP280(object):
def __init__(self, address=BMP280_I2CADDR, i2c=None, **kwargs):
self._logger = logging.getLogger('Adafruit_BMP.BMP280')
# Create I2C device.
if i2c is None:
import Adafruit_GPIO.I2C as I2C
i2c = I2C
self._device = i2c.get_i2c_device(address, **kwargs)
if self._device.readU8(BMP280_CHIPID) != 0x58:
raise Exception('Unsupported chip')
# Load calibration values.
self._load_calibration()
self._device.write8(BMP280_CONTROL, 0x3F)
def _load_calibration(self):
self.cal_t1 = int(self._device.readU16(BMP280_DIG_T1)) # UINT16
self.cal_t2 = int(self._device.readS16(BMP280_DIG_T2)) # INT16
self.cal_t3 = int(self._device.readS16(BMP280_DIG_T3)) # INT16
self.cal_p1 = int(self._device.readU16(BMP280_DIG_P1)) # UINT16
self.cal_p2 = int(self._device.readS16(BMP280_DIG_P2)) # INT16
self.cal_p3 = int(self._device.readS16(BMP280_DIG_P3)) # INT16
self.cal_p4 = int(self._device.readS16(BMP280_DIG_P4)) # INT16
self.cal_p5 = int(self._device.readS16(BMP280_DIG_P5)) # INT16
self.cal_p6 = int(self._device.readS16(BMP280_DIG_P6)) # INT16
self.cal_p7 = int(self._device.readS16(BMP280_DIG_P7)) # INT16
self.cal_p8 = int(self._device.readS16(BMP280_DIG_P8)) # INT16
self.cal_p9 = int(self._device.readS16(BMP280_DIG_P9)) # INT16
self._logger.debug('T1 = {0:6d}'.format(self.cal_t1))
self._logger.debug('T2 = {0:6d}'.format(self.cal_t2))
self._logger.debug('T3 = {0:6d}'.format(self.cal_t3))
self._logger.debug('P1 = {0:6d}'.format(self.cal_p1))
self._logger.debug('P2 = {0:6d}'.format(self.cal_p2))
self._logger.debug('P3 = {0:6d}'.format(self.cal_p3))
self._logger.debug('P4 = {0:6d}'.format(self.cal_p4))
self._logger.debug('P5 = {0:6d}'.format(self.cal_p5))
self._logger.debug('P6 = {0:6d}'.format(self.cal_p6))
self._logger.debug('P7 = {0:6d}'.format(self.cal_p7))
self._logger.debug('P8 = {0:6d}'.format(self.cal_p8))
self._logger.debug('P9 = {0:6d}'.format(self.cal_p9))
def _load_datasheet_calibration(self):
# Set calibration from values in the datasheet example. Useful for debugging the
# temp and pressure calculation accuracy.
self.cal_t1 = 27504
self.cal_t2 = 26435
self.cal_t3 = -1000
self.cal_p1 = 36477
self.cal_p2 = -10685
self.cal_p3 = 3024
self.cal_p4 = 2855
self.cal_p5 = 140
self.cal_p6 = -7
self.cal_p7 = 15500
self.cal_p8 = -14500
self.cal_p9 = 6000
def read_raw(self, register):
"""Reads the raw (uncompensated) temperature or pressure from the sensor."""
raw = self._device.readU16BE(register)
raw <<= 8
raw = raw | self._device.readU8(register + 2)
raw >>= 4
self._logger.debug('Raw value 0x{0:X} ({1})'.format(raw & 0xFFFF, raw))
return raw
def _compensate_temp(self, raw_temp):
""" Compensate temperature """
t1 = (((raw_temp >> 3) - (self.cal_t1 << 1)) *
(self.cal_t2)) >> 11
t2 = (((((raw_temp >> 4) - (self.cal_t1)) *
((raw_temp >> 4) - (self.cal_t1))) >> 12) *
(self.cal_t3)) >> 14
return t1 + t2
def read_temperature(self):
"""Gets the compensated temperature in degrees celsius."""
raw_temp = self.read_raw(BMP280_TEMPDATA)
compensated_temp = self._compensate_temp(raw_temp)
temp = float(((compensated_temp * 5 + 128) >> 8)) // 100
self._logger.debug('Calibrated temperature {0}'.format(temp))
return temp
def read_pressure(self):
"""Gets the compensated pressure in Pascals."""
raw_temp = self.read_raw(BMP280_TEMPDATA)
compensated_temp = self._compensate_temp(raw_temp)
raw_pressure = self.read_raw(BMP280_PRESSUREDATA)
p1 = compensated_temp - 128000
p2 = p1 * p1 * self.cal_p6
p2 += (p1 * self.cal_p5) << 17
p2 += self.cal_p4 << 35
p1 = ((p1 * p1 * self.cal_p3) >> 8) + ((p1 * self.cal_p2) << 12)
p1 = ((1 << 47) + p1) * (self.cal_p1) >> 33
if 0 == p1:
return 0
p = 1048576 - raw_pressure
p = (((p << 31) - p2) * 3125) // p1
p1 = (self.cal_p9 * (p >> 13) * (p >> 13)) >> 25
p2 = (self.cal_p8 * p) >> 19
p = ((p + p1 + p2) >> 8) + ((self.cal_p7) << 4)
return float(p // 256)
def read_altitude(self, sealevel_pa=101325.0):
"""Calculates the altitude in meters."""
# Calculation taken straight from section 3.6 of the datasheet.
pressure = float(self.read_pressure())
# altitude = 44330.0 * (1.0 - pow(pressure // sealevel_pa, (1.0 // 5.255))) # nlsn DEL
altitude = 44330.0 * (1.0 - pow(pressure // sealevel_pa, (1.0 / 5.255))) / 100 # nlsn INS
self._logger.debug('Altitude {0} m'.format(altitude))
return altitude
def read_sealevel_pressure(self, altitude_m=0.0):
"""Calculates the pressure at sealevel when given a known altitude in
meters. Returns a value in Pascals."""
pressure = float(self.read_pressure())
p0 = pressure // pow(1.0 - altitude_m // 44330.0, 5.255)
self._logger.debug('Sealevel pressure {0} Pa'.format(p0))
return p0
| 1.703125 | 2 |
test/common.py | stavros7167/distributed_protocol_completion | 0 | 12767841 | import automaton
def automaton1():
    """Build the example automaton loaded from a single transition string."""
    machine = automaton.Automaton()
    machine.add_string('label1L a? b! label2L')
    return machine
def process_example():
    """Example process automaton with one edge p0 -> p1 labelled 'a!'."""
    proc = automaton.Automaton()
    proc.add_edge('p0', 'p1', label='a!')
    return proc
def environment_example():
    """Example environment automaton cycling e0 -'a?'-> e1 -'b!'-> e0."""
    env = automaton.Automaton()
    env.add_edge('e0', 'e1', label='a?')
    env.add_edge('e1', 'e0', label='b!')
    return env
| 2.703125 | 3 |
2312.py | gabzin/uri | 3 | 12767842 | d=dict()
for _ in range(int(input())):
s=input().split(' ',1)
d[s[0]]=list(map(int,s[1].split()))
d=dict(sorted(d.items(), key=lambda x: x[0]))
d=dict(sorted(d.items(), key=lambda x: x[1][2],reverse=True))
d=dict(sorted(d.items(), key=lambda x: x[1][1],reverse=True))
d=dict(sorted(d.items(), key=lambda x: x[1][0],reverse=True))
for i,j in d.items():print(i,*j,sep=" ")
| 3.34375 | 3 |
server/config.py | smartscenes/multiscan | 6 | 12767843 | import os
import util
# Server configuration
DATA_SERVER = 'http://localhost:3030'
TEMP_FOLDER = 'tmp'
STAGING_FOLDER = 'staging'
AUTOPROCESS = True
# General paths to binaries
SCRIPT_DIR = util.getScriptPath()
SOURCE_DIR = os.path.join(SCRIPT_DIR, '..')
DATA_DIR = 'staging/'
COLOR_FOLDER = 'color'
DEPTH_FOLDER = 'depth'
RECONS_RESULT_DIR = ''
PHOTOGRAMMETRY_RESULT_DIR = ''
# System specific paths for processing server binaries
TOOLS_DIR = '../'
DECODE_DIR = 'scripts'
RECONS_DIR = 'reconstruction'
PHOTOGRAMMETRY_DIR = 'meshroom'
# where scan data is stored under as subdirs with unique ids
# STAGING_FOLDER_LOCAL = os.path.join(DATA_DIR, 'scans', 'staging')
| 2.140625 | 2 |
tests/test_compression_func_default.py | dylan-profiler/compressio | 16 | 12767844 | import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from visions import StandardSet
from compressio.compress import compress_func
from compressio.type_compressor import DefaultCompressor
# pandas 1.0 renamed the nullable boolean extension dtype from "Bool" to "boolean".
_PANDAS_MAJOR = int(pd.__version__.split(".")[0])
bool_dtype = "boolean" if _PANDAS_MAJOR >= 1 else "Bool"
@pytest.mark.parametrize(
    "series,before,expected",
    [
        (
            pd.Series([10.0, 100.0, np.iinfo(np.int16).max * 1.0], dtype=np.float64),
            np.float64,
            "int16",
        ),
        (pd.Series([np.nan, 1], dtype=np.float64), np.float64, "Int8"),
        (
            pd.Series([True, False, None, None, None, None, True, False] * 1000),
            # BUGFIX: np.object was removed in NumPy 1.24; it was always just
            # an alias for the builtin `object`, so behaviour is unchanged.
            object,
            bool_dtype,
        ),
    ],
)
def test_compress_series(series, before, expected):
    """Compressing a series should shrink its dtype without changing values."""
    assert series.dtype == before
    compressed_series = compress_func(
        series,
        typeset=StandardSet(),
        compressor=DefaultCompressor(),
        with_inference=True,
        inplace=False,
    )
    assert str(compressed_series.dtype) == expected
    # Values must survive the round trip; only the storage dtype may differ.
    assert_series_equal(series, compressed_series, check_dtype=False)
| 2.296875 | 2 |
toucan/alert_api/migrations/0001_initial.py | toucan-project/TOUCAN | 4 | 12767845 | <reponame>toucan-project/TOUCAN
# Generated by Django 2.2.3 on 2019-08-27 10:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial schema for the alert_api app.

    Creates the CanaryAlertItem, MimiAlertItem and SampleItem tables.
    NOTE: generated by `makemigrations` -- do not hand-edit field definitions.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CanaryAlertItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField()),
                ('identifier', models.CharField(max_length=50)),
                ('canary_type', models.CharField(max_length=4)),
                ('location', models.CharField(max_length=50)),
                ('ip', models.CharField(max_length=15)),
                ('node', models.CharField(blank=True, max_length=10)),
                ('user_agent', models.CharField(blank=True, max_length=250)),
                ('smb_loc', models.CharField(blank=True, max_length=50)),
                ('filename', models.CharField(blank=True, max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='MimiAlertItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('machinename', models.CharField(max_length=50)),
                ('sid', models.CharField(max_length=20)),
                ('pid', models.IntegerField()),
                ('date', models.DateTimeField()),
                ('source', models.CharField(max_length=255)),
                ('md5', models.CharField(max_length=32)),
                ('sha1', models.CharField(max_length=40)),
                ('target', models.CharField(max_length=255)),
                ('accessMask', models.CharField(max_length=4)),
                ('stack', models.CharField(max_length=999)),
            ],
        ),
        migrations.CreateModel(
            name='SampleItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('md5', models.CharField(max_length=32, unique=True)),
                ('sha1', models.CharField(max_length=40, unique=True)),
                ('sha256', models.CharField(max_length=64, unique=True)),
                ('ssdeep', models.CharField(max_length=148)),
                ('sample', models.FileField(upload_to='samples')),
            ],
        ),
    ]
| 1.867188 | 2 |
lib/python2.7/site-packages/samba/tests/common.py | abankalarm/pth-toolkit | 480 | 12767846 | <filename>lib/python2.7/site-packages/samba/tests/common.py
# Unix SMB/CIFS implementation. Tests for common.py routines
# Copyright (C) <NAME> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.common"""
import samba, os
import samba.tests
from samba.common import *
from samba.samdb import SamDB
class CommonTests(samba.tests.TestCase):
    """Unit tests for the helper routines in samba.common."""

    def test_normalise_int32(self):
        """normalise_int32 renders values as signed 32-bit decimal strings."""
        self.assertEqual('17', normalise_int32(17))
        self.assertEqual('17', normalise_int32('17'))
        self.assertEqual('-123', normalise_int32('-123'))
        # Values above 2**31 - 1 wrap into the negative range.
        self.assertEqual('-1294967296', normalise_int32('3000000000'))

    def test_dsdb_Dn(self):
        """dsdb_Dn parses binary DN components ("B:<len>:<hex>:<dn>")."""
        sam = samba.Ldb(url='dntest.ldb')
        # Remove the scratch database even if an assertion fails below
        # (previously dntest.ldb leaked on test failure).
        self.addCleanup(os.unlink, 'dntest.ldb')
        # A plain DN must parse without raising.
        dn1 = dsdb_Dn(sam, "DC=foo,DC=bar")
        dn2 = dsdb_Dn(sam, "B:8:0000000D:<GUID=b3f0ec29-17f4-452a-b002-963e1909d101>;DC=samba,DC=example,DC=com")
        self.assertEqual(dn2.binary, "0000000D")
        self.assertEqual(13, dn2.get_binary_integer())
| 2.40625 | 2 |
shopping_mall/order/admin.py | djangojeng-e/djangoproejcts | 0 | 12767847 | <filename>shopping_mall/order/admin.py<gh_stars>0
from django.contrib import admin
from .models import Order
# Register your models here.
# Admin configuration for the Order model.
class OrderAdmin(admin.ModelAdmin):
    # Columns shown in the admin change-list view.
    list_display = ('fcuser', 'product')
admin.site.register(Order, OrderAdmin)
python/local-for.py | tbedford/code-snippets | 0 | 12767848 | <filename>python/local-for.py
def my_func(count=4):
    """Demonstrate that rebinding a parameter only affects the local name.

    Prints ``count`` once per iteration over four iterations; whenever it
    equals 2 it is printed a second time and then decremented.
    """
    for _ in range(1, 5):
        print("count", count)
        if count != 2:
            continue
        print("count", count)
        count -= 1


my_func()
| 3.40625 | 3 |
sypt_train.py | pan-webis-de/hosseinia18 | 1 | 12767849 | import torch, os, argparse
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import sypt_dataset, sypt_utils
from torch.utils.data import DataLoader
from sypt_utils import *
from sypt_dataset import create_pt_pan2018
US = "\x1f"  # unit separator => sentence separator (documents are split on it)
soh = "\x02"  # start-of-heading: separates tokens within one sentence string
class PTFAttenPRNN(nn.Module):
    """Encode one PTF (parse-tree-feature) index sequence into one vector.

    An embedding layer feeds a unidirectional LSTM; a learned context vector
    scores each LSTM output, and the softmax-weighted sum of the outputs is
    returned as the sequence representation.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, embedding_martix, batch_size, iscuda=True):
        super(PTFAttenPRNN, self).__init__()
        self.batch_size = batch_size
        self.ptf_hidden_size = hidden_dim
        self.ptf_embed_dim = embedding_dim
        self.iscuda = iscuda
        # Embedding table initialised from the pre-trained matrix.
        self.ptf_embed = nn.Embedding(vocab_size, embedding_dim)
        self.ptf_embed.weight.data.copy_(torch.from_numpy(embedding_martix))
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)
        self.ptf_context_vector = self.init_ptf_contx_vector()
        self.ptf_hidden = self.init_ptf_hidden()
        self.lin_attention = nn.Linear(self.ptf_hidden_size, self.ptf_hidden_size)

    def init_ptf_hidden(self):
        """Return a fresh zeroed (h, c) state pair for the LSTM."""
        h = Variable(torch.zeros(1, self.batch_size, self.ptf_hidden_size))
        c = Variable(torch.zeros(1, self.batch_size, self.ptf_hidden_size))
        if self.iscuda:
            return h.cuda(), c.cuda()
        return h, c

    def init_ptf_contx_vector(self):
        """Learnable attention context vector, uniformly initialised."""
        return nn.Parameter(torch.Tensor(self.ptf_hidden_size, 1).uniform_(-0.1, 0.1))

    def get_ptf_attention(self, ptf_encoded):
        """Weight each encoder output by its attention score."""
        scores = F.tanh(self.lin_attention(ptf_encoded))
        logits = torch.matmul(scores, self.ptf_context_vector.squeeze())
        assert logits.size() == torch.Size([ptf_encoded.size(0), self.batch_size])
        weights = F.softmax(logits, dim=0).unsqueeze(2)  # -> (seq_len, batch, 1)
        return weights * ptf_encoded

    def forward(self, ptf_sequence, ptf_hidden_state):
        """Return the attended summary (1, batch, hidden) and the new state."""
        embedded = self.ptf_embed(ptf_sequence).view(len(ptf_sequence), self.batch_size, -1)
        encoded, ptf_hidden_state = self.lstm(embedded, ptf_hidden_state)
        summary = torch.sum(self.get_ptf_attention(encoded), dim=0).unsqueeze(0)
        return summary, ptf_hidden_state
class PTSentAttenRNN(nn.Module):
    """Sentence-level attention encoder over the two PTF summary sequences.

    Two independent LSTM+attention branches encode the forward ("l") and
    reversed ("r") sentence-summary sequences; the resulting document vectors
    are either fused into seven similarity features (``fuse=True``) or simply
    concatenated, then classified with a linear layer and log-softmax.
    """

    def __init__(self, batch_size, sent_hidden_size, ptf_hidden_size, class_no, drop_rate, iscuda=True, fuse=True):
        """
        :param batch_size: mini-batch size (the training code uses 1).
        :param sent_hidden_size: hidden size of the sentence-level LSTMs.
        :param ptf_hidden_size: size of the incoming PTF summary vectors.
        :param class_no: number of output classes.
        :param drop_rate: dropout probability before the final linear layer.
        :param iscuda: place the initial hidden states on the GPU.
        :param fuse: use the 7 similarity features instead of concatenation.
        """
        super(PTSentAttenRNN, self).__init__()
        self.batch_size = batch_size
        self.ptf_hidden_size = ptf_hidden_size
        self.sent_hidden_size = sent_hidden_size
        self.drop_rate = drop_rate
        self.iscuda = iscuda
        self.fuse = fuse
        # Forward ("l") branch.
        self.sent_lstm_l = nn.LSTM(ptf_hidden_size, sent_hidden_size)
        self.sent_context_vector_l = self.init_sent_contx_vector()
        self.sent_hidden_l = self.init_sent_hidden()
        self.lin_attention_l = nn.Linear(self.sent_hidden_size, self.sent_hidden_size)
        # Backward ("r") branch.
        self.sent_lstm_r = nn.LSTM(ptf_hidden_size, sent_hidden_size)
        self.sent_context_vector_r = self.init_sent_contx_vector()
        self.sent_hidden_r = self.init_sent_hidden()
        self.lin_attention_r = nn.Linear(self.sent_hidden_size, self.sent_hidden_size)
        # 7 inputs = one per similarity feature produced by get_last_layer().
        self.lin = nn.Linear(7, class_no) if self.fuse else nn.Linear(2*self.sent_hidden_size, class_no)

    def forward(self, ptf_atten_sequence, sent_hidden_state):
        """Classify a document pair given its two summary sequences.

        :param ptf_atten_sequence: [left_seq, right_seq], each of shape
            (num_sentences, batch, ptf_hidden_size).
        :param sent_hidden_state: [left (h, c), right (h, c)] LSTM states.
        :return: (log-probabilities of shape (1, class_no), updated states).
        """
        ptf_atten_seq_l, ptf_atten_seq_r = ptf_atten_sequence[0], ptf_atten_sequence[1]
        sent_hidden_state_l, sent_hidden_state_r = sent_hidden_state[0], sent_hidden_state[1]
        (sent_output_l, sent_hidden_state_l) = self.sent_lstm_l(ptf_atten_seq_l, sent_hidden_state_l)
        l_hidden = torch.sum(self.get_sent_attention_l(sent_output_l), dim=0)
        (sent_output_r, sent_hidden_state_r) = self.sent_lstm_r(ptf_atten_seq_r, sent_hidden_state_r)
        r_hidden = torch.sum(self.get_sent_attention_r(sent_output_r), dim=0)
        sent_hidden_state = [sent_hidden_state_l, sent_hidden_state_r]
        merged = PTSentAttenRNN.get_last_layer(l_hidden, r_hidden, self.fuse)
        merged = F.dropout(merged, p=self.drop_rate, training=self.training)
        merged = self.lin(merged)
        return F.log_softmax(merged, dim=1), sent_hidden_state

    def _sent_attention(self, sent_encoded, lin, context_vector):
        """Shared attention: weight outputs by softmax(tanh(W h) . context).

        Factored out of get_sent_attention_l/_r, which previously duplicated
        this logic verbatim.
        """
        u = F.tanh(lin(sent_encoded))
        mul = torch.matmul(u, context_vector.squeeze())
        assert mul.size() == torch.Size([sent_encoded.size(0), self.batch_size])
        alpha = F.softmax(mul, dim=0).unsqueeze(2)  # (sent_no, batch) -> (sent_no, batch, 1)
        return alpha * sent_encoded

    def get_sent_attention_l(self, sent_encoded):
        """Attention-weight the forward branch's encoder outputs."""
        return self._sent_attention(sent_encoded, self.lin_attention_l, self.sent_context_vector_l)

    def get_sent_attention_r(self, sent_encoded):
        """Attention-weight the backward branch's encoder outputs."""
        return self._sent_attention(sent_encoded, self.lin_attention_r, self.sent_context_vector_r)

    def init_sent_contx_vector(self):
        """Learnable attention context vector, uniformly initialised."""
        return nn.Parameter(torch.Tensor(self.sent_hidden_size, 1).uniform_(-0.1, 0.1))

    @staticmethod
    def get_last_layer(l_hidden, r_hidden, fuse=True):
        """Combine the two document vectors into the classifier input.

        With ``fuse`` they are reduced to 7 pairwise similarity features
        (cosine, euclidean, dot, mean-L1, sigmoid kernel, chi-squared, RBF);
        otherwise they are concatenated.
        """
        if fuse:
            cos = F.cosine_similarity(l_hidden, r_hidden, dim=1).view(1, -1)
            euc = sypt_utils.euclidean_distance(l_hidden, r_hidden, dim=1).view(1, -1)
            dot_dis = sypt_utils.dot(l_hidden, r_hidden, dim=1).view(1, -1)
            mean_l1 = sypt_utils.mean_of_l1(l_hidden, r_hidden, dim=1).view(1, -1)
            sig = sypt_utils.sigmoid_kernel(l_hidden, r_hidden, dim=1).view(1, -1)
            chi = sypt_utils.chi_squared(l_hidden, r_hidden, dim=1).view(1, -1)
            rbf = sypt_utils.rbf_kernel(l_hidden, r_hidden, dim=1).view(1, -1)
            return torch.cat([cos, euc, dot_dis, mean_l1, sig, chi, rbf], dim=0).view(1, -1)
        else:
            return torch.cat([l_hidden, r_hidden], dim=1).view(1, -1)

    def init_sent_hidden(self):
        """Return a fresh zeroed (h, c) state pair for a sentence LSTM."""
        h = Variable(torch.zeros(1, self.batch_size, self.sent_hidden_size))
        c = Variable(torch.zeros(1, self.batch_size, self.sent_hidden_size))
        if self.iscuda:
            return h.cuda(), c.cuda()
        return h, c
def make_context_vector(context, ptf_index):
    """Map *context* tokens to vocabulary indices, dropping OOV tokens."""
    return [ptf_index[token] for token in context if token in ptf_index]
def train_data(x_train, y_target, ptf_attn_model, sent_attn_model, ptf_optimizer, sent_optimizer, criterion):
    """Run one training step on a single document pair (GPU-only path).

    :param x_train: [left_sequences, right_sequences], each a list of
        per-sentence PTF index sequences.
    :param y_target: label(s) convertible to a LongTensor.
    :param ptf_attn_model: [left model, right model] word-level encoders.
    :param sent_attn_model: sentence-level classifier.
    :param ptf_optimizer: [left optimizer, right optimizer].
    :return: the scalar training loss for this step.
    """
    ptf_attn_model_l, ptf_attn_model_r = ptf_attn_model[0], ptf_attn_model[1]
    ptf_optimizer_l, ptf_optimizer_r = ptf_optimizer[0], ptf_optimizer[1]
    # Fresh hidden states for every document.
    state_ptf_l, state_ptf_r = ptf_attn_model_l.init_ptf_hidden(), ptf_attn_model_r.init_ptf_hidden()
    state_sent = [sent_attn_model.init_sent_hidden(), sent_attn_model.init_sent_hidden()]
    y_target = Variable(torch.LongTensor(y_target))
    # Clear accumulated gradients from the previous step.
    ptf_optimizer_l.zero_grad()
    ptf_optimizer_r.zero_grad()
    sent_optimizer.zero_grad()
    # Encode each sentence and stack the summaries along dim 0
    # (result: (num_sentences, batch, hidden)).
    s_l, s_r = None, None
    for i in range(len(x_train[0])):
        ptf_idx_seq = Variable(torch.LongTensor(x_train[0][i])).cuda()
        _s, state_ptf_l = ptf_attn_model_l(ptf_idx_seq, state_ptf_l)
        if s_l is None:
            s_l = _s
        else:
            s_l = torch.cat((s_l, _s), 0)
    assert len(x_train[0]) == len(s_l)
    for i in range(len(x_train[1])):
        ptf_idx_seq = Variable(torch.LongTensor(x_train[1][i])).cuda()
        _s, state_ptf_r = ptf_attn_model_r(ptf_idx_seq, state_ptf_r)
        if s_r is None:
            s_r = _s
        else:
            s_r = torch.cat((s_r, _s), 0)
    assert len(x_train[1]) == len(s_r)
    # Classify and back-propagate.
    y_pred, state_sent = sent_attn_model([s_l, s_r], state_sent)
    loss_train = criterion(y_pred.cuda(), y_target.cuda())
    loss_train.backward()
    # `clip_grad_norm_` helps prevent the exploding gradient problem in LSTMs
    torch.nn.utils.clip_grad_norm_(ptf_attn_model_l.parameters(), 0.25)
    torch.nn.utils.clip_grad_norm_(ptf_attn_model_r.parameters(), 0.25)
    torch.nn.utils.clip_grad_norm_(sent_attn_model.parameters(), 0.25)
    ptf_optimizer_l.step()
    ptf_optimizer_r.step()
    sent_optimizer.step()
    return loss_train.data.item()
def tst_data(x_test, y_target, ptf_attn_model, sent_attn_model, criterion, iscuda):
    """Forward one document pair through the model; return (log-probs, loss).

    Unlike train_data this takes an ``iscuda`` flag and performs no
    optimisation step.
    """
    model_l, model_r = ptf_attn_model[0], ptf_attn_model[1]
    state_l, state_r = model_l.init_ptf_hidden(), model_r.init_ptf_hidden()
    state_sent = [sent_attn_model.init_sent_hidden(), sent_attn_model.init_sent_hidden()]

    def encode_side(sentences, model, state):
        # Encode every sentence and stack the attended summaries along dim 0,
        # yielding a (num_sentences, batch, hidden) tensor.
        stacked = None
        for seq in sentences:
            idx_seq = Variable(torch.LongTensor(seq))
            if iscuda:
                idx_seq = idx_seq.cuda()
            summary, state = model(idx_seq, state)
            stacked = summary if stacked is None else torch.cat((stacked, summary), 0)
        assert len(sentences) == len(stacked)
        return stacked, state

    s_l, state_l = encode_side(x_test[0], model_l, state_l)
    s_r, state_r = encode_side(x_test[1], model_r, state_r)

    y_pred, state_sent = sent_attn_model([s_l, s_r], state_sent)
    if iscuda:
        loss_test = criterion(y_pred.cuda(), y_target.cuda())
    else:
        loss_test = criterion(y_pred, y_target)
    return y_pred, loss_test.data.item()
def eval(dataloader, ptf_index, criterion, return_json=False, models=None, iscuda=True):
    """Evaluate the models over a dataloader; return (accuracy, mean loss).

    With ``return_json`` a third value is returned: a dict mapping each
    example id to its boolean prediction.
    NOTE(review): the name shadows the builtin ``eval`` -- kept because
    train_model() calls it by this name.
    """
    # Switch every sub-model to evaluation mode (disables dropout).
    for mdl in models.values():
        mdl.eval()
    ptf_model_l = models["ptf_model_l"]
    ptf_model_r = models["ptf_model_r"]
    sent_model = models["sent_model"]
    total, correct = 0, 0
    total_loss = torch.Tensor([0])
    if iscuda:
        total_loss = total_loss.cuda()
    if return_json:
        json={}
    for itr, d in enumerate(dataloader):
        # Each batch item carries one document string; sentences are separated
        # by US and tokens within a sentence by soh.
        l_doc = d["doc"][0]
        l_doc = l_doc.split(US)
        target = d["label"]
        l_vec = []
        for e in l_doc:
            cv = make_context_vector(e.split(soh), ptf_index)
            # Drop sentences that are entirely out of vocabulary.
            if len(cv) != 0:
                l_vec.append(cv)
        # Right branch consumes the fully reversed document.
        r_vec = backward(l_vec)
        l_vec = list_of_list_to_long_tensor(l_vec)
        r_vec = list_of_list_to_long_tensor(r_vec)
        target = Variable(torch.LongTensor(target))
        if iscuda:
            target = target.cuda()
        data_test = [l_vec, r_vec]
        ptf_model = [ptf_model_l, ptf_model_r]
        outputs, loss = tst_data(data_test, target, ptf_model, sent_model, criterion, iscuda)
        # Predicted class = argmax over the log-probabilities.
        _, predicted = torch.max(outputs.data, 1)
        total += target.size(0)
        if return_json:
            json[d["id"][0]] = bool(predicted.cpu().numpy()[0])
        correct += (predicted == target.data).sum()
        total_loss += loss
    if return_json:
        return (100 * correct / total), (total_loss/len(dataloader))[0], json
    else:
        return (100 * correct / total), (total_loss / len(dataloader))[0]
def backward(doc):
    """Return *doc* with both sentence order and token order reversed."""
    return [sentence[::-1] for sentence in doc[::-1]]
def list_of_list_to_long_tensor(src_list):
    """Convert each inner list into a 1-D torch.LongTensor."""
    return [torch.LongTensor(inner) for inner in src_list]
def train_epoch(dataloader, ptf_index, models, optmzrs, loss_func):
    """Train all models for one pass over the dataloader (GPU-only path).

    Mirrors eval()'s preprocessing: documents are split into sentences on US,
    sentences into tokens on soh, OOV-only sentences dropped, and the right
    branch fed the reversed document.  Returns the mean training loss.
    """
    optf_optim_names = None  # (no-op placeholder removed; see optimizers below)
    ptf_optim_l = optmzrs["ptf_optim_l"]
    ptf_optim_r = optmzrs["ptf_optim_r"]
    sent_optim = optmzrs["sent_optim"]
    # Switch every sub-model to training mode (enables dropout).
    for mdl in models.values():
        mdl.train()
    ptf_model_l = models["ptf_model_l"]
    ptf_model_r = models["ptf_model_r"]
    sent_model = models["sent_model"]
    total_loss = torch.Tensor([0]).cuda()
    for itr, d in enumerate(dataloader):
        l_doc = d["doc"][0]
        l_doc = l_doc.split(US)
        l_vec = []
        for e in l_doc:
            cv = make_context_vector(e.split(soh), ptf_index)
            if len(cv) != 0:
                l_vec.append(cv)
        r_vec = backward(l_vec)
        l_vec = list_of_list_to_long_tensor(l_vec)
        r_vec = list_of_list_to_long_tensor(r_vec)
        x_train = [l_vec, r_vec]
        ptf_model = [ptf_model_l, ptf_model_r]
        ptf_optim = [ptf_optim_l, ptf_optim_r]
        loss = train_data(x_train, d["label"], ptf_model, sent_model, ptf_optim, sent_optim, loss_func)
        total_loss += loss
    return (total_loss/len(dataloader))[0]
def get_params():
    """Return the hyper-parameter settings used by training and evaluation."""
    return {
        "EMBEDDING_DIM": 100,
        "ptf_HIDDEN_DIM": 8,
        "SENT_HIDDEN_DIM": 8,
        "EPOCHS": 30,
        "dropout_rate": 0.3,
        "CLASS_NO": 2,
        "fuse": True,
        "iscuda": True,
    }
def save_checkpoint(models, is_best, model_name):
    """Persist all model weights, but only when a new best accuracy was hit."""
    if not is_best:
        print ("=> Validation Accuracy did not improve")
        return
    print ("=> Saving a new best")
    # One checkpoint file per sub-model, prefixed with the model's dict key.
    for key in ('ptf_model_l', 'ptf_model_r', 'sent_model'):
        torch.save(models[key].state_dict(), key + model_name)
def train_model(train_path, val_path, model_name):
    '''
    Train the full model end to end (GPU-only path).

    Loads datasets and pre-trained embeddings, builds the two word-level
    encoders and the sentence-level classifier, trains for EPOCHS epochs with
    RMSprop, checkpoints on the best validation accuracy, and prints final
    train/validation accuracy and loss.

    :param train_path: path to the training .pt dataset.
    :param val_path: path to the validation .pt dataset.
    :param model_name: suffix for the checkpoint file names.
    :return: None
    '''
    params = get_params()
    EMBEDDING_DIM = params["EMBEDDING_DIM"]
    ptf_HIDDEN_DIM = params["ptf_HIDDEN_DIM"]
    SENT_HIDDEN_DIM = params["SENT_HIDDEN_DIM"]
    EPOCHS = params["EPOCHS"]
    dropout_rate = params["dropout_rate"]
    batch_size = 1  # code should change a bit for batch size > 1
    CLASS_NO = params["CLASS_NO"]
    fuse = params["fuse"]
    for p,v in params.items():
        print('param %s = %s' % (p, str(v)))
    ds_files = dict()
    ds_files['train'] = train_path
    datasets, ptf_index, embd_matrix, index_word = sypt_dataset.load_dataset_and_pt_embedding\
        (ds_files, EMBEDDING_DIM)
    datasets["val"] = sypt_dataset.PAN_Dataset(val_path, None)
    train_dataloader = DataLoader(datasets["train"], 1, True)
    val_dataloader = DataLoader(datasets["val"], 1, True)
    VOCAB_SIZE = len(ptf_index)
    print('Vocab Size %d' % VOCAB_SIZE)
    print('train = %s , val = %s' % (train_path, val_path))
    # model definition
    ptf_model_l = PTFAttenPRNN(VOCAB_SIZE, EMBEDDING_DIM, ptf_HIDDEN_DIM, embd_matrix, batch_size).cuda()
    ptf_model_r = PTFAttenPRNN(VOCAB_SIZE, EMBEDDING_DIM, ptf_HIDDEN_DIM, embd_matrix, batch_size).cuda()
    pt_sent_model = PTSentAttenRNN(batch_size, SENT_HIDDEN_DIM, ptf_HIDDEN_DIM, CLASS_NO, dropout_rate, True, fuse=fuse).cuda()
    models = dict()
    models["ptf_model_l"] = ptf_model_l
    models["ptf_model_r"] = ptf_model_r
    models["sent_model"] = pt_sent_model
    # optimizers
    ptf_optim_l = optim.RMSprop(ptf_model_l.parameters(), lr=1e-03)
    ptf_optim_r = optim.RMSprop(ptf_model_r.parameters(), lr=1e-03)
    sent_optim = optim.RMSprop(pt_sent_model.parameters(), lr=1e-03)
    optmzrs = dict()
    optmzrs["ptf_optim_l"] = ptf_optim_l
    optmzrs["ptf_optim_r"] = ptf_optim_r
    optmzrs["sent_optim"] = sent_optim
    # loss function
    loss_func = nn.NLLLoss()
    # training and evaluation; checkpoint whenever validation accuracy improves
    best_accuracy = 0.0
    for epoch in range(1, EPOCHS + 1):
        train_loss = train_epoch(train_dataloader, ptf_index, models, optmzrs, loss_func)
        val_acc, val_loss = eval(val_dataloader, ptf_index, loss_func, False, models)
        print('Epoch: %d and train loss: %.4F val loss: %.4f val acc: %.4F' % (epoch, train_loss, val_loss, val_acc))
        # Get bool not ByteTensor
        is_best = bool(val_acc > best_accuracy)
        # Get greater Tensor to keep track best acc
        best_accuracy = max(val_acc, best_accuracy)
        # Save checkpoint if is a new best
        save_checkpoint(models, is_best, model_name)
    # show the final results
    train_acc, train_loss = eval(train_dataloader, ptf_index, loss_func, False, models)
    print('train acc: %.4F train loss: %.10f ' % (train_acc, train_loss))
    val_acc, val_loss= eval(val_dataloader, ptf_index, loss_func, False, models)
    print('val acc: %.4F val loss: %.10f ' % (val_acc, val_loss))
def get_args():
    '''
    Parse the command-line arguments.

    :return: dict with keys 'c' (source/CSV path) and 'o' (destination path),
        both defaulting to 'data/'.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', action='store', default='data/', help='source path')
    parser.add_argument('-o', action='store', default='data/', help='destination path')
    parsed = parser.parse_args()
    print(parsed)
    return vars(parsed)
if __name__ == "__main__":
    # param setting: -c is the CSV source directory, -o the .pt output directory.
    params = get_args()
    csv_path = params["c"]
    pt_path = params["o"]
    model_name = ''
    server = 'corenlp'
    train = f'{pt_path}train.{server}.pt'
    val = f'{pt_path}val.{server}.pt'
    train_csv = f'{csv_path}train.csv'
    val_csv = f'{csv_path}val.csv'
    # create ptf of train and val dataset (skipped when the .pt files exist)
    if not os.path.exists(pt_path):
        os.mkdir(pt_path)
    if not os.path.exists(train):
        create_pt_pan2018(train_csv, train, root='', server_type=server)
    if not os.path.exists(val):
        create_pt_pan2018(val_csv, val, root='', server_type=server)
    # train the model
    train_model(train, val, model_name)
| 2.265625 | 2 |
metagov/metagov/core/migrations/0006_auto_20211101_2053.py | ajnam12/gateway | 8 | 12767850 | # Generated by Django 3.2.2 on 2021-11-01 20:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add Plugin.community_platform_id and include
    it in the Plugin uniqueness constraint.

    NOTE: generated by `makemigrations` -- do not hand-edit operations.
    """

    dependencies = [
        ('core', '0005_remove_linkedaccount_unique_identifer_on_community_platform'),
    ]

    operations = [
        migrations.AddField(
            model_name='plugin',
            name='community_platform_id',
            field=models.CharField(blank=True, help_text='Optional identifier for this instance. If multiple instances are allowed per community, this field must be set to a unique value for each instance.', max_length=100, null=True),
        ),
        migrations.AlterUniqueTogether(
            name='plugin',
            unique_together={('name', 'community', 'community_platform_id')},
        ),
    ]