hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1d31e93e31c793bdd05dfba69ef56693f1358e4d | 715 | py | Python | poem_generator/PoemCallback.py | Aaronsom/poem-generation | 10cccad88d073f82f6556374fdfb23a5b5e3769a | [
"MIT"
] | null | null | null | poem_generator/PoemCallback.py | Aaronsom/poem-generation | 10cccad88d073f82f6556374fdfb23a5b5e3769a | [
"MIT"
] | null | null | null | poem_generator/PoemCallback.py | Aaronsom/poem-generation | 10cccad88d073f82f6556374fdfb23a5b5e3769a | [
"MIT"
] | null | null | null | from tensorflow.keras.callbacks import Callback
from poem_generator.word_generator import generate_poem
class PoemCallback(Callback):
    """Keras callback that prints freshly generated poems after each epoch."""

    def __init__(self, poems, seed_length, dictionary, single=True):
        """
        Args:
            poems: how many poems to generate at every epoch end.
            seed_length: length of the seed passed to ``generate_poem``.
            dictionary: token -> index mapping used by the model.
            single: forwarded unchanged to ``generate_poem``.
        """
        super(PoemCallback, self).__init__()
        self.poems = poems
        self.dictionary = dictionary
        # Invert token->index into index->token so model output can be decoded.
        self.reverse_dictionary = {index: token for token, index in dictionary.items()}
        self.seed_length = seed_length
        self.single = single

    def on_epoch_end(self, epoch, logs=None):
        """Generate and print ``self.poems`` poems with the current weights."""
        for i in range(self.poems):
            print(f"Poem {i+1}/{self.poems}")
            generate_poem(self.model, self.reverse_dictionary, self.dictionary,
                          self.seed_length, single=self.single)
1d33a06767c4e42865ab97028637606945438718 | 1,456 | py | Python | Inprocessing/Thomas/Python/core/optimizers/linear_shatter.py | maliha93/Fairness-Analysis-Code | acf13c6e7993704fc627249fe4ada44d8b616264 | [
"MIT"
] | 9 | 2020-07-09T07:32:57.000Z | 2022-02-25T12:21:28.000Z | Inprocessing/Thomas/Python/core/optimizers/linear_shatter.py | maliha93/Fairness-Analysis-Code | acf13c6e7993704fc627249fe4ada44d8b616264 | [
"MIT"
] | null | null | null | Inprocessing/Thomas/Python/core/optimizers/linear_shatter.py | maliha93/Fairness-Analysis-Code | acf13c6e7993704fc627249fe4ada44d8b616264 | [
"MIT"
] | 3 | 2020-07-09T12:35:09.000Z | 2021-09-06T13:54:19.000Z | import numpy as np
from scipy.spatial import ConvexHull
from core.optimizers import SMLAOptimizer
class LinearShatterBFOptimizer(SMLAOptimizer):
	"""Brute-force optimizer: samples random linear separators that shatter
	the sample X and keeps the candidate with the smallest objective value.

	NOTE(review): sampling uses unseeded np.random, so results vary per run.
	"""

	def __init__(self, X, buffer_angle=5.0, has_intercept=False, use_chull=True):
		# Build the sampler once; each get_theta() call draws a new candidate.
		self._samplef = self.get_linear_samplef(X, buffer_angle, has_intercept, use_chull)

	@staticmethod
	def cli_key():
		# Identifier used to select this optimizer from the command line.
		return 'linear-shatter'

	def minimize(self, evalf, n_iters, theta0=None):
		"""Sample n_iters candidate thetas, return (best_theta, {}).

		theta0 is accepted for interface compatibility but is unused here.
		"""
		min_val = np.inf
		theta_opt = None
		for inum in range(n_iters):
			theta = self.get_theta()
			val = evalf(theta)
			if val < min_val:
				theta_opt = theta.copy()
				min_val = val
		return theta_opt, {}

	def get_theta(self):
		# Draw one candidate parameter vector from the prepared sampler.
		return self._samplef()

	@staticmethod
	def get_linear_samplef(X, buffer_angle=5.0, has_intercept=False, use_chull=True):
		"""Return a zero-argument sampler of unit vectors t such that the
		hyperplane with normal t either splits X into both sign classes
		("shatters") or lies within buffer_angle degrees of X's convex hull."""
		nf = X.shape[1]
		c = 360.0 / (2*np.pi)  # radians -> degrees conversion factor
		# Drop the intercept column (assumed last) before hull computation.
		Z = X[:,:-1] if has_intercept else X.copy()
		# 'QJ' joggles the input so qhull tolerates degenerate point sets.
		W = X if not(use_chull) else X[ConvexHull(Z, qhull_options='QJ').vertices]
		WN = W / np.linalg.norm(W,axis=1)[:,None]  # row-normalized copy of W
		def samplef():
			while True:
				# Isotropic random direction, normalized onto the unit sphere.
				t = np.random.normal(0,1,(nf,1))
				t = t / np.linalg.norm(t)
				s = W.dot(t).flatten()
				y = np.sign(s)
				# If the solution shatters the samples, return
				if len(np.unique(y)) == 2:
					return t.flatten()
				# If the DB is close enough to the convex hull, return
				s = WN.dot(t).flatten()
				a = 90.0 - c*np.arccos(np.abs(s).min())
				if a < buffer_angle:
					return t.flatten()
		return samplef | 28 | 84 | 0.662775 | 1,346 | 0.924451 | 0 | 0 | 814 | 0.559066 | 0 | 0 | 120 | 0.082418 |
1d366388fe3fd891610cd7eed119bc9220611ae4 | 13,464 | py | Python | vendor/pyLibrary/env/big_data.py | klahnakoski/auth0-api | eda9c2554c641da76687f64445b8d35543d012d9 | [
"MIT"
] | null | null | null | vendor/pyLibrary/env/big_data.py | klahnakoski/auth0-api | eda9c2554c641da76687f64445b8d35543d012d9 | [
"MIT"
] | null | null | null | vendor/pyLibrary/env/big_data.py | klahnakoski/auth0-api | eda9c2554c641da76687f64445b8d35543d012d9 | [
"MIT"
] | null | null | null | # encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from mo_future import is_text, is_binary
import gzip
from io import BytesIO
import struct
from tempfile import TemporaryFile
import time
import zipfile
import zlib
from mo_future import PY3, long, text
from mo_logs import Log
from mo_logs.exceptions import suppress_exception
import mo_math
# LIBRARY TO DEAL WITH BIG DATA ARRAYS AS ITERATORS OVER (IR)REGULAR SIZED
# BLOCKS, OR AS ITERATORS OVER LINES

DEBUG = False
MIN_READ_SIZE = 8 * 1024  # bytes pulled from a source per read() call
MAX_STRING_SIZE = 1 * 1024 * 1024  # threshold before spilling to a temp file
class FileString(text):
    """
    ACTS LIKE A STRING, BUT IS A FILE

    Only the string operations this module needs are implemented (decode,
    split, len, slicing, concatenation, iteration); anything else is
    delegated to the wrapped file object via __getattr__.
    """

    def __init__(self, file):
        self.file = file

    def decode(self, encoding):
        # Only utf8 is supported; Log.error raises (mo_logs convention).
        if encoding != "utf8":
            Log.error("can not handle {{encoding}}", encoding= encoding)
        self.encoding = encoding
        return self

    def split(self, sep):
        # Only line splitting is supported; yields lines lazily rather than
        # materializing a list.
        if sep != "\n":
            Log.error("Can only split by lines")
        self.file.seek(0)
        return LazyLines(self.file)

    def __len__(self):
        # Length == file size in bytes; the previous file position is restored.
        temp = self.file.tell()
        self.file.seek(0, 2)
        file_length = self.file.tell()
        self.file.seek(temp)
        return file_length

    def __getslice__(self, i, j):
        # Python 2 only (Python 3 never calls __getslice__): read bytes
        # [i, j) and decode them; refuses slices bigger than 2**28 bytes.
        j = mo_math.min(j, len(self))
        if j - 1 > 2 ** 28:
            Log.error("Slice of {{num}} bytes is too big", num=j - i)
        try:
            self.file.seek(i)
            output = self.file.read(j - i).decode(self.encoding)
            return output
        except Exception as e:
            Log.error(
                "Can not read file slice at {{index}}, with encoding {{encoding}}",
                index=i,
                encoding=self.encoding,
                cause=e
            )

    def __add__(self, other):
        # NOTE(review): appends in place and implicitly returns None, unlike
        # str.__add__ which returns a new value -- confirm callers expect this.
        self.file.seek(0, 2)
        self.file.write(other)

    def __radd__(self, other):
        # other + self: copy `other` followed by this file's content into a
        # fresh temporary file and wrap the result.
        new_file = TemporaryFile()
        new_file.write(other)
        self.file.seek(0)
        for l in self.file:
            new_file.write(l)
        new_file.seek(0)
        return FileString(new_file)

    def __getattr__(self, attr):
        # Fall back to the wrapped file for any other attribute or method.
        return getattr(self.file, attr)

    def __del__(self):
        # Close the underlying file exactly once on garbage collection.
        self.file, temp = None, self.file
        if temp:
            temp.close()

    def __iter__(self):
        self.file.seek(0)
        return self.file

    if PY3:
        def __str__(self):
            # NOTE(review): implicitly returns None when encoding != "utf8".
            if self.encoding == "utf8":
                temp = self.file.tell()
                self.file.seek(0, 2)
                file_length = self.file.tell()
                self.file.seek(0)
                output = self.file.read(file_length).decode(self.encoding)
                self.file.seek(temp)
                return output
    else:
        def __unicode__(self):
            # NOTE(review): implicitly returns None when encoding != "utf8".
            if self.encoding == "utf8":
                temp = self.file.tell()
                self.file.seek(0, 2)
                file_length = self.file.tell()
                self.file.seek(0)
                output = self.file.read(file_length).decode(self.encoding)
                self.file.seek(temp)
                return output
def safe_size(source):
    """
    READ THE source UP TO SOME LIMIT, THEN COPY TO A FILE IF TOO BIG
    RETURN A str() OR A FileString()

    :param source: object with a read(n) method returning bytes (None allowed)
    :return: None, a bytes value, or a FileString spilled to a temp file
    """
    if source is None:
        return None

    total_bytes = 0
    chunks = []  # renamed from `bytes`, which shadowed the builtin
    block = source.read(MIN_READ_SIZE)
    while block:
        total_bytes += len(block)
        chunks.append(block)
        if total_bytes > MAX_STRING_SIZE:
            # Too big for memory: move what we have (and the rest of the
            # source) into a temporary file wrapped as a FileString.
            try:
                data = FileString(TemporaryFile())
                for chunk in chunks:
                    data.write(chunk)
                del chunks
                block = source.read(MIN_READ_SIZE)
                while block:
                    total_bytes += len(block)
                    data.write(block)
                    block = source.read(MIN_READ_SIZE)
                data.seek(0)
                Log.note("Using file of size {{length}} instead of str()", length= total_bytes)
                return data
            except Exception as e:
                Log.error("Could not write file > {{num}} bytes", num= total_bytes, cause=e)
        block = source.read(MIN_READ_SIZE)

    # Small enough to keep in memory: join the accumulated chunks.
    data = b"".join(chunks)
    del chunks
    return data
class LazyLines(object):
    """
    SIMPLE LINE ITERATOR, BUT WITH A BIT OF CACHING TO LOOK LIKE AN ARRAY

    Supports only forward indexing: [next] advances the iterator, [next-1]
    re-reads the cached last line.
    """

    def __init__(self, source, encoding="utf8"):
        """
        ASSUME source IS A LINE ITERATOR OVER utf8 ENCODED BYTE STREAM

        :param source: iterable of lines
        :param encoding: recorded for subclasses; lines pass through as-is
        """
        self.source = source
        self.encoding = encoding
        self._iter = self.__iter__()
        self._last = None   # most recently yielded line
        self._next = 0      # index of the next line to be yielded

    def __getslice__(self, i, j):
        # Python 2 only (Python 3 never calls __getslice__); kept for
        # compatibility with the original interface.
        if i == self._next - 1:
            def output():
                yield self._last
                for v in self._iter:
                    self._next += 1
                    yield v
            return output()
        if i == self._next:
            return self._iter
        Log.error("Do not know how to slice this generator")

    def __iter__(self):
        def output():
            for v in self.source:
                self._last = v
                yield self._last
        return output()

    def __getitem__(self, item):
        try:
            if item == self._next:
                self._next += 1
                # was `self._iter.next()`: the Python-2-only spelling, which
                # raises AttributeError on Python 3 generators
                return next(self._iter)
            elif item == self._next - 1:
                return self._last
            else:
                Log.error("can not index out-of-order too much")
        except Exception as e:
            Log.error("Problem indexing", e)
class CompressedLines(LazyLines):
    """
    KEEP COMPRESSED HTTP (content-type: gzip) IN BYTES ARRAY
    WHILE PULLING OUT ONE LINE AT A TIME FOR PROCESSING
    """

    def __init__(self, compressed, encoding="utf8"):
        """
        USED compressed BYTES TO DELIVER LINES OF TEXT
        LIKE LazyLines, BUT HAS POTENTIAL TO seek()

        :param compressed: gzip-compressed byte string
        :param encoding: text encoding of the decompressed lines
        """
        self.compressed = compressed
        LazyLines.__init__(self, None, encoding=encoding)
        self._iter = self.__iter__()

    def __iter__(self):
        # Decompress in fixed-size blocks and stream lines; each fresh
        # iterator restarts from the beginning of the compressed payload.
        return LazyLines(ibytes2ilines(compressed_bytes2ibytes(self.compressed, MIN_READ_SIZE), encoding=self.encoding)).__iter__()

    def __getslice__(self, i, j):
        # Python 2 only; supports the [next:], [0:] and [next-1:] patterns.
        if i == self._next:
            return self._iter

        if i == 0:
            return self.__iter__()

        if i == self._next - 1:
            def output():
                yield self._last
                for v in self._iter:
                    yield v
            return output()
        Log.error("Do not know how to slice this generator")

    def __getitem__(self, item):
        try:
            if item == self._next:
                # was `self._iter.next()`: Python-2-only spelling that raises
                # AttributeError on Python 3 generators
                self._last = next(self._iter)
                self._next += 1
                return self._last
            elif item == self._next - 1:
                return self._last
            else:
                Log.error("can not index out-of-order too much")
        except Exception as e:
            Log.error("Problem indexing", e)

    def __radd__(self, other):
        # NOTE(review): references self.file, which CompressedLines never
        # defines (appears copied from FileString) -- this method would raise
        # AttributeError if ever invoked; confirm before relying on it.
        new_file = TemporaryFile()
        new_file.write(other)
        self.file.seek(0)
        for l in self.file:
            new_file.write(l)
        new_file.seek(0)
        return FileString(new_file)
def compressed_bytes2ibytes(compressed, size):
    """
    CONVERT AN ARRAY OF BYTES TO A BYTE-BLOCK GENERATOR

    USEFUL IN THE CASE WHEN WE WANT TO LIMIT HOW MUCH WE FEED ANOTHER
    GENERATOR (LIKE A DECOMPRESSOR)

    :param compressed: gzip-compressed bytes (16 + MAX_WBITS accepts the
        gzip header/trailer)
    :param size: number of compressed bytes fed to the decompressor per step
    :return: generator of decompressed byte blocks
    """
    decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)

    # range(0, len(compressed), size) visits exactly the same offsets as the
    # original round-up-to-multiple-of-size bound, without needing mo_math.
    for i in range(0, len(compressed), size):
        try:
            block = compressed[i: i + size]
            yield decompressor.decompress(block)
        except Exception as e:
            Log.error("Not expected", e)
def ibytes2ilines(generator, encoding="utf8", flexible=False, closer=None):
    """
    CONVERT A GENERATOR OF (ARBITRARY-SIZED) byte BLOCKS
    TO A LINE (CR-DELIMITED) GENERATOR

    :param generator: generator of byte blocks
    :param encoding: None TO DO NO DECODING
    :param flexible: True to ignore undecodable byte sequences
    :param closer: OPTIONAL FUNCTION TO RUN WHEN DONE ITERATING
    :return: generator of (decoded) lines, without the trailing newline
    """
    decode = get_decoder(encoding=encoding, flexible=flexible)

    # was `generator.next()` (Python 2 only); also guard the very first pull:
    # under PEP 479 an uncaught StopIteration here would surface as a
    # RuntimeError instead of simply ending the stream.
    try:
        _buffer = next(generator)
    except StopIteration:
        if closer:
            closer()
        return

    s = 0
    e = _buffer.find(b"\n")
    while True:
        while e == -1:
            # No newline in the buffer yet: append the next block.
            try:
                next_block = next(generator)
                _buffer = _buffer[s:] + next_block
                s = 0
                e = _buffer.find(b"\n")
            except StopIteration:
                # Source exhausted: flush the final (unterminated) line.
                _buffer = _buffer[s:]
                del generator
                if closer:
                    closer()
                if _buffer:
                    yield decode(_buffer)
                return

        yield decode(_buffer[s:e])
        s = e + 1
        e = _buffer.find(b"\n", s)
def ibytes2icompressed(source):
    """
    CONVERT A GENERATOR OF BYTE BLOCKS INTO A GENERATOR OF
    gzip-COMPRESSED BYTE BLOCKS (header, deflate body, crc/length trailer)

    :param source: generator of byte blocks
    :return: generator of compressed byte blocks forming one gzip stream
    """
    yield (
        b'\037\213\010\000' +  # Gzip file, deflate, no filename
        struct.pack('<L', int(time.time())) +  # compression start time (was py2 `long`)
        b'\002\377'  # maximum compression, no OS specified
    )

    crc = zlib.crc32(b"")
    length = 0
    # Negative wbits: raw deflate stream (we emit the gzip framing ourselves).
    compressor = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
    for d in source:
        crc = zlib.crc32(d, crc) & 0xffffffff
        length += len(d)
        chunk = compressor.compress(d)
        if chunk:
            yield chunk
    yield compressor.flush()
    # gzip trailer: CRC32 then uncompressed length, both little-endian 32-bit.
    yield struct.pack("<2L", crc, length & 0xffffffff)
class GzipLines(CompressedLines):
    """
    SAME AS CompressedLines, BUT USING THE GzipFile FORMAT FOR COMPRESSED BYTES
    """

    def __init__(self, compressed, encoding="utf8"):
        CompressedLines.__init__(self, compressed, encoding=encoding)

    def __iter__(self):
        # Wrap the in-memory compressed payload in GzipFile and stream its
        # lines lazily; each new iterator restarts from the beginning.
        buff = BytesIO(self.compressed)
        return LazyLines(gzip.GzipFile(fileobj=buff, mode='r'), encoding=self.encoding).__iter__()
class ZipfileLines(CompressedLines):
    """
    SAME AS CompressedLines, BUT USING THE ZipFile FORMAT FOR COMPRESSED BYTES

    The archive must contain exactly one member file.
    """

    def __init__(self, compressed, encoding="utf8"):
        CompressedLines.__init__(self, compressed, encoding=encoding)

    def __iter__(self):
        buff = BytesIO(self.compressed)
        archive = zipfile.ZipFile(buff, mode='r')
        names = archive.namelist()
        # Log.error raises (mo_logs convention), so multi-file archives stop here.
        if len(names) != 1:
            Log.error("*.zip file has {{num}} files, expecting only one.", num= len(names))
        stream = archive.open(names[0], "r")
        return LazyLines(sbytes2ilines(stream), encoding=self.encoding).__iter__()
def icompressed2ibytes(source):
    """
    DECOMPRESS A STREAM OF gzip-COMPRESSED BLOCKS INCREMENTALLY

    :param source: GENERATOR OF COMPRESSED BYTES
    :return: GENERATOR OF BYTES
    """
    # 16 + MAX_WBITS tells zlib to expect the gzip header/trailer.
    decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
    last_bytes_count = 0  # Track the last byte count, so we do not show too many debug lines
    bytes_count = 0
    for bytes_ in source:
        try:
            data = decompressor.decompress(bytes_)
        except Exception as e:
            # Log.error raises (mo_logs convention), so `data` is always
            # bound when execution continues below.
            Log.error("problem", cause=e)
        bytes_count += len(data)
        # Emit at most one debug line per megabyte of decompressed output.
        if mo_math.floor(last_bytes_count, 1000000) != mo_math.floor(bytes_count, 1000000):
            last_bytes_count = bytes_count
            DEBUG and Log.note("bytes={{bytes}}", bytes=bytes_count)
        yield data
def scompressed2ibytes(stream):
    """
    DECOMPRESS A gzip STREAM OBJECT INTO A GENERATOR OF BYTE BLOCKS

    :param stream: SOMETHING WITH read() METHOD TO GET MORE BYTES
    :return: GENERATOR OF UNCOMPRESSED BYTES
    """
    def more():
        # Pull fixed-size compressed chunks until EOF, always closing the
        # stream afterwards (errors from close() are swallowed).
        try:
            while True:
                bytes_ = stream.read(4096)
                if not bytes_:
                    return
                yield bytes_
        except Exception as e:
            Log.error("Problem iterating through stream", cause=e)
        finally:
            with suppress_exception:
                stream.close()
    return icompressed2ibytes(more())
def sbytes2ilines(stream, encoding="utf8", closer=None):
    """
    CONVERT A STREAM (with read() method) OF (ARBITRARY-SIZED) byte BLOCKS
    TO A LINE (CR-DELIMITED) GENERATOR

    :param stream: object with a read(n) method returning bytes
    :param encoding: passed through to ibytes2ilines (None = no decoding)
    :param closer: optional callback invoked after the stream is exhausted
    """
    def read():
        # Pull fixed-size chunks until EOF; cleanup runs exactly once via
        # finally, and failures of close()/closer() are deliberately ignored.
        try:
            while True:
                bytes_ = stream.read(4096)
                if not bytes_:
                    return
                yield bytes_
        except Exception as e:
            Log.error("Problem iterating through stream", cause=e)
        finally:
            try:
                stream.close()
            except Exception:
                pass

            if closer:
                try:
                    closer()
                except Exception:
                    pass

    return ibytes2ilines(read(), encoding=encoding)
def get_decoder(encoding, flexible=False):
    """
    RETURN FUNCTION TO PERFORM DECODE

    :param encoding: STRING OF THE ENCODING (None MEANS "DO NOT DECODE")
    :param flexible: True IF YOU WISH TO TRY OUR BEST, AND KEEP GOING
    :return: FUNCTION FROM bytes TO text (OR THE IDENTITY WHEN encoding IS None)
    """
    if encoding is None:  # was `== None`; identity comparison is the PEP 8 idiom
        def no_decode(v):
            return v
        return no_decode
    elif flexible:
        def do_decode1(v):
            # Drop undecodable byte sequences instead of raising.
            return v.decode(encoding, 'ignore')
        return do_decode1
    else:
        def do_decode2(v):
            return v.decode(encoding)
        return do_decode2
| 29.142857 | 131 | 0.562908 | 6,683 | 0.496361 | 5,163 | 0.383467 | 0 | 0 | 0 | 0 | 2,979 | 0.221257 |
1d39d676517e96a73bf0a00218bd894b6ea7add5 | 820 | py | Python | chapter_10_testing_and_tdd/dependent/test_dependent_mocked_test.py | Tm2197/Python-Architecture-Patterns | 8091b4d8e2580763ceb55a83c75aa9b6225fcb72 | [
"MIT"
] | 12 | 2021-07-20T12:55:39.000Z | 2022-02-05T10:53:38.000Z | chapter_10_testing_and_tdd/dependent/test_dependent_mocked_test.py | Tm2197/Python-Architecture-Patterns | 8091b4d8e2580763ceb55a83c75aa9b6225fcb72 | [
"MIT"
] | null | null | null | chapter_10_testing_and_tdd/dependent/test_dependent_mocked_test.py | Tm2197/Python-Architecture-Patterns | 8091b4d8e2580763ceb55a83c75aa9b6225fcb72 | [
"MIT"
] | 9 | 2021-07-22T06:01:03.000Z | 2022-03-01T05:50:45.000Z | from unittest.mock import patch
from dependent import parameter_dependent
@patch('math.sqrt')
def test_negative(mock_sqrt):
    # Negative input short-circuits to 0 without ever calling math.sqrt.
    assert parameter_dependent(-1) == 0
    mock_sqrt.assert_not_called()
@patch('math.sqrt')
def test_zero(mock_sqrt):
    # At the lower boundary the sqrt result (stubbed to 0) is returned as-is.
    mock_sqrt.return_value = 0
    assert parameter_dependent(0) == 0
    mock_sqrt.assert_called_once_with(0)
@patch('math.sqrt')
def test_twenty_five(mock_sqrt):
    # Mid-range input: the stubbed sqrt value is forwarded unchanged.
    mock_sqrt.return_value = 5
    assert parameter_dependent(25) == 5
    mock_sqrt.assert_called_with(25)
@patch('math.sqrt')
def test_hundred(mock_sqrt):
    # Upper boundary: sqrt (stubbed to 10) is still consulted at exactly 100.
    mock_sqrt.return_value = 10
    assert parameter_dependent(100) == 10
    mock_sqrt.assert_called_with(100)
@patch('math.sqrt')
def test_hundred_and_one(mock_sqrt):
    # Above 100 the result is capped at 10 and math.sqrt is skipped entirely.
    assert parameter_dependent(101) == 10
    mock_sqrt.assert_not_called()
| 22.777778 | 41 | 0.747561 | 0 | 0 | 0 | 0 | 731 | 0.891463 | 0 | 0 | 55 | 0.067073 |
1d39ff1e6bd4edf957d03269b5fb60c6c678992b | 416 | py | Python | test_world_rowing/test_dashboard.py | matthewghgriffiths/rowing | d682e66ca8d8a811e033ae32c2a1948376659123 | [
"MIT"
] | 1 | 2022-02-21T09:52:12.000Z | 2022-02-21T09:52:12.000Z | test_world_rowing/test_dashboard.py | matthewghgriffiths/rowing | d682e66ca8d8a811e033ae32c2a1948376659123 | [
"MIT"
] | null | null | null | test_world_rowing/test_dashboard.py | matthewghgriffiths/rowing | d682e66ca8d8a811e033ae32c2a1948376659123 | [
"MIT"
] | null | null | null |
import pytest
from world_rowing import dashboard
def test_dashboard_main():
    # Smoke test: launching the dashboard must not raise; block=False keeps
    # the call from blocking on the display/event loop so the test returns.
    dashboard.main(block=False)
def test_dashboard_predict():
    # Rebuild the dashboard state from the most recent race.
    dash = dashboard.Dashboard.load_last_race()
    live_data, intermediates = dash.race_tracker.update_livedata()
    # First update with a prefix of the live data and a subset of the
    # intermediate distances, simulating a race still in progress.
    dash.update(
        live_data.loc[:300],
        intermediates[[500, 1000]]
    )
    # Then update with the complete data, simulating the finished race.
    dash.update(
        live_data,
        intermediates
    )
| 15.407407 | 66 | 0.670673 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1d3d8280c20d0be738af64bc749e533b575ef043 | 1,665 | py | Python | plerr/tests/test_package.py | b2bs-team/pylint-errors | f1362c8afbe6b7075f805560d7699f63ad35a10b | [
"MIT"
] | 2 | 2020-10-28T23:53:59.000Z | 2020-10-29T03:31:20.000Z | plerr/tests/test_package.py | b2bs-team/pylint-errors | f1362c8afbe6b7075f805560d7699f63ad35a10b | [
"MIT"
] | null | null | null | plerr/tests/test_package.py | b2bs-team/pylint-errors | f1362c8afbe6b7075f805560d7699f63ad35a10b | [
"MIT"
] | 1 | 2020-10-28T23:53:47.000Z | 2020-10-28T23:53:47.000Z | """Tests a package installation on a user OS."""
import pathlib
import subprocess
import unittest
class TestPlErrPackage(unittest.TestCase):
    """End-to-end tests that invoke ``python3 -m plerr`` as a subprocess."""

    def test_plerr_error_getter(self):
        # Given: a command to get a description of a pylint error by an
        # error code.
        command = ['python3', '-m', 'plerr', 'R1710']
        # When: the command invokes.
        proc = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        stdout, stderr = proc.communicate()
        # Expected output is a byte-for-byte fixture stored next to this
        # test module.
        expected_stdout = (
            (
                pathlib.Path(__file__).resolve().parent /
                'command_output_fixture.txt'
            )
            .read_bytes()
        )
        # Then: it produces a highlighted output to stdout of the given error
        # with the exit code 0.
        assert stdout == expected_stdout
        assert not stderr
        assert proc.returncode == 0

    def test_plerr_non_existent_error(self):
        # Given: a command to get a description of a pylint error with a
        # non-existent error code.
        command = ['python3', '-m', 'plerr', 'R0000']
        # When: the command invokes.
        proc = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        stdout, stderr = proc.communicate()
        expected_stdout = (
            b'Cannot find R0000 pylint error by such error code.\n'
        )
        # Then: it produces an error message to stderr with the exit code 1.
        assert stderr == expected_stdout
        assert not stdout
        assert proc.returncode == 1
| 30.833333 | 77 | 0.583183 | 1,564 | 0.939339 | 0 | 0 | 0 | 0 | 0 | 0 | 564 | 0.338739 |
1d3ea69b3e4a276b5c416f98c802081047762711 | 2,826 | py | Python | src/api2db/install/make_lab.py | TristenHarr/api2db | 8c8b14280441f5153ff146c23359a0eb91022ddb | [
"MIT"
] | 45 | 2021-05-05T01:34:20.000Z | 2021-11-02T08:41:34.000Z | src/api2db/install/make_lab.py | TristenHarr/api2db | 8c8b14280441f5153ff146c23359a0eb91022ddb | [
"MIT"
] | 1 | 2021-06-02T11:43:33.000Z | 2021-06-02T20:32:29.000Z | src/api2db/install/make_lab.py | TristenHarr/api2db | 8c8b14280441f5153ff146c23359a0eb91022ddb | [
"MIT"
] | 3 | 2021-05-08T21:49:24.000Z | 2021-05-13T23:14:09.000Z | import os
_lab_components = """from api2db.ingest import *
CACHE=True # Caches API data so that only a single API call is made if True
def import_target():
return None
def pre_process():
return None
def data_features():
return None
def post_process():
return None
if __name__ == "__main__":
api_form = ApiForm(name="lab",
pre_process=pre_process(),
data_features=data_features(),
post_process=post_process()
)
api_form.experiment(CACHE, import_target)
"""
def mlab():
    """
    This shell command is used for creation of a lab. Labs offer an easier way to design an ApiForm.

    Given a project directory

    ::

        project_dir-----/
                        |
                        apis-----/
                        |        |- __init__.py
                        |        |- FooCollector.py
                        |        |- BarCollector.py
                        |
                        AUTH-----/
                        |        |- bigquery_auth_template.json
                        |        |- omnisci_auth_template.json
                        |        |- sql_auth_template.json
                        |
                        CACHE/
                        |
                        STORE/
                        |
                        helpers.py
                        |
                        main.py

    **Shell Command:** ``path/to/project_dir> mlab``

    ::

        project_dir-----/
                        |
                        apis-------/
                        |          |- __init__.py
                        |          |- FooCollector.py
                        |          |- BarCollector.py
                        |
                        AUTH-------/
                        |          |- bigquery_auth_template.json
                        |          |- omnisci_auth_template.json
                        |          |- sql_auth_template.json
                        |
                        CACHE/
                        |
                        STORE/
                        |
                        laboratory-/
                        |          |- lab.py EDIT THIS FILE!
                        |
                        helpers.py
                        |
                        main.py

    Returns:
        None
    """
    lab_dir_path = os.path.join(os.getcwd(), "laboratory")
    if not os.path.isdir(lab_dir_path):
        os.makedirs(lab_dir_path)
        with open(os.path.join(lab_dir_path, "lab.py"), "w") as f:
            # was a character-by-character loop over the template string;
            # writing it in one call produces identical file contents
            f.write(_lab_components)
        print("Lab has been created. Edit the file found in laboratory/lab.py")
    else:
        print("Lab already exists!")
| 28.836735 | 100 | 0.381812 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,486 | 0.879689 |
1d4106adaf0d42a3f4d5f358b322161fcf83843b | 485 | py | Python | 30-39/37. sliceview/sliceview.py | dcragusa/PythonMorsels | 5f75b51a68769036e4004e9ccdada6b220124ab6 | [
"MIT"
] | 1 | 2021-11-30T05:03:24.000Z | 2021-11-30T05:03:24.000Z | 30-39/37. sliceview/sliceview.py | dcragusa/PythonMorsels | 5f75b51a68769036e4004e9ccdada6b220124ab6 | [
"MIT"
] | null | null | null | 30-39/37. sliceview/sliceview.py | dcragusa/PythonMorsels | 5f75b51a68769036e4004e9ccdada6b220124ab6 | [
"MIT"
] | 2 | 2021-04-18T05:26:43.000Z | 2021-11-28T18:46:43.000Z | from collections.abc import Sequence
class SliceView(Sequence):
    """A read-only, lazy slice over an arbitrary sequence.

    Instead of copying items, the view keeps a reference to the underlying
    sequence plus a ``range`` that maps view indices to source indices.
    """

    def __init__(self, sequence, start=None, stop=None, step=None):
        self.sequence = sequence
        # slice.indices clamps start/stop/step against the sequence length,
        # reproducing exactly the arithmetic Python uses for real slices.
        bounds = slice(start, stop, step).indices(len(sequence))
        self.range = range(*bounds)

    def __len__(self):
        return len(self.range)

    def __getitem__(self, item):
        if isinstance(item, slice):
            # Slicing a view stacks another view on top -- still no copying.
            return SliceView(self, item.start, item.stop, item.step)
        # range handles negative indices and raises IndexError out of bounds.
        return self.sequence[self.range[item]]
| 30.3125 | 76 | 0.663918 | 445 | 0.917526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1d4187041cb8f8754084b1a0b8f675142f96aee6 | 2,348 | py | Python | docs/examples/use_cases/video_superres/common/loss_scaler.py | cyyever/DALI | e2b2d5a061da605e3e9e681017a7b2d53fe41a62 | [
"ECL-2.0",
"Apache-2.0"
] | 3,967 | 2018-06-19T04:39:09.000Z | 2022-03-31T10:57:53.000Z | docs/examples/use_cases/video_superres/common/loss_scaler.py | cyyever/DALI | e2b2d5a061da605e3e9e681017a7b2d53fe41a62 | [
"ECL-2.0",
"Apache-2.0"
] | 3,494 | 2018-06-21T07:09:58.000Z | 2022-03-31T19:44:51.000Z | docs/examples/use_cases/video_superres/common/loss_scaler.py | cyyever/DALI | e2b2d5a061da605e3e9e681017a7b2d53fe41a62 | [
"ECL-2.0",
"Apache-2.0"
] | 531 | 2018-06-19T23:53:10.000Z | 2022-03-30T08:35:59.000Z | import torch
class LossScaler:
    """Static (no-op) loss scaler: applies a fixed multiplier to the loss
    and gradients, and never reports overflow."""

    def __init__(self, scale=1):
        self.cur_scale = scale

    def has_overflow(self, params):
        """`params` is a list / generator of torch.Variable; a static scaler
        never detects overflow."""
        return False

    def _has_inf_or_nan(x):
        """`x` is a torch.Tensor; the static scaler treats every value as
        finite. (Takes no `self` by design, mirroring DynamicLossScaler.)"""
        return False

    def update_scale(self, overflow):
        """`overflow` flags a gradient overflow; a static scale ignores it."""
        pass

    @property
    def loss_scale(self):
        return self.cur_scale

    def scale_gradient(self, module, grad_in, grad_out):
        """Backward-hook helper: every incoming gradient times the scale."""
        return tuple(self.loss_scale * g for g in grad_in)

    def backward(self, loss):
        """Backpropagate from the scaled loss."""
        (loss * self.loss_scale).backward()
class DynamicLossScaler:
    """Adaptive loss scaler: divides the scale when gradients overflow
    (contain inf/NaN) and multiplies it back up after `scale_window`
    consecutive clean iterations."""

    def __init__(self,
                 init_scale=2**32,
                 scale_factor=2.,
                 scale_window=1000):
        self.cur_scale = init_scale
        self.cur_iter = 0
        self.last_overflow_iter = -1
        self.scale_factor = scale_factor
        self.scale_window = scale_window

    def has_overflow(self, params):
        """`params` is a list / generator of torch.Variable; True when any
        present gradient contains inf or NaN."""
        return any(
            p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data)
            for p in params
        )

    def _has_inf_or_nan(x):
        """`x` is a torch.Tensor. (No `self` by design: always invoked
        through the class.)"""
        if torch.sum(x.abs() == float('inf')) > 0:
            return True
        # NaN is the only value that compares unequal to itself.
        return torch.sum(x != x) > 0

    def update_scale(self, overflow):
        """`overflow` is boolean indicating whether we overflowed in gradient"""
        if overflow:
            # Back off, but never let the scale drop below 1.
            self.cur_scale = max(self.cur_scale / self.scale_factor, 1)
            self.last_overflow_iter = self.cur_iter
        elif (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
            # A full window without overflow: grow the scale again.
            self.cur_scale *= self.scale_factor
        self.cur_iter += 1

    @property
    def loss_scale(self):
        return self.cur_scale

    def scale_gradient(self, module, grad_in, grad_out):
        """Backward-hook helper: every incoming gradient times the scale."""
        return tuple(self.loss_scale * g for g in grad_in)

    def backward(self, loss):
        """Backpropagate from the scaled loss."""
        (loss * self.loss_scale).backward()
| 28.634146 | 85 | 0.617121 | 2,331 | 0.99276 | 0 | 0 | 130 | 0.055366 | 0 | 0 | 371 | 0.158007 |
1d4187891be83646d904f0d1cc49e72009c69797 | 3,172 | py | Python | shop/forms.py | dwx9/test | a74e38369de40b9e5f481f6ac9dda6d5eb161da0 | [
"BSD-3-Clause"
] | 1 | 2021-02-11T10:01:11.000Z | 2021-02-11T10:01:11.000Z | shop/forms.py | dwx9/test | a74e38369de40b9e5f481f6ac9dda6d5eb161da0 | [
"BSD-3-Clause"
] | null | null | null | shop/forms.py | dwx9/test | a74e38369de40b9e5f481f6ac9dda6d5eb161da0 | [
"BSD-3-Clause"
] | 1 | 2020-11-08T17:56:45.000Z | 2020-11-08T17:56:45.000Z | #-*- coding: utf-8 -*-
"""Forms for the django-shop app."""
from django import forms
from django.conf import settings
from django.forms.models import modelformset_factory
from django.utils.translation import ugettext_lazy as _
from shop.backends_pool import backends_pool
from shop.models.cartmodel import CartItem
from shop.util.loader import load_class
def get_shipping_backends_choices():
shipping_backends = backends_pool.get_shipping_backends_list()
return tuple([(x.url_namespace, getattr(x, 'backend_verbose_name', x.backend_name)) for x in shipping_backends])
def get_billing_backends_choices():
billing_backends = backends_pool.get_payment_backends_list()
return tuple([(x.url_namespace, getattr(x, 'backend_verbose_name', x.backend_name)) for x in billing_backends])
class BillingShippingForm(forms.Form):
"""
A form displaying all available payment and shipping methods (the ones
defined in settings.SHOP_SHIPPING_BACKENDS and
settings.SHOP_PAYMENT_BACKENDS)
"""
shipping_method = forms.ChoiceField(choices=get_shipping_backends_choices(), label=_('Shipping method'))
payment_method = forms.ChoiceField(choices=get_billing_backends_choices(), label=_('Payment method'))
class CartItemModelForm(forms.ModelForm):
"""A form for the CartItem model. To be used in the CartDetails view."""
quantity = forms.IntegerField(min_value=0, max_value=9999)
class Meta:
model = CartItem
fields = ('quantity', )
def save(self, *args, **kwargs):
"""
We don't save the model using the regular way here because the
Cart's ``update_quantity()`` method already takes care of deleting
items from the cart when the quantity is set to 0.
"""
quantity = self.cleaned_data['quantity']
instance = self.instance.cart.update_quantity(self.instance.pk,
quantity)
return instance
def get_cart_item_modelform_class():
"""
Return the class of the CartItem ModelForm.
The default `shop.forms.CartItemModelForm` can be overridden settings
``SHOP_CART_ITEM_FORM`` parameter in settings
"""
cls_name = getattr(settings, 'SHOP_CART_ITEM_FORM', 'shop.forms.CartItemModelForm')
cls = load_class(cls_name)
return cls
def get_cart_item_formset(cart_items=None, data=None):
"""
Returns a CartItemFormSet which can be used in the CartDetails view.
:param cart_items: The queryset to be used for this formset. This should
be the list of updated cart items of the current cart.
:param data: Optional POST data to be bound to this formset.
"""
assert(cart_items is not None)
CartItemFormSet = modelformset_factory(CartItem, form=get_cart_item_modelform_class(),
extra=0)
kwargs = {'queryset': cart_items, }
form_set = CartItemFormSet(data, **kwargs)
# The Django ModelFormSet pulls the item out of the database again and we
# would lose the updated line_subtotals
for form in form_set:
for cart_item in cart_items:
if form.instance.pk == cart_item.pk:
form.instance = cart_item
return form_set
| 36.45977 | 116 | 0.720996 | 1,125 | 0.354666 | 0 | 0 | 0 | 0 | 0 | 0 | 1,265 | 0.398802 |
1d41a5d36753a39f9bdaaccc33c457eebde52284 | 6,298 | py | Python | conflowgen/tests/posthoc_analyses/test_quay_side_throughput_analysis.py | 1grasse/conflowgen | 142330ab6427254109af3b86102a30a13144ba0c | [
"MIT"
] | 5 | 2022-02-16T11:44:42.000Z | 2022-02-24T20:02:17.000Z | conflowgen/tests/posthoc_analyses/test_quay_side_throughput_analysis.py | 1grasse/conflowgen | 142330ab6427254109af3b86102a30a13144ba0c | [
"MIT"
] | 90 | 2021-12-08T14:05:44.000Z | 2022-03-24T08:53:31.000Z | conflowgen/tests/posthoc_analyses/test_quay_side_throughput_analysis.py | 1grasse/conflowgen | 142330ab6427254109af3b86102a30a13144ba0c | [
"MIT"
] | 5 | 2021-12-07T16:05:15.000Z | 2022-02-16T08:24:07.000Z | import datetime
import unittest
from conflowgen.domain_models.arrival_information import TruckArrivalInformationForPickup, \
TruckArrivalInformationForDelivery
from conflowgen.domain_models.container import Container
from conflowgen.domain_models.data_types.container_length import ContainerLength
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.data_types.storage_requirement import StorageRequirement
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.distribution_seeders import mode_of_transport_distribution_seeder
from conflowgen.domain_models.large_vehicle_schedule import Schedule, Destination
from conflowgen.domain_models.vehicle import LargeScheduledVehicle, Truck, Feeder
from conflowgen.posthoc_analyses.quay_side_throughput_analysis import QuaySideThroughputAnalysis
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestQuaySideThroughputAnalysis(unittest.TestCase):
    def setUp(self) -> None:
        """Create container database in memory"""
        # A fresh in-memory SQLite DB per test keeps the cases independent.
        self.sqlite_db = setup_sqlite_in_memory_db()
        self.sqlite_db.create_tables([
            Schedule,
            Container,
            LargeScheduledVehicle,
            Truck,
            TruckArrivalInformationForDelivery,
            TruckArrivalInformationForPickup,
            Feeder,
            ModeOfTransportDistribution,
            Destination
        ])
        # Seed the default mode-of-transport distribution the analysis relies on.
        mode_of_transport_distribution_seeder.seed()
        self.analysis = QuaySideThroughputAnalysis(
            transportation_buffer=0.2
        )
    def test_with_no_data(self):
        """If no schedules are provided, no capacity is needed"""
        # An empty database must yield an empty throughput mapping.
        no_action_at_quay_side = self.analysis.get_throughput_over_time()
        self.assertEqual(no_action_at_quay_side, {})
def test_with_single_container(self):
now = datetime.datetime.now()
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=now.date(),
vehicle_arrives_at_time=now.time(),
average_vehicle_capacity=300,
average_moved_capacity=300,
)
feeder_lsv = LargeScheduledVehicle.create(
vehicle_name="TestFeeder1",
capacity_in_teu=300,
moved_capacity=schedule.average_moved_capacity,
scheduled_arrival=now,
schedule=schedule
)
Feeder.create(
large_scheduled_vehicle=feeder_lsv
)
aip = TruckArrivalInformationForPickup.create(
realized_container_pickup_time=datetime.datetime.now() + datetime.timedelta(hours=25)
)
truck = Truck.create(
delivers_container=False,
picks_up_container=True,
truck_arrival_information_for_delivery=None,
truck_arrival_information_for_pickup=aip
)
Container.create(
weight=20,
length=ContainerLength.twenty_feet,
storage_requirement=StorageRequirement.standard,
delivered_by=ModeOfTransport.feeder,
delivered_by_large_scheduled_vehicle=feeder_lsv,
picked_up_by=ModeOfTransport.truck,
picked_up_by_initial=ModeOfTransport.truck,
picked_up_by_truck=truck
)
used_quay_side_capacity_over_time = self.analysis.get_throughput_over_time()
self.assertEqual(len(used_quay_side_capacity_over_time), 3)
self.assertSetEqual(set(used_quay_side_capacity_over_time.values()), {0, 1})
def test_with_two_containers(self):
now = datetime.datetime.now()
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=now.date(),
vehicle_arrives_at_time=now.time(),
average_vehicle_capacity=300,
average_moved_capacity=300,
)
feeder_lsv = LargeScheduledVehicle.create(
vehicle_name="TestFeeder1",
capacity_in_teu=300,
moved_capacity=schedule.average_moved_capacity,
scheduled_arrival=now,
schedule=schedule
)
Feeder.create(
large_scheduled_vehicle=feeder_lsv
)
aip = TruckArrivalInformationForPickup.create(
realized_container_pickup_time=datetime.datetime.now() + datetime.timedelta(hours=25)
)
truck = Truck.create(
delivers_container=False,
picks_up_container=True,
truck_arrival_information_for_delivery=None,
truck_arrival_information_for_pickup=aip
)
Container.create(
weight=20,
length=ContainerLength.twenty_feet,
storage_requirement=StorageRequirement.standard,
delivered_by=ModeOfTransport.feeder,
delivered_by_large_scheduled_vehicle=feeder_lsv,
picked_up_by=ModeOfTransport.truck,
picked_up_by_initial=ModeOfTransport.truck,
picked_up_by_truck=truck
)
aip_2 = TruckArrivalInformationForPickup.create(
realized_container_pickup_time=datetime.datetime.now() + datetime.timedelta(hours=12)
)
truck_2 = Truck.create(
delivers_container=False,
picks_up_container=True,
truck_arrival_information_for_delivery=None,
truck_arrival_information_for_pickup=aip_2
)
Container.create(
weight=20,
length=ContainerLength.forty_feet,
storage_requirement=StorageRequirement.standard,
delivered_by=ModeOfTransport.feeder,
delivered_by_large_scheduled_vehicle=feeder_lsv,
picked_up_by=ModeOfTransport.truck,
picked_up_by_initial=ModeOfTransport.truck,
picked_up_by_truck=truck_2
)
used_quay_side_capacity_over_time = self.analysis.get_throughput_over_time()
self.assertEqual(len(used_quay_side_capacity_over_time), 3)
self.assertSetEqual(set(used_quay_side_capacity_over_time.values()), {0, 2})
| 42.268456 | 115 | 0.694665 | 5,268 | 0.836456 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.025722 |
1d41b1db26751bf84729eea34f1bc555d8b62d08 | 591 | py | Python | backend/tests/access/test_access_user_remove.py | fjacob21/mididecweb | b65f28eb6fdeafa265796b6190a4264a5eac54ce | [
"MIT"
] | null | null | null | backend/tests/access/test_access_user_remove.py | fjacob21/mididecweb | b65f28eb6fdeafa265796b6190a4264a5eac54ce | [
"MIT"
] | 88 | 2016-11-12T14:54:38.000Z | 2018-08-02T00:25:07.000Z | backend/tests/access/test_access_user_remove.py | mididecouverte/mididecweb | b65f28eb6fdeafa265796b6190a4264a5eac54ce | [
"MIT"
] | null | null | null | from src.access import UserRemoveAccess
from generate_access_data import generate_access_data
def test_remove_user_access():
    """A user may remove itself and a super user may remove it, but a
    manager or an anonymous session may not."""
    sessions = generate_access_data()
    target = sessions['user'].users.get('user')

    grants = {
        role: UserRemoveAccess(sessions[role], target).granted()
        for role in ('user', 'manager', 'super', 'none')
    }

    assert grants['user']
    assert not grants['manager']
    assert grants['super']
    assert not grants['none']
| 36.9375 | 63 | 0.752961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.067682 |
1d42c5d5454b1ecc51005c189c8d3fc67484d164 | 541 | py | Python | tests/gdb/execute_nacl_manifest_twice.py | kapkic/native_client | 51c8bc8c249d55606232ae011bdfc8b4cab3d794 | [
"BSD-3-Clause"
] | 1 | 2021-12-23T00:36:43.000Z | 2021-12-23T00:36:43.000Z | tests/gdb/execute_nacl_manifest_twice.py | kapkic/native_client | 51c8bc8c249d55606232ae011bdfc8b4cab3d794 | [
"BSD-3-Clause"
] | null | null | null | tests/gdb/execute_nacl_manifest_twice.py | kapkic/native_client | 51c8bc8c249d55606232ae011bdfc8b4cab3d794 | [
"BSD-3-Clause"
] | null | null | null | # -*- python2 -*-
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gdb_test import AssertEquals
import gdb_test
def test(gdb):
# The second superfluous call to LoadManifestFile. This is a regression test
# for issue https://code.google.com/p/nativeclient/issues/detail?id=3262 .
gdb.LoadManifestFile()
gdb.ResumeCommand('continue')
gdb.Quit()
if __name__ == '__main__':
gdb_test.RunTest(test, '') | 28.473684 | 79 | 0.735675 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.661738 |
1d4318fe475f3ff0a677d7ca22d845bd46a02756 | 1,403 | py | Python | dev/3_30_2018/UPS_Main.py | npwebste/UPS_Controller | a90ce2229108197fd48f956310ae2929e0fa5d9a | [
"AFL-1.1"
] | null | null | null | dev/3_30_2018/UPS_Main.py | npwebste/UPS_Controller | a90ce2229108197fd48f956310ae2929e0fa5d9a | [
"AFL-1.1"
] | null | null | null | dev/3_30_2018/UPS_Main.py | npwebste/UPS_Controller | a90ce2229108197fd48f956310ae2929e0fa5d9a | [
"AFL-1.1"
] | null | null | null | # Universal Power Supply Controller
# USAID Middle East Water Security Initiative
#
# Developed by: Nathan Webster
# Primary Investigator: Nathan Johnson
#
# Version History (mm_dd_yyyy)
# 1.00 03_24_2018_NW
#
######################################################
# Import Libraries
import Config
import time
import sqlite3
import VFD_Modbus_Wrapper
import PWM_Wrapper
# Declare Variables
# BUGFIX: the original line was `speed =` with no right-hand side, which is a
# SyntaxError and made the whole module unimportable. Initial value assumed to
# be 0 -- TODO confirm the intended startup speed.
speed = 0

# Main UPS Loop
# NOTE(review): `VFD` is never defined in this module -- presumably an instance
# should be created from VFD_Modbus_Wrapper before this loop runs; confirm.
while True:
    VFD.VFDInit("/dev/ttyUSB0".encode('ascii'), 9600, 8, 1, 1)
    time.sleep(5)
    VFD.VFDWrite(8192, 1)
    time.sleep(5)
    VFD.VFDWrite(269, 7680)
    time.sleep(5)
    VFD.VFDWrite(269, 3840)
    time.sleep(5)
    VFD.VFDWrite(8192, 3)
    time.sleep(5)
    VFD.VFDRead(269)
    time.sleep(5)
    # NOTE(review): undefined name -- probably meant VFD.VFDClose(); confirm.
    VFDClose()

# Unreachable draft of the intended control loop, kept from the original for
# reference (dead code: the `while True` above never exits).
"""
# Set parameters and declare variables
Run_Config()
print(Run_Config_Return)
Initialize_Solar()
Initialize_VFD()
# UPS Control Loop
while True:
    TankCheck()
    SolarMeasured()
    if P_Solar_Measured > P_Solar_Max*P_Min_Percent:
        setPWM()
        if startVFD() != 0:
            startVFD()
        setVFD()
    else
        setGrid()
        if startVFD() != 0:
            startVFD()
        setVFD()
    ProtectionCheck()
"""

### SQL STUFF
#conn = sqlite3.connect('example.db')
#c = conn.cursor()
#c.execute('''CREATE TABLE Power(Date text, Voltage real, Current real, Power real)''')
#c.execute("INSERT INTO Power VALUES('2017',100,25,2500)")
#conn.commit()
#conn.close()
| 15.086022 | 87 | 0.652887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,001 | 0.713471 |
1d450a8c2c7ee65bca377acf9e9a6b2af8f1bf8a | 4,054 | py | Python | bioconverters/utils.py | jakelever/biotext | 3925524f38fdeea3944bf829c8c563f09d62a13e | [
"MIT"
] | 6 | 2020-10-07T13:33:09.000Z | 2021-12-24T22:46:52.000Z | bioconverters/utils.py | jakelever/biotext | 3925524f38fdeea3944bf829c8c563f09d62a13e | [
"MIT"
] | 11 | 2021-05-21T00:06:16.000Z | 2022-02-16T12:23:25.000Z | bioconverters/utils.py | jakelever/biotext | 3925524f38fdeea3944bf829c8c563f09d62a13e | [
"MIT"
] | 4 | 2020-10-01T02:26:03.000Z | 2022-03-23T04:35:06.000Z | import re
import unicodedata
def remove_brackets_without_words(text: str) -> str:
    """Replace bracket pairs that contain no word characters with a space.

    This can happen when the bracket contents were removed earlier,
    e.g. a citation "( [3] [4] )" -> "( )" -> " ".
    Handles round, square and curly brackets.
    """
    for pattern in (r"\([\W\s]*\)", r"\[[\W\s]*\]", r"\{[\W\s]*\}"):
        text = re.sub(pattern, " ", text)
    return text
# Some older articles have titles like "[A study of ...]."
# This removes the brackets while retaining the full stop.
def remove_weird_brackets_from_old_titles(title_text: str) -> str:
    """Strip the enclosing "[...]." brackets used by some older article titles.

    Returns the stripped title; the trailing full stop is kept.
    """
    title_text = title_text.strip()
    # BUGFIX: the original indexed title_text[0] unconditionally, raising
    # IndexError for empty or whitespace-only titles.
    if not title_text:
        return title_text
    if title_text[0] == "[" and title_text[-2:] == "].":
        title_text = title_text[1:-2] + "."
    return title_text
def cleanup_text(text: str) -> str:
    """Normalize whitespace/control characters and tidy stray punctuation.

    Drops Unicode control-like characters (category C), maps all separator
    characters (category Z, plus the line/paragraph separators) to plain
    spaces, collapses repeated commas and commas adjacent to full stops,
    and strips surrounding whitespace.
    """
    # Line separator / paragraph separator first, then a single pass over
    # the remaining characters by Unicode category.
    normalized = text.replace("\u2028", " ").replace("\u2029", " ")
    kept = []
    for ch in normalized:
        major_category = unicodedata.category(ch)[0]
        if major_category == "C":
            continue  # drop control-like characters
        kept.append(" " if major_category == "Z" else ch)
    normalized = "".join(kept)

    # Collapse repeated commas, and commas directly before a full stop.
    normalized = re.sub(r",(\s*,)*", ",", normalized)
    normalized = re.sub(r"(,\s*)*\.", ".", normalized)
    return normalized.strip()
# XML elements whose contents are skipped entirely (only the tail text after
# the element is kept): tables, formulas, references, figures/media, etc.
ignore_list = [
    "table", "table-wrap", "xref", "disp-formula", "inline-formula",
    "ref-list", "bio", "ack", "graphic", "media", "tex-math", "mml:math",
    "object-id", "ext-link",
]

# XML elements that delimit separate text blocks (sections, paragraphs, ...).
separation_list = ["title", "p", "sec", "break", "def-item", "list-item", "caption"]
def extract_text_from_elem(elem):
    """Recursively extract text fragments from one XML element.

    Returns a flat list of strings, with the integer 0 inserted as a
    delimiter before elements listed in ``separation_list``. Elements in
    ``ignore_list`` contribute only their (stripped) tail text.
    """
    head = elem.text or ""
    tail = elem.tail or ""

    # Recurse into all child nodes, concatenating their fragment lists.
    collected = []
    for child in elem:
        collected += extract_text_from_elem(child)

    if elem.tag in ignore_list:
        # Skip the element's own content; only the text after it survives.
        return [tail.strip()]
    if elem.tag in separation_list:
        # Prefix the zero delimiter so this element starts a new text block.
        return [0, head] + collected + [tail]
    return [head] + collected + [tail]
# Merge a list of extracted text blocks and deal with the zero delimiter
def extract_text_from_elem_list_merge(items):
    """Join text fragments into blocks, splitting at each 0 delimiter.

    :param items: fragments produced by extract_text_from_elem(); strings
        are joined with spaces, the integer 0 starts a new block.
    :return: list of non-empty, stripped text blocks.

    BUGFIX: the parameter was previously named ``list``, shadowing the
    builtin. Each block is stripped when it is accumulated, so no block
    carries leading/trailing whitespace (downstream cleanup_text() stripped
    these anyway).
    """
    merged = []
    current = ""
    for token in items:
        if token == 0:  # zero delimiter: flush the current block
            if current:
                merged.append(current)
            current = ""
        else:  # keep accumulating text into the current block
            current = (current + " " + token).strip()
    if current:  # flush the trailing block, if any
        merged.append(current)
    return merged
# Main function that extracts text from XML element or list of XML elements
def extract_text_from_elem_list(elem_list):
    """Extract cleaned text blocks from an XML element or a list of them."""
    # Collect raw fragments, appending a zero delimiter after each element
    # so text from different elements is never merged accidentally.
    fragments = []
    if isinstance(elem_list, list):
        for elem in elem_list:
            fragments += extract_text_from_elem(elem) + [0]
    else:
        fragments = extract_text_from_elem(elem_list) + [0]

    merged = extract_text_from_elem_list_merge(fragments)
    # Newlines cannot be trusted to be syntactically important; also strip
    # no-break spaces and other oddities via cleanup_text().
    return [cleanup_text(block.replace("\n", " ")) for block in merged]
def trim_sentence_lengths(text: str) -> str:
    """Truncate each '.'-separated segment of *text* to at most 90000 chars."""
    max_length = 90000
    trimmed_segments = (segment[:max_length] for segment in text.split("."))
    return ".".join(trimmed_segments)
| 32.174603 | 85 | 0.609521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,563 | 0.385545 |
1d4544e69d225092db077d2bbcbd9fa3de697615 | 17,887 | py | Python | pylinx/core.py | raczben/pylinx | 230f9e4079942c3c561681d8fc0884904f0de893 | [
"Apache-2.0"
] | null | null | null | pylinx/core.py | raczben/pylinx | 230f9e4079942c3c561681d8fc0884904f0de893 | [
"Apache-2.0"
] | 1 | 2020-07-26T14:57:42.000Z | 2020-08-26T12:26:44.000Z | pylinx/core.py | raczben/pylinx | 230f9e4079942c3c561681d8fc0884904f0de893 | [
"Apache-2.0"
] | 3 | 2019-11-28T15:29:36.000Z | 2021-11-22T07:15:59.000Z | #!/usr/bin/env python3
#
# Import built in packages
#
import logging
import platform
import os
import time
import socket
import subprocess
import signal
import psutil
from .util import setup_logger
from .util import PylinxException
import re
# Import 3rd party modules:
# - wexpect/pexpect to launch and interact with subprocesses.
# BUGFIX: removed a stray debug `print(expect.__version__)` that wrote to
# stdout at import time on Windows.
if platform.system() == 'Windows':
    import wexpect as expect
else:  # Linux
    import pexpect as expect
# The directory of this script file.
__here__ = os.path.dirname(os.path.realpath(__file__))
#
# Get the logger (util.py has sets it)
#
setup_logger()
logger = logging.getLogger('pylinx')
# xsct_line_end is the line endings in the XSCT console. It doesn't depend on the platform. It is
# always Windows-style \\r\\n.
xsct_line_end = '\r\n'
# The default host and port.
HOST = '127.0.0.1' # Standard loop-back interface address (localhost)
PORT = 4567
class XsctServer:
    """Controller of the XSCT server application.

    This is an optional feature; the commands themselves are always sent by
    the Xsct client.
    """

    def __init__(self, xsct_executable=None, port=PORT, verbose=False):
        """Initialize the server object, optionally starting it immediately.

        :param xsct_executable: full path to the XSCT/XSDB executable
        :param port: TCP port where the server should be started
        :param verbose: True: print the XSCT's stdout to python's stdout
        """
        self._xsct_server = None
        if (xsct_executable is not None) and (port is not None):
            self.start_server(xsct_executable, port, verbose)

    def start_server(self, xsct_executable=None, port=PORT, verbose=False):
        """Start the xsdbserver.

        :param xsct_executable: full path to the XSCT/XSDB executable
        :param port: TCP port where the server should be started
        :param verbose: True: print the XSCT's stdout to python's stdout
        :raises ValueError: if xsct_executable or port is None
        """
        if (xsct_executable is None) or (port is None):
            raise ValueError("xsct_executable and port must be non None.")
        start_server_command = 'xsdbserver start -port {}'.format(port)
        start_command = '{} -eval "{}" -interactive'.format(xsct_executable, start_server_command)
        # BUGFIX: *verbose* was previously dropped instead of forwarded.
        self._launch_child(start_command, verbose)

    def _start_dummy_server(self):
        """Start a dummy TCL server, for test purposes only."""
        dummy_executable = os.path.abspath(os.path.join(__here__, 'dummy_xsct.tcl'))
        self._launch_child(['tclsh', dummy_executable])

    def _launch_child(self, start_command, verbose=False):
        """Spawn the server subprocess, silencing stdout unless verbose."""
        logger.info('Starting xsct server: %s', start_command)
        # BUGFIX: use subprocess.DEVNULL instead of leaking an open
        # os.devnull file handle for the child's lifetime.
        stdout = None if verbose else subprocess.DEVNULL
        self._xsct_server = subprocess.Popen(start_command, stdout=stdout)
        logger.info('xsct started with PID: %d', self._xsct_server.pid)

    def stop_server(self, wait=True):
        """Kill the server and its whole child-process tree.

        :param wait: wait for complete termination instead of just sending
            the kill signals.
        """
        if not self._xsct_server:
            logger.debug('The server is not started or it has been killed.')
            return
        if self._xsct_server.poll() is None:
            logger.debug("The server is alive, let's kill it.")
            # XSCT starts children in a terminal; kill the whole tree,
            # deepest children first.
            try:
                parent = psutil.Process(self._xsct_server.pid)
                processes = parent.children(recursive=True)
                processes.append(parent)
                for proc in reversed(processes):
                    logger.debug("Killing child with pid: %d", proc.pid)
                    os.kill(proc.pid, signal.SIGTERM)  # or signal.SIGKILL
            except psutil.NoSuchProcess:
                # BUGFIX: psutil._exceptions is a private module that newer
                # psutil versions removed; the public name is
                # psutil.NoSuchProcess.
                logger.debug('psutil.NoSuchProcess process no longer exists.')
            if wait:
                while self._xsct_server.poll() is None:
                    logger.debug("The server is still alive, wait for it.")
                    time.sleep(.1)
            self._xsct_server = None
        else:
            logger.debug("The server is not alive, return...")

    def pid(self):
        """Return the PID of the spawned server process."""
        return self._xsct_server.pid
class Xsct:
    """The XSCT client class. Communicates with the xsdbserver over TCP."""

    def __init__(self, host=HOST, port=PORT):
        """Initialize the client, connecting immediately unless host is None.

        :param host: URL of the machine where the XSDB server is running.
        :param port: port of the XSDB server.
        """
        self._socket = None
        if host is not None:
            self.connect(host, port)

    def connect(self, host=HOST, port=PORT, timeout=10):
        """Connect to the xsdbserver.

        :param host: host machine where the xsdbserver is running.
        :param port: port of the xsdbserver.
        :param timeout: timeout in seconds for blocking socket operations,
            or None for no timeout.
        """
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.connect((host, port))
        if timeout is not None:
            self._socket.settimeout(timeout)
        logger.info('Connected to: %s...', repr((host, port)))

    def close(self):
        """Close the connection."""
        self._socket.close()

    def send(self, msg):
        """Send a raw message (no line ending is appended; use do() instead)."""
        if isinstance(msg, str):
            msg = msg.encode()
        logger.debug('Sending message: %s ...', repr(msg))
        self._socket.sendall(msg)

    def recv(self, bufsize=1024, timeout=None):
        """Receive one line (up to the first xsct_line_end) from the server.

        :param bufsize: maximum amount of data received per socket read.
        :param timeout: optional new socket timeout in seconds.
        """
        if timeout is not None:
            self._socket.settimeout(timeout)
        buffer = ''
        while True:
            data = self._socket.recv(bufsize)
            logger.debug('Data received: %s ...', repr(data))
            buffer += data.decode("utf-8")
            # BUGFIX: the original rebound the string accumulator to the
            # result of split() inside the loop, so a reply arriving in more
            # than one chunk corrupted the buffer (list += str) and then
            # crashed on list.split(). Keep the accumulator a string.
            parts = buffer.split(xsct_line_end)
            if len(parts) > 1:
                return parts[0]

    def do(self, command):
        """Send a command and return its result.

        :raises PylinxException: if the server reports an error or the
            protocol answer is malformed.
        """
        command += xsct_line_end
        logger.info('Sending command: %s ...', repr(command))
        self.send(command)
        ans = self.recv()
        if ans.startswith('okay'):
            return ans[5:]
        if ans.startswith('error'):
            raise PylinxException(ans[6:])
        raise PylinxException('Illegal start-string in protocol. Answer is: ' + ans)
# The prompt string Vivado prints in TCL mode; used as the default expect pattern.
default_vivado_prompt = 'Vivado% '
class Vivado:
    """Native interface towards the Vivado TCL console.

    TCL commands are executed with do(). This is a quasi state-less class:
    only command/answer histories are kept.
    """

    def __init__(self, executable, args=None, name='Vivado_01',
                 prompt=default_vivado_prompt, timeout=10, encoding="utf-8", wait_startup=True):
        """Spawn the Vivado process (unless *executable* is None: fake run).

        :param executable: path of the Vivado binary, or None for a dry run.
        :param args: command-line arguments; defaults to ['-mode', 'tcl'].
        :param name: human-readable name of this console instance.
        :param prompt: expect pattern of the Vivado prompt.
        :param timeout: default expect timeout in seconds.
        :param encoding: encoding of the console output (used on Linux).
        :param wait_startup: block until the first prompt appears.
        """
        self.child_proc = None
        self.name = name
        self.prompt = prompt
        self.timeout = timeout
        self.encoding = encoding
        # Histories of all commands/outputs/prompts, useful for debugging
        # and used by interact().
        self.last_cmds = []
        self.last_befores = []
        self.last_prompts = []
        if args is None:
            args = ['-mode', 'tcl']
        if executable is not None:  # None means a fake run: no process spawned
            logger.info('Spawning Vivado: ' + executable + str(args))
            self.child_proc = expect.spawn(executable, args)
            if wait_startup:
                self.wait_startup()

    def wait_startup(self, **kwargs):
        """Block until the first Vivado prompt appears."""
        self.do(cmd=None, **kwargs)

    def do(self, cmd, prompt=None, timeout=None, wait_prompt=True, errmsgs=None,
           encoding="utf-8", native_answer=False):
        """Run one command in the Vivado console and return its output.

        :param cmd: command to send, or None to only wait for a prompt.
        :param prompt: override the expected prompt pattern.
        :param timeout: override the expect timeout.
        :param wait_prompt: if False, send the command and return None.
        :param errmsgs: strings/compiled patterns that mark an error in the
            output; raises PylinxException if any matches.
        :param encoding: decoding of the console output (Linux only).
        :param native_answer: if True, return the raw 'before' text instead
            of the answer with the echoed command line stripped.
        :raises PylinxException: on terminated process or matched error.
        :rtype: str
        """
        # BUGFIX: errmsgs previously had a mutable default argument ([]).
        if errmsgs is None:
            errmsgs = []
        if self.child_proc.terminated:
            logger.error('The process has been terminated. Sending command is not possible.')
            raise PylinxException('The process has been terminated. Sending command is not possible.')
        if cmd is not None:
            logger.debug('Sending command: ' + str(cmd))
            # wexpect takes str, pexpect takes bytes.
            if platform.system() == 'Windows':
                self.child_proc.sendline(cmd)
            else:
                self.child_proc.sendline(cmd.encode())
        if prompt is None:
            prompt = self.prompt
        if timeout is None:
            timeout = self.timeout
        if encoding is None:
            encoding = self.encoding
        if wait_prompt:
            self.child_proc.expect(prompt, timeout=timeout)
            logger.debug("before: " + repr(self.child_proc.before))
            self.last_cmds.append(cmd)
            # wexpect yields str, pexpect yields bytes.
            if platform.system() == 'Windows':
                before = self.child_proc.before
                prompt = self.child_proc.after
            else:
                before = self.child_proc.before.decode(encoding)
                prompt = self.child_proc.after.decode(encoding)
            self.last_befores.append(before)
            self.last_prompts.append(prompt)
            for em in errmsgs:
                if isinstance(em, str):
                    em = re.compile(em)
                if em.search(before):
                    logger.error('during running command: {}, before: {}'.format(cmd, before))
                    raise PylinxException('during running command: {}, before: {}'.format(cmd, before))
            if native_answer:
                return before
            # Remove the first line, which is always the echoed command.
            ret = os.linesep.join(before.split(xsct_line_end)[1:-1])
            return ret.rstrip()
        return None

    def interact(self, cmd=None, **kwargs):
        """Run *cmd* (if given) and print its output like an interactive shell."""
        if cmd is not None:
            self.do(cmd, **kwargs)
        before_to_print = os.linesep.join(self.last_befores[-1].split(xsct_line_end)[1:])
        print(before_to_print, end='')
        print(self.last_prompts[-1], end='')

    def get_var(self, varname, **kwargs):
        """Read a TCL variable; raises PylinxException if it does not exist."""
        no_var_msg = 'can\'t read "{}": no such variable'.format(varname)
        errmsgs = [re.compile(no_var_msg)]
        command = 'puts ${}'.format(varname)
        return self.do(command, errmsgs=errmsgs, **kwargs)

    def set_var(self, varname, value, **kwargs):
        """Set a TCL variable."""
        command = 'set {} {}'.format(varname, value)
        return self.do(command, **kwargs)

    def get_property(self, propName, objectName, **kwargs):
        """Run get_property in the Vivado terminal and return its value."""
        cmd = 'get_property {} {}'.format(propName, objectName)
        return self.do(cmd, **kwargs).strip()

    def set_property(self, propName, value, objectName, **kwargs):
        """Run set_property in the Vivado terminal."""
        cmd = 'set_property {} {} {}'.format(propName, value, objectName)
        self.do(cmd, **kwargs)

    def pid(self):
        """Return the PID of the real Vivado process.

        The spawned child may be a wrapper shell; search its children for a
        process named like 'vivado'.
        :raises PylinxException: if no Vivado process can be identified.
        """
        parent = psutil.Process(self.child_proc.pid)
        children = parent.children(recursive=True)
        if len(children) == 0:
            return self.child_proc.pid
        for child in children:
            if re.match(".*vivado.*", child.name(), re.I):
                return child.pid
        raise PylinxException('Unknown pid')

    def exit(self, force=False, **kwargs):
        """Terminate the Vivado console.

        :param force: kill the process instead of sending 'exit'.
        :return: the child's exit status, or None if there is no live child.
        """
        logger.debug('start')
        if self.child_proc is None:
            return None
        if self.child_proc.terminated:
            logger.warning('This process has been terminated.')
            return None
        if force:
            return self.child_proc.terminate()
        self.do('exit', wait_prompt=False, **kwargs)
        return self.child_proc.wait()
class VivadoHWServer(Vivado):
    """Vivado console attached to a hardware server (adds device handling)."""

    # allDevices is a class-level cache shared by all instances. Keys are
    # hw-server URLs, values are lists of available hardware device names.
    # The default key "localhost:3121" (the default hw server) indexes the
    # local devices. See get_devices() and fetch_devices() for details.
    allDevices = {}  # type: dict[str, list]

    def __init__(self, executable, hw_server_url='localhost:3121', wait_startup=True, full_init=True, **kwargs):
        """Spawn Vivado and (optionally) source hw_server.tcl and connect.

        :param hw_server_url: URL of the hardware server to use.
        :param full_init: source hw_server.tcl and run its init command
            (requires wait_startup=True).
        """
        # NOTE: the original assigned self.hw_server_url twice; once is enough.
        self.hw_server_url = hw_server_url
        self.sio = None
        self.sioLink = None
        super(VivadoHWServer, self).__init__(executable, wait_startup=wait_startup, **kwargs)
        if full_init:
            assert wait_startup
            hw_server_tcl = os.path.join(__here__, 'hw_server.tcl')
            hw_server_tcl = hw_server_tcl.replace(os.sep, '/')
            self.do('source ' + hw_server_tcl, errmsgs=['no such file or directory'])
            self.do('init ' + hw_server_url)

    def fetch_devices(self, force=True):
        """Go through the blasters, fetch all HW devices and cache them in
        allDevices. Prefer get_devices(), which fetches only when needed.

        :param force: re-fetch even if a cached list exists.
        :raises PylinxException: if no target device is found.
        """
        if not force:
            # BUGFIX: the original called get_devices(auto_fetch=False) and
            # compared its result to None, but that call raises on a cache
            # miss instead of returning None - so force=False could never
            # trigger a fetch. Fall through to fetching on a miss instead.
            try:
                return self.get_devices(auto_fetch=False)
            except PylinxException:
                pass  # nothing cached yet -> fetch below
        logger.info('Exploring target devices (fetch_devices: this can take a while)')
        self.do('set devices [fetch_devices]', errmsgs=["Labtoolstcl 44-133", "No target blaster found"])
        try:
            devices = self.get_var('devices')
        except PylinxException:
            raise PylinxException('No target device found. Please connect and power up your device(s)')
        # fetch_devices (TCL) returns lists; remove the brackets.
        logger.debug("devices: " + str(devices))
        devices = re.findall(r'\{(.+?)\}', devices)
        VivadoHWServer.allDevices[self.hw_server_url] = devices
        logger.debug("allDevices: " + str(VivadoHWServer.allDevices))
        return self.get_devices(auto_fetch=False)

    def get_devices(self, auto_fetch=True, hw_server_url=None):
        """Return the cached hardware devices for *hw_server_url*.

        :param auto_fetch: fetch automatically if nothing is cached yet
            (only possible for this instance's own hw server).
        :raises PylinxException: on a cache miss that cannot be auto-fetched.
        """
        if hw_server_url is None:
            hw_server_url = self.hw_server_url
        try:
            return VivadoHWServer.allDevices[hw_server_url]
        except KeyError:
            if auto_fetch and hw_server_url == self.hw_server_url:
                return self.fetch_devices(force=True)
            raise PylinxException('KeyError: No devices has fetched yet. Use fetchDevices() first!')

    def choose_device(self, **kwargs):
        """Interactively choose the hw target (blaster) and device (FPGA)."""
        devices = self.get_devices()
        if len(devices) < 1:
            raise PylinxException("There is no devices! Please use fetch_devices() first!")
        for i, dev in enumerate(devices):
            print(str(i) + ' ' + dev)
        device_id = int(input('Choose device for {} (Give a number): '.format(self.name)))
        device = devices[device_id]
        errmsgs = ['DONE status = 0', 'The debug hub core was not detected.']
        self.do('set_device ' + device, errmsgs=errmsgs, **kwargs)

    def choose_sio(self, createLink=True, **kwargs):
        """Interactively choose the transceiver channel (GT) for this side."""
        self.do('', **kwargs)
        errmsgs = ['No matching hw_sio_gts were found.']
        sios = self.do('get_hw_sio_gts', errmsgs=errmsgs, **kwargs).strip().split(' ')
        for i, sio in enumerate(sios):
            print(str(i) + ' ' + sio)
        print('Print choose a SIO for {} side (Give a number): '.format(self.name), end='')
        sio_id = int(input())
        self.sio = sios[sio_id]
        if createLink:
            self.do('create_link ' + self.sio, **kwargs)

    def reset_gt(self):
        """Pulse the GT reset port of the selected SIO (1 then 0), committing
        each edge to hardware."""
        reset_name = 'PORT.GT{}RESET'.format(self.name)
        # BUGFIX: the original used '[get_hw_sio_gts {{}}]'.format(self.sio),
        # which renders a literal '{}' and never inserts the SIO name.
        target = '[get_hw_sio_gts {{{}}}]'.format(self.sio)
        self.set_property(reset_name, '1', target)
        self.commit_hw_sio()
        self.set_property(reset_name, '0', target)
        self.commit_hw_sio()

    def commit_hw_sio(self):
        """Commit pending SIO property changes to the hardware."""
        # BUGFIX: the original concatenated three adjacent string literals
        # (missing commas) and passed them as ONE argument to set_property(),
        # which raised TypeError. commit_hw_sio is a TCL command, not a
        # property, so run it directly -- TODO confirm against the Vivado
        # TCL command reference.
        self.do('commit_hw_sio [get_hw_sio_gts {{{}}}]'.format(self.sio))
| 38.301927 | 116 | 0.592497 | 16,852 | 0.942137 | 0 | 0 | 0 | 0 | 0 | 0 | 6,329 | 0.353832 |
1d479001ca8c194710be9daccfb27ed5e279b01d | 532 | py | Python | b0012_integer_to_roman.py | savarin/algorithms | 4d1f8f2361de12a02f376883f648697562d177ae | [
"MIT"
] | 1 | 2020-06-16T23:22:54.000Z | 2020-06-16T23:22:54.000Z | b0012_integer_to_roman.py | savarin/algorithms | 4d1f8f2361de12a02f376883f648697562d177ae | [
"MIT"
] | null | null | null | b0012_integer_to_roman.py | savarin/algorithms | 4d1f8f2361de12a02f376883f648697562d177ae | [
"MIT"
] | null | null | null |
# Value/numeral pairs in descending order; the subtractive forms (cm, xl, ix,
# ...) must precede the plain ones so the greedy conversion picks them first.
# Generalized from the original table (which stopped at 10 and produced
# non-standard numerals such as "xxxx" for 40); outputs for 1..39 are
# unchanged.
lookup = [
    (1000, "m"), (900, "cm"), (500, "d"), (400, "cd"),
    (100, "c"), (90, "xc"), (50, "l"), (40, "xl"),
    (10, "x"), (9, "ix"), (5, "v"), (4, "iv"), (1, "i"),
]


def to_roman(integer):
    """Convert a non-negative integer to a lowercase Roman numeral string.

    Greedy recursion: subtract the largest table value that fits and
    prepend its numeral. Returns "" for 0.
    """
    for decimal, roman in lookup:
        if decimal <= integer:
            return roman + to_roman(integer - decimal)
    return ""
def main():
    """Print sample conversions, demonstrating single symbols, subtractive
    forms and composed numerals."""
    for sample in (1, 2, 4, 5, 6, 9, 10, 11, 36):
        print(to_roman(sample))


if __name__ == "__main__":
    main()
| 15.2 | 54 | 0.513158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.077068 |
1d4b5de957e9dedc94124939d23c8450668c5a52 | 1,781 | py | Python | IAD-Laboratory-Work-1/include/dispatch.py | TolimanStaR/Intelligent-Data-Analysis-Minor | 4e50abbb14aafeb07e9037a208bbbedff4758246 | [
"MIT"
] | null | null | null | IAD-Laboratory-Work-1/include/dispatch.py | TolimanStaR/Intelligent-Data-Analysis-Minor | 4e50abbb14aafeb07e9037a208bbbedff4758246 | [
"MIT"
] | null | null | null | IAD-Laboratory-Work-1/include/dispatch.py | TolimanStaR/Intelligent-Data-Analysis-Minor | 4e50abbb14aafeb07e9037a208bbbedff4758246 | [
"MIT"
] | null | null | null | from .service import *
import multiprocessing
import threading
# Module-level speech recognizer; stays None until a recognizer is wired in.
__recognizer = None


def init_session(*args) -> None:
    """Start an interaction session: create the assistant and the phone
    book, then greet the user."""
    global __recognizer
    # NOTE(review): earlier revisions launched a LiveSpeech recognizer here
    # (plus a daemon process/thread for it); that code is currently disabled,
    # so __recognizer stays None and voice mode in shell() cannot work --
    # confirm before enabling mode 1.
    __narrator = FRIDAY()       # the assistant's voice
    __database = PhoneBook()    # initialize the phone book
    greeting()
def complete_session(*args) -> None:
    """Persist the phone book, say goodbye and terminate the interpreter."""
    PhoneBook().save()
    farewell()
    # NOTE(review): quit() comes from the site module; sys.exit(0) would be
    # more robust outside an interactive session.
    quit(0)
def shell(mode: int = 0) -> None:
    """Interactive command loop.

    mode 0 reads commands as typed text; mode 1 uses the speech recognizer.
    NOTE(review): mode 1 relies on the module-level __recognizer, which
    init_session() currently never sets (it stays None) -- confirm before
    using voice mode.
    """
    while True:
        print('>>> ', end='')
        action = Command()
        if mode == 0:
            typed = InputData()
            typed.get_data()
            if not typed.data:
                continue  # empty input: just prompt again
            action.recognize_command_from_text(typed.data)
            command = typed
        else:
            FRIDAY().say(random.choice(AI_command_prompt))
            command = __recognizer
            command.listen()
            action.recognize_command_from_speech()
        try:
            if mode == 1:
                # Voice mode asks for the command's arguments separately.
                FRIDAY().say(random.choice(AI_ask_args))
                arguments = InputData()
                arguments.get_data()
                command.data = arguments.data
            # Dispatch; unknown commands raise KeyError on the lookup table.
            command_action[action.command](command.data + space)
        except KeyError:
            print(command_not_found)
# Dispatch table: maps a recognized command keyword to its handler
# (handlers come from include.service via the star import above).
command_action: dict = {
    'list': list_action,
    'search': search,
    'add': add,
    'delete': delete,
    'pdelete': delete_by_phone,
    'edit': edit,
    'get_age': get_age,
    'quit': complete_session,
    'help': help_action,
}
| 20.709302 | 85 | 0.578888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 397 | 0.219701 |
1d4dd929550bf7b4692d9cb7d58e97e19856e8bd | 3,618 | py | Python | behave_tests/steps/get_events.py | Sindhuja-SRL/back-end | d84dae8ed212913339dec646b46a67fcc0b77f52 | [
"MIT"
] | null | null | null | behave_tests/steps/get_events.py | Sindhuja-SRL/back-end | d84dae8ed212913339dec646b46a67fcc0b77f52 | [
"MIT"
] | null | null | null | behave_tests/steps/get_events.py | Sindhuja-SRL/back-end | d84dae8ed212913339dec646b46a67fcc0b77f52 | [
"MIT"
] | 1 | 2022-03-11T01:45:39.000Z | 2022-03-11T01:45:39.000Z | from behave import *
import requests
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
use_step_matcher("re")


@given("that I am a registered host of privilege walk events and exists events on my username")
def step_impl(context):
    """Register a host account, store its credentials/token on the behave
    context, and create one event owned by it."""
    context.username = "12thMan"
    context.password = "SomePassword123"
    context.first_name = "12th"
    context.last_name = "Man"
    context.email = "twelve@testtamu.edu"

    host = User.objects.create_user(context.username, context.email, context.password)
    host.first_name = context.first_name
    host.last_name = context.last_name
    host.save()

    # Sanity check: exactly one account with that username exists now.
    assert len(User.objects.filter(username="12thMan")) == 1

    token, _ = Token.objects.get_or_create(user=host)
    context.key = token.key

    # Create one event via the API so the listing step has data to return.
    payload = {
        "name": "New year event",
        "x_label_min": "Some text to be displayed on the graph",
        "x_label_max": "Something else you want to be displayed on the graph",
    }
    auth_headers = {'Authorization': f"Token {context.key}"}
    requests.post(context.test.live_server_url + "/host/events/create/", payload, headers=auth_headers)
@when("I make an API call to the get events API with my correct username")
def step_impl(context):
    """Fetch the host's events with its auth token; expect a 2xx reply."""
    auth_headers = {'Authorization': f"Token {context.key}"}
    response = requests.get(context.test.live_server_url + "/host/events/all/", headers=auth_headers)
    assert 200 <= response.status_code < 300
    context.api_response_data = response.json()
@then("I expect the response that gives the list of events on my username as host")
def step_impl(context):
    """The event created in the given-step must be the first one listed."""
    assert context.api_response_data["events"][0]["name"] == "New year event"
@given("that I am a registered host of privilege walk events and there exists no events on my username")
def step_impl(context):
    """Register a host account with a token but create no events."""
    context.username = "12thMan"
    context.password = "SomePassword123"
    context.first_name = "12th"
    context.last_name = "Man"
    context.email = "twelve@testtamu.edu"

    host = User.objects.create_user(context.username, context.email, context.password)
    host.first_name = context.first_name
    host.last_name = context.last_name
    host.save()

    # Sanity check: exactly one account with that username exists now.
    assert len(User.objects.filter(username="12thMan")) == 1

    token, _ = Token.objects.get_or_create(user=host)
    context.key = token.key
@when("I make an API call to the get events API with my username")
def step_impl(context):
    """Call the events-list endpoint with the stored auth token."""
    resp = requests.get(
        context.test.live_server_url + "/host/events/all/",
        headers={'Authorization': 'Token ' + context.key},
    )
    assert 200 <= resp.status_code < 300
    context.api_response_data = resp.json()
@then("I expect the response that gives the empty list as response")
def step_impl(context):
    """A host with no events gets an empty event list back."""
    events = context.api_response_data["events"]
    assert events == []
@given("that I am a registered host of privilege walk events and forgot my username")
def step_impl(context):
    """No setup needed: the When-step deliberately sends no credentials."""
@when("I make an API call to the get events API with wrong username")
def step_impl(context):
    """Hit the endpoint without an Authorization header; expect a 4xx."""
    url = context.test.live_server_url + "/host/events/all/"
    resp = requests.get(url)
    assert 400 <= resp.status_code < 500
    context.api_response_data = resp.json()
@then("I expect the response that says username doesn't exists")
def step_impl(context):
    """DRF's standard unauthenticated error message must be returned."""
    detail = context.api_response_data["detail"]
    assert detail == "Authentication credentials were not provided."
1d4e1390d738eb0ddc1e3c14bffd7c96ac769e6a | 1,011 | py | Python | P20-Stack Abstract Data Type/Stack - Base Converter.py | necrospiritus/Python-Working-Examples | 075d410673e470fc7c4ffc262e92109a3032132f | [
"MIT"
] | null | null | null | P20-Stack Abstract Data Type/Stack - Base Converter.py | necrospiritus/Python-Working-Examples | 075d410673e470fc7c4ffc262e92109a3032132f | [
"MIT"
] | null | null | null | P20-Stack Abstract Data Type/Stack - Base Converter.py | necrospiritus/Python-Working-Examples | 075d410673e470fc7c4ffc262e92109a3032132f | [
"MIT"
class Stack:
    """A LIFO stack built on a Python list; the top is the end of the list."""
    def __init__(self):
        self.items = []
    def is_empty(self):
        """Return True when the stack holds no items."""
        return not self.items
    def push(self, item):
        """Place `item` on top of the stack."""
        self.items.append(item)
    def pop(self):
        """Remove and return the top item."""
        return self.items.pop()
    def peek(self):
        """Return the top item without removing it."""
        return self.items[-1]
    def size(self):
        """Return the number of items currently on the stack."""
        return len(self.items)
def base_converter(dec_number, base):
    """Convert a non-negative integer to its string form in `base` (2..16).

    Uses a Stack to reverse the remainder order — the classic textbook
    repeated-division algorithm.
    """
    digits = "0123456789ABCDEF"
    # FIX: the original returned "" for an input of 0; 0 is "0" in any base.
    if dec_number == 0:
        return "0"
    rem_stack = Stack()
    while dec_number > 0:
        rem = dec_number % base
        rem_stack.push(rem)
        dec_number = dec_number // base
    new_string = ""
    while not rem_stack.is_empty():
        new_string = new_string + digits[rem_stack.pop()]
    return new_string
# Demo: convert 196 to binary, 25 to octal, and 26 to hexadecimal.
print(base_converter(196, 2))
print(base_converter(25, 8))
print(base_converter(26, 16))
1d4e5f95e9a8949b22d01dfef301296ff8f780c4 | 4,422 | py | Python | model_api/model_training/trigger_ner/model/soft_encoder.py | INK-USC/LEAN-LIFE | e3d6debc3e4c41145ef1c03236c4cf57bfd8be7d | [
"MIT"
] | 21 | 2020-09-29T12:45:50.000Z | 2022-03-27T13:11:12.000Z | model_api/model_training/trigger_ner/model/soft_encoder.py | INK-USC/LEAN-LIFE | e3d6debc3e4c41145ef1c03236c4cf57bfd8be7d | [
"MIT"
] | 3 | 2020-12-03T10:34:54.000Z | 2021-03-29T09:01:05.000Z | model_api/model_training/trigger_ner/model/soft_encoder.py | INK-USC/LEAN-LIFE | e3d6debc3e4c41145ef1c03236c4cf57bfd8be7d | [
"MIT"
] | 3 | 2021-02-14T08:39:02.000Z | 2021-07-29T02:33:14.000Z | """soft_encoder.py: Encoding sentence with LSTM.
It encodes sentence with Bi-LSTM.
After encoding, it uses all tokens for sentence, and extract some parts for trigger.
Written in 2020 by Dong-Ho Lee.
"""
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn as nn
import torch
from ..utilities.config import ContextEmb
from .charbilstm import CharBiLSTM
class SoftEncoder(nn.Module):
    """Bi-LSTM sentence encoder that also extracts trigger-token encodings."""
    def __init__(self, config, encoder=None):
        """Build embedding/dropout/Bi-LSTM layers, optionally sharing them.

        :param config: experiment config providing device, embedding dims,
            char/context-embedding switches, dropout and hidden sizes.
        :param encoder: optional existing SoftEncoder whose layers replace
            the freshly created ones (weight sharing).
        """
        super(SoftEncoder, self).__init__()
        self.config = config
        self.device = config.device
        self.use_char = config.use_char_rnn
        self.context_emb = config.context_emb
        self.input_size = config.embedding_dim
        # Contextual embeddings, when enabled, are concatenated to the input.
        if self.context_emb != ContextEmb.none:
            self.input_size += config.context_emb_size
        if self.use_char:
            self.char_feature = CharBiLSTM(config)
            self.input_size += config.charlstm_hidden_dim
        self.word_embedding = nn.Embedding.from_pretrained(torch.FloatTensor(config.word_embedding), freeze=False).to(
            self.device)
        self.word_drop = nn.Dropout(config.dropout).to(self.device)
        # Bidirectional, so each direction gets hidden_dim // 2 units.
        self.lstm = nn.LSTM(self.input_size, config.hidden_dim // 2, num_layers=1, batch_first=True,
                            bidirectional=True).to(self.device)
        # Reuse layers from a previously built encoder when one is provided.
        if encoder is not None:
            if self.use_char:
                self.char_feature = encoder.char_feature
            self.word_embedding = encoder.word_embedding
            self.word_drop = encoder.word_drop
            self.lstm = encoder.lstm
    def forward(self, word_seq_tensor: torch.Tensor,
                word_seq_lens: torch.Tensor,
                batch_context_emb: torch.Tensor,
                char_inputs: torch.Tensor,
                char_seq_lens: torch.Tensor,
                trigger_position):
        """
        Get sentence and trigger encodings by Bi-LSTM.

        :param word_seq_tensor: word-index tensor (padding id assumed 0 — see mask below)
        :param word_seq_lens: true (unpadded) length of each sequence
        :param batch_context_emb: pre-computed contextual embeddings (used only
            when ``config.context_emb`` is enabled)
        :param char_inputs: character-level inputs for CharBiLSTM
        :param char_seq_lens: character-sequence lengths
        :param trigger_position: trigger positions per sentence (e.g. [1, 4, 5]),
            or None to skip trigger extraction
        :return: (output, sentence_mask, trigger_vec, trigger_mask); the last
            two are None when trigger_position is None
        """
        # lstm_encoding
        word_emb = self.word_embedding(word_seq_tensor)
        if self.context_emb != ContextEmb.none:
            word_emb = torch.cat([word_emb, batch_context_emb.to(self.device)], 2)
        if self.use_char:
            char_features = self.char_feature(char_inputs, char_seq_lens)
            word_emb = torch.cat([word_emb, char_features], 2)
        word_rep = self.word_drop(word_emb)
        # Sort by length (pack_padded_sequence requires it) and remember the
        # permutation so outputs can be restored to the original batch order.
        sorted_seq_len, permIdx = word_seq_lens.sort(0, descending=True)
        _, recover_idx = permIdx.sort(0, descending=False)
        sorted_seq_tensor = word_rep[permIdx]
        packed_words = pack_padded_sequence(sorted_seq_tensor, sorted_seq_len, True)
        output, _ = self.lstm(packed_words, None)
        output, _ = pad_packed_sequence(output, batch_first=True)
        output = output[recover_idx]
        # 1.0 for real tokens, 0.0 for padding (token id 0 treated as pad).
        sentence_mask = (word_seq_tensor != torch.tensor(0)).float()
        # trigger part extraction
        if trigger_position is not None:
            max_length = 0
            output_e_list = []
            output_list = [output[i, :, :] for i in range(0, word_rep.size(0))]
            for output_l, trigger_p in zip(output_list, trigger_position):
                # Gather encoder outputs at the trigger token positions.
                output_e = torch.stack([output_l[p, :] for p in trigger_p])
                output_e_list.append(output_e)
                if max_length < output_e.size(0):
                    max_length = output_e.size(0)
            trigger_vec = []
            trigger_mask = []
            for output_e in output_e_list:
                # Zero-pad each trigger encoding to the longest trigger span.
                trigger_vec.append(
                    torch.cat([output_e, output_e.new_zeros(max_length - output_e.size(0), self.config.hidden_dim)], 0))
                t_ms = []
                for i in range(output_e.size(0)):
                    t_ms.append(True)
                for i in range(output_e.size(0), max_length):
                    t_ms.append(False)
                t_ms = torch.tensor(t_ms)
                trigger_mask.append(t_ms)
            trigger_vec = torch.stack(trigger_vec)
            trigger_mask = torch.stack(trigger_mask).float()
        else:
            trigger_vec = None
            trigger_mask = None
        return output, sentence_mask, trigger_vec, trigger_mask
| 40.568807 | 120 | 0.622795 | 4,027 | 0.910674 | 0 | 0 | 0 | 0 | 0 | 0 | 562 | 0.127092 |
1d515818a17c0385781342938f73ea5ce795d738 | 1,040 | py | Python | TrajPlot/main.py | CodesDope/python-pro-course-projects | 1c334ff70f38b7db4314826853a982b7dd89ead0 | [
"MIT"
] | null | null | null | TrajPlot/main.py | CodesDope/python-pro-course-projects | 1c334ff70f38b7db4314826853a982b7dd89ead0 | [
"MIT"
] | null | null | null | TrajPlot/main.py | CodesDope/python-pro-course-projects | 1c334ff70f38b7db4314826853a982b7dd89ead0 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import math
# Launch angle and speed define the projectile's initial velocity vector.
angle = float(input("Enter angle (degree): "))
velocity = float(input("Enter Velocity (m/s): "))
g = 9.81  # gravitational acceleration, m/s^2
cos_theta = math.cos(math.radians(angle))
sin_theta = math.sin(math.radians(angle))
# Horizontal and vertical velocity components.
v_x = velocity * cos_theta
v_y = velocity * sin_theta
# Standard projectile results for launch and landing at the same height.
time_of_flight = (2 * v_y) / g
range_of_flight = time_of_flight * v_x
max_height = (v_y ** 2) / (2 * g)
time = np.linspace(0, time_of_flight, num=200)
# Height at each sample time: y(t) = v_y * t - g * t^2 / 2.
y_coords = [(v_y * t) - (0.5 * g * (t ** 2)) for t in time]
plt.ylabel("height (m)")
plt.xlabel("time (s)")
plt.title("Projectile Trajectory")
# Annotate the figure with the three summary quantities, stacked vertically.
plt.text(
    time_of_flight / 2,
    max_height / 2,
    f"Time of Flight: {time_of_flight}s",
    ha="center",
    va="center",
)
plt.text(
    time_of_flight / 2,
    max_height / 3,
    f"Range of Flight: {range_of_flight}m",
    ha="center",
    va="center",
)
plt.text(
    time_of_flight / 2,
    max_height / 4,
    f"Maximum Height: {max_height}m",
    ha="center",
    va="center",
)
plt.plot(time, y_coords)
plt.show()
| 20.392157 | 59 | 0.641346 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.2375 |
1d523451cc925d640abefc8dc4a8dd62ec1de69a | 305 | py | Python | myscrumy/remiljscrumy/urls.py | mikkeyiv/Django-App | b1114e9e53bd673119a38a1acfefb7a9fd9f172e | [
"MIT"
] | null | null | null | myscrumy/remiljscrumy/urls.py | mikkeyiv/Django-App | b1114e9e53bd673119a38a1acfefb7a9fd9f172e | [
"MIT"
] | null | null | null | myscrumy/remiljscrumy/urls.py | mikkeyiv/Django-App | b1114e9e53bd673119a38a1acfefb7a9fd9f172e | [
"MIT"
] | null | null | null | from django.urls import include,path
from remiljscrumy import views
# URL namespace for reversing, e.g. "remiljscrumy:index".
app_name = 'remiljscrumy'
urlpatterns = [
    path('',views.index,name='index'),
    # Move a goal between scrum columns; goal_id is the target Goal's pk.
    path('<int:goal_id>/', views.move_goal, name = "move_goal"),
    # Built-in auth views (login, logout, password management).
    path('accounts/', include('django.contrib.auth.urls')),
]
| 16.944444 | 65 | 0.632787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.285246 |
1d527a9ea85f89d985ce11b29b779c2d5f4e16c8 | 914 | py | Python | src/django_idom/config.py | idom-team/django-idom | 940fbe21a2a5492f69fd0ba822917e4f4625c582 | [
"MIT"
] | 82 | 2021-07-23T04:42:24.000Z | 2022-03-31T10:43:30.000Z | src/django_idom/config.py | idom-team/django-idom | 940fbe21a2a5492f69fd0ba822917e4f4625c582 | [
"MIT"
] | 47 | 2021-05-20T06:53:43.000Z | 2022-03-05T22:25:21.000Z | src/django_idom/config.py | idom-team/django-idom | 940fbe21a2a5492f69fd0ba822917e4f4625c582 | [
"MIT"
] | 4 | 2021-05-17T21:51:48.000Z | 2022-02-08T08:36:08.000Z | from typing import Dict
from django.conf import settings
from django.core.cache import DEFAULT_CACHE_ALIAS
from idom.core.proto import ComponentConstructor
# Registered component name -> IDOM component constructor; populated at runtime.
IDOM_REGISTERED_COMPONENTS: Dict[str, ComponentConstructor] = {}

# URL prefix under which all IDOM endpoints are mounted (overridable in settings).
IDOM_BASE_URL = getattr(settings, "IDOM_BASE_URL", "_idom/")
IDOM_WEBSOCKET_URL = IDOM_BASE_URL + "websocket/"
IDOM_WEB_MODULES_URL = IDOM_BASE_URL + "web_module/"

# Maximum websocket reconnect delay; 604800 presumably seconds (one week) —
# TODO confirm the unit against the client-side reconnect logic.
IDOM_WS_MAX_RECONNECT_DELAY = getattr(settings, "IDOM_WS_MAX_RECONNECT_DELAY", 604800)

# Pick the Django cache used for web modules: a dedicated "idom_web_modules"
# cache when configured, else the default cache, else no caching at all.
_CACHES = getattr(settings, "CACHES", {})
if _CACHES:
    # FIX: test membership on the _CACHES value fetched above instead of
    # redundantly re-reading settings.CACHES a second time.
    if "idom_web_modules" in _CACHES:
        IDOM_WEB_MODULE_CACHE = "idom_web_modules"
    else:
        IDOM_WEB_MODULE_CACHE = DEFAULT_CACHE_ALIAS
else:
    IDOM_WEB_MODULE_CACHE = None

# the LRU cache size for the route serving IDOM_WEB_MODULES_DIR files
IDOM_WEB_MODULE_LRU_CACHE_SIZE = getattr(
    settings, "IDOM_WEB_MODULE_LRU_CACHE_SIZE", None
)
| 31.517241 | 86 | 0.784464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.251641 |
1d54ee0f796a2f72b2598319b4e1cc6534789204 | 496 | py | Python | ox_herd/core/plugins/awstools_plugin/forms.py | empower-capital/ox_herd | 2aa77db945296c152dc8d420f42a6d6455d514fa | [
"BSD-2-Clause"
] | 1 | 2021-11-28T20:35:31.000Z | 2021-11-28T20:35:31.000Z | ox_herd/core/plugins/awstools_plugin/forms.py | empower-capital/ox_herd | 2aa77db945296c152dc8d420f42a6d6455d514fa | [
"BSD-2-Clause"
] | 5 | 2017-11-21T00:21:13.000Z | 2021-06-30T19:47:54.000Z | ox_herd/core/plugins/awstools_plugin/forms.py | empower-capital/ox_herd | 2aa77db945296c152dc8d420f42a6d6455d514fa | [
"BSD-2-Clause"
] | 4 | 2021-12-17T10:58:15.000Z | 2021-12-23T14:38:40.000Z | """Forms for ox_herd commands.
"""
from wtforms import StringField
from ox_herd.core.plugins import base
class BackupForm(base.GenericOxForm):
    """Use this form to enter parameters for a new backup job.

    Fields:
      bucket_name: AWS bucket that receives the backup.
      prefix: prefix used when building the remote backup name.
    """

    bucket_name = StringField(
        'bucket_name', [], description=(
            'Name of AWS bucket to put backup into.'))

    # FIX: this field was also assigned to ``bucket_name``, silently
    # overwriting the bucket field above; it is the backup-name prefix.
    prefix = StringField(
        'prefix', [], default='misc', description=(
            'Prefix to use in creating remote backup name.'))
| 24.8 | 62 | 0.645161 | 382 | 0.770161 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.431452 |
1d55e3c66873c14775b1b524718c409625a3347a | 1,455 | py | Python | rpiRobot/test/dexterity/infrastructure/test_electromagnetGPIO.py | olgam4/design3 | 6e05d123a24deae7dda646df535844a158ef5cc0 | [
"WTFPL"
] | null | null | null | rpiRobot/test/dexterity/infrastructure/test_electromagnetGPIO.py | olgam4/design3 | 6e05d123a24deae7dda646df535844a158ef5cc0 | [
"WTFPL"
] | null | null | null | rpiRobot/test/dexterity/infrastructure/test_electromagnetGPIO.py | olgam4/design3 | 6e05d123a24deae7dda646df535844a158ef5cc0 | [
"WTFPL"
] | null | null | null | from unittest import TestCase
from unittest.mock import patch, Mock, DEFAULT, call
from dexterity.infrastructure.electromagnetGPIO import ElectromagnetGPIO
class TestElectromagnetGPIO(TestCase):
    """Unit tests for ElectromagnetGPIO with the Led class patched out."""
    def setUp(self) -> None:
        """Patch Led and route each pin number to a dedicated mock LED."""
        led_patcher = patch('dexterity.infrastructure.electromagnetGPIO.Led')
        self.Led = led_patcher.start()
        self.addCleanup(led_patcher.stop)
        self.hold_pin = 5
        self.hold_led = Mock()
        self.grab_pin = 6
        self.grab_led = Mock()
        # Led(pin) constructed inside the magnet returns the matching mock.
        self.Led.side_effect = self.mock_return
        self.magnet = ElectromagnetGPIO(self.hold_pin, self.grab_pin)
    def mock_return(self, pin_number, **kwargs) -> Mock:
        """Map a pin number to its mock LED; DEFAULT keeps normal Mock behavior."""
        if pin_number == self.hold_pin:
            return self.hold_led
        if pin_number == self.grab_pin:
            return self.grab_led
        return DEFAULT
    def test_when_created_then_grab_led_is_active_high(self) -> None:
        # Grab pin must be created with Led's defaults (i.e. active high).
        self.Led.assert_called_with(self.grab_pin)
    def test_when_grab_then_hold_led_is_set_on(self) -> None:
        self.magnet.grab()
        self.assertTrue(self.hold_led.on.called)
    def test_when_grab_then_grab_led_is_set_on_then_off(self) -> None:
        self.magnet.grab()
        # Grab must pulse the grab LED: on followed by off, in that order.
        expected_calls = [call.on, call.off]
        self.grab_led.assert_has_calls(expected_calls)
    def test_when_let_go_then_hold_led_is_set_off(self) -> None:
        self.magnet.let_go()
        self.assertTrue(self.hold_led.off.called)
| 31.630435 | 77 | 0.692784 | 1,295 | 0.890034 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.03299 |
1d576f6588eb1d2aae7c778d0ad217e3ca9a9ecd | 35 | py | Python | cog/__init__.py | uniphil/cog | deae32a3b06ee379fa44f68477ecfc00a2fc723d | [
"MIT"
] | 158 | 2018-07-09T02:46:54.000Z | 2022-03-06T15:56:49.000Z | cog/__init__.py | uniphil/cog | deae32a3b06ee379fa44f68477ecfc00a2fc723d | [
"MIT"
] | 18 | 2018-07-12T14:59:01.000Z | 2022-01-02T04:57:20.000Z | cog/__init__.py | uniphil/cog | deae32a3b06ee379fa44f68477ecfc00a2fc723d | [
"MIT"
def cog():
    """Smoke-test helper proving the cog package is importable and working."""
    message = "Cog is alive."
    return message
| 11.666667 | 23 | 0.628571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.428571 |
1d5874fe18b2e75012e32310519149b4c42547fe | 81 | py | Python | app_folder/schemas/api.py | Nuznhy/day-f-hack | 9f3dbcd73e73ea4e7807e5197bf0b0ded76bc9f3 | [
"MIT"
] | 2 | 2021-10-02T12:12:57.000Z | 2021-11-16T11:36:15.000Z | app_folder/schemas/api.py | Nuznhy/day-f-hack | 9f3dbcd73e73ea4e7807e5197bf0b0ded76bc9f3 | [
"MIT"
] | null | null | null | app_folder/schemas/api.py | Nuznhy/day-f-hack | 9f3dbcd73e73ea4e7807e5197bf0b0ded76bc9f3 | [
"MIT"
] | null | null | null | from pydantic import BaseModel
class ReadyResponse(BaseModel):
    """Response payload for the readiness/health endpoint."""
    # Service status text returned to the caller.
    status: str
| 13.5 | 31 | 0.777778 | 47 | 0.580247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1d590653c6d0bc80cf945214103ea57aa9992bdd | 1,087 | py | Python | bots/rand/rand.py | markmelnic/IS-Project | 85a4418c1ec9da4bd4fd105c20f4ca5c50d99ff5 | [
"MIT"
] | 51 | 2019-02-01T19:43:37.000Z | 2022-03-16T09:07:03.000Z | bots/rand/rand.py | markmelnic/IS-Project | 85a4418c1ec9da4bd4fd105c20f4ca5c50d99ff5 | [
"MIT"
] | 2 | 2019-02-23T18:54:22.000Z | 2019-11-09T01:30:32.000Z | bots/rand/rand.py | markmelnic/IS-Project | 85a4418c1ec9da4bd4fd105c20f4ca5c50d99ff5 | [
"MIT"
] | 35 | 2019-02-08T02:00:31.000Z | 2022-03-01T23:17:00.000Z | """
RandomBot -- A simple strategy: enumerates all legal moves, and picks one
uniformly at random.
"""
# Import the API objects
from api import State
import random
class Bot:
    """Baseline agent: chooses uniformly at random among the legal moves."""
    def __init__(self):
        pass
    def get_move(self, state):
        # type: (State) -> tuple[int, int]
        """Return one legal move drawn uniformly at random.

        Illegal moves (playing a card you don't own, proposing an illegal
        marriage) lose the game, so only moves from state.moves() are used.

        :param State state: the game state (cards, current trick, points).
        :return: a tuple of integers, or a tuple of an integer and None;
            the first element is the card played, the second a potential spouse.
        """
        legal_moves = state.moves()
        return random.choice(legal_moves)
1d5c20c56742eec9a208d57b7cd8d133f379fa4b | 8,252 | py | Python | storeAdjust/models.py | FreeGodCode/store | 1ea1d6f0d6030fb58bce9a4e2d428342a0c3ad19 | [
"MIT"
] | null | null | null | storeAdjust/models.py | FreeGodCode/store | 1ea1d6f0d6030fb58bce9a4e2d428342a0c3ad19 | [
"MIT"
] | 1 | 2021-03-05T15:00:38.000Z | 2021-03-05T15:00:38.000Z | storeAdjust/models.py | FreeGodCode/store | 1ea1d6f0d6030fb58bce9a4e2d428342a0c3ad19 | [
"MIT"
] | null | null | null | import datetime
from django.db import models
class TransferRequest(models.Model):
    """Transfer request order (转库申请单): a draft/approved request to move
    stock from one warehouse to another within an organization."""
    # 0 = draft (草稿), 1 = approved (已审批)
    STR_STATUS_CHOICES = (
        (0, '草稿'),
        (1, '已审批')
    )
    id = models.AutoField(primary_key=True)
    str_identify = models.CharField(max_length=15, verbose_name='转库申请单编号')
    str_serial = models.CharField(max_length=4, verbose_name='转库申请单流水号')
    organization = models.ForeignKey('base.Organization', verbose_name='组织', related_name='org_str', on_delete=models.CASCADE)
    str_to_house = models.CharField(max_length=20, verbose_name='转入仓库名字')
    str_from_house = models.CharField(max_length=20, verbose_name='转出仓库名字')
    str_date = models.DateTimeField(default=datetime.datetime.now, verbose_name='转库申请日期')
    str_department = models.CharField(max_length=20, verbose_name='转库申请部门')
    str_status = models.IntegerField(choices=STR_STATUS_CHOICES, default=0, verbose_name='转库申请单状态')
    str_creator = models.CharField(max_length=20, verbose_name='转库出库单创建人名字')
    str_creator_identify = models.CharField(max_length=20, verbose_name='转库出库单创建人工号')
    str_created_at = models.DateTimeField(auto_now_add=True, verbose_name='销售出库单创建日期')
    class Meta:
        db_table = 'db_transfer_request'
        verbose_name = "转库申请单"
    def __str__(self):
        return self.str_identify
class TransferRequestDetail(models.Model):
    """Line item of a transfer request (转库申请单明细): one material and the
    quantity requested to move."""
    # 0 = unused (未使用), 1 = used (已使用)
    USED_CHOICES = (
        (0, '未使用'),
        (1, '已使用')
    )
    id = models.AutoField(primary_key=True)
    transfer_request = models.ForeignKey('TransferRequest', verbose_name='转库申请单', related_name='str_trd', on_delete=models.CASCADE)
    material = models.ForeignKey('base.Material', verbose_name='物料', related_name='material_trd', on_delete=models.CASCADE)
    trd_num = models.IntegerField(verbose_name='转库申请数量')
    trd_present_num = models.IntegerField(verbose_name='材料现存量')
    trd_used = models.IntegerField(choices=USED_CHOICES, default=0, verbose_name='是否使用过')
    trd_remarks = models.TextField(max_length=400, verbose_name='转库单明细备注')
    class Meta:
        db_table = 'db_transfer_request_detail'
        verbose_name = "转库申请单详情"
class Transfer(models.Model):
    """Transfer order (转库单): the executed stock movement between two
    warehouses, optionally created from a TransferRequest."""
    # 0 = draft (草稿), 1 = approved (已审批)
    ST_STATUS_CHOICES = (
        (0, '草稿'),
        (1, '已审批')
    )
    id = models.AutoField(primary_key=True)
    st_identify = models.CharField(max_length=15, verbose_name='转库单编号')
    st_serial = models.CharField(max_length=4, verbose_name='转库单流水号')
    organization = models.ForeignKey('base.Organization', verbose_name='组织', related_name='org_st', on_delete=models.CASCADE)
    # transfer_request = models.OneToOneField('TransferRequest', verbose_name='转库申请单', on_delete=models.CASCADE)
    # str_identify = models.CharField(max_length=15, verbose_name='转库申请单编号', null=True)  # request number; empty means a brand-new entry
    st_to_house = models.CharField(max_length=20, verbose_name='转入仓库名字')
    st_from_house = models.CharField(max_length=20, verbose_name='转出仓库名字')
    st_date = models.DateTimeField(default=datetime.datetime.now, verbose_name='转库日期')
    st_status = models.IntegerField(choices=ST_STATUS_CHOICES, default=0, verbose_name='转库单状态')
    st_creator = models.CharField(max_length=20, verbose_name='转库单创建者名字')
    st_creator_identify = models.CharField(max_length=20, verbose_name='转库单创建者编号')
    st_created_at = models.DateTimeField(auto_now_add=True, verbose_name='转库单创建时间')
    class Meta:
        db_table = 'db_transfer'
        verbose_name = "转库单"
    def __str__(self):
        return self.st_identify
class TransferDetail(models.Model):
    """Line item of a transfer order (转库单明细): requested vs. actually
    shipped quantity of one material."""
    id = models.AutoField(primary_key=True)
    transfer = models.ForeignKey('Transfer', verbose_name='转库单', related_name='st_td', on_delete=models.CASCADE)
    str_identify = models.CharField(max_length=15, verbose_name='转库申请单编号', null=True)  # request number; empty means a brand-new entry
    material = models.ForeignKey('base.Material', verbose_name='物料', related_name='material_td', on_delete=models.CASCADE)
    # The requested quantity could also be reached via transfer -> request -> request detail.
    td_apply_num = models.IntegerField(verbose_name='转库申请数量')
    td_real_num = models.IntegerField(verbose_name='转库实发数量')
    td_present_num = models.IntegerField(verbose_name='材料现存量')
    # The request identifier could also be reached via transfer -> transfer_request.
    td_remarks = models.TextField(max_length=400, verbose_name='转库单明细备注')
    class Meta:
        db_table = 'db_transfer_detail'
        verbose_name = "转库单明细"
# class Inventory(models.Model):
# """
# 库存盘点单
# """
#
# STA_STATUS_CHOICES = (
# (0, '草稿'),
# (1, '已审批')
# )
# id = models.AutoField(primary_key=True)
# sta_identify = models.CharField(max_length=15, verbose_name='库存盘点单编号')
# sta_serial = models.CharField(max_length=4, verbose_name='库存盘点单流水号')
# organization = models.ForeignKey('base.Organization', verbose_name='组织', related_name='org_sta', on_delete=models.CASCADE)
# sta_ware_house = models.CharField(max_length=20, verbose_name='库存盘点仓库名字')
# sta_date = models.DateTimeField(default=datetime.now, verbose_name='库存盘点日期')
# sta_status = models.IntegerField(choices=STA_STATUS_CHOICES, verbose_name='库存盘点状态')
# sta_creator = models.CharField(max_length=20, verbose_name='库存盘点单创建者名字')
# sta_creator_identify = models.CharField(max_length=20, verbose_name='库存盘点单创建者编号')
# sta_createDate = models.DateTimeField(auto_now_add=True, verbose_name='库存盘点单创建时间')
#
# class Meta:
# verbose_name = "库存盘点单"
#
# def __str__(self):
# return self.sta_identify
#
#
# class StaDetail(models.Model):
# """
# 库存盘点明细
# """
# id = models.AutoField(primary_key=True)
# inventory = models.ForeignKey('Inventory', verbose_name='库存盘点单', related_name='sta_sd', on_delete=models.CASCADE)
# material = models.ForeignKey('base.Material', verbose_name='物料', related_name='material_sd',
# on_delete=models.CASCADE)
# sd_paper_num = models.IntegerField(verbose_name='账面数量')
# sd_real_num = models.IntegerField(verbose_name='盘点数量')
# sd_diff_num = models.IntegerField(verbose_name='差异数量')
# sd_adjm_price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='调整单价') # 读取库存组织下的单价
# sd_adjm_sum = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='调整金额')
# sd_remarks = models.TextField(max_length=400, verbose_name='库存盘点明细备注')
#
# class Meta:
# verbose_name = "库存盘明细"
# class OpeningInventory(models.Model):
# """
# 期初库存盘点
# 这个是某些材料写入数据库要统计的表
# """
# STA_STATUS_CHOICES = (
# (0, '草稿'),
# (1, '已审批')
# )
# id = models.AutoField(primary_key=True)
# oi_identify = models.CharField(max_length=15, verbose_name='期初库存单编号')
# organization = models.ForeignKey('base.Origanization', verbose_name='组织', related_name='org_oi',
# on_delete=models.CASCADE)
# oi_ware_house_identify = models.CharField(max_length=6, verbose_name='期初库存盘点仓库编码')
# oi_date = models.DateTimeField(auto_now_add=True, verbose_name='期初库存盘点日期')
# oi_status = models.IntegerField(choices=STA_STATUS_CHOICES, verbose_name='期初库存盘点状态')
# oi_creator = models.CharField(max_length=20, verbose_name='期初库存盘点单创建者')
# oi_createDate = models.DateTimeField(auto_now_add=True, verbose_name='期初库存盘点单创建时间')
#
# class Meta:
# verbose_name = "期初库存盘点单"
#
# def __str__(self):
# return self.oi_identify
#
#
# class OiDetail(models.Model):
# """期初库存盘点明细"""
#
# id = models.AutoField(primary_key=True)
# opening_inventory = models.ForeignKey('OpeningInventory', verbose_name='期初库存盘点', related_name='oi_oid',
# on_delete=models.CASCADE)
# material = models.ForeignKey('base.Material', verbose_name='物料', related_name='material_oid',
# on_delete=models.CASCADE)
# oid_num = models.IntegerField(verbose_name='入库数量')
# oid_price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='入库单价')
# oid_sum = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='入库总价')
# oid_date = models.DateTimeField(auto_now_add=True, verbose_name='入库时间')
# oid_remarks = models.TextField(max_length=400, verbose_name='期初库存盘点明细备注')
#
# class Meta:
# verbose_name = "期初库存盘点明细"
| 44.847826 | 131 | 0.702739 | 4,969 | 0.522722 | 0 | 0 | 0 | 0 | 0 | 0 | 5,981 | 0.629182 |
1d5d4eb18eb25d8b1449b6d42e4176ad70ac4a53 | 401 | py | Python | auctions/context_processors/footer_ctx.py | AH-SALAH/CS50W-commerce | 19663da14721b5fbadb691763d79d9ae66a40faa | [
"CNRI-Python"
] | null | null | null | auctions/context_processors/footer_ctx.py | AH-SALAH/CS50W-commerce | 19663da14721b5fbadb691763d79d9ae66a40faa | [
"CNRI-Python"
] | null | null | null | auctions/context_processors/footer_ctx.py | AH-SALAH/CS50W-commerce | 19663da14721b5fbadb691763d79d9ae66a40faa | [
"CNRI-Python"
] | null | null | null | from auctions.models import Category, Listing
from django.utils.timezone import now as tz_now
def footer_ctx(request):
    """Context processor supplying footer data: up to five currently live
    listings (active, already published, not yet expired) and five categories."""
    live_listings = Listing.objects.filter(
        is_active=True,
        published_date__lte=tz_now(),
        expiry_date__gt=tz_now(),
    )[:5]
    top_categories = Category.objects.all()[:5]
    return {"listings": live_listings, "categories": top_categories}
1d5d6d8998dc584931f9eeaf41067c368cf6390e | 4,237 | py | Python | sga/operators.py | ggarrett13/genetic-algorithm-example | 02dc8664245728fff74c54493c504ec1e7bae482 | [
"MIT"
] | 1 | 2020-08-10T15:29:59.000Z | 2020-08-10T15:29:59.000Z | sga/operators.py | ggarrett13/genetic-algorithm-example | 02dc8664245728fff74c54493c504ec1e7bae482 | [
"MIT"
] | null | null | null | sga/operators.py | ggarrett13/genetic-algorithm-example | 02dc8664245728fff74c54493c504ec1e7bae482 | [
"MIT"
] | null | null | null | import numpy as np
import operator
# TODO: Make Mutation Operator.
class TerminationCriteria:
    """Aggregates stop conditions for a genetic-algorithm run.

    Register one or more limits (convergence ratio, fitness level, generation
    count) and call :meth:`check` each generation; it returns True as soon as
    any registered criterion is satisfied.
    """

    @staticmethod
    def _convergence_check(convergence_ratio, population_fitness):
        """True when max fitness is within convergence_ratio/2 of the mean."""
        spread = abs((np.max(population_fitness) - np.mean(population_fitness))
                     / np.mean(population_fitness))
        return spread <= convergence_ratio / 2

    @staticmethod
    def _fitness_level_check(fitness_level, population_fitness, _operator):
        """Compare abs(max fitness) to fitness_level using _operator."""
        ops = {'>': operator.gt,
               '<': operator.lt,
               '>=': operator.ge,
               '<=': operator.le,
               '=': operator.eq}
        best = abs(np.max(population_fitness))
        return ops[_operator](best, fitness_level)

    @staticmethod
    def _generations_check(generations, generation_limit):
        """True once the generation counter reaches the limit."""
        return generations >= generation_limit

    def __init__(self):
        self._checks = []
        self._convergence_limit = None
        self._fitness_limit = None
        self._generation_limit = None
        self._operator = None

    def _checker_of_convergence(self):
        def _checker(population_fitness, generation_number):
            return self._convergence_check(self._convergence_limit, population_fitness)
        return _checker

    def _checker_of_fitness(self):
        def _checker(population_fitness, generation_number):
            # FIX: previously read self._convergence_limit (usually None),
            # so the fitness criterion compared against the wrong limit.
            return self._fitness_level_check(self._fitness_limit, population_fitness, self._operator)
        return _checker

    def _checker_of_generations(self):
        def _checker(population_fitness, generation_number):
            return self._generations_check(generation_number, self._generation_limit)
        return _checker

    def add_convergence_limit(self, convergence_ratio):
        """Stop once the population has converged to within the given ratio."""
        self._checks.append(self._checker_of_convergence())
        self._convergence_limit = convergence_ratio

    def add_fitness_limit(self, operator, fitness_level):
        """Stop once abs(max fitness) satisfies `operator` vs `fitness_level`.

        `operator` is one of '>', '<', '>=', '<=', '='.
        """
        self._checks.append(self._checker_of_fitness())
        # FIX: previously stored into self._generation_limit, clobbering any
        # generation limit and leaving self._fitness_limit unset.
        self._fitness_limit = fitness_level
        self._operator = operator

    def add_generation_limit(self, generation_limit):
        """Stop after `generation_limit` generations."""
        self._checks.append(self._checker_of_generations())
        self._generation_limit = generation_limit

    def check(self, population_fitness, generation_number):
        """Return True if any registered termination criterion is satisfied."""
        return any(check(population_fitness, generation_number)
                   for check in self._checks)
# def convergence_or_100(population_fitness, convergence_ratio):
# if abs((np.max(population_fitness) - np.mean(population_fitness)) / np.mean(
# population_fitness)) <= convergence_ratio / 2:
# return True
# elif abs(np.max(population_fitness)) == 100:
# return True
# else:
# return False
class SelectionOperator:
    """Parent-selection strategies for the genetic algorithm."""

    @staticmethod
    def supremacy(m, contestants, fitness):
        """Return (indices, contestants) of the m fittest individuals.

        Indices come back in argpartition order, not sorted by fitness.
        """
        # Hoisted: the partition was previously computed twice.
        top_idx = np.argpartition(np.array(fitness), -m)[-m:]
        return top_idx, np.array(contestants)[top_idx]

    @staticmethod
    def random(m, contestants, fitness):
        """Pick m contestants uniformly at random; `fitness` is ignored.

        TODO: Update for idx return. (BROKEN) — callers expecting the same
        (indices, values) pair as supremacy() only get values here.
        """
        return list(np.random.choice(contestants, m))
class CrossoverOperator:
    """Crossover strategies producing child chromosomes from parent strings.

    Chromosomes are space-separated gene strings, e.g. ``"1.2 3.4"``.
    """

    @staticmethod
    def random_polygamous(parents, n_children):
        """Build n_children by sampling each gene column from a random parent."""
        # Hoisted: parents were previously re-split once per gene column.
        parent_genes = np.array([parent.split(' ') for parent in parents])
        gene_columns = [np.random.choice(parent_genes[:, gene_idx], n_children)
                        for gene_idx in range(parent_genes.shape[1])]
        child_genes = np.array(gene_columns).T
        return [' '.join(child_genes[child_idx, :])
                for child_idx in range(child_genes.shape[0])]

    @staticmethod
    def supremecy_polygamous(parents, n_children, fitness):
        # FIX: `raise NotImplemented(...)` raised a TypeError, because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError("Supremacy not implemented yet")
def fitness_function_himmelblau(x, y):
    """Return 100 minus Himmelblau's function evaluated at (x, y).

    Himmelblau's global minima (value 0) therefore score the maximal
    fitness of 100.
    """
    term_a = (x ** 2. + y - 11.) ** 2.
    term_b = (x + y ** 2. - 7.) ** 2.
    return 100 - (term_a + term_b)
| 33.101563 | 117 | 0.64928 | 3,667 | 0.865471 | 0 | 0 | 1,950 | 0.460231 | 0 | 0 | 528 | 0.124616 |
1d5e37d663df33d8a715b4a60c72cd71296f8689 | 29 | py | Python | validation/__init__.py | pauloubuntu/ocr-processing-service | 3a8e755e6ec62336499280e4a48ffbd0d41df3cf | [
"MIT"
] | 22 | 2015-06-10T22:40:11.000Z | 2018-08-12T04:26:57.000Z | validation/__init__.py | pauloubuntu/ocr-processing-service | 3a8e755e6ec62336499280e4a48ffbd0d41df3cf | [
"MIT"
] | 6 | 2015-07-31T17:52:49.000Z | 2017-03-06T11:36:00.000Z | validation/__init__.py | nfscan/ocr-processing-service | 3a8e755e6ec62336499280e4a48ffbd0d41df3cf | [
"MIT"
] | 20 | 2015-06-10T12:47:10.000Z | 2020-03-24T20:23:14.000Z | __author__ = 'paulo.rodenas'
| 14.5 | 28 | 0.758621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.517241 |
1d611badfcce77d76278b903a3e886a36ee2bfd5 | 443 | py | Python | setup.py | gabeabrams/niu | a2979b6b3ed497a1cfa421f105c9e919d7709832 | [
"MIT"
] | null | null | null | setup.py | gabeabrams/niu | a2979b6b3ed497a1cfa421f105c9e919d7709832 | [
"MIT"
] | null | null | null | setup.py | gabeabrams/niu | a2979b6b3ed497a1cfa421f105c9e919d7709832 | [
"MIT"
] | null | null | null | from distutils.core import setup
# Packaging metadata for the `niu` grouping/pairing library.
setup(
  name = 'niu',
  packages = ['niu'],
  version = '0.2',
  description = 'A grouping and pairing library',
  author = 'Gabriel Abrams',
  author_email = 'gabeabrams@gmail.com',
  url = 'https://github.com/gabeabrams/niu',
  # FIX: the download URL pointed at the 0.1 tarball while version is 0.2.
  download_url = 'https://github.com/gabeabrams/niu/archive/0.2.tar.gz',
  keywords = ['grouping', 'pairing', 'matching'],
  install_requires=[
      'pulp'
  ],
  classifiers = []
)
1d61ecbda3d99cd0aa543ea4733231e129cbee3a | 544 | py | Python | etherscan/stats.py | adamzhang1987/py-etherscan-api | 9a3accfa455eb1d9d82ac7be41012948a58f90e3 | [
"MIT"
] | 458 | 2016-07-21T19:49:30.000Z | 2022-03-23T18:01:19.000Z | etherscan/stats.py | adamzhang1987/py-etherscan-api | 9a3accfa455eb1d9d82ac7be41012948a58f90e3 | [
"MIT"
] | 71 | 2016-06-17T19:34:18.000Z | 2022-03-06T20:13:37.000Z | etherscan/stats.py | adamzhang1987/py-etherscan-api | 9a3accfa455eb1d9d82ac7be41012948a58f90e3 | [
"MIT"
] | 269 | 2016-06-20T09:51:17.000Z | 2022-03-17T19:19:10.000Z | from .client import Client
class Stats(Client):
    """Etherscan ``stats`` module endpoints (ether supply and price)."""
    def __init__(self, api_key='YourApiKeyToken'):
        # Stats queries need no address, so an empty one is passed to Client.
        Client.__init__(self, address='', api_key=api_key)
        self.url_dict[self.MODULE] = 'stats'
    def get_total_ether_supply(self):
        """Return the total ether supply via the ``ethsupply`` action."""
        self.url_dict[self.ACTION] = 'ethsupply'
        self.build_url()
        req = self.connect()
        return req['result']
    def get_ether_last_price(self):
        """Return the latest ether price info via the ``ethprice`` action."""
        self.url_dict[self.ACTION] = 'ethprice'
        self.build_url()
        req = self.connect()
        return req['result']
| 27.2 | 58 | 0.628676 | 514 | 0.944853 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.115809 |
1d6268f350e6d98480c5a0858a845fd5f515b56e | 2,842 | py | Python | hidrocomp/graphics/gantt.py | clebsonpy/HydroComp | 9d17fa533e8a15c760030df5246ff531ddb4cb22 | [
"MIT"
] | 4 | 2020-05-14T20:03:49.000Z | 2020-05-22T19:56:43.000Z | hidrocomp/graphics/gantt.py | clebsonpy/HydroComp | 9d17fa533e8a15c760030df5246ff531ddb4cb22 | [
"MIT"
] | 19 | 2019-06-27T18:12:27.000Z | 2020-04-28T13:28:03.000Z | hidrocomp/graphics/gantt.py | clebsonpy/HydroComp | 9d17fa533e8a15c760030df5246ff531ddb4cb22 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import calendar
import datetime
import plotly as py
import plotly.graph_objs as go
class Gantt(object):
    """Builds DataFrames shaped for plotly-style Gantt charts from peak data."""
    def __init__(self, data):
        # `data` must expose `.name`; presumably a pandas Series/DataFrame of
        # flow peaks -- TODO confirm against callers.
        self.data = data
    def get_gantt(self, df, less, index):
        """Append one Gantt row per entry of `less` to `df`, starting at `index`.

        `less` must carry 'Start' and 'Finish' columns.  Rows are written in
        place via `df.at`; returns the updated `df` and the next free index.
        """
        color = 0
        n = 1
        for j in less.index:
            df.at[index, 'Task'] = self.data.name
            df.at[index, 'Description'] = self.data.name + ' - %s' % j
            df.at[index, 'IndexCol'] = color
            df.at[index, 'Start'] = less['Start'].loc[j]
            df.at[index, 'Finish'] = less['Finish'].loc[j]
            # `color` alternates between 0 and 100 (+100, -100, +100, ...) so
            # adjacent bars fall into two distinct color bins.
            color += (100 * n)
            n *= -1
            index += 1
        return df, index
    @staticmethod
    def get_spells(data_peaks, month_water):
        """Build a spell table normalised onto a single reference water-year.

        `data_peaks` needs 'Peaks', 'Start' and 'End' columns on a datetime
        index; `month_water` is (start_month, grouping_freq).  'Complete' is
        the peak magnitude scaled to 0-100 over the observed range.  Events
        are remapped onto a 365-day year beginning 1/<start_month>/1998;
        events overflowing that window are shifted back one year.
        Returns (df_spells, next_index, first_date, last_date).
        """
        df_spells = pd.DataFrame(columns=['Task', 'Start', 'Finish', 'Complete', 'Name'])
        index = 0
        dates = pd.date_range(start=pd.to_datetime('1/%s/1998' % month_water[0], dayfirst=True), periods=365, freq='D')
        if len(data_peaks) > 0:
            # NOTE(review): if all peaks are equal, `inter` is 0 and the
            # 'Complete' computation below divides by zero -- confirm inputs.
            inter = data_peaks['Peaks'].max() - data_peaks['Peaks'].min()
            for groups in data_peaks.groupby(pd.Grouper(freq=month_water[1])):
                for i in groups[1].index:
                    df_spells.at[index, 'Complete'] = 100-(100*(data_peaks['Peaks'].max() - data_peaks['Peaks'].loc[i])/inter)
                    start = data_peaks['Start'].loc[i]
                    end = data_peaks['End'].loc[i]
                    df_spells.at[index, 'Name'] = data_peaks['Peaks'].loc[i]
                    df_spells.at[index, 'Task'] = int(groups[0].year)
                    len_days = len(pd.date_range(start, end))
                    # find the day-of-year slot in the reference year that
                    # matches this event's start date
                    for date in dates:
                        if date.month == start.month and date.day == start.day:
                            inter_date = pd.date_range(start=date, periods=len_days)
                            if inter_date[-1] > dates[-1]:
                                # event spills past the reference window:
                                # shift it back one calendar year
                                date_start = pd.to_datetime(
                                    '%s/%s/%s' % (inter_date[0].day, inter_date[0].month, inter_date[0].year - 1),
                                    dayfirst=True)
                                date_end = pd.to_datetime(
                                    '%s/%s/%s' % (inter_date[-1].day, inter_date[-1].month, inter_date[-1].year - 1),
                                    dayfirst=True)
                                df_spells.at[index, 'Start'] = date_start
                                df_spells.at[index, 'Finish'] = date_end
                            else:
                                df_spells.at[index, 'Start'] = inter_date[0]
                                df_spells.at[index, 'Finish'] = inter_date[-1]
                            index += 1
        else:
            pass
        return df_spells, index, dates[0], dates[-1]
| 40.028169 | 126 | 0.478184 | 2,716 | 0.955665 | 0 | 0 | 2,147 | 0.755454 | 0 | 0 | 236 | 0.08304 |
1d632172c51d0d9115fa84dc7ab954ae04bb0eb7 | 229 | py | Python | solid/recources/lab_drafts/01_SRP/books.py | BoyanPeychinov/object_oriented_programming | a960721c7c17710bd7b151a9025647e953435962 | [
"MIT"
] | null | null | null | solid/recources/lab_drafts/01_SRP/books.py | BoyanPeychinov/object_oriented_programming | a960721c7c17710bd7b151a9025647e953435962 | [
"MIT"
] | null | null | null | solid/recources/lab_drafts/01_SRP/books.py | BoyanPeychinov/object_oriented_programming | a960721c7c17710bd7b151a9025647e953435962 | [
"MIT"
] | null | null | null | class Book:
    def __init__(self, title, author, location):
        """Store the book's details; a new book starts at page 0."""
        self.title = title
        self.author = author
        self.location = location
        # current reading position
        self.page = 0
    def turn_page(self, page):
        """Move the current reading position to the given page number."""
        self.page = page
| 22.9 | 48 | 0.585153 | 228 | 0.995633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1d642def5287972efd9e7b0713ed0b36627e2f0c | 8,530 | py | Python | scripts/clues_ancient_samples.py | ekirving/mesoneo_paper | 7b5ac1b5beff8bfc09dca18554502518f6f15e36 | [
"MIT"
] | null | null | null | scripts/clues_ancient_samples.py | ekirving/mesoneo_paper | 7b5ac1b5beff8bfc09dca18554502518f6f15e36 | [
"MIT"
] | null | null | null | scripts/clues_ancient_samples.py | ekirving/mesoneo_paper | 7b5ac1b5beff8bfc09dca18554502518f6f15e36 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Evan K. Irving-Pease"
__copyright__ = "Copyright 2020, University of Copenhagen"
__email__ = "evan.irvingpease@gmail.com"
__license__ = "MIT"
import os
import sys
from math import log
import click
import pysam
import yaml
sys.path.append(os.getcwd())
from scripts.utils import get_samples
# ancestral path codes used in the VCF
PATH_ANA = "1" # Anatolian Farmers -> Neolithic
PATH_CHG = "2" # Caucasus Hunter-gatherers -> Yamnaya
PATH_WHG = "3" # Western Hunter-gatherers -> Neolithic
PATH_EHG = "4" # Eastern Hunter-gatherers -> Yamnaya
PATH_EHG_WHG = "5" # North European ancestry (WHG or EHG path) but unable to be more specific
PATH_ANA_CHG = "6" # West Asian ancestry (CHG or Anatolian path) but unable to be more specific
PATH_UNKNOWN = "0" # Unable to assign specific path (This labels 0,5,6,9 and 10)
# map 3-letter codes to ancestral path codes
ANCESTRY_MAP = {
"ALL": None,
"ANA": [PATH_ANA, PATH_ANA_CHG],
"CHG": [PATH_CHG, PATH_ANA_CHG],
"WHG": [PATH_WHG, PATH_EHG_WHG],
"EHG": [PATH_EHG, PATH_EHG_WHG],
}
SEXES = ["XX", "XY", "any"]
BASES = ["A", "C", "G", "T"]
BASES_N = BASES + ["N", "0"]
# the minimum number of samples to model the trajectory
MIN_ANCIENT_SAMPLES = 2
@click.command()
@click.option("--vcf", "vcf_file", metavar="<file>", help="VCF file", type=click.Path(exists=True), required=True)
@click.option("--chr", "chrom", metavar="<chr>", help="Chromosome of the variant", required=True)
@click.option("--pos", metavar="<int>", help="Position of the variant", type=int, required=True)
@click.option("--ancestral", metavar="<chr>", help="The ancestral allele", type=click.Choice(BASES_N), required=True)
@click.option("--dataset", metavar="<string>", help="Name of the dataset", required=True)
@click.option("--population", metavar="<string>", help="Name of the population", required=True)
@click.option("--ancestry", metavar="<string>", help="Ancestry code", type=click.Choice(ANCESTRY_MAP), required=True)
@click.option("--sex", metavar="<string>", help="Sample sex", type=click.Choice(SEXES), required=True)
@click.option("--gen-time", metavar="<int>", help="Years per generation", type=int, required=True)
@click.option("--mod-freq", metavar="<file>", type=click.File("w"), help="Modern frequency filename", required=True)
@click.option("--output", metavar="<file>", type=click.File("w"), help="Output filename", required=True)
def clues_ancient_samples(
    vcf_file, chrom, pos, ancestral, dataset, population, ancestry, sex, gen_time, mod_freq, output
):
    """
    Generate the ancient samples genotype likelihood file for `clues`.

    Writes one line per usable ancient sample to `output` (age in
    generations followed by log10 genotype pseudo-likelihoods -- four
    columns for diploid 'ALL' mode, three for ancestry-conditioned
    pseudo-haploid mode) and the derived-allele frequency among modern
    samples to `mod_freq`.

    See https://github.com/35ajstern/clues/
    """
    with open("config.yaml") as fin:
        config = yaml.safe_load(fin)
    # get all the ancient samples in the current analysis group
    samples = get_samples(config, dataset, population)
    ancients = samples[samples["age"] != 0]
    ancients = ancients[ancients["age"].notnull()]
    ancients = ancients.sort_values("age")
    # also get all the modern samples
    moderns = samples[samples["age"] == 0]
    # apply the sex filter
    if sex != "any":
        ancients = ancients[ancients["sex"] == sex]
        moderns = moderns[moderns["sex"] == sex]
    # load the VCF file with the sample genotypes
    vcf = pysam.VariantFile(vcf_file)
    try:
        # fetch the record from the VCF (0-based half-open coordinates)
        rec = next(vcf.fetch(chrom, pos - 1, pos))
    except (StopIteration, ValueError):
        # variant not in the VCF
        raise RuntimeError(f"SNP {chrom}:{pos} not found in {vcf_file}")
    alleles = [rec.ref] + list(rec.alts)
    if len(alleles) > 2:
        raise RuntimeError(f"{chrom}:{pos} {rec.id} SNP is polyallelic {alleles} in {vcf_file}")
    if ancestral == "N":
        raise RuntimeError(f"{chrom}:{pos} {rec.id} Cannot handle SNPs with unknown ancestral allele")
    if ancestral not in alleles:
        raise RuntimeError(f"{chrom}:{pos} {rec.id} Ancestral allele {ancestral} is missing {alleles} in {vcf_file}")
    # biallelic site, so the single non-ancestral allele is the derived one
    derived = (set(alleles) - {ancestral}).pop()
    num_samples = 0
    # is the current dataset using genotype likelihoods for ancient samples
    is_likelihood = config["samples"][dataset]["is_likelihood"]
    for sample, sample_row in ancients.iterrows():
        # convert the sample's age in years to generations
        gen = sample_row["age"] / gen_time
        if None in rec.samples[sample].alleles:
            # skip sites without diploid coverage
            continue
        if is_likelihood:
            # get the Phred-scaled genotype likelihoods
            pred_likelihoods = rec.samples[sample].get("PL", None)
            # convert from Phred back into a probability
            gp_diploid = [10 ** (-Q / 10) for Q in pred_likelihoods]
        else:
            # get the diploid genotype probabilities
            gp_diploid = rec.samples[sample].get("GP", None)
            # handle samples without a genotype probability
            if gp_diploid is None:
                # convert regular diploid genotypes into a GP tuple
                alts = rec.samples[sample].alleles.count(rec.alts[0])
                gp_diploid = [0, 0, 0]
                gp_diploid[alts] = 1
        # treat call as diploid when we're not conditioning on ancestry
        if ancestry == "ALL":
            if rec.ref != ancestral:
                # polarise the probabilities
                gp_diploid = reversed(gp_diploid)
            # convert GP calls into pseudo-likelihoods
            geno_ll = [log(geno, 10) if geno > 0 else float("-inf") for geno in gp_diploid]
            # output the pseudo-likelihoods
            output.write("{:f} {:f} {:f} {:f}\n".format(gen, *geno_ll))
            num_samples += 1
        else:
            # get the genotype tuple
            gt = rec.samples[sample].get("GT")
            # treat each call as pseudo-haploid, keeping only the haplotypes
            # whose local-ancestry path ("AP") matches the requested ancestry
            for geno, path in zip(gt, rec.samples[sample].get("AP", "")[0].split("|")):
                if path in ANCESTRY_MAP[ancestry]:
                    if gt == (1, 1):
                        gp_haploid = (gp_diploid[0] + (gp_diploid[1] / 2), gp_diploid[2])
                    elif gt == (0, 0):
                        gp_haploid = (gp_diploid[0], (gp_diploid[1] / 2) + gp_diploid[2])
                    else:
                        # the haploid probability of a het call depend on which call it is
                        if geno == 1:
                            gp_haploid = (gp_diploid[0], gp_diploid[1] + gp_diploid[2])
                        else:
                            gp_haploid = (gp_diploid[0] + gp_diploid[1], gp_diploid[2])
                    if rec.ref != ancestral:
                        # polarise the probabilities
                        gp_haploid = reversed(gp_haploid)
                    # convert GP calls into pseudo-likelihoods
                    geno_ll = [log(geno, 10) if geno > 0 else float("-inf") for geno in gp_haploid]
                    # output the pseudo-likelihoods
                    output.write("{:f} {:f} {:f}\n".format(gen, *geno_ll))
                    num_samples += 1
    if num_samples < MIN_ANCIENT_SAMPLES:
        # output some null records so CLUES doesn't throw an error
        for _ in range(MIN_ANCIENT_SAMPLES - num_samples):
            if ancestry == "ALL":
                output.write("1.0 0.0 -inf -inf\n")
            else:
                output.write("1.0 0.0 -inf\n")
    # calculate the modern frequency
    focal, total = 0, 0
    for sample, sample_row in moderns.iterrows():
        if None in rec.samples[sample].alleles:
            # skip sites without diploid coverage
            continue
        if ancestry == "ALL":
            # count diploid occurrences of the derived allele
            focal += rec.samples[sample].alleles.count(derived)
            total += len(rec.samples[sample].alleles)
        else:
            # count haploid occurrences of the derived allele
            calls = []
            # filter for genotypes belonging to this ancestry
            for call, path in zip(rec.samples[sample].alleles, rec.samples[sample].get("AP", "")[0].split("|")):
                if path in ANCESTRY_MAP[ancestry]:
                    # count the ancestry specific genotypes
                    calls.append(call)
            focal += calls.count(derived)
            total += len(calls)
    mod_freq.write("{:.4f}".format((focal / total) if total else 0))
if __name__ == "__main__":
clues_ancient_samples()
| 38.423423 | 117 | 0.605393 | 0 | 0 | 0 | 0 | 7,179 | 0.841618 | 0 | 0 | 3,056 | 0.358265 |
1d66db9f64a830eb8e8e7037d37357c250c45e15 | 5,773 | py | Python | auth/app.py | Celeo/GETIN-HR | 3acc4bd1b09abf4fe7da05e25fd849ebe7c6f6a6 | [
"MIT"
] | null | null | null | auth/app.py | Celeo/GETIN-HR | 3acc4bd1b09abf4fe7da05e25fd849ebe7c6f6a6 | [
"MIT"
] | null | null | null | auth/app.py | Celeo/GETIN-HR | 3acc4bd1b09abf4fe7da05e25fd849ebe7c6f6a6 | [
"MIT"
] | null | null | null | import logging
from datetime import timedelta
from flask import Flask, render_template, redirect, request, url_for, flash
from flask_login import LoginManager, login_user, logout_user, current_user
from preston.crest import Preston as CREST
from preston.xmlapi import Preston as XMLAPI
from auth.shared import db, eveapi
from auth.models import User
from auth.hr.app import app as hr_blueprint
# from auth.wiki.app import app as wiki_blueprint
# Create and configure app
app = Flask(__name__)
app.permanent_session_lifetime = timedelta(days=14)
app.config.from_pyfile('config.cfg')
# EVE XML API connection
user_agent = 'GETIN HR app ({})'.format(app.config['CONTACT_EMAIL'])
eveapi['user_agent'] = user_agent
eveapi['xml'] = XMLAPI(user_agent=user_agent)
# EVE CREST API connection
eveapi['crest'] = CREST(
user_agent=user_agent,
client_id=app.config['EVE_OAUTH_CLIENT_ID'],
client_secret=app.config['EVE_OAUTH_SECRET'],
callback_url=app.config['EVE_OAUTH_CALLBACK']
)
# Database connection
db.app = app
db.init_app(app)
# User management
login_manager = LoginManager(app)
login_manager.login_message = ''
login_manager.login_view = 'login'
# Application logging
app.logger.setLevel(app.config['LOGGING_LEVEL'])
handler = logging.FileHandler('log.txt')
handler.setFormatter(logging.Formatter(style='{', fmt='{asctime} [{levelname}] {message}', datefmt='%Y-%m-%d %H:%M:%S'))
handler.setLevel(app.config['LOGGING_LEVEL'])
app.logger.addHandler(handler)
# Blueprints
app.register_blueprint(hr_blueprint, url_prefix='/hr')
# app.register_blueprint(wiki_blueprint, url_prefix='/wiki')
app.logger.info('Initialization complete')
@login_manager.user_loader
def load_user(user_id):
    """Resolve a Flask-Login session id to its auth.models.User.

    Args:
        user_id (str): user model id (a stringified int)

    Returns:
        auth.models.User: the matching user, or None if no row exists
    """
    pk = int(user_id)
    return User.query.filter_by(id=pk).first()
@app.route('/')
def landing():
    """Render the site's landing page."""
    return render_template('landing.html')
@app.route('/login')
def login():
    """Shows a user the EVE SSO link so they can log in.

    Args:
        None

    Returns:
        str: rendered template 'login.html'
    """
    return render_template('login.html', url=eveapi['crest'].get_authorize_url())
@app.route('/eve/callback')
def eve_oauth_callback():
    """Completes the EVE SSO login. Here, hr.models.User models
    and hr.models.Member models are created for the user if they don't
    exist and the user is redirected to the page appropriate for their
    access level.

    Args:
        None

    Returns:
        str: redirect to the login endpoint if something failed, join endpoint if
             the user is a new user, or the index endpoint if they're already a member.
    """
    if 'error' in request.path:
        # NOTE(review): OAuth providers return errors as query parameters and
        # `request.path` never contains the query string, so this check
        # probably needs to be `'error' in request.args` -- confirm against
        # EVE SSO before changing.
        app.logger.error('Error in EVE SSO callback: ' + request.url)
        flash('There was an error in EVE\'s response', 'error')
        # BUG FIX: previously returned the bare URL string (rendered as the
        # response body) instead of issuing an HTTP redirect.
        return redirect(url_for('login'))
    try:
        auth = eveapi['crest'].authenticate(request.args['code'])
    except Exception as e:
        app.logger.error('CREST signing error: ' + str(e))
        flash('There was an authentication error signing you in.', 'error')
        return redirect(url_for('login'))
    character_info = auth.whoami()
    character_name = character_info['CharacterName']
    user = User.query.filter_by(name=character_name).first()
    if user:
        # existing account: just log them in
        login_user(user)
        app.logger.debug('{} logged in with EVE SSO'.format(current_user.name))
        flash('Logged in', 'success')
        return redirect(url_for('landing'))
    # first login: create an account tied to the character's corporation
    corporation = get_corp_for_name(character_name)
    user = User(character_name, corporation)
    db.session.add(user)
    db.session.commit()
    login_user(user)
    app.logger.info('{} created an account'.format(current_user.name))
    return redirect(url_for('landing'))
@app.route('/logout')
def logout():
    """Log the current user out of the site.

    Args:
        None

    Returns:
        str: redirect to the login endpoint
    """
    who = 'unknown user' if current_user.is_anonymous else current_user.name
    app.logger.debug('{} logged out'.format(who))
    logout_user()
    return redirect(url_for('login'))
@app.errorhandler(404)
def error_404(e):
    """Log a 404 error and show the user the custom not-found page.

    Args:
        e (Exception): the exception from the server

    Returns:
        str: rendered template 'error_404.html'
    """
    who = 'unknown user' if current_user.is_anonymous else current_user.name
    app.logger.error('404 error at "{}" by {}: {}'.format(request.url, who, str(e)))
    return render_template('error_404.html')
@app.errorhandler(500)
def error_500(e):
    """Log a 500 error and show the user the custom server-error page.

    Args:
        e (Exception): the exception from the server

    Returns:
        str: rendered template 'error_500.html'
    """
    who = 'unknown user' if current_user.is_anonymous else current_user.name
    app.logger.error('500 error at "{}" by {}: {}'.format(request.url, who, str(e)))
    return render_template('error_500.html')
def get_corp_for_name(name):
    """Takes a character's name and returns their corporation's name.

    Resolves the name to a character id via the XML API, then delegates
    to get_corp_for_id.  (The previous docstring incorrectly said this
    returned the character ID.)

    Args:
        name (str): full character name

    Returns:
        str: the character's corporation name
    """
    return get_corp_for_id(eveapi['xml'].eve.CharacterId(names=name)['rowset']['row']['@characterID'])
def get_corp_for_id(id):
    """Takes a character's id and returns their corporation name.

    Args:
        id: EVE character ID

    Returns:
        str: value of their corporation's name
    """
    return eveapi['xml'].eve.CharacterAffiliation(ids=id)['rowset']['row']['@corporationName']
| 30.067708 | 120 | 0.690802 | 0 | 0 | 0 | 0 | 3,478 | 0.60246 | 0 | 0 | 2,751 | 0.476529 |
1d67b713f23618331d3a012ef4bff6ddf73e4235 | 3,891 | py | Python | setup.py | cathalmccabe/IIoT-SPYN | 3d7ac560bfc31ecd763ac3a8c266fc63874f20ad | [
"BSD-3-Clause"
] | 1 | 2019-01-01T14:52:51.000Z | 2019-01-01T14:52:51.000Z | setup.py | cathalmccabe/IIoT-SPYN | 3d7ac560bfc31ecd763ac3a8c266fc63874f20ad | [
"BSD-3-Clause"
] | null | null | null | setup.py | cathalmccabe/IIoT-SPYN | 3d7ac560bfc31ecd763ac3a8c266fc63874f20ad | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2018, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup, find_packages
import shutil
import sys
import os
import site
__author__ = "KV Thanjavur Bhaaskar, Naveen Purushotham"
__copyright__ = "Copyright 2018, Xilinx"
__email__ = "kvt@xilinx.com, npurusho@xilinx.com"
GIT_DIR = os.path.dirname(os.path.realpath(__file__))
# Board specific package delivery setup
def exclude_from_files(exclude, path):
    """List the plain files directly under *path*, skipping names in *exclude*."""
    kept = []
    for entry in os.listdir(path):
        if entry in exclude:
            continue
        if os.path.isfile(os.path.join(path, entry)):
            kept.append(entry)
    return kept
def exclude_from_dirs(exclude, path):
    """List the sub-directories directly under *path*, skipping names in *exclude*."""
    kept = []
    for entry in os.listdir(path):
        if entry in exclude:
            continue
        if os.path.isdir(os.path.join(path, entry)):
            kept.append(entry)
    return kept
def collect_data_files():
    """Pair each overlay folder under the BOARD directory with its files.

    Builds a distutils-style ``data_files`` list: for every directory in
    the module-level `board_folder` (except 'notebooks' and 'vivado'),
    a tuple of the site-packages install destination
    (<site-packages>/spyn/overlays/<ol>) and the overlay's files, minus
    any 'makefile'.  Relies on `board_folder` being set below when the
    BOARD environment variable is present.
    """
    return [(os.path.join(
        '{}/spyn/overlays'.format(os.path.dirname(site.__file__) +
                                  "/site-packages"), ol),
        [os.path.join(board_folder, ol, f)
            for f in exclude_from_files(
                ['makefile'], os.path.join(board_folder, ol))])
        for ol in exclude_from_dirs(['notebooks', 'vivado'],
                                    board_folder)]
# Board-specific data files are only collected when the BOARD environment
# variable is set (e.g. BOARD=Pynq-Z1); otherwise installation proceeds
# without any overlay payload.
if 'BOARD' not in os.environ:
    print("Please set the BOARD environment variable "
          "to get any BOARD specific overlays (e.g. Pynq-Z1).")
    board = None
    board_folder = None
    data_files = None
else:
    board = os.environ['BOARD']
    board_folder = 'boards/{}'.format(board)
    data_files = collect_data_files()
# Notebook delivery
def fill_notebooks():
    """Copy the repo's notebooks into the PYNQ Jupyter home directory.

    Destructive: any existing spyn notebooks directory is deleted first.
    """
    src_nb = os.path.join(GIT_DIR, 'notebooks')
    # fixed notebook location on the PYNQ board image
    dst_nb_dir = '/home/xilinx/jupyter_notebooks/spyn'
    if os.path.exists(dst_nb_dir):
        shutil.rmtree(dst_nb_dir)
    shutil.copytree(src_nb, dst_nb_dir)
    print("Filling notebooks done ...")
# Only copy notebooks onto the board for a real `install` run, not for
# e.g. sdist/egg_info invocations.
if len(sys.argv) > 1 and sys.argv[1] == 'install':
    fill_notebooks()
setup(name='spyn',
      version='1.0',
      description='Motor Control using PYNQ package',
      author='Xilinx ISM + PYNQ',
      author_email='kvt@xilinx.com',
      url='https://github.com/Xilinx/IIoT-SPYN',
      packages=find_packages(),
      download_url='https://github.com/Xilinx/IIoT-SPYN',
      package_data={
          '': ['*.bin', '*.so'],
      },
      data_files=data_files
      )
| 35.372727 | 79 | 0.67926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,191 | 0.563094 |
1d6897c7f3fa33acc1091f86b4c9ad7e47b3a66b | 4,141 | py | Python | eth2/beacon/tools/builder/initializer.py | Jwomers/trinity | 3a09276d2f21adfc5b233272e1b1cc2ad1a0c11e | [
"MIT"
] | null | null | null | eth2/beacon/tools/builder/initializer.py | Jwomers/trinity | 3a09276d2f21adfc5b233272e1b1cc2ad1a0c11e | [
"MIT"
] | null | null | null | eth2/beacon/tools/builder/initializer.py | Jwomers/trinity | 3a09276d2f21adfc5b233272e1b1cc2ad1a0c11e | [
"MIT"
] | null | null | null | from typing import (
Dict,
Sequence,
Tuple,
Type,
)
from eth2.beacon.on_startup import (
get_genesis_block,
get_initial_beacon_state,
)
from eth2.beacon.state_machines.configs import BeaconConfig
from eth2.beacon.types.blocks import (
BaseBeaconBlock,
)
from eth2.beacon.types.deposits import Deposit
from eth2.beacon.types.deposit_data import DepositData
from eth2.beacon.types.deposit_input import DepositInput
from eth2.beacon.types.eth1_data import Eth1Data
from eth2.beacon.types.forks import Fork
from eth2.beacon.types.states import BeaconState
from eth2.beacon.typing import (
BLSPubkey,
Timestamp,
)
from eth2.beacon.tools.builder.validator import (
sign_proof_of_possession,
)
def create_mock_initial_validator_deposits(
        num_validators: int,
        config: BeaconConfig,
        pubkeys: Sequence[BLSPubkey],
        keymap: Dict[BLSPubkey, int]) -> Tuple[Deposit, ...]:
    """Build `num_validators` mock genesis deposits for testing.

    Each deposit is signed with the private key from `keymap` matching the
    corresponding entry of `pubkeys`, using dummy withdrawal credentials,
    randao commitment and Merkle branch.
    """
    # Mock data
    withdrawal_credentials = b'\x22' * 32
    randao_commitment = b'\x33' * 32
    deposit_timestamp = 0
    # genesis-era fork: previous and current versions are identical
    fork = Fork(
        previous_version=config.GENESIS_FORK_VERSION,
        current_version=config.GENESIS_FORK_VERSION,
        epoch=config.GENESIS_EPOCH,
    )
    initial_validator_deposits = tuple(
        Deposit(
            # dummy Merkle branch; not validated by these mocks
            branch=(
                b'\x11' * 32
                for j in range(10)
            ),
            index=i,
            deposit_data=DepositData(
                deposit_input=DepositInput(
                    pubkey=pubkeys[i],
                    withdrawal_credentials=withdrawal_credentials,
                    randao_commitment=randao_commitment,
                    proof_of_possession=sign_proof_of_possession(
                        deposit_input=DepositInput(
                            pubkey=pubkeys[i],
                            withdrawal_credentials=withdrawal_credentials,
                            randao_commitment=randao_commitment,
                        ),
                        privkey=keymap[pubkeys[i]],
                        fork=fork,
                        slot=config.GENESIS_SLOT,
                        epoch_length=config.EPOCH_LENGTH,
                    ),
                ),
                amount=config.MAX_DEPOSIT_AMOUNT,
                timestamp=deposit_timestamp,
            ),
        )
        for i in range(num_validators)
    )
    return initial_validator_deposits
def create_mock_genesis(
        num_validators: int,
        config: BeaconConfig,
        keymap: Dict[BLSPubkey, int],
        genesis_block_class: Type[BaseBeaconBlock],
        genesis_time: Timestamp=0) -> Tuple[BeaconState, BaseBeaconBlock]:
    """Create a mock genesis (state, block) pair for testing.

    Uses the first `num_validators` keys of `keymap` to build mock
    deposits, derives the initial beacon state from them, then builds a
    genesis block committing to that state's root.
    """
    latest_eth1_data = Eth1Data.create_empty_data()
    assert num_validators <= len(keymap)
    # dict iteration order gives the first `num_validators` pubkeys
    pubkeys = list(keymap)[:num_validators]
    initial_validator_deposits = create_mock_initial_validator_deposits(
        num_validators=num_validators,
        config=config,
        pubkeys=pubkeys,
        keymap=keymap,
    )
    state = get_initial_beacon_state(
        initial_validator_deposits=initial_validator_deposits,
        genesis_time=genesis_time,
        latest_eth1_data=latest_eth1_data,
        genesis_slot=config.GENESIS_SLOT,
        genesis_epoch=config.GENESIS_EPOCH,
        genesis_fork_version=config.GENESIS_FORK_VERSION,
        genesis_start_shard=config.GENESIS_START_SHARD,
        shard_count=config.SHARD_COUNT,
        seed_lookahead=config.SEED_LOOKAHEAD,
        latest_block_roots_length=config.LATEST_BLOCK_ROOTS_LENGTH,
        latest_index_roots_length=config.LATEST_INDEX_ROOTS_LENGTH,
        epoch_length=config.EPOCH_LENGTH,
        max_deposit_amount=config.MAX_DEPOSIT_AMOUNT,
        latest_penalized_exit_length=config.LATEST_PENALIZED_EXIT_LENGTH,
        latest_randao_mixes_length=config.LATEST_RANDAO_MIXES_LENGTH,
        entry_exit_delay=config.ENTRY_EXIT_DELAY,
    )
    block = get_genesis_block(
        startup_state_root=state.root,
        genesis_slot=config.GENESIS_SLOT,
        block_class=genesis_block_class,
    )
    assert len(state.validator_registry) == num_validators
    return state, block
| 32.351563 | 74 | 0.658295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.007728 |
1d6909e7697fe6f7ef13f4e0b61221718a698804 | 1,525 | py | Python | settings.py | oogles/django-goodies | bef5f322f848e2bd466cc4955061ead9bed8c6c5 | [
"BSD-3-Clause"
] | 2 | 2020-08-28T00:36:48.000Z | 2021-07-01T07:14:31.000Z | settings.py | oogles/djem | bef5f322f848e2bd466cc4955061ead9bed8c6c5 | [
"BSD-3-Clause"
] | 2 | 2018-03-22T05:46:17.000Z | 2022-02-10T11:41:26.000Z | settings.py | oogles/djem | bef5f322f848e2bd466cc4955061ead9bed8c6c5 | [
"BSD-3-Clause"
] | null | null | null | # Minimal settings file to allow the running of tests, execution of migrations,
# and several other useful management commands.
SECRET_KEY = 'abcde12345' # nosec
# Needs to point to something to allow tests to perform url resolving. The file
# doesn't actually need to contain any urls (but does need to define "urlpatterns").
ROOT_URLCONF = 'djem.tests'
# For TimeZoneHelper/TimeZoneField tests
USE_TZ = True
INSTALLED_APPS = [
'django.contrib.contenttypes', # for django.contrib.auth
'django.contrib.auth', # for tests
'django.contrib.messages', # for tests
'djem',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
}
# For testing template tags
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth'
],
},
}]
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' # suppress system check warning
# Add django-extensions to INSTALLED_APPS if it is present. This provides extra
# dev tools, e.g. shell_plus, but isn't required - e.g. for testing.
try:
import django_extensions # noqa: F401 (import unused)
except ImportError:
pass
else:
INSTALLED_APPS.append('django_extensions')
SHELL_PLUS_POST_IMPORTS = (
('djem.utils.dev', 'Developer'),
('djem.utils.mon', 'Mon'),
('djem.utils.inspect', 'pp'),
)
| 27.727273 | 84 | 0.664918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,043 | 0.683934 |
1d6e65b0e5d6c4ee6ad11a44b07d0b7c7fe3d49f | 360 | py | Python | SimpleSign.py | wanzhiguo/mininero | 7dd71b02a4613478b59b2670ccf7c74a22cc2ffd | [
"BSD-3-Clause"
] | 182 | 2016-02-05T18:33:09.000Z | 2022-03-23T12:31:54.000Z | SimpleSign.py | wanzhiguo/mininero | 7dd71b02a4613478b59b2670ccf7c74a22cc2ffd | [
"BSD-3-Clause"
] | 81 | 2016-09-04T14:00:24.000Z | 2022-03-28T17:22:52.000Z | SimpleSign.py | wanzhiguo/mininero | 7dd71b02a4613478b59b2670ccf7c74a22cc2ffd | [
"BSD-3-Clause"
] | 63 | 2016-02-05T19:38:06.000Z | 2022-03-07T06:07:46.000Z | import MiniNero
import ed25519
import binascii
import PaperWallet
import cherrypy
import os
import time
import bitmonerod
import SimpleXMR2
import SimpleServer
# NOTE(review): the first `message` below is immediately overwritten by the
# second assignment and is effectively dead code.
message = "send0d000114545737471em2WCg9QKxRxbo6S3xKF2K4UDvdu6hMc"
message = "send0d0114545747771em2WCg9QKxRxbo6S3xKF2K4UDvdu6hMc"
# `raw_input` marks this as a Python 2 script: prompt for the secret key
# and print the signature over the message.
sec = raw_input("sec?")
print(SimpleServer.Signature(message, sec))
| 21.176471 | 65 | 0.858333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.316667 |
1d6e919f05dbd8e9f87f5045a93be702a3ed1a8c | 599 | py | Python | lib/logger.py | amkolhar/JamaAutomation | 840d404df0b1483cdbb85e17e1e5059a36c69cc7 | [
"Apache-2.0"
] | null | null | null | lib/logger.py | amkolhar/JamaAutomation | 840d404df0b1483cdbb85e17e1e5059a36c69cc7 | [
"Apache-2.0"
] | null | null | null | lib/logger.py | amkolhar/JamaAutomation | 840d404df0b1483cdbb85e17e1e5059a36c69cc7 | [
"Apache-2.0"
] | null | null | null | # Atharv Kolhar (atharv)
import logging
import warnings
# silence library warnings globally for the automation run
warnings.filterwarnings("ignore")
# shared logger for the JAMA automation library
jamalogger = logging.getLogger("JAMALIB")
jamalogger.setLevel(logging.DEBUG)
# console handler: INFO and above
handle = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                              "%Y-%m-%d %H:%M:%S")
handle.setFormatter(formatter)
handle.setLevel(logging.INFO)
jamalogger.addHandler(handle)
# file handler: full DEBUG detail, written relative to the working directory
log_handle = logging.FileHandler("../JamaAutomation_log.txt")
log_handle.setFormatter(formatter)
log_handle.setLevel(logging.DEBUG)
jamalogger.addHandler(log_handle)
| 27.227273 | 85 | 0.737896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.235392 |
1d6f6b83566cbb11fd4ba3d140094d32e8247565 | 1,851 | py | Python | src/common/download.py | Nut-Guo/adv | b0fdcfbf8acfe59916118e152d3c63d27ea88611 | [
"MIT"
] | 2 | 2021-09-25T05:26:47.000Z | 2022-03-09T13:42:10.000Z | src/common/download.py | Nut-Guo/adv | b0fdcfbf8acfe59916118e152d3c63d27ea88611 | [
"MIT"
] | null | null | null | src/common/download.py | Nut-Guo/adv | b0fdcfbf8acfe59916118e152d3c63d27ea88611 | [
"MIT"
] | null | null | null | import os
import requests
import logging
from tqdm import tqdm as tq
import zipfile
import tarfile
def download_file(url: str, path: str, verbose: bool = False) -> None:
    """Download *url* into *path* with a progress bar, then unpack archives.

    The download is skipped if the file already exists locally. After
    downloading, ``.zip`` and ``.tar.gz`` files are extracted into *path*.

    Args:
        url: source URL; the last path segment becomes the local filename.
        path: destination directory (created if missing).
        verbose: when True, log the expected size and progress-bar count.

    Usage:
        download_file('http://web4host.net/5MB.zip', 'data/')
    """
    os.makedirs(path, exist_ok=True)
    local_filename = os.path.join(path, url.split('/')[-1])
    if not os.path.exists(local_filename):
        r = requests.get(url, stream=True)
        # BUG FIX: fail loudly on HTTP errors instead of silently saving the
        # error page as the "downloaded" file.
        r.raise_for_status()
        file_size = int(r.headers.get('Content-Length', 0))
        chunk_size = 1024
        num_bars = file_size // chunk_size
        if verbose:
            logging.info(f'file size: {file_size}\n# bars: {num_bars}')
        with open(local_filename, 'wb') as fp:
            for chunk in tq(
                    r.iter_content(chunk_size=chunk_size),
                    total=num_bars,
                    unit='KB',
                    desc=local_filename,
                    leave=True  # progressbar stays
            ):
                fp.write(chunk)  # type: ignore
    # BUG FIX: match archive suffixes with endswith() -- the old substring
    # test ('.zip' in name) also matched names like 'data.zip.bak'.
    if local_filename.endswith('.zip'):
        if os.path.exists(local_filename):
            with zipfile.ZipFile(local_filename, 'r') as zip_ref:
                # NOTE(review): extractall on untrusted archives is unsafe
                # (path traversal); only feed trusted URLs to this helper.
                zip_ref.extractall(path)
    elif local_filename.endswith('.tar.gz'):
        if os.path.exists(local_filename):
            with tarfile.open(local_filename, 'r') as tar_ref:
                tar_ref.extractall(path)
def download_data(url: str, path: str = "data/") -> None:
    """Download the file at *url* into *path* (defaults to ``data/``).

    Thin wrapper around download_file; ``.zip`` and ``.tar.gz`` archives
    are extracted automatically after download.

    Args:
        url: remote URL of the file to fetch.
        path: local destination directory.
    """
    download_file(url, path)
def main():
    """Placeholder CLI entry point; currently does nothing."""
    pass
if __name__ == '__main__':
    main()
1d72d39d1134cd19db279903a576512ac8b9b659 | 139 | py | Python | vedasal/criteria/losses/builder.py | Kuro96/vedasal | 3c5588bf12059af5bd7bc779fd5f9dc0b2901cb2 | [
"Apache-2.0"
] | 2 | 2020-11-06T06:39:04.000Z | 2020-11-11T03:39:22.000Z | vedasal/criteria/losses/builder.py | Kuro96/vedasal | 3c5588bf12059af5bd7bc779fd5f9dc0b2901cb2 | [
"Apache-2.0"
] | null | null | null | vedasal/criteria/losses/builder.py | Kuro96/vedasal | 3c5588bf12059af5bd7bc779fd5f9dc0b2901cb2 | [
"Apache-2.0"
] | null | null | null | from vedacore.misc import registry, build_from_cfg
def build_loss(cfg):
    """Instantiate a loss object from its config dict via the 'loss' registry."""
    return build_from_cfg(cfg, registry, 'loss')
| 19.857143 | 50 | 0.741007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.043165 |
1d73ed57b830c4f17e023e29c9340e779af1482f | 6,667 | py | Python | django_rest_scaffold/management/commands/create-model.py | regisec/django-rest-scaffold | 8ba5c5ad3105812d4b0143f4e69bb687ba1ac166 | [
"BSD-3-Clause"
] | null | null | null | django_rest_scaffold/management/commands/create-model.py | regisec/django-rest-scaffold | 8ba5c5ad3105812d4b0143f4e69bb687ba1ac166 | [
"BSD-3-Clause"
] | 4 | 2019-08-05T05:00:45.000Z | 2021-06-10T19:28:56.000Z | django_rest_scaffold/management/commands/create-model.py | regisec/django-rest-scaffold | 8ba5c5ad3105812d4b0143f4e69bb687ba1ac166 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
Created by Régis Eduardo Crestani <regis.crestani@gmail.com> on 19/06/2016.
"""
import os
from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
from django_rest_scaffold.settings import DJANGO_REST_SCAFFOLD_SETTINGS as SETTINGS
class Command(BaseCommand):
    """Scaffold a Django REST resource: model, serializer, views, urls, admin.

    Given a model name and a resource (app) name, appends boilerplate to
    ``models.py``, ``serializers.py``, ``views.py``, ``urls.py`` and
    ``admin.py`` inside the resource directory: a list/create endpoint and a
    retrieve/update/destroy endpoint for the model.  An optional third
    argument selects the file mode ('w' rewrites the files from scratch;
    the default 'a+' appends).
    """

    help = 'Creates a model, serializer and views'

    def add_arguments(self, parser):
        """Register the positional CLI arguments."""
        parser.add_argument('model-name', type=str)
        parser.add_argument('resource-name', type=str)
        # Optional open() mode for the generated files; defaults to append.
        parser.add_argument('file-mode', nargs='?', type=str)

    @staticmethod
    def gen_header() -> list:
        """Return the standard file header lines (encoding + authorship)."""
        today = datetime.now().strftime('%d/%m/%Y')
        return [
            '# -*- coding: UTF-8 -*-\n',
            '"""\n',
            '    Created by Régis Eduardo Crestani <regis.crestani@gmail.com> on %s.\n' % today,
            '"""\n'
        ]

    @staticmethod
    def build_comment(model_name: str, text: str) -> str:
        """Build an uppercase section banner comment for the generated files."""
        line = '# {0}\n'.format('-' * 118)
        return '{0}# {1} {2}\n{0}'.format(line, model_name.upper(), text.upper())

    def handle(self, *args, **options):
        """Generate (or extend) the five scaffold files for the resource."""
        model_name = options.get('model-name')
        resource_name = options.get('resource-name')
        file_mode = options.get('file-mode', None) or 'a+'
        # CamelCase -> dash-separated slug, e.g. "MyModel" -> "my-model".
        model_name_slug = ''.join([v if p == 0 or v.islower() else '-' + v for p, v in enumerate(model_name)]).lower()
        serializer_name = '%sSerializer' % model_name
        view_resource_name = '%sResourceAPIView' % model_name
        view_detail_name = '%sDetailAPIView' % model_name
        resource_path = os.path.join(SETTINGS['APPS_FOLDER'], resource_name)
        apps_folder_name = SETTINGS['APPS_FOLDER_NAME']
        apps_folder_package = '' if apps_folder_name is None else '{0}.'.format(apps_folder_name)
        if not os.path.exists(resource_path):
            # Bugfix: the exception was previously constructed but never
            # raised, so a missing resource dir crashed later on open().
            raise CommandError('Resource %s not found.' % resource_name)
        # CREATE THE MODELS FILE
        models_path = os.path.join(resource_path, 'models.py')
        if not os.path.exists(models_path) or 'w' in file_mode:
            models_lines = Command.gen_header()
            models_lines.append('from django.db import models\n')
        else:
            models_lines = []
        models_lines += [
            '\n\n',
            'class {0}(models.Model):\n'.format(model_name),
            '    pass\n'
        ]
        with open(models_path, file_mode, encoding='utf-8') as models_file:
            models_file.writelines(models_lines)
        # CREATE THE SERIALIZERS FILE
        serializers_path = os.path.join(resource_path, 'serializers.py')
        if not os.path.exists(serializers_path) or 'w' in file_mode:
            serializers_lines = Command.gen_header()
            serializers_lines.append('from rest_framework import serializers')
        else:
            serializers_lines = []
        serializers_lines += [
            '\n\n',
            Command.build_comment(model_name_slug, 'serializers'),
            'from {0}{1}.models import {2}\n'.format(apps_folder_package, resource_name, model_name),
            '\n\n',
            'class {0}(serializers.ModelSerializer):\n'.format(serializer_name),
            '    class Meta:\n',
            '        model = {0}\n'.format(model_name)
        ]
        with open(serializers_path, file_mode, encoding='utf-8') as serializers_file:
            serializers_file.writelines(serializers_lines)
        # CREATE THE VIEWS FILE
        views_path = os.path.join(resource_path, 'views.py')
        if not os.path.exists(views_path) or 'w' in file_mode:
            views_lines = Command.gen_header()
            views_lines.append('from rest_framework import generics')
        else:
            views_lines = []
        views_lines += [
            '\n\n',
            Command.build_comment(model_name_slug, 'views'),
            'from {0}{1}.models import {2}\n'.format(apps_folder_package, resource_name, model_name),
            'from {0}{1}.serializers import {2}\n'.format(apps_folder_package, resource_name, serializer_name),
            '\n\n',
            'class {0}(generics.ListCreateAPIView):\n'.format(view_resource_name),
            '    serializer_class = {0}\n'.format(serializer_name),
            '    queryset = {0}.objects\n'.format(model_name),
            '\n\n',
            'class {0}(generics.RetrieveUpdateDestroyAPIView):\n'.format(view_detail_name),
            '    serializer_class = {0}\n'.format(serializer_name),
            '    queryset = {0}.objects\n'.format(model_name),
        ]
        with open(views_path, file_mode, encoding='utf-8') as views_file:
            views_file.writelines(views_lines)
        # CREATE THE URLS FILE
        urls_path = os.path.join(resource_path, 'urls.py')
        if not os.path.exists(urls_path) or 'w' in file_mode:
            urls_lines = Command.gen_header()
            urls_lines += [
                'from django.conf.urls import url\n',
                '\n',
                'urlpatterns = []\n'
            ]
        else:
            urls_lines = []
        urls_lines += [
            '\n',
            Command.build_comment(model_name_slug, 'endpoints'),
            'from {0}{1}.views import {2}, {3}\n'.format(apps_folder_package, resource_name, view_resource_name,
                                                         view_detail_name),
            '\n',
            'urlpatterns += [\n',
            "    url(r'^{0}s/$', {1}.as_view(), name='{0}-resource'),\n".format(model_name_slug, view_resource_name),
            # \\d keeps the emitted text identical while avoiding the invalid
            # "\d" escape sequence (a SyntaxWarning on modern Python).
            "    url(r'^{0}s/(?P<pk>\\d+)[/]?$', {1}.as_view(), name='{0}-detail')\n".format(model_name_slug,
                                                                                             view_detail_name),
            ']\n'
        ]
        with open(urls_path, file_mode, encoding='utf-8') as urls_file:
            urls_file.writelines(urls_lines)
        # CREATE THE ADMIN FILE
        admin_path = os.path.join(resource_path, 'admin.py')
        if not os.path.exists(admin_path) or 'w' in file_mode:
            admin_lines = Command.gen_header()
            admin_lines.append('from django.contrib import admin')
        else:
            admin_lines = []
        admin_lines += [
            '\n\n',
            Command.build_comment(model_name_slug, 'admin register'),
            'from {0}{1}.models import {2}\n'.format(apps_folder_package, resource_name, model_name),
            '\n',
            'admin.site.register({0})\n'.format(model_name),
        ]
        with open(admin_path, file_mode, encoding='utf-8') as admin_file:
            admin_file.writelines(admin_lines)
| 44.152318 | 118 | 0.578071 | 6,362 | 0.953966 | 0 | 0 | 501 | 0.075124 | 0 | 0 | 1,742 | 0.261209 |
1d740317cdf78e3f80e81cac80068c2f4804af40 | 3,066 | py | Python | social_api/urls.py | muhfajar/social_api | 05ca23b215db5cb4f9060134b464b643c49beb0f | [
"MIT"
] | null | null | null | social_api/urls.py | muhfajar/social_api | 05ca23b215db5cb4f9060134b464b643c49beb0f | [
"MIT"
] | null | null | null | social_api/urls.py | muhfajar/social_api | 05ca23b215db5cb4f9060134b464b643c49beb0f | [
"MIT"
] | null | null | null | """social_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import views as auth_views
from modules.event.views import (
core, venue, event,
event_type, ticket,
)
urlpatterns = [
url(r'^$', core.home, name='home'),
url(r'^venue/$', venue.venue, name='venue'),
url(r'^venue/(?P<venue_id>[0-9]+)/$', venue.venue_update, name='venue-update'),
url(r'^venue/(?P<venue_id>[0-9]+)/delete/$', venue.venue_delete, name='venue-delete'),
url(r'^venue/list/$', venue.venue_list, name='venue-list'),
url(r'^event-type/$', event_type.event_type, name='event-type'),
url(r'^event-type/(?P<event_type_id>[0-9]+)/$', event_type.event_type_update, name='event-type-update'),
url(r'^event-type/(?P<event_type_id>[0-9]+)/delete/$', event_type.event_type_delete, name='event-type-delete'),
url(r'^event-type/list/$', event_type.event_type_list, name='event-type-list'),
url(r'^event/$', event.event, name='event'),
url(r'^event/get/(?P<event_id>[0-9]+)/$', event.event_detail, name='event-detail'),
url(r'^event/(?P<event_id>[0-9]+)/$', event.event_update, name='event-update'),
url(r'^event/(?P<event_id>[0-9]+)/delete/$', event.event_delete, name='event-delete'),
url(r'^event/list/$', event.event_list, name='event-list'),
url(r'^event/category/(?P<event_type>\w+)/$', event.category, name='event-category'),
url(r'^event/tweet/(?P<event_id>\w+)/$', event.tweet, name='event-tweet'),
url(r'^ticket/(?P<event_id>[0-9]+)/$', ticket.ticket, name='ticket'),
url(r'^ticket/(?P<event_id>[0-9]+)/(?P<ticket_id>[0-9]+)/$', ticket.ticket_update, name='ticket-update'),
url(r'^ticket/(?P<ticket_id>[0-9]+)/delete/$', ticket.ticket_delete, name='ticket-delete'),
url(r'^ticket/(?P<event_id>[0-9]+)/list/$', ticket.ticket_list, name='ticket-list'),
url(r'^login/$', auth_views.login, name='login'),
url(r'^logout/$', auth_views.logout, name='logout'),
url(r'^signup/$', core.signup, name='signup'),
url(r'^settings/$', core.settings, name='settings'),
url(r'^settings/password/$', core.password, name='password'),
url(r'^oauth/', include('social_django.urls', namespace='social')),
url(r'^admin/', admin.site.urls),
]
# Change admin site title
admin.site.site_header = _("Event Management")
admin.site.site_title = _("Admin Panel")
| 47.90625 | 115 | 0.665688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,730 | 0.564253 |
1d7424d7e6f29bb89685d0551b4a9bc7832c6b24 | 5,186 | py | Python | src/meta_memcache/base/connection_pool.py | RevenueCat/meta-memcache-py | 755ecfc4cfbb029d3435590c5f9aa8a8aa2a36e3 | [
"MIT"
] | null | null | null | src/meta_memcache/base/connection_pool.py | RevenueCat/meta-memcache-py | 755ecfc4cfbb029d3435590c5f9aa8a8aa2a36e3 | [
"MIT"
] | 4 | 2022-01-10T23:06:43.000Z | 2022-03-08T22:49:39.000Z | src/meta_memcache/base/connection_pool.py | RevenueCat/meta-memcache-py | 755ecfc4cfbb029d3435590c5f9aa8a8aa2a36e3 | [
"MIT"
] | null | null | null | import itertools
import logging
import socket
import time
from collections import deque
from contextlib import contextmanager
from typing import Callable, Deque, Generator, NamedTuple, Optional
from meta_memcache.base.memcache_socket import MemcacheSocket
from meta_memcache.errors import MemcacheServerError
from meta_memcache.protocol import ServerVersion
from meta_memcache.settings import DEFAULT_MARK_DOWN_PERIOD_S, DEFAULT_READ_BUFFER_SIZE
_log: logging.Logger = logging.getLogger(__name__)
class PoolCounters(NamedTuple):
    """Point-in-time snapshot of a ConnectionPool's connection counters."""
    # Available connections in the pool, ready to use
    available: int
    # The # of connections active, currently in use, out of the pool
    active: int
    # Currently established connections (available + active).
    # NOTE: the field keeps the historical "stablished" spelling because it
    # is part of the public API.
    stablished: int
    # Total # of connections created. If this keeps growing it
    # might mean the pool size is too small and we are
    # constantly needing to create new connections:
    total_created: int
    # Total # of connection or socket errors
    total_errors: int
class ConnectionPool:
    """Pool of reusable MemcacheSocket connections to a single server.

    Connections are borrowed with ``get_connection()``; on clean exit they
    go back into the pool (subject to a soft ``max_pool_size`` limit), and
    on error they are discarded.  When connecting fails, the server is
    marked down for ``mark_down_period_s`` and further attempts raise
    ``MemcacheServerError`` immediately until the period elapses.
    """

    def __init__(
        self,
        server: str,
        socket_factory_fn: Callable[[], socket.socket],
        initial_pool_size: int,
        max_pool_size: int,
        mark_down_period_s: float = DEFAULT_MARK_DOWN_PERIOD_S,
        read_buffer_size: int = DEFAULT_READ_BUFFER_SIZE,
        version: ServerVersion = ServerVersion.STABLE,
    ) -> None:
        self.server = server
        self._socket_factory_fn = socket_factory_fn
        self._initial_pool_size: int = min(initial_pool_size, max_pool_size)
        self._max_pool_size = max_pool_size
        self._mark_down_period_s = mark_down_period_s
        # Monotonic counters: next() yields the running total, which is
        # cached in the plain attribute for cheap reads in get_counters().
        self._created_counter: itertools.count[int] = itertools.count(start=1)
        self._created = 0
        self._errors_counter: itertools.count[int] = itertools.count(start=1)
        self._errors = 0
        self._destroyed_counter: itertools.count[int] = itertools.count(start=1)
        self._destroyed = 0
        self._marked_down_until: Optional[float] = None
        # We don't use maxlen because deque will evict the first element when
        # appending one over maxlen, and we won't be closing the connection
        # proactively, relying on GC instead. We use a soft max limit, after
        # all is not that critical to respect the number of connections
        # exactly.
        self._pool: Deque[MemcacheSocket] = deque()
        self._read_buffer_size = read_buffer_size
        self._version = version
        # Pre-warm the pool; if the server is already down the pool simply
        # starts empty instead of failing construction.
        for _ in range(self._initial_pool_size):
            try:
                self._pool.append(self._create_connection())
            except MemcacheServerError:
                pass

    def get_counters(self) -> PoolCounters:
        """Return a snapshot of the pool's connection counters."""
        available = len(self._pool)
        total_created, total_destroyed = self._created, self._destroyed
        stablished = total_created - total_destroyed
        # Bugfix: "active" means established connections currently checked
        # out of the pool, i.e. stablished - available.  The previous
        # ``available - stablished`` produced the negated value.
        active = stablished - available
        return PoolCounters(
            available=available,
            active=active,
            stablished=stablished,
            total_created=total_created,
            total_errors=self._errors,
        )

    def _create_connection(self) -> MemcacheSocket:
        """Open a new connection, honoring the mark-down period.

        Raises:
            MemcacheServerError: if the server is currently marked down, or
                if the socket factory fails (which also marks it down).
        """
        if marked_down_until := self._marked_down_until:
            if time.time() < marked_down_until:
                raise MemcacheServerError(
                    self.server, f"Server marked down: {self.server}"
                )
            # Mark-down period expired: clear the flag and try again.
            self._marked_down_until = None
        try:
            conn = self._socket_factory_fn()
        except Exception as e:
            _log.exception("Error connecting to memcache")
            self._errors = next(self._errors_counter)
            self._marked_down_until = time.time() + self._mark_down_period_s
            raise MemcacheServerError(
                self.server, f"Server marked down: {self.server}"
            ) from e
        self._created = next(self._created_counter)
        return MemcacheSocket(conn, self._read_buffer_size, version=self._version)

    def _discard_connection(self, conn: MemcacheSocket, error: bool = False) -> None:
        """Close and drop a connection, updating destroy/error counters."""
        try:
            conn.close()
        except Exception:  # noqa: S110
            # Best-effort close; the connection is being thrown away anyway.
            pass
        if error:
            self._errors = next(self._errors_counter)
        self._destroyed = next(self._destroyed_counter)

    @contextmanager
    def get_connection(self) -> Generator[MemcacheSocket, None, None]:
        """Borrow a connection from the pool, creating one if it is empty."""
        try:
            conn = self._pool.popleft()
        except IndexError:
            conn = self._create_connection()
        try:
            yield conn
        except Exception:
            # Errors, assume connection is in bad state
            _log.exception("Error during cache conn context (discarding connection)")
            self._discard_connection(conn, error=True)
            raise
        else:
            if len(self._pool) < self._max_pool_size:
                # If there is a race, the deque might end with more than
                # self._max_pool_size but it is not a big deal, we want this
                # to be a soft limit.
                self._pool.append(conn)
            else:
                self._discard_connection(conn)
| 38.414815 | 87 | 0.652912 | 4,681 | 0.902622 | 831 | 0.160239 | 851 | 0.164096 | 0 | 0 | 987 | 0.19032 |
1d7bf9fe9ade355b6e87b40c8b814174167a1aed | 1,599 | py | Python | src/derl/tracker.py | tpiekarski/derl | b2687f8f02870b2a29bc7466195d4ed45f192cbf | [
"MIT"
] | 10 | 2020-06-17T12:03:28.000Z | 2021-09-07T04:03:34.000Z | src/derl/tracker.py | tpiekarski/derl | b2687f8f02870b2a29bc7466195d4ed45f192cbf | [
"MIT"
] | 42 | 2020-06-17T12:27:26.000Z | 2021-09-05T10:51:43.000Z | src/derl/tracker.py | tpiekarski/derl | b2687f8f02870b2a29bc7466195d4ed45f192cbf | [
"MIT"
] | 1 | 2020-06-17T12:03:30.000Z | 2020-06-17T12:03:30.000Z | #
# derl: CLI Utility for searching for dead URLs <https://github.com/tpiekarski/derl>
# ---
# Copyright 2020 Thomas Piekarski <t.piekarski@deloquencia.de>
#
from time import perf_counter
from derl.model.stats import Stats
class Singleton(type):
    """Metaclass caching exactly one instance per class.

    The first instantiation of a class using this metaclass constructs the
    object; every subsequent call returns the same cached instance (and
    ``__init__`` is not re-run).
    """

    _instances = {}

    def __call__(cls: "Singleton", *args: tuple, **kwargs: dict) -> "Tracker":
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class Tracker(metaclass=Singleton):
    """Singleton tracking the run time and statistics of a URL check.

    ``start_time``/``stop_time`` hold ``perf_counter`` readings (``None``
    until recorded), ``stats`` aggregates the check results, and ``test``
    switches the tracker into test mode where no duration is measured.
    """

    # Class-level defaults; the methods shadow them with instance
    # attributes on first use.
    start_time = None
    stop_time = None
    stats = Stats()  # NOTE(review): class-level Stats shared until reset() replaces it
    test = False

    def start(self: "Tracker"):
        """Record the start timestamp once; later calls are no-ops."""
        if self.start_time is None:
            self.start_time = perf_counter()

    def stop(self: "Tracker"):
        """Record the stop timestamp once; later calls are no-ops."""
        if self.stop_time is None:
            self.stop_time = perf_counter()

    def calc_time(self: "Tracker") -> float:
        """Return the rounded run duration in seconds (-1 in test mode)."""
        if self.test:
            return -1
        return round(self.stop_time - self.start_time)

    def reset(self: "Tracker"):
        """Clear timestamps and statistics so the tracker can be reused.

        Bugfix: timestamps are reset to ``None`` (not ``0``) so that
        ``start()``/``stop()`` -- which only record while the value is
        ``None`` -- can arm the timer again after a reset.
        """
        self.start_time = None
        self.stop_time = None
        self.stats = Stats()

    def set_test(self: "Tracker"):
        """Enable test mode: calc_time() then returns -1."""
        self.test = True

    def __str__(self: "Tracker") -> str:
        output = ""
        # The duration line is shown only once both timestamps exist.
        if self.start_time is not None and self.stop_time is not None:
            output += "\nFinished checking URLs after {0:.2f} second(s).\n".format(self.calc_time())
        output += self.stats.__str__()
        return output

    def __repr__(self: "Tracker") -> str:
        return self.__str__()
def get_tracker() -> "Tracker":
    # Tracker uses the Singleton metaclass, so this always returns the
    # same shared instance.
    return Tracker()
1d808fc6752a54152448f2c150ce9d43facfe6b6 | 1,813 | py | Python | class/lect/Lect-06/shuffle.py | MikenzieAlasca/F21-1010 | a7c15b8d9bf84f316aa6921f6d8a588c513a22b8 | [
"MIT"
] | 5 | 2021-09-09T21:08:14.000Z | 2021-12-14T02:30:52.000Z | class/lect/Lect-06/shuffle.py | MikenzieAlasca/F21-1010 | a7c15b8d9bf84f316aa6921f6d8a588c513a22b8 | [
"MIT"
] | null | null | null | class/lect/Lect-06/shuffle.py | MikenzieAlasca/F21-1010 | a7c15b8d9bf84f316aa6921f6d8a588c513a22b8 | [
"MIT"
] | 8 | 2021-09-09T17:46:07.000Z | 2022-02-08T22:41:35.000Z | import random
import copy
rr = random.Random ( 22 )  # fixed seed so the shuffled cipher key is reproducible across runs
def readNameList(fn):
    """Read file *fn* and return its lines with trailing whitespace stripped.

    Raises the usual OSError if the file cannot be opened (the previous
    ``f == None`` check was dead code: ``open`` raises instead of
    returning ``None``).  The context manager guarantees the handle is
    closed even if reading fails.
    """
    with open(fn, "r") as f:
        return [line.rstrip() for line in f]
# The plaintext alphabet the substitution cipher operates on.
letters = list("abcdefghijklmnopqrstuvwxyz")
def readKey():
    """Build a random substitution key mapping shuffled letter -> plain letter."""
    shuffled = list(letters)
    rr.shuffle(shuffled)
    print ( f"match={shuffled}" )
    return dict(zip(shuffled, letters))
def encrypt(ifn,ofn):
    """Encrypt file *ifn* line by line with the global `key` map into *ofn*."""
    encrypted_lines = []
    for raw_line in readNameList(ifn):
        lowered = raw_line.lower()
        # Substitute known letters; anything not in the key passes through.
        encrypted_lines.append("".join(key.get(ch, ch) for ch in lowered))
    with open(ofn, "w") as out_file:
        for enc_line in encrypted_lines:
            print ( f"{enc_line}", file=out_file )
def decrypt(ifn,ofn):
    """Decrypt file *ifn* by inverting the global `key` map into *ofn*."""
    # Invert the substitution: encrypted letter -> original letter.
    revkey = {enc: plain for plain, enc in key.items()}
    print ( f"revkey = {revkey}" )
    decrypted_lines = []
    for raw_line in readNameList(ifn):
        lowered = raw_line.lower()
        decrypted_lines.append("".join(revkey.get(ch, ch) for ch in lowered))
    with open(ofn, "w") as out_file:
        for dec_line in decrypted_lines:
            print ( f"{dec_line}", file=out_file )
# Demo driver: build the substitution key once, then round-trip test2.txt
# through the cipher (test2.enc = encrypted copy, test2.orig = decrypted).
key = readKey()
print ( f"key = {key}" )
encrypt("test2.txt", "test2.enc")
decrypt("test2.enc", "test2.orig")
1d8175f2740077e4e182203daab2cb883fd6dc00 | 3,641 | py | Python | step_impl/http.py | WorldHealthOrganization/ddcc-gateway-api-tests | 914e81243e162c3e8375786df2f21f8e1008fc0d | [
"Apache-2.0"
] | null | null | null | step_impl/http.py | WorldHealthOrganization/ddcc-gateway-api-tests | 914e81243e162c3e8375786df2f21f8e1008fc0d | [
"Apache-2.0"
] | null | null | null | step_impl/http.py | WorldHealthOrganization/ddcc-gateway-api-tests | 914e81243e162c3e8375786df2f21f8e1008fc0d | [
"Apache-2.0"
] | 2 | 2022-02-04T11:10:27.000Z | 2022-02-17T09:06:29.000Z | # ---license-start
# eu-digital-green-certificates / dgc-api-tests
# ---
# Copyright (C) 2021 T-Systems International GmbH and all other contributors
# ---
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---license-end
from getgauge.python import data_store, step
from requests import Response
@step("check that the response had no error")
def check_that_the_response_had_no_error():
response: Response = data_store.scenario["response"]
assert response.ok, f"Response Code had an error but it shoudn't. Status Code {response.status_code} {response.text[:1024]}"
@step("check that the response had an error")
def check_that_the_response_had_an_error():
response: Response = data_store.scenario["response"]
assert not response.ok, f"Response Code had no error but it should. Status Code {response.status_code}"
@step("check that the response had the status code <int>")
def check_that_the_response_had_the_status_code(expected):
response = data_store.scenario["response"]
status_code = response.status_code
assert status_code == int(
expected), f"response status code was {status_code} but expected {expected}"
@step("check that the response had the status code <int> or None")
def check_that_the_response_had_the_status_code(expected):
response = data_store.scenario["response"]
status_code = response.status_code
assert status_code is None or status_code == int(
expected), f"response status code was {status_code} but expected {expected} or None"
@step("check that the response is not empty")
def check_that_the_response_is_not_empty():
response: Response = data_store.scenario["response"]
assert response.text != "", "response was empty"
@step("check that all responses had no error")
def check_that_all_repsonses_had_no_error():
responses: List[Response] = data_store.scenario["responses"]
for response in responses:
assert response.ok, "Response Code had an error but it shoudn't"
@step("check that all repsonses had an error")
def check_that_all_repsonses_had_an_error():
responses: List[Response] = data_store.scenario["responses"]
for response in responses:
assert not response.ok, "Response Code had no error but it should"
@step("check that all responses had the status code <200>")
def check_that_all_responses_had_the_status_code(expected):
responses: List[Response] = data_store.scenario["responses"]
for response in responses:
assert int(expected) == response.status_code, f"response code should be {expected} but it was {response.status_code}"
@step("check that all responses had the status code <200> or None")
def check_that_all_responses_had_the_status_code(expected):
responses: List[Response] = data_store.scenario["responses"]
for response in responses:
assert response.status_code is None or int(expected) == response.status_code
@step("check that all responses are not empty")
def check_that_all_responses_are_not_empty():
responses: List[Response] = data_store.scenario["responses"]
for response in responses:
assert response.text != "", "response was empty"
| 41.375 | 128 | 0.753364 | 0 | 0 | 0 | 0 | 2,822 | 0.775062 | 0 | 0 | 1,779 | 0.488602 |
1d83766ae57b1b09e9fc0fe2cbd36a3f3b83c731 | 4,503 | py | Python | celligner2/surgery/trvae.py | broadinstitute/celligner2 | ddddb3f4c13c66277bc93b08ebe65c1cc0bd941c | [
"Unlicense"
] | null | null | null | celligner2/surgery/trvae.py | broadinstitute/celligner2 | ddddb3f4c13c66277bc93b08ebe65c1cc0bd941c | [
"Unlicense"
] | null | null | null | celligner2/surgery/trvae.py | broadinstitute/celligner2 | ddddb3f4c13c66277bc93b08ebe65c1cc0bd941c | [
"Unlicense"
] | null | null | null | import numpy as np
import torch
import anndata
from celligner2.othermodels.trvae.trvae import trVAE
from celligner2.trainers.trvae.unsupervised import trVAETrainer
def trvae_operate(
    network: trVAE,
    data: anndata,
    condition_key: str = None,
    size_factor_key: str = None,
    n_epochs: int = 20,
    freeze: bool = True,
    freeze_expression: bool = True,
    remove_dropout: bool = True,
) -> [trVAE, trVAETrainer]:
    """Transfer Learning function for new data. Uses old trained Network and expands it for new conditions.

    NOTE(review): this function mutates the *old* ``network`` in place: it
    appends the query data's new conditions to ``network.conditions`` and
    widens the old network's first-layer condition weights before copying
    the state dict into the new network -- confirm this side effect is
    intended by callers.

    Parameters
    ----------
    network: trVAE
        A scNet model object.
    data: Anndata
        Query anndata object.
    condition_key: String
        Key where the conditions in the data can be found.
    size_factor_key: String
        Key where the size_factors in the data can be found.
    n_epochs: Integer
        Number of epochs for training the network on query data.
    freeze: Boolean
        If 'True' freezes every part of the network except the first layers of encoder/decoder.
    freeze_expression: Boolean
        If 'True' freeze every weight in first layers except the condition weights.
    remove_dropout: Boolean
        If 'True' remove Dropout for Transfer Learning.
    Returns
    -------
    new_network: trVAE
        Newly network that got trained on query data.
    new_trainer: trVAETrainer
        Trainer for the newly network.
    """
    # NOTE: alias, not a copy -- appends below also affect network.conditions.
    conditions = network.conditions
    new_conditions = []
    data_conditions = data.obs[condition_key].unique().tolist()
    # Check if new conditions are already known
    for item in data_conditions:
        if item not in conditions:
            new_conditions.append(item)
    n_new_conditions = len(new_conditions)
    # Add new conditions to overall conditions
    for condition in new_conditions:
        conditions.append(condition)
    # Update DR Rate
    new_dr = network.dr_rate
    if remove_dropout:
        new_dr = 0.0
    print("Surgery to get new Network...")
    # Fresh network with the same architecture but the extended condition set.
    new_network = trVAE(
        network.input_dim,
        conditions=conditions,
        hidden_layer_sizes=network.hidden_layer_sizes,
        latent_dim=network.latent_dim,
        dr_rate=new_dr,
        use_mmd=network.use_mmd,
        mmd_boundary=network.mmd_boundary,
        recon_loss=network.recon_loss,
    )
    # Expand First Layer weights of encoder/decoder of old network by new conditions
    # The appended columns are randomly initialized with a Glorot-like scale
    # (presumably 2 / (fan_out + 1 + fan_in) -- confirm against the trVAE
    # initialization convention).
    encoder_input_weights = network.encoder.FC.L0.cond_L.weight
    to_be_added_encoder_input_weights = np.random.randn(encoder_input_weights.size()[0], n_new_conditions) * np.sqrt(
        2 / (encoder_input_weights.size()[0] + 1 + encoder_input_weights.size()[1]))
    to_be_added_encoder_input_weights = torch.from_numpy(to_be_added_encoder_input_weights).float().to(network.device)
    network.encoder.FC.L0.cond_L.weight.data = torch.cat((encoder_input_weights,
                                                          to_be_added_encoder_input_weights), 1)
    decoder_input_weights = network.decoder.FirstL.L0.cond_L.weight
    to_be_added_decoder_input_weights = np.random.randn(decoder_input_weights.size()[0], n_new_conditions) * np.sqrt(
        2 / (decoder_input_weights.size()[0] + 1 + decoder_input_weights.size()[1]))
    to_be_added_decoder_input_weights = torch.from_numpy(to_be_added_decoder_input_weights).float().to(network.device)
    network.decoder.FirstL.L0.cond_L.weight.data = torch.cat((decoder_input_weights,
                                                              to_be_added_decoder_input_weights), 1)
    # Set the weights of new network to old network weights
    # (shapes now match because the old network was widened above).
    new_network.load_state_dict(network.state_dict())
    # Freeze parts of the network
    if freeze:
        new_network.freeze = True
        for name, p in new_network.named_parameters():
            p.requires_grad = False
            # Re-enable gradients only for the condition weights, or -- when
            # freeze_expression is False -- for the whole first layers
            # (parameters whose names contain "L0"/"B0").
            if freeze_expression:
                if 'cond_L.weight' in name:
                    p.requires_grad = True
            else:
                if "L0" in name or "B0" in name:
                    p.requires_grad = True
    new_trainer = trVAETrainer(
        new_network,
        data,
        condition_key=condition_key,
        size_factor_key=size_factor_key,
        batch_size=1024,
        n_samples=4096
    )
    new_trainer.train(
        n_epochs=n_epochs,
        lr=0.001
    )
    return new_network, new_trainer
| 37.525 | 118 | 0.653342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,385 | 0.307573 |
1d8538ea9563fe089e63d6ca2260f4ed77161708 | 2,519 | py | Python | helpers/connections.py | cheak1974/remi-app-template | 6c724dda0294b41b906134b9062eadce0f6c0617 | [
"Apache-2.0"
] | 12 | 2020-02-24T08:44:29.000Z | 2022-02-21T07:18:31.000Z | helpers/connections.py | cheak1974/remi-app-template | 6c724dda0294b41b906134b9062eadce0f6c0617 | [
"Apache-2.0"
] | null | null | null | helpers/connections.py | cheak1974/remi-app-template | 6c724dda0294b41b906134b9062eadce0f6c0617 | [
"Apache-2.0"
] | 3 | 2020-05-02T15:47:09.000Z | 2021-06-12T23:56:52.000Z | import datetime
import remi
import core.globals
connected_clients = {} # Dict with key=session id of App Instance and value=ws_client.client_address of App Instance
connected_clients['number'] = 0 # Special Dict Field for amount of active connections
client_route_url_to_view = {} # Dict to store URL extensions related to session. This is used to switch a view based on url
def handle_connections(AppInst=None):
# Take care of the connection. It is only alive if the websocket still is active.
# Check, if there is a new websocket connection for this App session (= Instance)
if AppInst.connection_established == False and len(AppInst.websockets) == 1:
for session_id, app_inst in remi.server.clients.items():
if session_id == AppInst.session:
for ws_client in app_inst.websockets:
AppInst.logger.info(f'New Session with ID <{AppInst.session}> from host {ws_client.client_address}') # Host Information for direct connection
connected_clients[AppInst.session] = ws_client.client_address
AppInst.logger.info(f'Session <{AppInst.session}> host headers: {ws_client.headers}')
connected_clients['number'] = connected_clients['number'] + 1
AppInst.logger.info(f'Connected clients ({connected_clients["number"]} in total): {connected_clients}')
AppInst.connect_time = datetime.datetime.now()
AppInst.connection_established = True # Set Flag. This can be used by other threads as end signal.
# Check, if the websocket connection is still alive. REMI removes the Websocket from the List if dead.
if len(remi.server.clients[AppInst.session].websockets) == 0 and AppInst.connection_established == True:
AppInst.disconnect_time = datetime.datetime.now() # Store the disconnect time
connection_duration = f'{(AppInst.disconnect_time - AppInst.connect_time).seconds} sec'
AppInst.logger.info(f'Session <{AppInst.session}> from host {connected_clients[AppInst.session]} has disconnected. Connection duration: {connection_duration}')
AppInst.connection_established = False # Set Flag. This can be used by other threads as end signal.
del connected_clients[AppInst.session]
connected_clients['number'] = connected_clients['number'] - 1
AppInst.logger.info(f'Still connected clients: {connected_clients}')
| 66.289474 | 167 | 0.691147 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,205 | 0.478364 |
1d8628fbeb788aef2e5985787b2abb68a9b0d934 | 2,111 | py | Python | tests/api/endpoints/admin/test_two_factor_auth.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | [
"Apache-2.0"
] | 420 | 2015-01-03T11:34:46.000Z | 2022-03-10T07:15:41.000Z | tests/api/endpoints/admin/test_two_factor_auth.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | [
"Apache-2.0"
] | 735 | 2015-01-04T21:22:51.000Z | 2022-03-31T09:26:07.000Z | tests/api/endpoints/admin/test_two_factor_auth.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | [
"Apache-2.0"
] | 379 | 2015-01-05T17:08:03.000Z | 2022-03-06T00:11:50.000Z | import os
import pytest
from django.urls import reverse
from seahub.options.models import (UserOptions, KEY_FORCE_2FA, VAL_FORCE_2FA)
from seahub.test_utils import BaseTestCase
from seahub.two_factor.models import TOTPDevice, devices_for_user
TRAVIS = 'TRAVIS' in os.environ
@pytest.mark.skipif(TRAVIS, reason="")
class TwoFactorAuthViewTest(BaseTestCase):
def setUp(self):
self.login_as(self.admin)
def test_can_disable_two_factor_auth(self):
totp = TOTPDevice(user=self.admin, name="", confirmed=1)
totp.save()
devices = devices_for_user(self.admin)
i = 0
for device in devices_for_user(self.admin):
if device:
i+=1
assert i > 0
resp = self.client.delete(reverse('two-factor-auth-view', args=[str(self.admin.username)]))
assert resp.status_code == 200
i = 0
for device in devices_for_user(self.admin):
if device:
i+=1
assert i == 0
def tearDown(self):
try:
for device in devices_for_user(self.admin):
device.delete()
except:
pass
def test_force_2fa(self):
assert len(UserOptions.objects.filter(email=self.user.email,
option_key=KEY_FORCE_2FA)) == 0
resp = self.client.put(
reverse('two-factor-auth-view', args=[self.user.username]),
'force_2fa=1',
'application/x-www-form-urlencoded',
)
self.assertEqual(200, resp.status_code)
assert len(UserOptions.objects.filter(email=self.user.email,
option_key=KEY_FORCE_2FA)) == 1
resp = self.client.put(
reverse('two-factor-auth-view', args=[self.user.username]),
'force_2fa=0',
'application/x-www-form-urlencoded',
)
self.assertEqual(200, resp.status_code)
assert len(UserOptions.objects.filter(email=self.user.email,
option_key=KEY_FORCE_2FA)) == 0
| 32.476923 | 99 | 0.58882 | 1,792 | 0.848887 | 0 | 0 | 1,831 | 0.867361 | 0 | 0 | 174 | 0.082425 |
1d873a9042df6f711d83b72908d095913ffcd921 | 1,495 | py | Python | saucenao/http.py | DaRealFreak/saucenao | 28a1abc4fba6b7efdda54b140ba0c164e0b9c11a | [
"MIT"
] | 30 | 2018-04-04T19:58:39.000Z | 2021-11-24T16:25:51.000Z | saucenao/http.py | DaRealFreak/saucenao | 28a1abc4fba6b7efdda54b140ba0c164e0b9c11a | [
"MIT"
] | 6 | 2019-04-02T04:53:47.000Z | 2020-09-25T09:50:15.000Z | saucenao/http.py | DaRealFreak/saucenao | 28a1abc4fba6b7efdda54b140ba0c164e0b9c11a | [
"MIT"
] | 4 | 2018-04-04T05:07:20.000Z | 2020-03-10T14:19:03.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
from saucenao.exceptions import *
PREVIOUS_STATUS_CODE = None  # NOTE(review): appears unused in this excerpt -- confirm before removing
# Outcome codes returned by verify_status_code() to the caller:
STATUS_CODE_OK = 1  # request succeeded
STATUS_CODE_SKIP = 2  # skip the current file (e.g. payload too large)
STATUS_CODE_REPEAT = 3  # retry later (e.g. rate limited or unknown status)
def verify_status_code(request_response: requests.Response) -> tuple:
"""Verify the status code of the post request to the search url and raise exceptions if the code is unexpected
:type request_response: requests.Response
:return:
"""
if request_response.status_code == 200:
return STATUS_CODE_OK, ''
elif request_response.status_code == 429:
if 'user\'s rate limit' in request_response.text:
msg = "Search rate limit reached"
return STATUS_CODE_REPEAT, msg
if 'limit of 150 searches' in request_response.text:
raise DailyLimitReachedException('Daily search limit for unregistered users reached')
elif 'limit of 300 searches' in request_response.text:
raise DailyLimitReachedException('Daily search limit for basic users reached')
else:
raise DailyLimitReachedException('Daily search limit reached')
elif request_response.status_code == 403:
raise InvalidOrWrongApiKeyException("Invalid or wrong API key")
elif request_response.status_code == 413:
msg = "Payload too large, skipping file"
return STATUS_CODE_SKIP, msg
else:
msg = "Unknown status code: {0:d}".format(request_response.status_code)
return STATUS_CODE_REPEAT, msg
| 35.595238 | 114 | 0.70301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 524 | 0.350502 |
1d8982e4961fe4520d62952b9772740d30716f97 | 6,711 | py | Python | dialogue/sinodoju.py | jeanlucancey/pronunciamento | a7c7407cda8e5dace3f4cd8eebbfee9bdd469e14 | [
"BSD-2-Clause"
] | null | null | null | dialogue/sinodoju.py | jeanlucancey/pronunciamento | a7c7407cda8e5dace3f4cd8eebbfee9bdd469e14 | [
"BSD-2-Clause"
] | null | null | null | dialogue/sinodoju.py | jeanlucancey/pronunciamento | a7c7407cda8e5dace3f4cd8eebbfee9bdd469e14 | [
"BSD-2-Clause"
] | null | null | null | import time
from os import system
from django.http import HttpResponse
from django.template import Context, loader
from django.views.decorators.csrf import csrf_exempt # Pour des formulaires POST libres
from jla_utils.utils import Fichier
from .models import ElementDialogue
class Tunnel:
def __init__(self, longueurArg, generationArg):
self.longueur = longueurArg
self.generation = generationArg
def alimenteBaseDeDonnees (nomEntree, identifiantSerpicon, descriptifTunnel, serveur):
ElementDialogue.objects.create(
nom = nomEntree,
param1 = identifiantSerpicon,
param2 = descriptifTunnel,
param3 = serveur
)
def analyseGraine (ligneLue):
graine = ligneLue[10:len(ligneLue) - 1]
return graine
def analyseNbCell (ligneLue):
nbCellString = ligneLue[9:len(ligneLue)]
nbCell = int(nbCellString)
return nbCell
def analyseTunnel (request):
nomFichTunnel = "resultat_longtun2.txt"
numLigneLue = 0
fichALire = Fichier(nomFichTunnel, 0)
chouBlanc = True # Par defaut
nbCell = 0
graine = ""
mesTunnels = []
while fichALire.index < fichALire.longueur:
ligneLue = fichALire.litUneLigne()
numLigneLue += 1
if numLigneLue == 1:
nbCell = analyseNbCell(ligneLue)
elif numLigneLue == 2:
graine = analyseGraine(ligneLue)
else:
if (len(ligneLue) > 10) and (ligneLue[0:6] == "Tunnel"):
chouBlanc = False
monTunnelNormalise = analyseTunnelMoteur(ligneLue)
mesTunnels.append(monTunnelNormalise)
fichALire.close()
print("Le nombre de cellules est de %d." % (nbCell))
print("La graine est [%s]." % (graine))
nomEntreeDeBase = fabriqueTempsSyntaxeUrl()
identifiantSerpicon = "%d %s" % (nbCell, graine)
nomServeur = "alwaysdata"
if chouBlanc:
alimenteBaseDeDonnees(nomEntreeDeBase, identifiantSerpicon, "Chou blanc !", nomServeur)
else:
for numTunnel in range(len(mesTunnels)):
monTunnel = mesTunnels[numTunnel]
maLongueur = monTunnel.longueur
maGeneration = monTunnel.generation
print("Tunnel de %s a la generation %s" % \
(separateurMille(maLongueur, ' '),
separateurMille(maGeneration, ' ')))
nomEntreeDeBase = fabriqueTempsSyntaxeUrl()
nomEntree = nomEntreeDeBase + "__" + separateurMille(maLongueur, '_')
descriptifTunnel = separateurMille(maLongueur, ' ') + " en " \
+ separateurMille(maGeneration, ' ')
alimenteBaseDeDonnees(nomEntree, identifiantSerpicon, descriptifTunnel, nomServeur)
if numTunnel < len(mesTunnels) - 1:
attend(5.0)
# time.sleep(2.0) # A tout hasard, pour ne pas venir trop vite apres les requetes
# d'analyse_tunnel.py
# lanceSinodoju () # On va laisser courteline s'occuper de relancer amarelia
tableauDeLignes = []
tableauDeLignes.append("Cette page est la page de l'analyse des tunnels.")
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
def attend (dureeEnSecondes):
time.sleep(dureeEnSecondes)
def analyseTunnelMoteur (ligneLue):
chaineLongueur = ""
chaineGeneration = ""
caracLu = ""
numSigne = 10
eTrouve = False
while (not eTrouve) and (numSigne < len(ligneLue)):
signeLu = ligneLue[numSigne]
if signeLu == "e":
eTrouve = True
else:
chaineLongueur += signeLu
numSigne += 1
chaineLongueur = chaineLongueur[0:len(chaineLongueur) - 1] # pour virer l'espace finale
maLongueur = int(vireSigne(chaineLongueur, ' '))
numSigne += 2
chaineGeneration = ligneLue[numSigne:len(ligneLue)]
maGene = int(vireSigne(chaineGeneration, ' '))
monTunnel = Tunnel(maLongueur, maGene)
return monTunnel
def fabriqueTempsSyntaxeGraine ():
graine = time.strftime("jlancey%Y%m%da%Hh%Mm%S", time.localtime())
return graine
def fabriqueTempsSyntaxeUrl ():
# tempsSyntaxeUrl = time.strftime("%Y-%m-%d_%H-%M-%S", time.gmtime())
tempsSyntaxeUrl = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
return tempsSyntaxeUrl
def lanceSinodoju ():
conn = http.client.HTTPConnection("www.amarelia.ch")
conn.request("GET", "/sinodoju/sinodoju.php")
r1 = conn.getresponse()
print(r1.status, r1.reason)
data1 = r1.read()
# print(data1)
conn.close()
def separateurMille (monEntier, monSeparateur):
maChaine0 = "%d" % (monEntier)
maChaine1 = ""
for numSigne in range(len(maChaine0)):
numSigne2 = len(maChaine0) -1 - numSigne
monSigne = maChaine0[numSigne2]
if (numSigne % 3 == 0) and numSigne > 0:
maChaine1 = monSeparateur + maChaine1
maChaine1 = monSigne + maChaine1
return maChaine1
@csrf_exempt # En théorie, c'est une brèche de sécurité; en pratique... ca depend
def viewSinodoju (request):
tableauDeLignes = []
tableauDeLignes.append("Cette page est la page de Sinodoju.")
graine = fabriqueTempsSyntaxeGraine()
nbBitsFournis = len(graine) * 6
tableauDeLignes.append("La graine est [%s], soit assez pour %d bits." % (graine, nbBitsFournis))
nbCellules = 145
system("./sinodoju.pl %d %s > cr_perl.txt 2> cr2_perl.txt &" % (nbCellules, graine))
tableauDeLignes.append("En principe, si vous lisez ça, c'est qu'un daemon Sinodoju a été lancé.")
tableauDeLignes.append("Donc ça aura un effet... quand le daemon aura fini de travailler.")
tableauDeLignes.append("Ce template a été écrit pour vous rendre la main tout de suite...")
tableauDeLignes.append("... mais des limitations d'AlwaysData, compréhensibles d'ailleurs,")
tableauDeLignes.append("imposent d'attendre quand même la fin du processus. Cette page ne")
tableauDeLignes.append("sert donc qu'à titre de test.")
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
def vireSigne (maChaine, monSigneAVirer):
maChainePurgee = ""
for numSigne in range(len(maChaine)):
monSigne = maChaine[numSigne]
if monSigne != monSigneAVirer:
maChainePurgee += monSigne
return maChainePurgee
| 35.696809 | 101 | 0.649829 | 141 | 0.020963 | 0 | 0 | 1,256 | 0.186738 | 0 | 0 | 1,366 | 0.203092 |
1d8a630a7af7286dcfe25ff650a3b9fddf8961f0 | 1,294 | py | Python | tools/python/boutiques/tests/test_bids.py | shots47s/boutiques | 831f937a6b1491af63a800786967e4d9bca1e262 | [
"MIT"
] | 54 | 2016-07-21T19:14:13.000Z | 2021-11-16T11:49:15.000Z | tools/python/boutiques/tests/test_bids.py | shots47s/boutiques | 831f937a6b1491af63a800786967e4d9bca1e262 | [
"MIT"
] | 539 | 2016-07-20T20:09:38.000Z | 2022-03-17T00:45:26.000Z | tools/python/boutiques/tests/test_bids.py | shots47s/boutiques | 831f937a6b1491af63a800786967e4d9bca1e262 | [
"MIT"
] | 52 | 2016-07-22T18:09:59.000Z | 2021-02-03T15:22:55.000Z | #!/usr/bin/env python
from unittest import TestCase
from boutiques.bosh import bosh
from boutiques.bids import validate_bids
from boutiques import __file__ as bofile
from jsonschema.exceptions import ValidationError
from boutiques.validator import DescriptorValidationError
import os.path as op
import simplejson as json
import os
class TestBIDS(TestCase):
def test_bids_good(self):
fil = op.join(op.split(bofile)[0], 'schema/examples/bids_good.json')
self.assertFalse(bosh(["validate", fil, '-b']))
def test_bids_bad1(self):
fil = op.join(op.split(bofile)[0], 'schema/examples/bids_bad1.json')
self.assertRaises(DescriptorValidationError, bosh, ["validate",
fil, '-b'])
def test_bids_bad2(self):
fil = op.join(op.split(bofile)[0], 'schema/examples/bids_bad2.json')
self.assertRaises(DescriptorValidationError, bosh, ["validate",
fil, '-b'])
def test_bids_invalid(self):
fil = op.join(op.split(bofile)[0], 'schema/examples/bids_bad2.json')
descriptor = json.load(open(fil))
self.assertRaises(DescriptorValidationError, validate_bids,
descriptor, False)
| 36.971429 | 76 | 0.641422 | 959 | 0.741113 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.147604 |
1d8b6f8e19293ba21c2228c7a6bbaaebe7835fb6 | 425 | py | Python | libs/yowsup/yowsup/yowsup/layers/protocol_receipts/protocolentities/test_receipt_outgoing.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | 22 | 2017-07-14T20:01:17.000Z | 2022-03-08T14:22:39.000Z | libs/yowsup/yowsup/yowsup/layers/protocol_receipts/protocolentities/test_receipt_outgoing.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | 6 | 2017-07-14T21:03:50.000Z | 2021-06-10T19:08:32.000Z | libs/yowsup/yowsup/yowsup/layers/protocol_receipts/protocolentities/test_receipt_outgoing.py | akshitpradhan/TomHack | 837226e7b38de1140c19bc2d478eeb9e379ed1fd | [
"MIT"
] | 13 | 2017-07-14T20:13:14.000Z | 2020-11-12T08:06:05.000Z | from yowsup.layers.protocol_receipts.protocolentities import OutgoingReceiptProtocolEntity
from yowsup.structs.protocolentity import ProtocolEntityTest
import unittest
class OutgoingReceiptProtocolEntityTest(ProtocolEntityTest, unittest.TestCase):
def setUp(self):
self.ProtocolEntity = OutgoingReceiptProtocolEntity
self.node = OutgoingReceiptProtocolEntity("123", "target", "read").toProtocolTreeNode() | 53.125 | 95 | 0.828235 | 256 | 0.602353 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.044706 |
1d8e88981310f80f962aaedf4421391c24b8f208 | 2,769 | py | Python | empire/server/modules/powershell/situational_awareness/network/get_sql_server_info.py | awsmhacks/Empire | 6a6f0881798ce92a54ce9896d2ffe4855855872d | [
"BSD-3-Clause"
] | null | null | null | empire/server/modules/powershell/situational_awareness/network/get_sql_server_info.py | awsmhacks/Empire | 6a6f0881798ce92a54ce9896d2ffe4855855872d | [
"BSD-3-Clause"
] | null | null | null | empire/server/modules/powershell/situational_awareness/network/get_sql_server_info.py | awsmhacks/Empire | 6a6f0881798ce92a54ce9896d2ffe4855855872d | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import pathlib
from builtins import object, str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
class Module(object):
@staticmethod
def generate(
main_menu,
module: PydanticModule,
params: Dict,
obfuscate: bool = False,
obfuscation_command: str = "",
):
username = params["Username"]
password = params["Password"]
instance = params["Instance"]
check_all = params["CheckAll"]
# read in the common module source code
script, err = main_menu.modules.get_module_source(
module_name="situational_awareness/network/Get-SQLServerInfo.ps1",
obfuscate=obfuscate,
obfuscate_command=obfuscation_command,
)
script_end = ""
if check_all:
# read in the common module source code
script, err = main_menu.modules.get_module_source(
module_name="situational_awareness/network/Get-SQLInstanceDomain.ps1",
obfuscate=obfuscate,
obfuscate_command=obfuscation_command,
)
try:
with open(sql_instance_source, "r") as auxSource:
auxScript = auxSource.read()
script += " " + auxScript
except:
print(
helpers.color(
"[!] Could not read additional module source path at: "
+ str(sql_instance_source)
)
)
script_end = " Get-SQLInstanceDomain "
if username != "":
script_end += " -Username " + username
if password != "":
script_end += " -Password " + password
script_end += " | "
script_end += " Get-SQLServerInfo"
if username != "":
script_end += " -Username " + username
if password != "":
script_end += " -Password " + password
if instance != "" and not check_all:
script_end += " -Instance " + instance
outputf = params.get("OutputFunction", "Out-String")
script_end += (
f" | {outputf} | "
+ '%{$_ + "`n"};"`n'
+ str(module.name.split("/")[-1])
+ ' completed!"'
)
script = main_menu.modules.finalize_module(
script=script,
script_end=script_end,
obfuscate=obfuscate,
obfuscation_command=obfuscation_command,
)
return script
| 33.361446 | 86 | 0.552907 | 2,444 | 0.882629 | 0 | 0 | 2,418 | 0.873239 | 0 | 0 | 499 | 0.180209 |
1d8eb891210e75a2298ded03715a2b3415109050 | 230 | py | Python | gameplay/urls.py | Urosh91/TicTacToe | 40d6c763993b525327d2c3cd3f132dc8822c85fe | [
"MIT"
] | null | null | null | gameplay/urls.py | Urosh91/TicTacToe | 40d6c763993b525327d2c3cd3f132dc8822c85fe | [
"MIT"
] | null | null | null | gameplay/urls.py | Urosh91/TicTacToe | 40d6c763993b525327d2c3cd3f132dc8822c85fe | [
"MIT"
] | null | null | null | from django.urls import path
from .views import game_detail, make_move
urlpatterns = [
path(r'detail/<int:id>/', game_detail, name="gameplay_detail"),
path(r'make_move/<int:id>', make_move, name="gameplay_make_move")
]
| 23 | 69 | 0.721739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.334783 |
1d9140bd33fb078d1bfd2d6231763b19bed995bc | 740 | py | Python | setup.py | Bonifatius94/sc2sim | ac765f826e2465354aa4b619ab84d52249eec474 | [
"MIT"
] | null | null | null | setup.py | Bonifatius94/sc2sim | ac765f826e2465354aa4b619ab84d52249eec474 | [
"MIT"
] | null | null | null | setup.py | Bonifatius94/sc2sim | ac765f826e2465354aa4b619ab84d52249eec474 | [
"MIT"
] | null | null | null | from setuptools import setup
def load_pip_dependency_list():
with open('./requirements.txt', 'r', encoding='utf-8') as file:
return file.read().splitlines()
def load_readme_desc():
with open("README.md", "r", encoding="utf-8") as readme_file:
return readme_file.read()
setup(
name="sc2sim",
version="1.0.0",
author="Marco Tröster",
author_email="marco@troester-gmbh.de",
description="A StarCraft II environment for reinforcement learning purposes",
long_description=load_readme_desc(),
long_description_content_type="text/markdown",
url="https://github.com/Bonifatius94/sc2sim",
packages=["sc2sim"],
python_requires=">=3",
install_requires=load_pip_dependency_list()
)
| 30.833333 | 81 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.321188 |
1d91db395da59fee0f0dc15b16ea80b100b25421 | 1,906 | py | Python | 2016/24/air_duct_spelunking.py | GeoffRiley/AdventOfCode | 27fe8670a1923cb3b0675784f5e855ad18c29c93 | [
"Unlicense"
] | 2 | 2020-12-12T03:18:45.000Z | 2021-12-17T00:35:33.000Z | 2016/24/air_duct_spelunking.py | GeoffRiley/AdventOfCode | 27fe8670a1923cb3b0675784f5e855ad18c29c93 | [
"Unlicense"
] | null | null | null | 2016/24/air_duct_spelunking.py | GeoffRiley/AdventOfCode | 27fe8670a1923cb3b0675784f5e855ad18c29c93 | [
"Unlicense"
] | null | null | null | from collections import defaultdict
from itertools import permutations
import networkx as nx
def air_duct_spelunking(inp, part1=True):
max_y = len(inp)
max_x = max(len(line) for line in inp)
grid = defaultdict(lambda: '#')
numbers = defaultdict(lambda: '')
route_list = defaultdict(lambda: 0)
graph = nx.Graph()
for y, row in enumerate(inp):
yo = 1j * y
for x, ch in enumerate(row):
grid[x + yo] = ch
for y in range(max_y):
yo = 1j * y
for x in range(max_x):
if grid[x + yo] == '#':
continue
ch = grid[x + yo]
node_address = str(x + yo)
if ch.isdigit():
graph.add_node(node_address, num=int(ch))
numbers[ch] = node_address
else:
graph.add_node(node_address)
for offset in [1j, 1, -1j, -1]:
if grid[x + yo + offset] != '#':
graph.add_edge(node_address, str(x + yo + offset))
# find shortest path
short_route = 1e9
for route in permutations([n for n in numbers.keys() if n > '0']):
path = 0
if part1:
r = ['0'] + list(route)
else:
r = ['0'] + list(route) + ['0']
for u, v in zip(r[:-1], r[1:]):
if route_list[(u, v)] == 0:
route_list[(u, v)] = nx.shortest_path_length(graph, numbers[u], numbers[v])
path += route_list[(u, v)]
if short_route > path:
short_route = path
return short_route
if __name__ == '__main__':
with open('input.txt') as cave_file:
cave_lines = cave_file.read().splitlines(keepends=False)
print(f'Day 24, part 1: {air_duct_spelunking(cave_lines)}')
print(f'Day 24, part 2: {air_duct_spelunking(cave_lines, False)}')
# Day 24, part 1: 518
# Day 24, part 2: 716
| 33.438596 | 91 | 0.53043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.113851 |
1d92705a0e03671b2fdc534d799cc8d39e832d40 | 3,889 | py | Python | src/bo4e/com/angebotsteil.py | bo4e/BO4E-python | 28b12f853c8a496d14b133759b7aa2d6661f79a0 | [
"MIT"
] | 1 | 2022-03-02T12:49:44.000Z | 2022-03-02T12:49:44.000Z | src/bo4e/com/angebotsteil.py | bo4e/BO4E-python | 28b12f853c8a496d14b133759b7aa2d6661f79a0 | [
"MIT"
] | 21 | 2022-02-04T07:38:46.000Z | 2022-03-28T14:01:53.000Z | src/bo4e/com/angebotsteil.py | bo4e/BO4E-python | 28b12f853c8a496d14b133759b7aa2d6661f79a0 | [
"MIT"
] | null | null | null | """
Contains Angebotsteil class
and corresponding marshmallow schema for de-/serialization
"""
from typing import List, Optional
import attr
from marshmallow import fields
from bo4e.bo.marktlokation import Marktlokation, MarktlokationSchema
from bo4e.com.angebotsposition import Angebotsposition, AngebotspositionSchema
from bo4e.com.betrag import Betrag, BetragSchema
from bo4e.com.com import COM, COMSchema
from bo4e.com.menge import Menge, MengeSchema
from bo4e.com.zeitraum import Zeitraum, ZeitraumSchema
from bo4e.validators import check_list_length_at_least_one
# pylint: disable=too-few-public-methods
@attr.s(auto_attribs=True, kw_only=True)
class Angebotsteil(COM):
"""
Mit dieser Komponente wird ein Teil einer Angebotsvariante abgebildet.
Hier werden alle Angebotspositionen aggregiert.
Angebotsteile werden im einfachsten Fall für eine Marktlokation oder Lieferstellenadresse erzeugt.
Hier werden die Mengen und Gesamtkosten aller Angebotspositionen zusammengefasst.
Eine Variante besteht mindestens aus einem Angebotsteil.
.. HINT::
`Angebotsteil JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/AngebotsteilSchema.json>`_
"""
# required attributes
#: Einzelne Positionen, die zu diesem Angebotsteil gehören
positionen: List[Angebotsposition] = attr.ib(
validator=[
attr.validators.deep_iterable(
member_validator=attr.validators.instance_of(Angebotsposition),
iterable_validator=attr.validators.instance_of(list),
),
check_list_length_at_least_one,
]
)
# optional attributes
#: Identifizierung eines Subkapitels einer Anfrage, beispielsweise das Los einer Ausschreibung
anfrage_subreferenz: Optional[str] = attr.ib(
default=None, validator=attr.validators.optional(attr.validators.instance_of(str))
)
lieferstellenangebotsteil: Optional[List[Marktlokation]] = attr.ib(
default=None,
validator=attr.validators.optional(
attr.validators.deep_iterable(
member_validator=attr.validators.instance_of(Marktlokation),
iterable_validator=attr.validators.instance_of(list),
)
),
)
"""
Marktlokationen, für die dieses Angebotsteil gilt, falls vorhanden.
Durch die Marktlokation ist auch die Lieferadresse festgelegt
"""
#: Summe der Verbräuche aller in diesem Angebotsteil eingeschlossenen Lieferstellen
gesamtmengeangebotsteil: Optional[Menge] = attr.ib(
default=None, validator=attr.validators.optional(attr.validators.instance_of(Menge))
)
#: Summe der Jahresenergiekosten aller in diesem Angebotsteil enthaltenen Lieferstellen
gesamtkostenangebotsteil: Optional[Betrag] = attr.ib(
default=None, validator=attr.validators.optional(attr.validators.instance_of(Betrag))
)
#: Hier kann der Belieferungszeitraum angegeben werden, für den dieser Angebotsteil gilt
lieferzeitraum: Optional[Zeitraum] = attr.ib(
default=None, validator=attr.validators.optional(attr.validators.instance_of(Zeitraum))
)
class AngebotsteilSchema(COMSchema):
"""
Schema for de-/serialization of Angebotsteil.
"""
class_name = Angebotsteil
# required attributes
positionen = fields.List(fields.Nested(AngebotspositionSchema))
# optional attributes
anfrage_subreferenz = fields.Str(load_default=None, data_key="anfrageSubreferenz")
lieferstellenangebotsteil = fields.List(fields.Nested(MarktlokationSchema), load_default=None)
gesamtmengeangebotsteil = fields.Nested(MengeSchema, load_default=None)
gesamtkostenangebotsteil = fields.Nested(BetragSchema, load_default=None)
lieferzeitraum = fields.Nested(ZeitraumSchema, load_default=None)
| 41.37234 | 179 | 0.750579 | 3,234 | 0.830508 | 0 | 0 | 2,611 | 0.670519 | 0 | 0 | 1,447 | 0.371597 |
1d929ee149bb8ee418892e5d7ce4bfaffb5ee0ea | 63,926 | py | Python | lib/kb_kaiju/Utils/OutputBuilder.py | mclark58/kb_kaiju | 477b23ef7fdf8b75a9cffc2239c546c915875c74 | [
"MIT"
] | null | null | null | lib/kb_kaiju/Utils/OutputBuilder.py | mclark58/kb_kaiju | 477b23ef7fdf8b75a9cffc2239c546c915875c74 | [
"MIT"
] | null | null | null | lib/kb_kaiju/Utils/OutputBuilder.py | mclark58/kb_kaiju | 477b23ef7fdf8b75a9cffc2239c546c915875c74 | [
"MIT"
] | null | null | null | import os
import shutil
import ast
import sys
import time
import re
from datetime import datetime as dt
import pytz
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import random
from random import shuffle
from biokbase.workspace.client import Workspace as workspaceService
#from Workspace.WorkspaceClient import Workspace as workspaceService
from DataFileUtil.DataFileUtilClient import DataFileUtil
def log(message, prefix_newline=False):
    """Print *message* to stdout with an epoch-seconds timestamp prefix and flush.

    Provides a single hook where log output could later be suppressed or
    redirected.

    Args:
        message: any object; rendered via str().
        prefix_newline: when True, emit a leading blank line before the entry.
    """
    lead = '\n' if prefix_newline else ''
    stamp = '{0:.2f}'.format(time.time())
    print(lead + stamp + ': ' + str(message))
    sys.stdout.flush()
class OutputBuilder(object):
'''
Constructs the output HTML report and artifacts based on Kaiju and Krona
runs. This includes creating matplotlib graphs of the summaries and
modifying the Krona HTML to offer tabbed href links between html pages
'''
    def __init__(self, output_folders, scratch_dir, callback_url, workspace_url):
        '''
        Initialize the builder with run folders, service endpoints, and
        per-run caches.

        :param output_folders: folders holding Kaiju/Krona run outputs
        :param scratch_dir: scratch working directory (all packaged folders
            must live beneath it; see package_folder())
        :param callback_url: URL of the KBase callback server (DataFileUtil)
        :param workspace_url: URL of the KBase workspace service
        '''
        self.output_folders = output_folders
        self.scratch = scratch_dir
        self.callback_url = callback_url
        self.workspace_url = workspace_url
        # workspace client is created lazily on first use
        self.wsClient = None
        # store Kaiju DBs
        self.NODES_DB = None
        self.NAMES_DB = None
        # store species counts by sample
        self.species_abundance_by_sample = dict()
        # store parsed info
        self.parsed_summary = dict()
        # leave out light colors (palette for plot series; commented names
        # are deliberately excluded as too light to read against white)
        self.no_light_color_names = [
            #'aliceblue',
            'aqua',
            'aquamarine',
            #'azure',
            #'beige',
            #'bisque',
            #'blanchedalmond',
            'blue',
            'blueviolet',
            'brown',
            'burlywood',
            'cadetblue',
            'chartreuse',
            'chocolate',
            'coral',
            'cornflowerblue',
            #'cornsilk',
            'crimson',
            'cyan',
            'darkblue',
            'darkcyan',
            'darkgoldenrod',
            'darkgreen',
            'darkkhaki',
            'darkmagenta',
            'darkolivegreen',
            'darkorange',
            'darkorchid',
            'darkred',
            'darksalmon',
            'darkseagreen',
            'darkslateblue',
            #'darkslategray',
            'darkturquoise',
            'darkviolet',
            'deeppink',
            'deepskyblue',
            'dodgerblue',
            'firebrick',
            'forestgreen',
            'fuchsia',
            #'gainsboro',
            'gold',
            'goldenrod',
            'green',
            'greenyellow',
            #'honeydew',
            'hotpink',
            'indianred',
            'indigo',
            'khaki',
            #'lavender',
            #'lavenderblush',
            'lawngreen',
            #'lemonchiffon',
            'lightblue',
            #'lightcoral',
            #'lightcyan'
            #'lightgoldenrodyellow',
            'lightgreen',
            'lightpink',
            'lightsalmon',
            'lightseagreen',
            'lightskyblue',
            #'lightslategray',
            #'lightsteelblue',
            #'lightyellow',
            'lime',
            'limegreen',
            #'magenta',  # magenta reserved for viruses
            'maroon',
            'mediumaquamarine',
            'mediumblue',
            'mediumorchid',
            'mediumpurple',
            'mediumseagreen',
            'mediumslateblue',
            'mediumspringgreen',
            'mediumturquoise',
            'mediumvioletred',
            'midnightblue',
            #'mintcream',
            #'mistyrose',
            #'moccasin',
            'navy',
            #'oldlace',
            'olive',
            'olivedrab',
            'orange',
            'orangered',
            'orchid',
            #'palegoldenrod',
            'palegreen',
            'paleturquoise',
            'palevioletred',
            #'papayawhip',
            'peachpuff',
            #'peru',
            'pink',
            'plum',
            'powderblue',
            'purple',
            'red',
            'rosybrown',
            'royalblue',
            'saddlebrown',
            'salmon',
            'sandybrown',
            'seagreen',
            #'seashell',
            'sienna',
            'skyblue',
            'slateblue',
            'springgreen',
            'steelblue',
            #'tan',
            'teal',
            #'thistle',
            'tomato',
            'turquoise',
            'violet',
            #'wheat',
            #'yellow',
            #'yellowgreen'
        ]
def package_folder(self, folder_path, zip_file_name, zip_file_description):
''' Simple utility for packaging a folder and saving to shock '''
if folder_path == self.scratch:
raise ValueError ("cannot package scatch itself. folder path: "+folder_path)
elif not folder_path.startswith(self.scratch):
raise ValueError ("cannot package folder that is not a subfolder of scratch. folder path: "+folder_path)
dfu = DataFileUtil(self.callback_url)
if not os.path.exists(folder_path):
raise ValueError ("cannot package folder that doesn't exist: "+folder_path)
output = dfu.file_to_shock({'file_path': folder_path,
'make_handle': 0,
'pack': 'zip'})
return {'shock_id': output['shock_id'],
'name': zip_file_name,
'label': zip_file_description}
def generate_sparse_biom1_0_matrix(self, ctx, options):
tax_level = options['tax_level']
db_type = options['db_type']
input_reads = options['input_reads']
in_folder = options['in_folder']
workspace_name = options['workspace_name']
output_obj_name = options['output_obj_name']
timestamp_epoch = options['timestamp_epoch']
abundance_matrix = []
abundance_by_sample = []
lineage_seen = dict()
lineage_order = []
#extra_bucket_order = []
sample_order = []
#classified_frac = []
biom_obj = dict()
# parse kaiju classification files and tally raw count abundance
for input_reads_item in input_reads:
sample_order.append(input_reads_item['name'])
this_classification_file = os.path.join (in_folder, input_reads_item['name']+'.kaiju')
(this_abundance_cnts, this_lineage_order) = self._parse_kaiju_classification_file (this_classification_file, tax_level, db_type)
for lineage_name in this_lineage_order:
if lineage_name not in lineage_seen:
lineage_seen[lineage_name] = True
#if lineage_name.startswith('tail (<') \
# or lineage_name.startswith('viruses') \
# or lineage_name.startswith('unassigned at'):
# #extra_bucket_order.append(lineage_name)
# continue
#else:
# lineage_order.append(lineage_name)
lineage_order.append(lineage_name)
abundance_by_sample.append(this_abundance_cnts)
#classified_frac.append(this_classified_frac)
# create sparse matrix (note: vals in each sample do not sum to 100% because we're dumping buckets)
biom_data = []
for lineage_i,lineage_name in enumerate(lineage_order):
for sample_i,sample_name in enumerate(sample_order):
if lineage_name in abundance_by_sample[sample_i]:
biom_data.append([lineage_i, sample_i, abundance_by_sample[sample_i][lineage_name]])
# build biom obj
shape = [len(lineage_order), len(sample_order)]
rows_struct = []
cols_struct = []
timestamp_iso = dt.fromtimestamp(timestamp_epoch,pytz.utc).strftime('%Y-%m-%d'+'T'+'%H:%M:%S')
for lineage_name in lineage_order:
# KBase BIOM typedef only supports string, not dict. This is wrong (see format_url below)
#rows_struct.append({'id': lineage_name, 'metadata': None}) # could add metadata full tax path if parsed from KaijuReport
rows_struct.append(lineage_name)
for sample_name in sample_order:
# KBase BIOM typedef only supports string, not dict. This is wrong (see format_url below)
#cols_struct.append({'id': sample_name, 'metadata': None}) # sample metadata not provided to App
cols_struct.append(sample_name)
biom_obj = { 'id': output_obj_name,
'format': 'Biological Observation Matrix 1.0',
'format_url': 'http://biom-format.org/documentation/format_versions/biom-1.0.html',
'type': 'Taxon table',
'generated_by': 'KBase Kaiju App (Kaiju v1.5.0, KBase App v1.0.0)',
'date': timestamp_iso,
'rows': rows_struct,
'columns': cols_struct,
'matrix_type': 'sparse',
#'matrix_element_type': 'float',
'matrix_element_type': 'int',
'shape': shape,
'data': biom_data
}
# extra KBase BIOM obj required fields that aren't part of biom-1.0 spec (probably custom to MG-RAST)
biom_obj['url'] = None
biom_obj['matrix_element_value'] = None
# save the biom obj to workspace
provenance = [{}]
if 'provenance' in ctx:
provenance = ctx['provenance']
# add additional info to provenance here, in this case the input data object reference
provenance[0]['input_ws_objects'] = []
for input_reads_item in input_reads:
if input_reads_item['ref'] not in provenance[0]['input_ws_objects']:
provenance[0]['input_ws_objects'].append(input_reads_item['ref'])
provenance[0]['service'] = 'kb_kaiju'
provenance[0]['method'] = 'run_kaiju'
if self.wsClient == None:
try:
self.wsClient = workspaceService(self.workspace_url, token=ctx['token'])
except:
raise ValueError ("Unable to connect to workspace service at workspace_url: "+self.workspace_url)
print ("SAVING BIOM OBJECT")
#print (biom_obj) # DEBUG
new_obj_info = self.wsClient.save_objects({'workspace':workspace_name,
'objects':[
{ 'type': 'Communities.Biom',
'data': biom_obj,
'name': output_obj_name,
'meta': {},
'provenance': provenance
}]
})[0]
[OBJID_I, NAME_I, TYPE_I, SAVE_DATE_I, VERSION_I, SAVED_BY_I, WSID_I, WORKSPACE_I, CHSUM_I, SIZE_I, META_I] = range(11) # object_info tuple
biom_obj_ref = str(new_obj_info[WSID_I])+'/'+str(new_obj_info[OBJID_I])+'/'+str(new_obj_info[VERSION_I])
return biom_obj_ref
    def generate_kaijuReport_PerSamplePlots(self, options):
        '''Generate per-sample abundance plots. Not implemented yet; no-op.'''
        pass
def generate_kaijuReport_StackedPlots(self, options):
tax_level = options['tax_level']
abundance_matrix = []
abundance_by_sample = []
lineage_seen = dict()
lineage_order = []
extra_bucket_order = []
sample_order = []
classified_frac = []
# parse summary
for input_reads_item in options['input_reads']:
sample_order.append(input_reads_item['name'])
this_summary_file = os.path.join (options['in_folder'], input_reads_item['name']+'-'+tax_level+'.kaijuReport')
(this_abundance, this_lineage_order, this_classified_frac) = self._parse_kaiju_summary_file (this_summary_file, tax_level)
for lineage_name in this_lineage_order:
if lineage_name not in lineage_seen:
lineage_seen[lineage_name] = True
if lineage_name.startswith('tail (<') \
or lineage_name.startswith('viruses') \
or lineage_name.startswith('unassigned at'):
extra_bucket_order.append(lineage_name)
else:
lineage_order.append(lineage_name)
abundance_by_sample.append(this_abundance)
classified_frac.append(this_classified_frac)
for bucket_name in extra_bucket_order: # put extra buckets at end. necessary for sorting later.
lineage_order.append(bucket_name)
for lineage_i,lineage_name in enumerate(lineage_order):
abundance_matrix.append([])
for sample_i,sample_name in enumerate(sample_order):
if lineage_name in abundance_by_sample[sample_i]:
abundance_matrix[lineage_i].append(abundance_by_sample[sample_i][lineage_name])
else:
abundance_matrix[lineage_i].append(0.0)
# make plots
if options['plot_type'] == 'bar':
basename_ext = '-stacked_bar_plot'
return self._create_bar_plots (out_folder = options['stacked_plots_out_folder'],
out_file_basename = tax_level+basename_ext,
vals = abundance_matrix,
frac_vals = classified_frac,
#title = tax_level.title()+' Level',
title = tax_level.title(),
frac_y_label = 'fraction classified',
y_label = 'percent of classified reads',
sample_labels = sample_order,
element_labels = lineage_order,
sort_by = options['sort_taxa_by'])
elif options['plot_type'] == 'area':
basename_ext = '-stacked_area_plot'
return self._create_area_plots (out_folder = options['stacked_plots_out_folder'],
out_file_basename = tax_level+basename_ext,
vals = abundance_matrix,
frac_vals = classified_frac,
#title = tax_level.title()+' Level',
title = tax_level.title(),
frac_y_label = 'fraction classified',
y_label = 'percent of classified reads',
sample_labels = sample_order,
element_labels = lineage_order,
sort_by = options['sort_taxa_by'])
else:
raise ValueError ("Unknown plot type "+options['plot_type'])
def generate_kaijuReport_StackedAreaPlots(self, options):
    """Unimplemented stub.

    NOTE(review): stacked area plots appear to be produced elsewhere via the
    plot_type == 'area' dispatch (which calls _create_area_plots()); this
    method body was never filled in — confirm whether it can be removed.
    """
    pass
def build_html_for_kaijuReport_StackedPlots(self, input_reads, summary_folder, out_html_folder, plot_type, tax_levels, img_files):
    """Assemble a single HTML page embedding the stacked abundance plot image
    for every taxonomic level.

    Plot images are copied into <out_html_folder>/img and referenced with
    relative paths. Each image's display height grows with the number of
    distinct lineages (legend entries) so the key is not clipped.

    Returns a one-element list with a page descriptor dict
    ('type', 'name', 'local_path', 'abs_path').
    """
    base_img_height = 750                      # default image height in pixels
    key_px_per_entry = base_img_height / 36    # vertical pixels budgeted per legend entry
    img_subdir = 'img'

    img_dst_dir = os.path.join(out_html_folder, img_subdir)
    if not os.path.exists(img_dst_dir):
        os.makedirs(img_dst_dir)

    # page header
    plot_type_disp = plot_type.title()
    html_lines = []
    html_lines.extend(self._build_plot_html_header('KBase Kaiju Stacked '+plot_type_disp+' Abundance Plots'))

    # copy each level's plot image into the html folder and embed it
    for tax_level in tax_levels:
        rel_img_path = os.path.join(img_subdir, plot_type+'-'+tax_level+'.PNG')
        shutil.copy2(img_files[tax_level], os.path.join(out_html_folder, rel_img_path))

        # count distinct lineages across all samples at this level; a long
        # legend needs a taller image
        lineages = dict()
        for reads_item in input_reads:
            summary_path = os.path.join(summary_folder, reads_item['name']+'-'+tax_level+'.kaijuReport')
            (abund, lineage_order, frac) = self._parse_kaiju_summary_file(summary_path, tax_level)
            for lineage in lineage_order:
                lineages[lineage] = True
        img_height = key_px_per_entry * len(lineages)
        if img_height <= base_img_height:
            img_height = base_img_height

        html_lines.append('<img src="'+rel_img_path+'" valign="top" height='+str(img_height)+'>')

    # page footer + write
    html_lines.extend(self._build_plot_html_footer())
    page_rel_path = plot_type+'.html'
    page_abs_path = os.path.join(out_html_folder, page_rel_path)
    self._write_buf_to_file(page_abs_path, html_lines)

    return [{'type': plot_type,
             'name': plot_type.title(),
             'local_path': page_rel_path,
             'abs_path': page_abs_path
             }]
def build_html_for_kaijuReport_PerSamplePlots(self, out_html_folder, img_files, input_reads, tax_levels):
    """Build one HTML page per taxonomic level embedding the per-sample
    abundance plot images.

    Copies each plot image into <out_html_folder>/img and writes a
    per_sample_abundance-<tax_level>.html page that references them.

    Args:
        out_html_folder: destination folder for the HTML pages (images go
            into an 'img' subfolder, created if missing).
        img_files: dict keyed [tax_level][sample_name] -> source image path.
        input_reads: list of dicts, each with a 'name' key, one per sample.
        tax_levels: taxonomic levels to build pages for.

    Returns:
        list of page descriptor dicts with 'type', 'name', 'local_path',
        and 'abs_path' keys.
    """
    img_local_path = 'img'
    out_html_img_path = os.path.join (out_html_folder, img_local_path)
    if not os.path.exists(out_html_img_path):
        os.makedirs(out_html_img_path)
    out_html_files = []

    # one page per tax_level
    for tax_level in tax_levels:
        out_html_buf = []

        # add header
        out_html_buf.extend (self._build_plot_html_header('KBase Kaiju Per-Sample Abundance Plots'))

        # copy plot imgs to html folder and add img to html page
        # BUGFIX: iterate the input_reads parameter (was options['input_reads'],
        # which raised NameError because 'options' is not in scope here)
        for input_reads_item in input_reads:
            sample_name = input_reads_item['name']
            src_plot_file = img_files[tax_level][sample_name]
            dst_local_path = os.path.join (img_local_path, 'per_sample_abundance-'+tax_level+'-'+sample_name+'.PNG')
            dst_plot_file = os.path.join (out_html_folder, dst_local_path)
            shutil.copy2 (src_plot_file, dst_plot_file)
            out_html_buf.append('<img src="'+dst_local_path+'">')

        # add footer
        out_html_buf.extend (self._build_plot_html_footer())

        # write file
        out_local_path = 'per_sample_abundance-'+tax_level+'.html'
        out_html_path = os.path.join (out_html_folder, out_local_path)
        self._write_buf_to_file(out_html_path, out_html_buf)
        out_html_files.append({'type': 'per_sample',
                               'name': tax_level,
                               'local_path': out_local_path,
                               'abs_path': out_html_path})
    return out_html_files
def add_top_nav(self, html_pages):
    """Inject a cross-page navigation bar into each generated HTML page.

    Every page in html_pages gets a nav line inserted right after its
    '<body' tag: links to the other pages, with the current page shown as
    bold uppercase text without a link. Krona pages additionally have their
    'options.style.top' setting rewritten to push the plot down so the nav
    bar does not overlap it. Files are rewritten in place.
    """
    min_downshift = 25   # minimum pixel offset for the krona plot

    for page in html_pages:
        page_type = page['type']
        page_name = page['name']
        page_abs_path = page['abs_path']

        # build the nav bar: link each page, except the current one which
        # is rendered as bold uppercase with no link
        nav_items = []
        for item_i, other in enumerate(html_pages):
            label = other['name']
            is_current = (label == page_name)
            # the stacked-plot pages get friendlier display names
            if label == 'Bar' or label == 'Area':
                label = 'Stacked '+label+' Plots'
            if is_current:
                entry = '<b>'+label.upper()+'</b>'
            else:
                entry = '<a href="'+other['local_path']+'">'+label+'</a>'
            if item_i == 0:
                entry = (' ' * 2) + entry
            nav_items.append(entry)
        nav_html = ' | '.join(nav_items) + '<p>'

        # rewrite the page with the nav bar spliced in
        rewritten = []
        with open (page_abs_path, 'r') as html_in:
            for line in html_in.readlines():
                stripped = line.lstrip()
                # krona positions its plot via options.style.top; scale the
                # downshift with the nav bar length, clamped to a minimum
                if page_type == 'krona' and stripped.startswith('options.style.top ='):
                    downshift = int(0.15 * len(nav_html))
                    if downshift < min_downshift:
                        downshift = min_downshift
                    rewritten.append("\t options.style.top = '"+str(downshift)+"px';")
                    continue
                rewritten.append(line)
                if stripped.startswith('<body'):
                    rewritten.append(nav_html+"\n")
        with open (page_abs_path, 'w') as html_out:
            for out_line in rewritten:
                html_out.write(out_line)
def _parse_kaiju_summary_file (self, summary_file, tax_level):
    """Parse a kaijuReport summary file into per-lineage percentages.

    Returns (abundance, lineage_order, classified_frac):
      abundance       dict lineage name -> percent of classified reads
      lineage_order   names in report order, with the special buckets
                      ('tail (< X% each taxon)', 'viruses',
                      'unassigned at <tax_level> level') appended last
      classified_frac fraction of reads that were classified (0.0-1.0)

    Results are memoized per file path in self.parsed_summary.
    """
    cached = self.parsed_summary.get(summary_file)
    if cached is not None:
        return (cached['abundance'],
                cached['lineage_order'],
                cached['classified_frac'])

    abundance = dict()
    lineage_order = []
    unclassified_perc = 0.0
    unassigned_perc = None
    tail_perc = None
    virus_perc = None
    tail_cutoff = None

    with open (summary_file, 'r') as summary_handle:
        for raw_line in summary_handle:
            line = raw_line.strip()
            # skip separator and header rows
            if line.startswith(('-', '%')):
                continue
            (perc_str, reads_cnt_str, lineage_str) = line.split("\t")
            perc = float(perc_str.strip())
            reads_cnt = int(reads_cnt_str.strip())   # validates count column (value unused)
            lineage = lineage_str.strip()
            if lineage == 'unclassified':
                unclassified_perc = perc
            elif lineage.startswith('cannot be assigned'):
                unassigned_perc = perc
            elif lineage.startswith('belong to a'):
                # extract the percent cutoff from e.g.
                # "belong to a genus with less than 1% of all reads"
                tail_cutoff = re.sub(r'% of all reads', '',
                                     re.sub(r'belong to a \S+ with less than ', '', lineage))
                tail_perc = perc
            elif lineage.startswith('Viruses'):
                virus_perc = perc
            else:
                lineage_order.append(lineage)
                abundance[lineage] = perc

    # special buckets go last, in a fixed order
    if tail_cutoff is not None:
        tail_key = 'tail (< '+tail_cutoff+'% each taxon)'
        lineage_order.append(tail_key)
        abundance[tail_key] = tail_perc
    if virus_perc is not None:
        lineage_order.append('viruses')
        abundance['viruses'] = virus_perc
    if unassigned_perc is not None:
        unassigned_key = 'unassigned at '+tax_level+' level'
        lineage_order.append(unassigned_key)
        abundance[unassigned_key] = unassigned_perc

    classified_frac = 1.0 - unclassified_perc/100.0

    # memoize to avoid re-parsing the same file
    self.parsed_summary[summary_file] = {'abundance': abundance,
                                         'lineage_order': lineage_order,
                                         'classified_frac': classified_frac}
    return (abundance, lineage_order, classified_frac)
def _parse_kaiju_classification_file (self, classification_file, tax_level, db_type):
    """Roll per-read kaiju classifications up to the requested taxonomic level.

    Loads the NCBI taxonomy names.dmp / nodes.dmp tables shipped with the
    kaiju reference DB (cached across calls on self.NAMES_DB / self.NODES_DB),
    tallies classified reads per taxon node (cached per file in
    self.species_abundance_by_sample), then walks each counted node up the
    taxonomy tree until a node with rank == tax_level is reached.

    Args:
        classification_file: kaiju per-read output (tab-separated columns:
            C/U flag, read id, taxon node id).
        tax_level: target rank, e.g. 'genus' or 'phylum'.
        db_type: kaiju DB flavor; selects /data/kaijudb/<db_type>/.

    Returns:
        (abundance_cnts, lineage_order): dict of taxon name -> read count,
        and a list of those names.
    """
    KAIJU_DB_DIR = os.path.join(os.path.sep, 'data', 'kaijudb', db_type)
    KAIJU_DB_NODES = os.path.join(KAIJU_DB_DIR, 'nodes.dmp')
    KAIJU_DB_NAMES = os.path.join(KAIJU_DB_DIR, 'names.dmp')

    # store names db (scientific names only), as a flat list indexed by taxon id
    if self.NAMES_DB is not None:
        # NAMES_DB was allocated with largest_id+1 slots, so recover largest_id
        # (BUGFIX: was len(self.NAMES_DB), which over-counted by one)
        largest_id = len(self.NAMES_DB) - 1
    else:
        ID_I = 0    # taxon id column
        NAME_I = 1  # name column
        CAT_I = 3   # name category column
        # first pass: find the largest taxon id so we can size a flat list
        largest_id = 0
        with open (KAIJU_DB_NAMES, 'r') as names_handle:
            for names_line in names_handle:
                names_line_info = names_line.rstrip().split("\t|")
                if names_line_info[CAT_I].strip() != 'scientific name':
                    continue
                name_id = int(names_line_info[ID_I].strip())
                if name_id > largest_id:
                    largest_id = name_id
        # second pass: store each scientific name at its id position
        self.NAMES_DB = [None] * (largest_id+1)
        with open (KAIJU_DB_NAMES, 'r') as names_handle:
            for names_line in names_handle:
                names_line_info = names_line.rstrip().split("\t|")
                if names_line_info[CAT_I].strip() != 'scientific name':
                    continue
                name_id = int(names_line_info[ID_I].strip())
                self.NAMES_DB[name_id] = names_line_info[NAME_I].strip()

    # known NCBI rank strings, mapped to small integer ids for compact storage
    all_tax_levels = ['class', 'cohort', 'family', 'forma', 'genus',
                      'infraclass', 'infraorder', 'kingdom', 'no rank',
                      'order', 'parvorder', 'phylum', 'species', 'subclass',
                      'subfamily', 'subgenus', 'subkingdom', 'suborder',
                      'subphylum', 'subspecies', 'subtribe', 'superclass',
                      'superfamily', 'superkingdom', 'superorder',
                      'superphylum', 'tribe', 'varietas']
    tax_level_id2str = []
    tax_level_str2id = dict()
    for tax_level_id,tax_level_str in enumerate(all_tax_levels):
        tax_level_id2str.append(tax_level_str)
        tax_level_str2id[tax_level_str] = tax_level_id

    # store nodes db: NODES_DB[node_id] = [parent_id, tax_level_id]
    if self.NODES_DB is None:
        NODE_ID_I = 0
        PAR_ID_I = 1
        LEVEL_I = 2
        self.NODES_DB = [None] * (largest_id+1)
        with open (KAIJU_DB_NODES, 'r') as nodes_handle:
            for nodes_line in nodes_handle:
                nodes_line_info = nodes_line.rstrip().split("\t|")
                node_id = int(nodes_line_info[NODE_ID_I].strip())
                par_id = int(nodes_line_info[PAR_ID_I].strip())
                tax_level_str = nodes_line_info[LEVEL_I].strip()
                # collapse species sub-groupings into 'species'
                if tax_level_str == 'species group' or tax_level_str == 'species subgroup':
                    tax_level_str = 'species'
                # ROBUSTNESS: ranks missing from all_tax_levels (newer NCBI
                # dumps add e.g. 'clade') are treated as 'no rank' instead of
                # raising KeyError
                tax_level_id = tax_level_str2id.get(tax_level_str, tax_level_str2id['no rank'])
                self.NODES_DB[node_id] = [par_id, tax_level_id]

    # tally reads per classified taxon node from the kaiju read classification
    if classification_file not in self.species_abundance_by_sample:
        species_abundance_cnts = [0] * (largest_id+1)
        CLASS_FLAG_I = 0   # 'C' classified / 'U' unclassified
        READ_ID_I = 1      # read id column (not used here)
        NODE_ID_I = 2      # taxon node id column
        with open (classification_file, 'r') as class_handle:
            for class_line in class_handle:
                class_line = class_line.rstrip()  # BUGFIX: result was discarded before
                class_info = class_line.split("\t")
                if class_info[CLASS_FLAG_I] == 'U':
                    continue
                node_id = int(class_info[NODE_ID_I])
                species_abundance_cnts[node_id] += 1
        self.species_abundance_by_sample[classification_file] = species_abundance_cnts

    # navigate up tax hierarchy until reaching the desired level and
    # accumulate read counts by taxon name
    abundance_cnts = dict()
    PAR_ID_I = 0
    TAX_LEVEL_ID_I = 1
    level_limit = 100   # guard against cycles / malformed taxonomies
    for node_id,species_cnt in enumerate(self.species_abundance_by_sample[classification_file]):
        if species_cnt > 0:
            this_par_id = self.NODES_DB[node_id][PAR_ID_I]
            this_tax_level_id = self.NODES_DB[node_id][TAX_LEVEL_ID_I]
            level_lim_i = 0
            while level_lim_i < level_limit:
                level_lim_i += 1
                if tax_level_id2str[this_tax_level_id] == tax_level:
                    node_name = self.NAMES_DB[node_id]
                    if node_name not in abundance_cnts:
                        abundance_cnts[node_name] = 0
                    abundance_cnts[node_name] += species_cnt
                    break
                else:
                    node_id = this_par_id
                    this_par_id = self.NODES_DB[node_id][PAR_ID_I]
                    this_tax_level_id = self.NODES_DB[node_id][TAX_LEVEL_ID_I]
                    # stop at the taxonomy root
                    if this_par_id == 1:
                        break

    # BUGFIX: return a real list (dict.keys() is a non-indexable view in Python 3)
    lineage_order = list(abundance_cnts.keys())
    return (abundance_cnts, lineage_order)
def _create_bar_plots (self, out_folder=None,
                       out_file_basename=None,
                       vals=None,
                       frac_vals=None,
                       title=None,
                       frac_y_label=None,
                       y_label=None,
                       sample_labels=None,
                       element_labels=None,
                       sort_by=None):
    """Render a two-panel stacked bar figure and save it as PNG + PDF.

    Top panel: per-sample classified fraction (frac_vals). Bottom panel:
    stacked bars of per-element (taxon) percentages per sample. vals is
    indexed [element][sample]. sort_by is None, 'alpha', or 'totals'; the
    three special buckets ('tail (<...', 'viruses', 'unassigned at ...')
    keep their positions at the end of element_labels during sorting.
    Writes <out_folder>/<out_file_basename>.png and .pdf; returns the PNG
    path.
    """
    # DEBUG
    #N = len(sample_labels)
    #N_force = 40
    #for sample_i in range(N_force):
    #    if sample_i >= N:
    #        sample_labels.append(sample_labels[0]+'.'+str(sample_i))
    #        frac_vals.append(frac_vals[0])
    #        for element_i,element_label in enumerate(element_labels):
    #            vals[element_i].append(vals[element_i][0])
    #for sample_i,label in enumerate(sample_labels):
    #    sample_labels[sample_i] = 'foobar.'+str(sample_i)
    # END DEBUG

    # number of samples
    N = len(sample_labels)

    # colors
    # NOTE(review): this aliases the instance list — the shuffle/append below
    # mutate self.no_light_color_names in place, so color order persists (and
    # the list can grow) across calls; confirm this is intended.
    color_names = self.no_light_color_names
    len_color_names = len(color_names)
    random.seed(a=len(element_labels))
    r = random.random()
    # NOTE(review): the 2-arg form random.shuffle(x, random_fn) was removed
    # in Python 3.11 — confirm the runtime Python version.
    shuffle(color_names, lambda: r)
    for label_i,label in enumerate(element_labels):
        if label_i >= len_color_names:
            color_names.append(color_names[label_i % len_color_names])
        # special buckets get fixed colors
        if label.startswith('tail (<'):
            color_names[label_i] = 'lightslategray'
        elif label.startswith('viruses'):
            color_names[label_i] = 'magenta'
        elif label.startswith('unassigned at'):
            color_names[label_i] = 'darkslategray'

    # Sort vals
    if sort_by != None:
        print ("SORTING ELEMENTS by "+str(sort_by))
        old_index = dict()
        new_index = dict()
        for label_i,label in enumerate(element_labels):
            old_index[label] = label_i
            #print ("LABEL: "+str(label)+" OLD_INDEX: "+str(label_i)) # DEBUG

        # alphabet sort (special buckets keep their original positions)
        if sort_by == 'alpha':
            new_label_i = 0
            for label in sorted(element_labels, reverse=True):
                if label.startswith('tail (<') or label.startswith('viruses') or label.startswith('unassigned at'):
                    new_index[label] = old_index[label]
                else:
                    new_index[label] = new_label_i
                    new_label_i += 1
                #print ("LABEL: "+str(label)+" NEW_INDEX: "+str(new_index[label])) # DEBUG

        # summed total sort (largest summed abundance first)
        elif sort_by == 'totals':
            totals = dict()
            for label_i,label in enumerate(element_labels):
                totals[label] = 0
                for sample_i,sample in enumerate(sample_labels):
                    totals[label] += vals[label_i][sample_i]
            totals_vals = []
            labels_by_totals = dict()
            for label in totals.keys():
                if totals[label] not in totals_vals:
                    totals_vals.append(totals[label])
                    labels_by_totals[totals[label]] = []
                labels_by_totals[totals[label]].append(label)
            new_label_i = 0
            for totals_val in sorted(totals_vals, reverse=True):
                for label in labels_by_totals[totals_val]:
                    if label.startswith('tail (<') or label.startswith('viruses') or label.startswith('unassigned at'):
                        new_index[label] = old_index[label]
                    else:
                        new_index[label] = new_label_i
                        new_label_i += 1
                    #print ("LABEL: "+str(label)+" NEW_INDEX: "+str(new_index[label])) # DEBUG

        # store new order
        new_vals = []
        new_element_labels = []
        for label_i,label in enumerate(element_labels):
            new_vals.append([])
            new_element_labels.append(None)
        for label_i,label in enumerate(element_labels):
            new_element_i = new_index[label]
            #print ("NEW_ELEMENT_I: "+str(new_element_i)) # DEBUG
            new_vals[new_element_i] = vals[label_i]
            new_element_labels[new_element_i] = label
            # DEBUG
            #print ("NEW LABEL: "+str(label)+" NEW_INDEX: "+str(new_element_i)+" OLD_INDEX: "+str(label_i)) # DEBUG
            #for sample_i,val in enumerate(new_vals[new_element_i]):
            #    print ("\t"+"SAMPLE_I: "+str(sample_i)+" NEW_VAL: "+str(new_vals[new_element_i][sample_i]))
        vals = new_vals
        element_labels = new_element_labels

    # reverse so that most important plots near top (below special 3 categories)
    # NOTE(review): assumes the last 3 entries are the special buckets — they
    # are kept in place while the rest are reversed.
    element_labels = element_labels[-4::-1] + element_labels[-3:]
    vals = vals[-4::-1] + vals[-3:]

    # plot dimensions (internal layout "units", converted to inches below)
    #per_unit_to_inch_scale = 0.25
    per_unit_to_inch_scale = 0.5
    bar_width_unit = 0.5
    plot_x_pad_unit = bar_width_unit / 2.0
    plot_width_unit = 2*plot_x_pad_unit + N
    # beyond this many samples, additional samples add width at a reduced rate
    downscale_above_N = 20
    extra_sample_scale = 0.5
    if N > downscale_above_N:
        plot_width_unit = 2*plot_x_pad_unit + downscale_above_N + extra_sample_scale*(N-downscale_above_N)
    plot_height_unit = 8

    # label dimensions (padding scales with the longest label strings)
    longest_sample_label_len = 0
    longest_element_label_len = 0
    len_elements_list = len(element_labels)
    for label in sample_labels:
        if len(label) > longest_sample_label_len:
            longest_sample_label_len = len(label)
    for label in element_labels:
        if len(label) > longest_element_label_len:
            longest_element_label_len = len(label)
    #x_label_scale_unit = 0.015
    #y_label_scale_unit = 0.015
    x_label_scale_unit = 0.175
    y_label_scale_unit = 0.16
    key_label_scale = y_label_scale_unit * 50 / 30.0
    x_label_pad_unit = x_label_scale_unit * longest_element_label_len
    y_label_pad_unit = y_label_scale_unit * longest_sample_label_len
    # a long legend needs more vertical padding than the sample labels do
    if key_label_scale * len_elements_list > y_label_pad_unit:
        y_label_pad_unit = key_label_scale * len_elements_list
    x_label_pad_inch = per_unit_to_inch_scale * x_label_pad_unit
    y_label_pad_inch = per_unit_to_inch_scale * y_label_pad_unit

    # build canvas dimensions
    x_pad_unit = 1.0
    y_pad_unit = 0.25
    #x_pad_unit = 0.10
    #y_pad_unit = 0.10
    x_pad_inch = per_unit_to_inch_scale * x_pad_unit
    y_pad_inch = per_unit_to_inch_scale * y_pad_unit
    canvas_width_unit = 2*x_pad_unit + plot_width_unit + x_label_pad_unit
    canvas_height_unit = 2*y_pad_unit + plot_height_unit + y_label_pad_unit
    canvas_width_inch = per_unit_to_inch_scale * canvas_width_unit
    canvas_height_inch = per_unit_to_inch_scale * canvas_height_unit

    # instantiate fig
    #
    # lose axes with below grid, and so sharex property. instead match xlim, bar_width, hide ticks.
    #fig, (ax_top, ax_bot) = plt.subplots(2, 1, sharex=True)
    # gridspec_kw not in KBase docker notebook agg image (old python?)
    #fig, (ax_top, ax_bot) = plt.subplots(2, 1, sharex=True, gridspec_kw = {'height_ratios':[1, 3]})
    # subplot2grid(shape, loc, rowspan=1, colspan=1)
    FIG_rows = 1000
    FIG_cols = 1
    top_frac = 0.22   # fraction of the grid given to the classified-fraction panel
    top_rows = int(top_frac*FIG_rows)
    bot_rows = FIG_rows-top_rows
    fig = plt.figure()
    ax_top = plt.subplot2grid((FIG_rows,FIG_cols), (0,0), rowspan=top_rows, colspan=1)
    ax_bot = plt.subplot2grid((FIG_rows,FIG_cols), (top_rows,0), rowspan=bot_rows, colspan=1)
    fig.set_size_inches(canvas_width_inch, canvas_height_inch)
    fig.tight_layout()
    #for ax in fig.axes:
    #    ax.xaxis.set_visible(False) # remove axis labels and ticks
    #    ax.yaxis.set_visible(False)
    #    for t in ax.get_xticklabels()+ax.get_yticklabels(): # remove tick labels
    #        t.set_visible(False)
    #for ax in fig.axes:
    #    ax.spines['top'].set_visible(False) # Get rid of top axis line
    #    ax.spines['bottom'].set_visible(False) # bottom axis line
    #    ax.spines['left'].set_visible(False) # Get rid of bottom axis line
    #    ax.spines['right'].set_visible(False) # Get rid of bottom axis line

    # indices
    ind = np.arange(N) # the x locations for the groups
    label_ind = []
    for ind_i,this_ind in enumerate(ind):
        # NOTE(review): ind is an integer array, so assigning
        # this_ind+bar_width_unit/2 back into it truncates the fraction —
        # bar positions stay at integers while label_ind keeps the +0.25
        # offset; confirm this offset difference is intended.
        ind[ind_i] = this_ind+bar_width_unit/2
        label_ind.append(this_ind + bar_width_unit/2)
    np_vals = []
    for vec_i,val_vec in enumerate(vals):
        np_vals.append(np.array(val_vec))

    # plot fraction measured (top panel)
    frac = ax_top.bar(ind, frac_vals, bar_width_unit, color='black', alpha=0.5, ec='none')
    ax_top.set_title(title, fontsize=11)
    # NOTE(review): Axes.grid(b=...) was deprecated/removed in newer
    # matplotlib (renamed to 'visible') — confirm the pinned version.
    ax_top.grid(b=True, axis='y')
    ax_top.set_ylabel(frac_y_label, fontsize=10)
    ax_top.tick_params(axis='y', labelsize=9, labelcolor='black')
    ax_top.set_yticks(np.arange(0.0, 1.01, .20))
    ax_top.set_ylim([0,1])
    ax_top.xaxis.set_visible(False) # remove axis labels and ticks
    ax_top.set_xlim([-plot_x_pad_unit,N-plot_x_pad_unit])

    # plot stacked (bottom panel): each element's bars sit on the running sum
    last_bottom = None
    p = []
    for vec_i,val_vec in enumerate(np_vals):
        if vec_i == 0:
            this_bottom = 0
            last_bottom = val_vec
        else:
            this_bottom = last_bottom
            last_bottom = this_bottom + val_vec
        p.append (ax_bot.bar (ind, val_vec, bar_width_unit, bottom=this_bottom, color=color_names[vec_i], alpha=0.5, ec='none'))
    ax_bot.set_ylabel(y_label, fontsize=10)
    ax_bot.tick_params(axis='y', direction='in', length=4, width=0.5, colors='black', labelsize=9, labelcolor='black')
    #plt.title(title)
    #plt.xticks(label_ind, sample_labels, ha='right', rotation=45)
    #ax_bot.set_xticks(label_ind, sample_labels, ha='center', rotation=90)
    ax_bot.tick_params(axis='x', direction='out', length=0, width=0, colors='black', labelsize=9, labelcolor='black')
    ax_bot.set_xticks(label_ind)
    ax_bot.set_xticklabels(sample_labels, ha='center', rotation=90)
    ax_bot.tick_params(axis='y', labelsize=9, labelcolor='black')
    ax_bot.set_yticks(np.arange(0, 101, 20))
    ax_bot.set_ylim([0,100])
    ax_bot.set_xlim([-plot_x_pad_unit,N-plot_x_pad_unit])

    # are positions now in units (btw. 0-1) or inches? seems to depend on the backend Agg
    # (normalize layout units to figure-relative coordinates for set_position)
    x_pad = x_pad_unit / canvas_width_unit
    y_pad = y_pad_unit / canvas_height_unit
    plot_width = plot_width_unit / canvas_width_unit
    plot_height = plot_height_unit / canvas_height_unit
    x_label_pad = x_label_pad_unit / canvas_width_unit
    y_label_pad = y_label_pad_unit / canvas_height_unit

    # Frac Plot sizing
    # don't shrink frac plot. instead place it explictly since we built canvas for it
    #
    box = ax_top.get_position()
    # ax_top.set_position([box.x0 + x_pad_inch,
    #                      box.y0 + y_pad_inch,
    #                      box.width - x_label_pad_inch - 2*x_pad_inch,
    #                      #box.height - y_pad_inch
    #                      box.height
    #                      ])
    top_pos = [x_0, y_0, w, h] = [0 + x_pad,
                                  (1.0 - top_frac)*plot_height + y_label_pad,
                                  plot_width,
                                  top_frac*plot_height - 2*y_pad
                                  ]
    ax_top.set_position(top_pos)
    # DEBUG
    #print ("AX_TOP: BOX:")
    #print ([box.x0, box.y0, box.width, box.height])
    #print ("AX_TOP: TOP_POS:")
    #print (top_pos)

    # Stacked Plot sizing
    # don't shrink plot. instead place it explictly since we built canvas for it
    #
    box = ax_bot.get_position()
    #ax_bot.set_position([box.x0 + x_pad_inch,
    #                     #box.y0 + y_pad_inch + y_label_pad_inch,
    #                     box.y0,
    #                     box.width - x_label_pad_inch - 2*x_pad_inch,
    #                     #box.height - y_label_pad_inch - y_pad_inch
    #                     box.height
    #                     ])
    bot_pos = [x_0, y_0, w, h] = [0 + x_pad,
                                  0 + y_label_pad + y_pad,
                                  plot_width,
                                  (1.0 - top_frac)*plot_height - 2*y_pad
                                  ]
    ax_bot.set_position(bot_pos)
    # DEBUG
    #print ("AX_BOT: BOX:")
    #print ([box.x0, box.y0, box.width, box.height])
    #print ("AX_BOT: BOT_POS:")
    #print (bot_pos)

    # add key (legend entries in reverse so top-of-stack appears first)
    key_colors = []
    for each_p in reversed(p):
        key_colors.append(each_p[0])
    ax_bot.legend(key_colors, reversed(element_labels), loc='upper left', bbox_to_anchor=(1,1), fontsize=9)

    # save
    img_dpi = 200
    #plt.show()
    log("SAVING STACKED BAR PLOT")
    png_file = out_file_basename+'.png'
    pdf_file = out_file_basename+'.pdf'
    output_png_file_path = os.path.join(out_folder, png_file);
    output_pdf_file_path = os.path.join(out_folder, pdf_file);
    fig.savefig(output_png_file_path, dpi=img_dpi)
    fig.savefig(output_pdf_file_path, format='pdf')
    return output_png_file_path
def _create_area_plots (self, out_folder=None,
                        out_file_basename=None,
                        vals=None,
                        frac_vals=None,
                        title=None,
                        frac_y_label=None,
                        y_label=None,
                        sample_labels=None,
                        element_labels=None,
                        sort_by=None):
    """Render a two-panel figure: classified fraction bars on top and a
    stacked area plot of per-element (taxon) percentages below.

    Same inputs and layout machinery as _create_bar_plots; only the bottom
    panel differs (stackplot instead of stacked bars, with a manually built
    legend). vals is indexed [element][sample]. sort_by is None, 'alpha',
    or 'totals'. Writes <out_folder>/<out_file_basename>.png and .pdf;
    returns the PNG path.
    """
    # number of samples
    N = len(sample_labels)

    # colors
    # NOTE(review): this aliases the instance list — the shuffle/append below
    # mutate self.no_light_color_names in place across calls; confirm intended.
    color_names = self.no_light_color_names
    len_color_names = len(color_names)
    random.seed(a=len(element_labels))
    r = random.random()
    # NOTE(review): the 2-arg form random.shuffle(x, random_fn) was removed
    # in Python 3.11 — confirm the runtime Python version.
    shuffle(color_names, lambda: r)
    for label_i,label in enumerate(element_labels):
        if label_i >= len_color_names:
            color_names.append(color_names[label_i % len_color_names])
        # special buckets get fixed colors
        if label.startswith('tail (<'):
            color_names[label_i] = 'lightslategray'
        elif label.startswith('viruses'):
            color_names[label_i] = 'magenta'
        elif label.startswith('unassigned at'):
            color_names[label_i] = 'darkslategray'

    # Sort vals
    if sort_by != None:
        print ("SORTING ELEMENTS by "+str(sort_by))
        old_index = dict()
        new_index = dict()
        for label_i,label in enumerate(element_labels):
            old_index[label] = label_i
            #print ("LABEL: "+str(label)+" OLD_INDEX: "+str(label_i)) # DEBUG

        # alphabet sort (special buckets keep their original positions)
        if sort_by == 'alpha':
            new_label_i = 0
            for label in sorted(element_labels, reverse=True):
                if label.startswith('tail (<') or label.startswith('viruses') or label.startswith('unassigned at'):
                    new_index[label] = old_index[label]
                else:
                    new_index[label] = new_label_i
                    new_label_i += 1
                #print ("LABEL: "+str(label)+" NEW_INDEX: "+str(new_index[label])) # DEBUG

        # summed total sort (largest summed abundance first)
        elif sort_by == 'totals':
            totals = dict()
            for label_i,label in enumerate(element_labels):
                totals[label] = 0
                for sample_i,sample in enumerate(sample_labels):
                    totals[label] += vals[label_i][sample_i]
            totals_vals = []
            labels_by_totals = dict()
            for label in totals.keys():
                if totals[label] not in totals_vals:
                    totals_vals.append(totals[label])
                    labels_by_totals[totals[label]] = []
                labels_by_totals[totals[label]].append(label)
            new_label_i = 0
            for totals_val in sorted(totals_vals, reverse=True):
                for label in labels_by_totals[totals_val]:
                    if label.startswith('tail (<') or label.startswith('viruses') or label.startswith('unassigned at'):
                        new_index[label] = old_index[label]
                    else:
                        new_index[label] = new_label_i
                        new_label_i += 1
                    #print ("LABEL: "+str(label)+" NEW_INDEX: "+str(new_index[label])) # DEBUG

        # store new order
        new_vals = []
        new_element_labels = []
        for label_i,label in enumerate(element_labels):
            new_vals.append([])
            new_element_labels.append(None)
        for label_i,label in enumerate(element_labels):
            new_element_i = new_index[label]
            #print ("NEW_ELEMENT_I: "+str(new_element_i)) # DEBUG
            new_vals[new_element_i] = vals[label_i]
            new_element_labels[new_element_i] = label
            # DEBUG
            #print ("NEW LABEL: "+str(label)+" NEW_INDEX: "+str(new_element_i)+" OLD_INDEX: "+str(label_i)) # DEBUG
            #for sample_i,val in enumerate(new_vals[new_element_i]):
            #    print ("\t"+"SAMPLE_I: "+str(sample_i)+" NEW_VAL: "+str(new_vals[new_element_i][sample_i]))
        vals = new_vals
        element_labels = new_element_labels

    # reverse so that most important plots near top (below special 3 categories)
    # NOTE(review): assumes the last 3 entries are the special buckets — they
    # are kept in place while the rest are reversed.
    element_labels = element_labels[-4::-1] + element_labels[-3:]
    vals = vals[-4::-1] + vals[-3:]

    # plot dimensions (internal layout "units", converted to inches below)
    #per_unit_to_inch_scale = 0.25
    per_unit_to_inch_scale = 0.5
    bar_width_unit = 0.5
    plot_x_pad_unit = bar_width_unit / 2.0
    plot_width_unit = 2*plot_x_pad_unit + N
    # beyond this many samples, additional samples add width at a reduced rate
    downscale_above_N = 20
    extra_sample_scale = 0.5
    if N > downscale_above_N:
        plot_width_unit = 2*plot_x_pad_unit + downscale_above_N + extra_sample_scale*(N-downscale_above_N)
    plot_height_unit = 8

    # label dimensions (padding scales with the longest label strings)
    longest_sample_label_len = 0
    longest_element_label_len = 0
    len_elements_list = len(element_labels)
    for label in sample_labels:
        if len(label) > longest_sample_label_len:
            longest_sample_label_len = len(label)
    for label in element_labels:
        if len(label) > longest_element_label_len:
            longest_element_label_len = len(label)
    #x_label_scale_unit = 0.015
    #y_label_scale_unit = 0.015
    x_label_scale_unit = 0.175
    y_label_scale_unit = 0.16
    key_label_scale = y_label_scale_unit * 50 / 30.0
    x_label_pad_unit = x_label_scale_unit * longest_element_label_len
    y_label_pad_unit = y_label_scale_unit * longest_sample_label_len
    # a long legend needs more vertical padding than the sample labels do
    if key_label_scale * len_elements_list > y_label_pad_unit:
        y_label_pad_unit = key_label_scale * len_elements_list
    x_label_pad_inch = per_unit_to_inch_scale * x_label_pad_unit
    y_label_pad_inch = per_unit_to_inch_scale * y_label_pad_unit

    # build canvas dimensions
    x_pad_unit = 1.0
    y_pad_unit = 0.25
    #x_pad_unit = 0.10
    #y_pad_unit = 0.10
    x_pad_inch = per_unit_to_inch_scale * x_pad_unit
    y_pad_inch = per_unit_to_inch_scale * y_pad_unit
    canvas_width_unit = 2*x_pad_unit + plot_width_unit + x_label_pad_unit
    canvas_height_unit = 2*y_pad_unit + plot_height_unit + y_label_pad_unit
    canvas_width_inch = per_unit_to_inch_scale * canvas_width_unit
    canvas_height_inch = per_unit_to_inch_scale * canvas_height_unit

    # instantiate fig
    #
    # lose axes with below grid, and so sharex property. instead match xlim, bar_width, hide ticks.
    #fig, (ax_top, ax_bot) = plt.subplots(2, 1, sharex=True)
    # gridspec_kw not in KBase docker notebook agg image (old python?)
    #fig, (ax_top, ax_bot) = plt.subplots(2, 1, sharex=True, gridspec_kw = {'height_ratios':[1, 3]})
    # subplot2grid(shape, loc, rowspan=1, colspan=1)
    FIG_rows = 1000
    FIG_cols = 1
    top_frac = 0.22   # fraction of the grid given to the classified-fraction panel
    top_rows = int(top_frac*FIG_rows)
    bot_rows = FIG_rows-top_rows
    fig = plt.figure()
    ax_top = plt.subplot2grid((FIG_rows,FIG_cols), (0,0), rowspan=top_rows, colspan=1)
    ax_bot = plt.subplot2grid((FIG_rows,FIG_cols), (top_rows,0), rowspan=bot_rows, colspan=1)
    fig.set_size_inches(canvas_width_inch, canvas_height_inch)
    fig.tight_layout()
    #for ax in fig.axes:
    #    ax.xaxis.set_visible(False) # remove axis labels and ticks
    #    ax.yaxis.set_visible(False)
    #    for t in ax.get_xticklabels()+ax.get_yticklabels(): # remove tick labels
    #        t.set_visible(False)
    #for ax in fig.axes:
    #    ax.spines['top'].set_visible(False) # Get rid of top axis line
    #    ax.spines['bottom'].set_visible(False) # bottom axis line
    #    ax.spines['left'].set_visible(False) # Get rid of bottom axis line
    #    ax.spines['right'].set_visible(False) # Get rid of bottom axis line

    # indices
    ind = np.arange(N) # the x locations for the groups
    label_ind = []
    for ind_i,this_ind in enumerate(ind):
        # NOTE(review): ind is an integer array, so assigning
        # this_ind+bar_width_unit/2 back into it truncates the fraction —
        # positions stay at integers while label_ind keeps the +0.25 offset;
        # confirm this offset difference is intended.
        ind[ind_i] = this_ind+bar_width_unit/2
        label_ind.append(this_ind + bar_width_unit/2)
    np_vals = []
    for vec_i,val_vec in enumerate(vals):
        np_vals.append(np.array(val_vec))

    # plot fraction measured (top panel)
    frac = ax_top.bar(ind, frac_vals, bar_width_unit, color='black', alpha=0.5, ec='none')
    ax_top.set_title(title, fontsize=11)
    # NOTE(review): Axes.grid(b=...) was deprecated/removed in newer
    # matplotlib (renamed to 'visible') — confirm the pinned version.
    ax_top.grid(b=True, axis='y')
    ax_top.set_ylabel(frac_y_label, fontsize=10)
    ax_top.tick_params(axis='y', labelsize=9, labelcolor='black')
    ax_top.set_yticks(np.arange(0.0, 1.01, .20))
    ax_top.set_ylim([0,1])
    ax_top.xaxis.set_visible(False) # remove axis labels and ticks
    ax_top.set_xlim([-plot_x_pad_unit,N-plot_x_pad_unit])

    """
    ax.stackplot (ind, np_vals, colors=color_names, alpha=0.5, edgecolor='none')
    plt.ylabel(y_label)
    plt.title(title)
    plt.xticks(label_ind, sample_labels, ha='right', rotation=45)
    plt.yticks(np.arange(0, 101, 10))
    # creating the legend manually
    key_colors = []
    for color_i in reversed(np.arange(N-1)):
        key_colors.append(mpatches.Patch(color=color_names[color_i], alpha=0.5, ec='black'))
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    plt.legend(key_colors, reversed(element_labels), loc='upper left', bbox_to_anchor=(1, 1))
    """

    # plot stacked (bottom panel)
    ax_bot.stackplot (ind, np_vals, colors=color_names, alpha=0.5, edgecolor='none')
    ax_bot.set_ylabel(y_label, fontsize=10)
    ax_bot.tick_params(axis='y', direction='in', length=4, width=0.5, colors='black', labelsize=9, labelcolor='black')
    #plt.title(title)
    #plt.xticks(label_ind, sample_labels, ha='right', rotation=45)
    #ax_bot.set_xticks(label_ind, sample_labels, ha='center', rotation=90)
    ax_bot.tick_params(axis='x', direction='out', length=0, width=0, colors='black', labelsize=9, labelcolor='black')
    ax_bot.set_xticks(label_ind)
    ax_bot.set_xticklabels(sample_labels, ha='center', rotation=90)
    ax_bot.tick_params(axis='y', labelsize=9, labelcolor='black')
    ax_bot.set_yticks(np.arange(0, 101, 20))
    ax_bot.set_ylim([0,100])
    ax_bot.set_xlim([-plot_x_pad_unit,N-plot_x_pad_unit])

    # are positions now in units (btw. 0-1) or inches? seems to depend on the backend Agg
    # (normalize layout units to figure-relative coordinates for set_position)
    x_pad = x_pad_unit / canvas_width_unit
    y_pad = y_pad_unit / canvas_height_unit
    plot_width = plot_width_unit / canvas_width_unit
    plot_height = plot_height_unit / canvas_height_unit
    x_label_pad = x_label_pad_unit / canvas_width_unit
    y_label_pad = y_label_pad_unit / canvas_height_unit

    # Frac Plot sizing
    # don't shrink frac plot. instead place it explictly since we built canvas for it
    #
    box = ax_top.get_position()
    # ax_top.set_position([box.x0 + x_pad_inch,
    #                      box.y0 + y_pad_inch,
    #                      box.width - x_label_pad_inch - 2*x_pad_inch,
    #                      #box.height - y_pad_inch
    #                      box.height
    #                      ])
    top_pos = [x_0, y_0, w, h] = [0 + x_pad,
                                  (1.0 - top_frac)*plot_height + y_label_pad,
                                  plot_width,
                                  top_frac*plot_height - 2*y_pad
                                  ]
    ax_top.set_position(top_pos)
    # DEBUG
    #print ("AX_TOP: BOX:")
    #print ([box.x0, box.y0, box.width, box.height])
    #print ("AX_TOP: TOP_POS:")
    #print (top_pos)

    # Stacked Plot sizing
    # don't shrink plot. instead place it explictly since we built canvas for it
    #
    box = ax_bot.get_position()
    #ax_bot.set_position([box.x0 + x_pad_inch,
    #                     #box.y0 + y_pad_inch + y_label_pad_inch,
    #                     box.y0,
    #                     box.width - x_label_pad_inch - 2*x_pad_inch,
    #                     #box.height - y_label_pad_inch - y_pad_inch
    #                     box.height
    #                     ])
    bot_pos = [x_0, y_0, w, h] = [0 + x_pad,
                                  0 + y_label_pad + y_pad,
                                  plot_width,
                                  (1.0 - top_frac)*plot_height - 2*y_pad
                                  ]
    ax_bot.set_position(bot_pos)
    # DEBUG
    #print ("AX_BOT: BOX:")
    #print ([box.x0, box.y0, box.width, box.height])
    #print ("AX_BOT: BOT_POS:")
    #print (bot_pos)

    """
    # add key
    key_colors = []
    for each_p in reversed(p):
        key_colors.append(each_p[0])
    ax_bot.legend(key_colors, reversed(element_labels), loc='upper left', bbox_to_anchor=(1,1), fontsize=9)
    """
    # create the legend manually
    # NOTE(review): this iterates the sample count N-1, not
    # len(element_labels) — the number of legend swatches may not match the
    # number of element labels; confirm against the bar-plot variant.
    w_scale = 0.8
    key_colors = []
    for color_i in reversed(np.arange(N-1)):
        key_colors.append(mpatches.Patch(color=color_names[color_i], alpha=0.5, ec='black'))
    box = ax_top.get_position()
    ax_top.set_position([box.x0, box.y0, box.width * w_scale, box.height])
    box = ax_bot.get_position()
    ax_bot.set_position([box.x0, box.y0, box.width * w_scale, box.height])
    ax_bot.legend(key_colors, reversed(element_labels), loc='upper left', bbox_to_anchor=(1, 1))

    # save
    img_dpi = 200
    #plt.show()
    log("SAVING STACKED AREA PLOT")
    png_file = out_file_basename+'.png'
    pdf_file = out_file_basename+'.pdf'
    output_png_file_path = os.path.join(out_folder, png_file);
    output_pdf_file_path = os.path.join(out_folder, pdf_file);
    fig.savefig(output_png_file_path, dpi=img_dpi)
    fig.savefig(output_pdf_file_path, format='pdf')
    return output_png_file_path
def _create_area_plots_OLD (self, abundances):
color_names = self.no_light_color_names
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import random
from random import shuffle
y_label = 'percent'
title = 'Lineage Proportion'
sample_labels = ['sample1', 'sample2', 'sample3', 'sample4', 'sample5']
element_labels = ['OTU_1', 'OTU_2', 'OTU_3', 'OTU_4']
N = len(sample_labels)
random.seed(a=len(element_labels))
r = random.random()
shuffle(color_names, lambda: r)
vals = [[20, 35, 20, 35, 27],
[25, 22, 34, 20, 15],
[45, 33, 36, 35, 48],
[10, 10, 10, 10, 10]
]
ind = np.arange(N) # the x locations for the groups
label_ind = ind
np_vals = []
for vec_i,val_vec in enumerate(vals):
np_vals.append(np.array(val_vec))
# Build image
if N < 10:
img_in_width = 2*N
elif N < 20:
img_in_width = N
else:
img_in_width = 20
img_in_height = 5
fig = plt.figure()
fig.set_size_inches(img_in_width, img_in_height)
ax = plt.subplot(111)
# Let's turn off visibility of all tic labels and boxes here
#for ax in fig.axes:
# ax.xaxis.set_visible(False) # remove axis labels and tics
# ax.yaxis.set_visible(False)
# for t in ax.get_xticklabels()+ax.get_yticklabels(): # remove tics
# t.set_visible(False)
# ax.spines['top'].set_visible(False) # Get rid of top axis line
# ax.spines['bottom'].set_visible(False) # bottom axis line
# ax.spines['left'].set_visible(False) # left axis line
# ax.spines['right'].set_visible(False) # right axis line
ax.stackplot (ind, np_vals, colors=color_names, alpha=0.5, edgecolor='none')
plt.ylabel(y_label)
plt.title(title)
plt.xticks(label_ind, sample_labels, ha='right', rotation=45)
plt.yticks(np.arange(0, 101, 10))
# creating the legend manually
key_colors = []
for color_i in reversed(np.arange(N-1)):
key_colors.append(mpatches.Patch(color=color_names[color_i], alpha=0.5, ec='black'))
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
plt.legend(key_colors, reversed(element_labels), loc='upper left', bbox_to_anchor=(1, 1))
#plt.grid()
plt.show()
def _build_plot_html_header(self, title):
buf = []
buf.append('<html>')
buf.append('<head>')
buf.append('<title>'+title+'</title>')
style = '''
<style style="text/css">
a {
color: #337ab7;
}
a:hover {
color: #23527c;
}
table {
border: 1px solid #bbb;
border-collapse: collapse;
}
th, td {
text-align: left;
border: 1px solid #bbb;
padding: 8px;
}
tr:nth-child(odd) {
background-color: #f9f9f9;
}
tr:hover {
background-color: #f5f5f5;
}
</style>'''
buf.append(style)
buf.append('</head>')
buf.append('<body>')
return buf
def _build_plot_html_footer(self):
buf = []
buf.append('</body>')
buf.append('</html>')
return buf
def _write_buf_to_file(self, filename, buf):
with open (filename, 'w') as handle:
for line_buf in buf:
handle.write(line_buf+"\n")
| 41.61849 | 148 | 0.543018 | 63,236 | 0.989206 | 0 | 0 | 0 | 0 | 0 | 0 | 16,547 | 0.258846 |
1d93308a11488dd842ee04ecb3fb3f177a82ba23 | 1,723 | py | Python | Gds/src/fprime_gds/wxgui/tools/PexpectRunnerConsolGUI.py | hunterpaulson/fprime | 70560897b56dc3037dc966c99751b708b1cc8a05 | [
"Apache-2.0"
] | null | null | null | Gds/src/fprime_gds/wxgui/tools/PexpectRunnerConsolGUI.py | hunterpaulson/fprime | 70560897b56dc3037dc966c99751b708b1cc8a05 | [
"Apache-2.0"
] | 5 | 2020-07-13T16:56:33.000Z | 2020-07-23T20:38:13.000Z | Gds/src/fprime_gds/wxgui/tools/PexpectRunnerConsolGUI.py | hunterpaulson/lgtm-fprime | 9eeda383c263ecba8da8188a45e1d020107ff323 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version May 29 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class PexpectRunnerGUI
###########################################################################
class PexpectRunnerGUI(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(
self,
parent,
id=wx.ID_ANY,
title=u"Pexpect Output",
pos=wx.DefaultPosition,
size=wx.Size(500, 300),
style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL,
)
self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
bSizer3 = wx.BoxSizer(wx.VERTICAL)
self.TextCtrlConsol = wx.TextCtrl(
self,
wx.ID_ANY,
wx.EmptyString,
wx.DefaultPosition,
wx.DefaultSize,
wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_WORDWRAP,
)
bSizer3.Add(self.TextCtrlConsol, 1, wx.ALL | wx.EXPAND, 5)
self.SetSizer(bSizer3)
self.Layout()
self.Centre(wx.BOTH)
# Connect Events
self.Bind(wx.EVT_CLOSE, self.onWindowClose)
self.TextCtrlConsol.Bind(wx.EVT_MOUSEWHEEL, self.onMouseWheel)
def __del__(self):
pass
# Virtual event handlers, overide them in your derived class
def onWindowClose(self, event):
event.Skip()
def onMouseWheel(self, event):
event.Skip()
| 27.790323 | 75 | 0.490424 | 1,202 | 0.69762 | 0 | 0 | 0 | 0 | 0 | 0 | 574 | 0.33314 |
1d950e325ef85dc98b69ae74e351c6705f81fa42 | 20,121 | py | Python | web/openerp/addons/base/tests/test_ir_actions.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | 1 | 2019-12-29T11:53:56.000Z | 2019-12-29T11:53:56.000Z | odoo/openerp/addons/base/tests/test_ir_actions.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | null | null | null | odoo/openerp/addons/base/tests/test_ir_actions.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | 3 | 2020-10-08T14:42:10.000Z | 2022-01-28T14:12:29.000Z | import unittest2
from openerp.osv.orm import except_orm
import openerp.tests.common as common
from openerp.tools import mute_logger
class TestServerActionsBase(common.TransactionCase):
def setUp(self):
super(TestServerActionsBase, self).setUp()
cr, uid = self.cr, self.uid
# Models
self.ir_actions_server = self.registry('ir.actions.server')
self.ir_actions_client = self.registry('ir.actions.client')
self.ir_values = self.registry('ir.values')
self.ir_model = self.registry('ir.model')
self.ir_model_fields = self.registry('ir.model.fields')
self.res_partner = self.registry('res.partner')
self.res_country = self.registry('res.country')
# Data on which we will run the server action
self.test_country_id = self.res_country.create(cr, uid, {
'name': 'TestingCountry',
'code': 'TY',
'address_format': 'SuperFormat',
})
self.test_country = self.res_country.browse(cr, uid, self.test_country_id)
self.test_partner_id = self.res_partner.create(cr, uid, {
'name': 'TestingPartner',
'city': 'OrigCity',
'country_id': self.test_country_id,
})
self.test_partner = self.res_partner.browse(cr, uid, self.test_partner_id)
self.context = {
'active_id': self.test_partner_id,
'active_model': 'res.partner',
}
# Model data
self.res_partner_model_id = self.ir_model.search(cr, uid, [('model', '=', 'res.partner')])[0]
self.res_partner_name_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'name')])[0]
self.res_partner_city_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'city')])[0]
self.res_partner_country_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'country_id')])[0]
self.res_partner_parent_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'parent_id')])[0]
self.res_country_model_id = self.ir_model.search(cr, uid, [('model', '=', 'res.country')])[0]
self.res_country_name_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.country'), ('name', '=', 'name')])[0]
self.res_country_code_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.country'), ('name', '=', 'code')])[0]
# create server action to
self.act_id = self.ir_actions_server.create(cr, uid, {
'name': 'TestAction',
'condition': 'True',
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'obj.write({"comment": "MyComment"})',
})
class TestServerActions(TestServerActionsBase):
def test_00_action(self):
cr, uid = self.cr, self.uid
# Do: eval 'True' condition
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_partner.refresh()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: invalid condition check')
self.test_partner.write({'comment': False})
# Do: eval False condition, that should be considered as True (void = True)
self.ir_actions_server.write(cr, uid, [self.act_id], {'condition': False})
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_partner.refresh()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: invalid condition check')
# Do: create contextual action
self.ir_actions_server.create_action(cr, uid, [self.act_id])
# Test: ir_values created
ir_values_ids = self.ir_values.search(cr, uid, [('name', '=', 'Run TestAction')])
self.assertEqual(len(ir_values_ids), 1, 'ir_actions_server: create_action should have created an entry in ir_values')
ir_value = self.ir_values.browse(cr, uid, ir_values_ids[0])
self.assertEqual(ir_value.value, 'ir.actions.server,%s' % self.act_id, 'ir_actions_server: created ir_values should reference the server action')
self.assertEqual(ir_value.model, 'res.partner', 'ir_actions_server: created ir_values should be linked to the action base model')
# Do: remove contextual action
self.ir_actions_server.unlink_action(cr, uid, [self.act_id])
# Test: ir_values removed
ir_values_ids = self.ir_values.search(cr, uid, [('name', '=', 'Run TestAction')])
self.assertEqual(len(ir_values_ids), 0, 'ir_actions_server: unlink_action should remove the ir_values record')
def test_10_code(self):
cr, uid = self.cr, self.uid
self.ir_actions_server.write(cr, uid, self.act_id, {
'state': 'code',
'code': """partner_name = obj.name + '_code'
self.pool["res.partner"].create(cr, uid, {"name": partner_name}, context=context)
workflow"""
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: code server action correctly finished should return False')
pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner_code')])
self.assertEqual(len(pids), 1, 'ir_actions_server: 1 new partner should have been created')
def test_20_trigger(self):
cr, uid = self.cr, self.uid
# Data: code server action (at this point code-based actions should work)
act_id2 = self.ir_actions_server.create(cr, uid, {
'name': 'TestAction2',
'type': 'ir.actions.server',
'condition': 'True',
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'obj.write({"comment": "MyComment"})',
})
act_id3 = self.ir_actions_server.create(cr, uid, {
'name': 'TestAction3',
'type': 'ir.actions.server',
'condition': 'True',
'model_id': self.res_country_model_id,
'state': 'code',
'code': 'obj.write({"code": "ZZ"})',
})
# Data: create workflows
partner_wf_id = self.registry('workflow').create(cr, uid, {
'name': 'TestWorkflow',
'osv': 'res.partner',
'on_create': True,
})
partner_act1_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'PartnerStart',
'wkf_id': partner_wf_id,
'flow_start': True
})
partner_act2_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'PartnerTwo',
'wkf_id': partner_wf_id,
'kind': 'function',
'action': 'True',
'action_id': act_id2,
})
partner_trs1_id = self.registry('workflow.transition').create(cr, uid, {
'signal': 'partner_trans',
'act_from': partner_act1_id,
'act_to': partner_act2_id
})
country_wf_id = self.registry('workflow').create(cr, uid, {
'name': 'TestWorkflow',
'osv': 'res.country',
'on_create': True,
})
country_act1_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'CountryStart',
'wkf_id': country_wf_id,
'flow_start': True
})
country_act2_id = self.registry('workflow.activity').create(cr, uid, {
'name': 'CountryTwo',
'wkf_id': country_wf_id,
'kind': 'function',
'action': 'True',
'action_id': act_id3,
})
country_trs1_id = self.registry('workflow.transition').create(cr, uid, {
'signal': 'country_trans',
'act_from': country_act1_id,
'act_to': country_act2_id
})
# Data: re-create country and partner to benefit from the workflows
self.test_country_id = self.res_country.create(cr, uid, {
'name': 'TestingCountry2',
'code': 'T2',
})
self.test_country = self.res_country.browse(cr, uid, self.test_country_id)
self.test_partner_id = self.res_partner.create(cr, uid, {
'name': 'TestingPartner2',
'country_id': self.test_country_id,
})
self.test_partner = self.res_partner.browse(cr, uid, self.test_partner_id)
self.context = {
'active_id': self.test_partner_id,
'active_model': 'res.partner',
}
# Run the action on partner object itself ('base')
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'trigger',
'use_relational_model': 'base',
'wkf_model_id': self.res_partner_model_id,
'wkf_model_name': 'res.partner',
'wkf_transition_id': partner_trs1_id,
})
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_partner.refresh()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: incorrect signal trigger')
# Run the action on related country object ('relational')
self.ir_actions_server.write(cr, uid, [self.act_id], {
'use_relational_model': 'relational',
'wkf_model_id': self.res_country_model_id,
'wkf_model_name': 'res.country',
'wkf_field_id': self.res_partner_country_field_id,
'wkf_transition_id': country_trs1_id,
})
self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
self.test_country.refresh()
self.assertEqual(self.test_country.code, 'ZZ', 'ir_actions_server: incorrect signal trigger')
# Clear workflow cache, otherwise openerp will try to create workflows even if it has been deleted
from openerp.workflow import clear_cache
clear_cache(cr, uid)
def test_30_client(self):
cr, uid = self.cr, self.uid
client_action_id = self.registry('ir.actions.client').create(cr, uid, {
'name': 'TestAction2',
'tag': 'Test',
})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'client_action',
'action_id': client_action_id,
})
res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertEqual(res['name'], 'TestAction2', 'ir_actions_server: incorrect return result for a client action')
def test_40_crud_create(self):
cr, uid = self.cr, self.uid
_city = 'TestCity'
_name = 'TestNew'
# Do: create a new record in the same model and link it
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'new',
'link_new_record': True,
'link_field_id': self.res_partner_parent_field_id,
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': _name}),
(0, 0, {'col1': self.res_partner_city_field_id, 'value': _city})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', _name)])
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
partner = self.res_partner.browse(cr, uid, pids[0])
self.assertEqual(partner.city, _city, 'ir_actions_server: TODO')
# Test: new partner linked
self.test_partner.refresh()
self.assertEqual(self.test_partner.parent_id.id, pids[0], 'ir_actions_server: TODO')
# Do: copy current record
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'copy_current',
'link_new_record': False,
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': 'TestCopyCurrent'}),
(0, 0, {'col1': self.res_partner_city_field_id, 'value': 'TestCity'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner (copy)')]) # currently res_partner overrides default['name'] whatever its value
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
partner = self.res_partner.browse(cr, uid, pids[0])
self.assertEqual(partner.city, 'TestCity', 'ir_actions_server: TODO')
self.assertEqual(partner.country_id.id, self.test_partner.country_id.id, 'ir_actions_server: TODO')
# Do: create a new record in another model
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'new_other',
'crud_model_id': self.res_country_model_id,
'link_new_record': False,
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'obj.name', 'type': 'equation'}),
(0, 0, {'col1': self.res_country_code_field_id, 'value': 'obj.name[0:2]', 'type': 'equation'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'TestingPartner')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
country = self.res_country.browse(cr, uid, cids[0])
self.assertEqual(country.code, 'TE', 'ir_actions_server: TODO')
# Do: copy a record in another model
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_create',
'use_create': 'copy_other',
'crud_model_id': self.res_country_model_id,
'link_new_record': False,
'ref_object': 'res.country,%s' % self.test_country_id,
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'NewCountry', 'type': 'value'}),
(0, 0, {'col1': self.res_country_code_field_id, 'value': 'NY', 'type': 'value'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'NewCountry')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
country = self.res_country.browse(cr, uid, cids[0])
self.assertEqual(country.code, 'NY', 'ir_actions_server: TODO')
self.assertEqual(country.address_format, 'SuperFormat', 'ir_actions_server: TODO')
def test_50_crud_write(self):
cr, uid = self.cr, self.uid
_name = 'TestNew'
# Do: create a new record in the same model and link it
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'object_write',
'use_write': 'current',
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': _name})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', _name)])
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
partner = self.res_partner.browse(cr, uid, pids[0])
self.assertEqual(partner.city, 'OrigCity', 'ir_actions_server: TODO')
# Do: copy current record
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'use_write': 'other',
'crud_model_id': self.res_country_model_id,
'ref_object': 'res.country,%s' % self.test_country_id,
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'obj.name', 'type': 'equation'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'TestNew')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
# Do: copy a record in another model
self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'use_write': 'expression',
'crud_model_id': self.res_country_model_id,
'write_expression': 'object.country_id',
'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'NewCountry', 'type': 'value'})],
})
run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
cids = self.res_country.search(cr, uid, [('name', 'ilike', 'NewCountry')])
self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_60_multi(self):
cr, uid = self.cr, self.uid
# Data: 2 server actions that will be nested
act1_id = self.ir_actions_server.create(cr, uid, {
'name': 'Subaction1',
'sequence': 1,
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'action = {"type": "ir.actions.act_window"}',
})
act2_id = self.ir_actions_server.create(cr, uid, {
'name': 'Subaction2',
'sequence': 2,
'model_id': self.res_partner_model_id,
'state': 'object_create',
'use_create': 'copy_current',
})
act3_id = self.ir_actions_server.create(cr, uid, {
'name': 'Subaction3',
'sequence': 3,
'model_id': self.res_partner_model_id,
'state': 'code',
'code': 'action = {"type": "ir.actions.act_url"}',
})
self.ir_actions_server.write(cr, uid, [self.act_id], {
'state': 'multi',
'child_ids': [(6, 0, [act1_id, act2_id, act3_id])],
})
# Do: run the action
res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
# Test: new partner created
pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner (copy)')]) # currently res_partner overrides default['name'] whatever its value
self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
# Test: action returned
self.assertEqual(res.get('type'), 'ir.actions.act_url')
# Test loops
with self.assertRaises(except_orm):
self.ir_actions_server.write(cr, uid, [self.act_id], {
'child_ids': [(6, 0, [self.act_id])]
})
if __name__ == '__main__':
unittest2.main()
| 49.195599 | 163 | 0.610705 | 19,932 | 0.990607 | 0 | 0 | 1,846 | 0.091745 | 0 | 0 | 6,878 | 0.341832 |
1d955fce32b36603e242788ccaf03954bf57a21c | 506 | py | Python | linchpin/provision/filter_plugins/duplicateattr.py | seandst/linchpin | 427b6fb61f550a4d1120ac94c55d121fbecd70a6 | [
"Apache-2.0"
] | null | null | null | linchpin/provision/filter_plugins/duplicateattr.py | seandst/linchpin | 427b6fb61f550a4d1120ac94c55d121fbecd70a6 | [
"Apache-2.0"
] | null | null | null | linchpin/provision/filter_plugins/duplicateattr.py | seandst/linchpin | 427b6fb61f550a4d1120ac94c55d121fbecd70a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import abc
import StringIO
from ansible import errors
def duplicateattr(output, attr, dattr):
new_output = []
for group in output:
if attr in group:
new_group = group
new_group[dattr] = group[attr]
new_output.append(new_group)
return output
class FilterModule(object):
''' A filter to fix network format '''
def filters(self):
return {
'duplicateattr': duplicateattr
}
| 22 | 42 | 0.624506 | 163 | 0.322134 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.146245 |
1d95852843689e3fd89ad1bab5143b8e92c83354 | 3,564 | py | Python | memory.py | Bl41r/gb-emulator-python | 04917fa0cdd09eb522e1409fc992e41df34fbf9a | [
"MIT"
] | null | null | null | memory.py | Bl41r/gb-emulator-python | 04917fa0cdd09eb522e1409fc992e41df34fbf9a | [
"MIT"
] | null | null | null | memory.py | Bl41r/gb-emulator-python | 04917fa0cdd09eb522e1409fc992e41df34fbf9a | [
"MIT"
] | null | null | null | """Gameboy memory.
Cartridge
---------
[0000-3FFF] Cartridge ROM, bank 0: The first 16,384 bytes of the cartridge program are always available at this point in the memory map. Special circumstances apply:
[0000-00FF] BIOS: When the CPU starts up, PC starts at 0000h, which is the start of the 256-byte GameBoy BIOS code. Once the BIOS has run, it is removed from the memory map, and this area of the cartridge rom becomes addressable.
[0100-014F] Cartridge header: This section of the cartridge contains data about its name and manufacturer, and must be written in a specific format.
[4000-7FFF] Cartridge ROM, other banks: Any subsequent 16k "banks" of the cartridge program can be made available to the CPU here, one by one; a chip on the cartridge is generally used to switch between banks, and make a particular area accessible. The smallest programs are 32k, which means that no bank-selection chip is required.
System Mem
----------
[8000-9FFF] Graphics RAM: Data required for the backgrounds and sprites used by the graphics subsystem is held here, and can be changed by the cartridge program. This region will be examined in further detail in part 3 of this series.
[A000-BFFF] Cartridge (External) RAM: There is a small amount of writeable memory available in the GameBoy; if a game is produced that requires more RAM than is available in the hardware, additional 8k chunks of RAM can be made addressable here.
[C000-DFFF] Working RAM: The GameBoy's internal 8k of RAM, which can be read from or written to by the CPU.
[E000-FDFF] Working RAM (shadow): Due to the wiring of the GameBoy hardware, an exact copy of the working RAM is available 8k higher in the memory map. This copy is available up until the last 512 bytes of the map, where other areas are brought into access.
[FE00-FE9F] Graphics: sprite information: Data about the sprites rendered
by the graphics chip are held here, including the sprites' positions and
attributes.
[FF00-FF7F] Memory-mapped I/O: Each of the GameBoy's subsystems (graphics, sound, etc.) has control values, to allow programs to create effects and use the hardware. These values are available to the CPU directly on the address bus, in this area.
[FF80-FFFF] Zero-page RAM: A high-speed area of 128 bytes of RAM is available at the top of memory. Oddly, though this is "page" 255 of the memory, it is referred to as page zero, since most of the interaction between the program and the GameBoy hardware occurs through use of this page of memory.
"""
import array
class GbMemory(object):
"""Memory of the LC-3 VM."""
def __init__(self):
"""Init."""
self.mem_size = 2**16
self.memory = array.array('B', [0 for i in range(self.mem_size)])
self.cartridge_type = 0
def write_byte(self, address, value):
"""Write a byte to an address."""
self.memory[address] = value
# self._show_mem_around_addr(address)
def read_byte(self, address):
"""Return a byte from memory at an address."""
return self.memory[address]
def read_word(self, address):
"""Read a word from memoery @ address."""
return self.read_byte(address) + (self.read_byte(address + 1) << 8)
def write_word(self, address, value):
"""Write a word in mem @ address."""
self.write_byte(address, value & 255)
self.write_byte(address + 1, value >> 8)
def reset_memory(self):
"""Reset all memory slots to 0."""
for i in range(self.mem_size):
self.memory[i] = 0
self.cartridge_type = 0
| 59.4 | 332 | 0.720819 | 1,049 | 0.294332 | 0 | 0 | 0 | 0 | 0 | 0 | 2,766 | 0.776094 |
1d95ef9da2b683c95ffb53337138d330ab1ec3fd | 1,127 | py | Python | elstruct/reader/_orca4/surface.py | sjklipp/elstruct | 1db630a12e11bf70df91443f963f79637244e844 | [
"Apache-2.0"
] | null | null | null | elstruct/reader/_orca4/surface.py | sjklipp/elstruct | 1db630a12e11bf70df91443f963f79637244e844 | [
"Apache-2.0"
] | null | null | null | elstruct/reader/_orca4/surface.py | sjklipp/elstruct | 1db630a12e11bf70df91443f963f79637244e844 | [
"Apache-2.0"
] | null | null | null | """ gradient and hessian readers
"""
import numpy
import autoread as ar
import autoparse.pattern as app
def gradient(output_string):
""" read gradient from the output string
"""
grad = ar.matrix.read(
output_string,
start_ptt=app.padded(app.NEWLINE).join([
app.padded(app.escape('CARTESIAN GRADIENT'), app.NONNEWLINE),
app.LINE, app.LINE, '']),
line_start_ptt=app.LINESPACES.join([
app.UNSIGNED_INTEGER,
app.one_or_more(app.LETTER),
':']))
assert numpy.shape(grad)[1] == 3
return grad
def hessian(output_string):
""" read hessian from the output string
"""
comp_ptt = app.UNSIGNED_INTEGER
mat = ar.matrix.read(
output_string,
val_ptt=app.EXPONENTIAL_FLOAT,
start_ptt=app.padded(app.NEWLINE).join([
app.escape('$hessian'),
app.LINE, '']),
block_start_ptt=(app.series(comp_ptt, app.LINESPACES) +
app.padded(app.NEWLINE)),
line_start_ptt=comp_ptt,
tril=False)
mat = tuple(map(tuple, mat))
return mat
| 27.487805 | 73 | 0.60071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.149068 |
1d95ff83525f3bfc63f6749e33f1c51f0190ec41 | 1,072 | py | Python | Language/Parser/lr1_item.py | Chains99/Battlefield-Simulator | 9dc209c34aac5160232e47d6799bbe1b1bfcebad | [
"MIT"
] | null | null | null | Language/Parser/lr1_item.py | Chains99/Battlefield-Simulator | 9dc209c34aac5160232e47d6799bbe1b1bfcebad | [
"MIT"
] | null | null | null | Language/Parser/lr1_item.py | Chains99/Battlefield-Simulator | 9dc209c34aac5160232e47d6799bbe1b1bfcebad | [
"MIT"
] | null | null | null | from Language.Grammar.grammar import Production, Symbol, Terminal
class LR1Item:
def __init__(self, production: Production, dot_index: int, lookahead: Terminal = None):
self._repr = ''
self.production = production
self.dot_index = dot_index
self.lookahead = lookahead
self._repr = f"{self.production.head} -> "
self._repr += " ".join(str(self.production.symbols[i]) for i in range(self.dot_index))
self._repr += " . "
self._repr += " ".join(str(self.production.symbols[i]) for i in range(self.dot_index,len(self.production.symbols)))
self._repr += f", {self.lookahead}"
def __repr__(self) -> str:
return self._repr
def get_symbol_at_dot(self) -> Symbol:
if self.dot_index < len(self.production.symbols):
return self.production.symbols[self.dot_index]
return None
def __eq__(self, o):
if isinstance(o, LR1Item):
return self._repr == o._repr
return False
def __hash__(self):
return hash(self.__repr__())
| 34.580645 | 123 | 0.630597 | 1,003 | 0.935634 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.058769 |
1d97b0cbdfb243aad3abbcdbd52cf67f81f74a12 | 1,402 | py | Python | example/0_Basic_usage_of_the_library/python_feapder/1_quick_start/1_quick_start.py | RecluseXU/learning_spider | 45fa790ed7970be57a21b40817cc66856de3d99b | [
"MIT"
] | 38 | 2020-08-30T11:41:53.000Z | 2022-03-23T04:30:26.000Z | example/0_Basic_usage_of_the_library/python_feapder/1_quick_start/1_quick_start.py | AndersonHJB/learning_spider | b855b7808fb5268e9564180cf73ba5b1fb133f58 | [
"MIT"
] | 2 | 2021-08-20T16:34:12.000Z | 2021-10-08T11:06:41.000Z | example/0_Basic_usage_of_the_library/python_feapder/1_quick_start/1_quick_start.py | AndersonHJB/learning_spider | b855b7808fb5268e9564180cf73ba5b1fb133f58 | [
"MIT"
] | 10 | 2020-11-24T09:15:42.000Z | 2022-02-25T06:05:16.000Z | # -*- coding: utf-8 -*-
"""
Created on 2021-03-11 18:53:58
---------
@summary:
抓糗事百科的案例
---------
@author: Administrator
"""
import feapder
class Spider(feapder.AirSpider):
    """AirSpider that crawls the Qiushibaike front page and prints article details."""

    def start_requests(self):
        # Only page 1 is fetched: range(1, 2) produces a single page number.
        for page_num in range(1, 2):
            url = "https://www.qiushibaike.com/8hr/page/{}/".format(page_num)
            yield feapder.Request(url)

    def parse(self, request, response):
        # Each listing entry is an <a> linking to a full article.
        for article in response.xpath('//li[@id]/div/a'):
            title = article.xpath('./text()').extract_first()
            # The href here is already resolved to an absolute URL.
            url = article.xpath('./@href').extract_first()
            # Usage is similar to scrapy: `callback` is the handler for the new
            # request, and extra keyword arguments (like `title`) travel along
            # with the request and are available on `request` in the callback.
            yield feapder.Request(url, callback=self.parse_detail, title=title)

    def parse_detail(self, request, response):
        print('title:{}'.format(request.title))
        print('url:{}'.format(response.url))
        print('author:{}'.format(response.xpath('//*[@id="articleSideLeft"]/a/img/@alt').extract_first()))
        # The article body may contain bytes that are not valid UTF-8; skip them.
        response.encoding_errors = 'ignore'
        print('content:{}'.format(response.xpath('string(//div[@class="content"])').extract_first()))
if __name__ == "__main__":
    # Spider().start()
    Spider(thread_count=3).start()  # run with 3 worker threads to speed up crawling
1d9ac71d3a8fcaba493e70c86ce538da80100fc4 | 613 | py | Python | Python3-GUI/Turtle01_Motion.py | anliven/L-Python | a814cab207bbaad6b5c69b9feeb8bf2f459baf2b | [
"Apache-2.0"
] | null | null | null | Python3-GUI/Turtle01_Motion.py | anliven/L-Python | a814cab207bbaad6b5c69b9feeb8bf2f459baf2b | [
"Apache-2.0"
] | null | null | null | Python3-GUI/Turtle01_Motion.py | anliven/L-Python | a814cab207bbaad6b5c69b9feeb8bf2f459baf2b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import turtle
t = turtle.Turtle()
t.goto(-50, 0)
for j in range(3):
t.forward(180)
t.right(120)
t.reset() # 清空窗口,重置turtle状态为起始状态
t2 = turtle.Turtle()
t2.speed(50)
for i in range(5):
t2.forward(150)
t2.right(144)
turtle.exitonclick() # 保持图案,直到鼠标点击才退出
# ### Standard-library turtle module
# turtle — Turtle graphics
# https://docs.python.org/3/library/turtle.html
#
# ### Turtle motion
# turtle.goto(x,y)   move to the given coordinates
# turtle.forward()   move forward
# turtle.backward()  move backward
# turtle.right()     turn right by the given angle
# turtle.left()      turn left by the given angle
# turtle.home()      return to the origin
# turtle.speed()     set the drawing speed
# ......
| 19.15625 | 48 | 0.619902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 484 | 0.647925 |
1d9b3dd104b11b23ec830541750f56c9d580c920 | 519 | py | Python | backend/core/MlDiagnosis/ML_models/heartAttackPrediction/testDeploy.py | arc-arnob/Reddit-Clone | 607918160596a10b0aff85bc7f472c8b76ace7c5 | [
"Apache-2.0"
] | null | null | null | backend/core/MlDiagnosis/ML_models/heartAttackPrediction/testDeploy.py | arc-arnob/Reddit-Clone | 607918160596a10b0aff85bc7f472c8b76ace7c5 | [
"Apache-2.0"
] | null | null | null | backend/core/MlDiagnosis/ML_models/heartAttackPrediction/testDeploy.py | arc-arnob/Reddit-Clone | 607918160596a10b0aff85bc7f472c8b76ace7c5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 13 10:26:55 2021
@author: hp
"""
import importlib
import prediction
import numpy as np
# Read the 13 feature values for one prediction, one value per stdin line.
# NOTE(review): `msg` lists only 5 field names while 13 values are read —
# presumably the model expects 13 features; confirm against the model's schema.
dataframe_instance = [float(input()) for _ in range(13)]
msg = ["marital status","age","hypertension","heart","glucose"]
#data_np = np.array(dataframe_instance)
print(dataframe_instance)
#data_np = np.reshape(data_np,(-1,5))
# Load the pickled logistic-regression model and print its prediction.
model = prediction.heartAttackModel('model_heart_LG_V1.sav')
print(model.predict(dataframe_instance))
| 23.590909 | 63 | 0.71869 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.44894 |
1d9d41151655848833ede87fdc1e56259da9adc2 | 327 | py | Python | lib/signtest.py | Laurancy-Dorian/DrawTheTableau | c8b1e67286303f0b0098cdbb8fb662ccfa31c235 | [
"MIT"
] | null | null | null | lib/signtest.py | Laurancy-Dorian/DrawTheTableau | c8b1e67286303f0b0098cdbb8fb662ccfa31c235 | [
"MIT"
] | null | null | null | lib/signtest.py | Laurancy-Dorian/DrawTheTableau | c8b1e67286303f0b0098cdbb8fb662ccfa31c235 | [
"MIT"
] | null | null | null | import signal
import os
import time
def receive_signal(signum, stack):
    # Signal handler (Python 2): just report which signal number was delivered.
    print 'Received:', signum
# Install the handler for the two user-defined signals and Ctrl-C (SIGINT).
signal.signal(signal.SIGUSR1, receive_signal)
signal.signal(signal.SIGUSR2, receive_signal)
signal.signal(signal.SIGINT, receive_signal)
# Print the PID so signals can be sent from another shell: kill -USR1 <pid>
print 'My PID is:', os.getpid()
# Idle forever; the sleep is interrupted whenever a signal arrives.
while True:
    print 'Waiting...'
    time.sleep(3)
| 19.235294 | 45 | 0.746177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.107034 |
1d9d9317657c45de513b0d2d559ef0e59aa2beb3 | 5,211 | py | Python | maskfirst/masker.py | xianpf/CenterMask | 287111f4abd58df356e7899a22bf47d23647ee72 | [
"BSD-2-Clause"
] | null | null | null | maskfirst/masker.py | xianpf/CenterMask | 287111f4abd58df356e7899a22bf47d23647ee72 | [
"BSD-2-Clause"
] | null | null | null | maskfirst/masker.py | xianpf/CenterMask | 287111f4abd58df356e7899a22bf47d23647ee72 | [
"BSD-2-Clause"
] | null | null | null | import torch
import torch.nn.functional as F
from maskrcnn_benchmark.structures.bounding_box import BoxList
# the next two functions should be merged inside Masker
# but are kept here for the moment while we need them
# temporarily gor paste_mask_in_image
def expand_boxes(boxes, scale):
    """Scale each xyxy box about its own center by a single factor.

    boxes: (N, 4) tensor of [x0, y0, x1, y1]; returns a new (N, 4) tensor.
    """
    center_x = (boxes[:, 0] + boxes[:, 2]) * .5
    center_y = (boxes[:, 1] + boxes[:, 3]) * .5
    half_w = (boxes[:, 2] - boxes[:, 0]) * .5 * scale
    half_h = (boxes[:, 3] - boxes[:, 1]) * .5 * scale

    boxes_exp = torch.zeros_like(boxes)
    boxes_exp[:, 0] = center_x - half_w
    boxes_exp[:, 1] = center_y - half_h
    boxes_exp[:, 2] = center_x + half_w
    boxes_exp[:, 3] = center_y + half_h
    return boxes_exp
def expand_boxes_new(boxes, scale):
    """Scale each xyxy box about its own center with separate x/y factors.

    scale: (scale_x, scale_y) pair applied to the half-width / half-height.
    """
    cx = (boxes[:, 0] + boxes[:, 2]) * .5
    cy = (boxes[:, 1] + boxes[:, 3]) * .5
    hw = (boxes[:, 2] - boxes[:, 0]) * .5 * scale[0]
    hh = (boxes[:, 3] - boxes[:, 1]) * .5 * scale[1]

    boxes_exp = torch.zeros_like(boxes)
    boxes_exp[:, 0] = cx - hw
    boxes_exp[:, 1] = cy - hh
    boxes_exp[:, 2] = cx + hw
    boxes_exp[:, 3] = cy + hh
    return boxes_exp
def expand_masks(mask, padding):
    """Zero-pad square masks by `padding` pixels on every side.

    Returns (padded (N, 1, M+2p, M+2p) tensor, the factor (M+2p)/M by which a
    box must be expanded to stay aligned with the padded mask).
    """
    num_masks, side = mask.shape[0], mask.shape[-1]
    padded_side = side + 2 * padding
    scale = float(padded_side) / side
    padded_mask = mask.new_zeros((num_masks, 1, padded_side, padded_side))
    padded_mask[:, :, padding:-padding, padding:-padding] = mask
    return padded_mask, scale
def expand_masks_new(mask, padding):
    """Zero-pad possibly non-square masks by `padding` pixels on every side.

    Returns (padded (N, 1, W+2p, H+2p) tensor, (scale_w, scale_h) factors).
    """
    num_masks = mask.shape[0]
    width, height = mask.shape[1], mask.shape[2]
    pad2 = 2 * padding
    scales = (float(width + pad2) / width, float(height + pad2) / height)
    padded_mask = mask.new_zeros((num_masks, 1, width + pad2, height + pad2))
    padded_mask[:, :, padding:-padding, padding:-padding] = mask
    return padded_mask, scales
def paste_mask_in_image(mask, box, im_h, im_w, thresh=0.5, padding=1):
    """Paste one fixed-size mask into a full-image (im_h, im_w) boolean mask.

    mask: 2D (M, M) float mask for a single detection; box: its xyxy tensor.
    If thresh >= 0 the resized mask is binarized at thresh; otherwise the raw
    (scaled) values are cast to bool for visualization/debugging.
    """
    # Pad the mask and expand the box by the same factor so that the mask's
    # border interpolates toward zero instead of being clamped at the edge.
    padded_mask, scale = expand_masks(mask[None], padding=padding)
    mask = padded_mask[0, 0]
    box = expand_boxes(box[None], scale)[0]
    box = box.to(dtype=torch.int32)
    # Box coordinates are inclusive, hence the +1 when computing pixel sizes.
    TO_REMOVE = 1
    w = box[2] - box[0] + TO_REMOVE
    h = box[3] - box[1] + TO_REMOVE
    # Guard against degenerate (zero/negative size) boxes.
    w = max(w, 1)
    h = max(h, 1)
    # Set shape to [batchxCxHxW]
    mask = mask.expand((1, 1, -1, -1))
    # Resize mask
    mask = mask.to(torch.float32)
    mask = F.interpolate(mask, size=(h, w), mode='bilinear', align_corners=False)
    mask = mask[0][0]
    if thresh >= 0:
        mask = mask > thresh
    else:
        # for visualization and debugging, we also
        # allow it to return an unmodified mask
        # mask = (mask * 255).to(torch.uint8)
        # NOTE(review): the *255 scaling is a leftover from the uint8 path;
        # casting to bool makes any positive value True regardless.
        mask = (mask * 255).to(torch.bool)
    # Clip the paste region to the image bounds and copy the matching
    # sub-window of the resized mask into the full-image canvas.
    # im_mask = torch.zeros((im_h, im_w), dtype=torch.uint8)
    im_mask = torch.zeros((im_h, im_w), dtype=torch.bool)
    x_0 = max(box[0], 0)
    x_1 = min(box[2] + 1, im_w)
    y_0 = max(box[1], 0)
    y_1 = min(box[3] + 1, im_h)
    im_mask[y_0:y_1, x_0:x_1] = mask[
        (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])
    ]
    return im_mask
class Masker(object):
    """
    Projects a set of masks in an image on the locations
    specified by the bounding boxes
    """
    def __init__(self, threshold=0.5, padding=1):
        # threshold: binarization threshold forwarded to paste_mask_in_image;
        # padding: zero-padding (pixels) applied around each mask before pasting.
        self.threshold = threshold
        self.padding = padding
    def forward_single_image(self, masks, boxes):
        # Paste every per-detection mask into a full-image canvas for one image.
        boxes = boxes.convert("xyxy")
        im_w, im_h = boxes.size
        res = [
            paste_mask_in_image(mask[0], box, im_h, im_w, self.threshold, self.padding)
            for mask, box in zip(masks, boxes.bbox)
        ]
        if len(res) > 0:
            res = torch.stack(res, dim=0)[:, None]
        else:
            # No detections: return an empty (0, 1, H, W) tensor of matching dtype.
            res = masks.new_empty((0, 1, masks.shape[-2], masks.shape[-1]))
        return res
    def maskpyramid_masker(self, masks, boxes):
        # Resize pyramid mask logits to image size and take foreground wherever
        # the instance logit beats the background logit.
        # import pdb; pdb.set_trace()
        mask_h, mask_w = masks.shape[-2:]
        box_w, box_h = boxes.size
        data_w, data_h = tuple(boxes.get_field('dataloader_size').tolist())
        # Only the top-left (dataloader_size / 16) corner of the logit maps holds
        # valid content — presumably a stride-16 feature map; TODO confirm.
        masks_content = masks[:,:,:round(data_h/16), :round(data_w/16)]
        bg_content = boxes.get_field('bg_logits')[:,:,:round(data_h/16), :round(data_w/16)]
        masks_1 = F.interpolate(masks_content, size=(box_h, box_w), mode='bilinear', align_corners=False)
        masks_0 = F.interpolate(bg_content, size=(box_h, box_w), mode='bilinear', align_corners=False)
        res = masks_1 > masks_0
        return res
    def __call__(self, masks, boxes):
        # Accept a single BoxList for convenience; normalize to per-image lists.
        if isinstance(boxes, BoxList):
            boxes = [boxes]
        # Make some sanity check
        assert len(boxes) == len(masks), "Masks and boxes should have the same length."
        # TODO: Is this JIT compatible?
        # If not we should make it compatible.
        results = []
        for mask, box in zip(masks, boxes):
            assert mask.shape[0] == len(box), "Number of objects should be the same."
            # Boxes carrying 'bg_logits' use the mask-pyramid path; otherwise
            # fall back to the standard paste-per-box path.
            if 'bg_logits' in box.extra_fields.keys():
                result = self.maskpyramid_masker(mask, box)
            else:
                result = self.forward_single_image(mask, box)
            # import pdb; pdb.set_trace()
            results.append(result)
        return results
| 32.166667 | 105 | 0.587027 | 2,155 | 0.413548 | 0 | 0 | 0 | 0 | 0 | 0 | 776 | 0.148916 |
1d9dcef6f5f353c0ea5bbef7cb388e0ae27dc089 | 2,047 | py | Python | certbot-ventilator/tests/conftest.py | gerrito333/letsencrypt-cert-manager | 957ea555cc0f18fc3af9c275dc2fc5a8ab0a1668 | [
"MIT"
] | 2 | 2020-07-08T17:36:11.000Z | 2020-07-08T18:20:59.000Z | certbot-ventilator/tests/conftest.py | gerrito333/letsencrypt-cert-manager | 957ea555cc0f18fc3af9c275dc2fc5a8ab0a1668 | [
"MIT"
] | 2 | 2020-07-08T17:57:19.000Z | 2020-07-08T18:00:38.000Z | certbot-ventilator/tests/conftest.py | gerrito333/letsencrypt-cert-manager | 957ea555cc0f18fc3af9c275dc2fc5a8ab0a1668 | [
"MIT"
] | 1 | 2020-12-18T21:49:10.000Z | 2020-12-18T21:49:10.000Z | """Fixtures for tests."""
import json
import boto3
from moto import mock_dynamodb2
import pytest
from fixtures import LambdaContextMock
import payloads
@pytest.fixture
def event():
"""Return parsed event."""
with open('tests/payloads/success.json') as json_data:
return json.load(json_data)
@pytest.fixture
def context():
"""Return mock lambda context."""
return LambdaContextMock()
@pytest.fixture
def expected_table():
"""Return dynamodb table name fixture."""
return "test-certbot-ventilator-certificates"
@pytest.fixture
def region():
"""Return AWS region fixture."""
return "us-west-2"
@pytest.fixture
def setup_aws_creds(monkeypatch, region):
"""Set up AWS credential environment vars to make boto3 happy."""
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "testing")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "testing")
monkeypatch.setenv("AWS_SECURITY_TOKEN", "testing")
monkeypatch.setenv("AWS_SESSION_TOKEN", "testing")
monkeypatch.setenv("AWS_DEFAULT_REGION", region)
@pytest.fixture(autouse=True)
def install_mock_dynamodb(setup_aws_creds):
"""Mock out boto3 S3 with moto."""
with mock_dynamodb2():
yield
@pytest.fixture(autouse=True)
def setup_table(install_mock_dynamodb, expected_table):
"""Create a table and populate it with a column value."""
dynamodb = boto3.client("dynamodb")
dynamodb.create_table(
TableName=expected_table,
KeySchema=[
{
'AttributeName': 'subject_alternative_name',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'subject_alternative_name',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
item = {
"subject_alternative_name": {"S": "test-domain.pxsys.net"}
}
dynamodb.put_item(TableName=expected_table, Item=item)
| 24.662651 | 69 | 0.651685 | 0 | 0 | 123 | 0.060088 | 1,873 | 0.914998 | 0 | 0 | 744 | 0.363459 |
1d9dfc92ac3aa9a3c4519b13db031ca1c8e467a6 | 51,450 | py | Python | smarc_bt/src/bt_actions.py | svbhat/smarc_missions | 0fa9f858dff033a88385a156b5bb09b3f8fe2780 | [
"BSD-3-Clause"
] | null | null | null | smarc_bt/src/bt_actions.py | svbhat/smarc_missions | 0fa9f858dff033a88385a156b5bb09b3f8fe2780 | [
"BSD-3-Clause"
] | null | null | null | smarc_bt/src/bt_actions.py | svbhat/smarc_missions | 0fa9f858dff033a88385a156b5bb09b3f8fe2780 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# Ozer Ozkahraman (ozero@kth.se)
import py_trees as pt
import py_trees_ros as ptr
import time
import numpy as np
import rospy
import tf
import actionlib
# from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from smarc_msgs.msg import GotoWaypointAction, GotoWaypointGoal
import actionlib_msgs.msg as actionlib_msgs
from geometry_msgs.msg import PointStamped, PoseArray, PoseStamped
from nav_msgs.msg import Path
from std_msgs.msg import Float64, Header, Bool, Empty
from visualization_msgs.msg import MarkerArray
from sensor_msgs.msg import NavSatFix
from std_srvs.srv import SetBool
from imc_ros_bridge.msg import EstimatedState, VehicleState, PlanDB, PlanDBInformation, PlanDBState, PlanControlState, PlanControl, PlanSpecification, Maneuver
import bb_enums
import imc_enums
import common_globals
from mission_plan import MissionPlan
from mission_log import MissionLog
class A_PublishFinalize(pt.behaviour.Behaviour):
    """
    Publish a single std_msgs/Empty message on `topic` once per mission and
    mark the mission as finalized on the blackboard.

    Returns SUCCESS once the message is out (or the mission is already
    finalized); FAILURE only if publishing raised.
    """
    def __init__(self, topic):
        super(A_PublishFinalize, self).__init__(name="A_PublishFinalize")
        self.bb = pt.blackboard.Blackboard()
        self.topic = topic
        # time.time() of the last successful publish, used for feedback only
        self.last_published_time = None
        # the message carries no data, so one instance is reused
        self.message_object = Empty()

    def setup(self, timeout):
        self.pub = rospy.Publisher(self.topic, Empty, queue_size=1)
        return True

    def update(self):
        if self.last_published_time is not None:
            time_since = time.time() - self.last_published_time
            self.feedback_message = "Last pub'd:{:.2f}s ago".format(time_since)
        else:
            self.feedback_message = "Never published!"
        finalized = self.bb.get(bb_enums.MISSION_FINALIZED)
        if not finalized:
            try:
                self.pub.publish(self.message_object)
                self.last_published_time = time.time()
                self.feedback_message = "Just published"
                self.bb.set(bb_enums.MISSION_FINALIZED, True)
                return pt.Status.SUCCESS
            # bugfix: was a bare `except:` which also swallowed SystemExit /
            # KeyboardInterrupt (e.g. on ROS shutdown); narrowed to Exception
            except Exception:
                msg = "Couldn't publish"
                rospy.logwarn_throttle(1, msg)
                self.feedback_message = msg
                return pt.Status.FAILURE
        return pt.Status.SUCCESS
class A_ManualMissionLog(pt.behaviour.Behaviour):
    """
    While ENABLE_MANUAL_MISSION_LOG is set, append the vehicle pose, the raw
    GPS fix (converted to UTM) and the current tree tip to a MissionLog.
    When disabled, any open log is saved to disk exactly once.
    Always returns SUCCESS.
    """
    def __init__(self):
        super(A_ManualMissionLog, self).__init__(name="A_ManualMissionLog")
        self.bb = pt.blackboard.Blackboard()
        self.started_logs = 0
        self.num_saved_logs = 0

    def start_new_log(self):
        # Manual logs are not tied to a mission plan.
        save_location = self.bb.get(bb_enums.MISSION_LOG_FOLDER)
        log = MissionLog(mission_plan = None,
                         save_location = save_location)
        self.bb.set(bb_enums.MANUAL_MISSION_LOG_OBJ, log)
        rospy.loginfo("Started new manual mission log")
        self.started_logs += 1
        return log

    def update(self):
        enabled = self.bb.get(bb_enums.ENABLE_MANUAL_MISSION_LOG)
        log = self.bb.get(bb_enums.MANUAL_MISSION_LOG_OBJ)
        if not enabled:
            # If a log is still open, save it now and clear it so subsequent
            # disabled ticks don't save it again.
            if log is not None:
                log.save()
                self.bb.set(bb_enums.MANUAL_MISSION_LOG_OBJ, None)
                self.num_saved_logs += 1
            self.feedback_message = "Disabled, {} logs saved".format(self.num_saved_logs)
            return pt.Status.SUCCESS
        if log is None:
            log = self.start_new_log()
        # first add the auv pose
        world_trans = self.bb.get(bb_enums.WORLD_TRANS)
        x,y = world_trans[0], world_trans[1]
        z = -self.bb.get(bb_enums.DEPTH)
        log.navigation_trace.append((x,y,z))
        # then add the raw gps, converted to UTM with the mission plan's service.
        # bugfixes: `mplan` was never fetched here (NameError on a valid fix),
        # `gps.lonitude` was a typo for `longitude`, and `gps_utm_point` was
        # never actually constructed when the conversion succeeded.
        gps = self.bb.get(bb_enums.RAW_GPS)
        gps_utm_point = None
        if gps is not None and gps.status.status != -1:  # -1 means no fix
            mplan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
            if mplan is not None:
                gps_utm_x, gps_utm_y = mplan.latlon_to_utm(gps.latitude, gps.longitude)
                if gps_utm_x is not None:
                    gps_utm_point = (gps_utm_x, gps_utm_y)
        log.raw_gps_trace.append(gps_utm_point)
        # then add the tree tip and its status
        tree_tip = self.bb.get(bb_enums.TREE_TIP_NAME)
        tip_status = self.bb.get(bb_enums.TREE_TIP_STATUS)
        log.tree_tip_trace.append((tree_tip, tip_status))
        self.feedback_message = "Log len:{} of log#{}".format(len(log.navigation_trace), self.started_logs)
        return pt.Status.SUCCESS
class A_SaveMissionLog(pt.behaviour.Behaviour):
    """Persist the current mission log (if any) and clear it from the blackboard."""
    def __init__(self):
        super(A_SaveMissionLog, self).__init__(name="A_SaveMissionLog")
        self.bb = pt.blackboard.Blackboard()
        self.num_saved_logs = 0

    def update(self):
        log = self.bb.get(bb_enums.MISSION_LOG_OBJ)
        if log is None:
            # Nothing pending; just report how many logs were saved so far.
            self.feedback_message = "#saved logs:{}".format(self.num_saved_logs)
            return pt.Status.SUCCESS
        # Save, count, and clear so the same log is not saved twice.
        log.save()
        self.num_saved_logs += 1
        self.bb.set(bb_enums.MISSION_LOG_OBJ, None)
        self.feedback_message = "Saved log #{}!".format(self.num_saved_logs)
        return pt.Status.SUCCESS
class A_UpdateMissionLog(pt.behaviour.Behaviour):
    """
    While a mission plan exists, append the vehicle pose, the raw GPS fix
    (converted to UTM) and the current tree tip to the mission's log,
    restarting the log whenever the plan itself changes.
    Returns FAILURE when there is no mission plan, SUCCESS otherwise.
    """
    def __init__(self):
        super(A_UpdateMissionLog, self).__init__(name="A_UpdateMissionLog")
        self.bb = pt.blackboard.Blackboard()
        self.started_logs = 0

    def start_new_log(self, mplan):
        save_location = self.bb.get(bb_enums.MISSION_LOG_FOLDER)
        log = MissionLog(mission_plan = mplan,
                         save_location = save_location)
        self.bb.set(bb_enums.MISSION_LOG_OBJ, log)
        rospy.loginfo("Started new mission log")
        self.started_logs += 1
        return log

    def update(self):
        # only update if there is an unfinalized mission that has been started
        mplan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
        if mplan is None:
            rospy.loginfo("Mission plan is None, can't make a log of this?")
            self.feedback_message = "No mission plan!"
            return pt.Status.FAILURE
        log = self.bb.get(bb_enums.MISSION_LOG_OBJ)
        if log is None:
            log = self.start_new_log(mplan)
        # The user may have started a mission, stopped it, and then started a
        # different one; don't mix their traces — restart the log for the new plan.
        if log.creation_time != mplan.creation_time:
            log = self.start_new_log(mplan)
        # first add the auv pose
        world_trans = self.bb.get(bb_enums.WORLD_TRANS)
        x,y = world_trans[0], world_trans[1]
        z = -self.bb.get(bb_enums.DEPTH)
        log.navigation_trace.append((x,y,z))
        # then add the raw gps, converted to UTM.
        # bugfixes: `gps.lonitude` was a typo for `longitude`, and
        # `gps_utm_point` was never constructed when the conversion succeeded
        # (NameError on any valid fix).
        gps = self.bb.get(bb_enums.RAW_GPS)
        gps_utm_point = None
        if gps is not None and gps.status.status != -1:  # -1 means no fix
            gps_utm_x, gps_utm_y = mplan.latlon_to_utm(gps.latitude, gps.longitude)
            if gps_utm_x is not None:
                gps_utm_point = (gps_utm_x, gps_utm_y)
        log.raw_gps_trace.append(gps_utm_point)
        # then add the tree tip and its status
        tree_tip = self.bb.get(bb_enums.TREE_TIP_NAME)
        tip_status = self.bb.get(bb_enums.TREE_TIP_STATUS)
        log.tree_tip_trace.append((tree_tip, tip_status))
        self.feedback_message = "Log len:{} of log#{}".format(len(log.navigation_trace), self.started_logs)
        return pt.Status.SUCCESS
class A_SetDVLRunning(pt.behaviour.Behaviour):
    """Toggle the DVL on/off through a SetBool service, rate-limited by `cooldown`.

    SUCCESS when the DVL is already in the desired state, the toggle worked,
    or the service does not exist; RUNNING while waiting out the cooldown;
    FAILURE when the service call reported failure.
    """
    def __init__(self, dvl_on_off_service_name, running, cooldown):
        super(A_SetDVLRunning, self).__init__(name="A_SetDVLRunning")
        self.switcher_service = rospy.ServiceProxy(dvl_on_off_service_name,
                                                   SetBool)
        self.bb = pt.blackboard.Blackboard()
        # sb.data holds the desired state; also kept in self.running
        self.sb = SetBool()
        self.sb.data = running
        self.running = running
        # time.time() of the last successful toggle; 0 lets the first call through
        self.last_toggle = 0
        self.cooldown = cooldown
        self.service_name = dvl_on_off_service_name
    def update(self):
        # try not to call the service every tick...
        dvl_is_running = self.bb.get(bb_enums.DVL_IS_RUNNING)
        if dvl_is_running is not None:
            if dvl_is_running == self.sb.data:
                rospy.loginfo_throttle_identical(20, "DVL is already running:"+str(self.sb.data))
                return pt.Status.SUCCESS
        # check if enough time has passed since last call
        t = time.time()
        if t - self.last_toggle < self.cooldown:
            # nope, return running while we wait
            rospy.loginfo_throttle_identical(5, "Waiting on DVL toggle cooldown")
            return pt.Status.RUNNING
        try:
            ret = self.switcher_service(self.running)
        except rospy.service.ServiceException:
            # A missing service is treated as best-effort success so the tree
            # keeps running on vehicles without a DVL switch.
            rospy.logwarn_throttle_identical(60, "DVL Start/stop service not found! Succeeding by default namespace:{}".format(self.service_name))
            return pt.Status.SUCCESS
        if ret.success:
            rospy.loginfo_throttle_identical(5, "DVL TOGGLED:"+str(self.sb.data))
            self.last_toggle = time.time()
            self.bb.set(bb_enums.DVL_IS_RUNNING, self.sb.data)
            return pt.Status.SUCCESS
        rospy.logwarn_throttle_identical(5, "DVL COULD NOT BE TOGGLED:{}, ret:{}".format(self.sb.data, ret))
        return pt.Status.FAILURE
class A_EmergencySurface(ptr.actions.ActionClient):
    def __init__(self, emergency_action_namespace):
        """
        What to do when an emergency happens. This should be a very simple
        action that is super unlikely to fail, ever. It should also 'just work'
        without a goal.
        Like surfacing.
        """
        self.bb = pt.blackboard.Blackboard()
        self.action_goal_handle = None
        ptr.actions.ActionClient.__init__(
            self,
            name="A_EmergencySurface",
            action_spec=GotoWaypointAction,
            action_goal=None,
            action_namespace= emergency_action_namespace,
            override_feedback_message_on_running="EMERGENCY SURFACING"
        )
        self.action_server_ok = False
    def setup(self, timeout):
        """
        Overwriting the normal ptr action setup to stop it from failing the setup step
        and instead handling this failure in the tree.
        """
        self.logger.debug("%s.setup()" % self.__class__.__name__)
        self.action_client = actionlib.SimpleActionClient(
            self.action_namespace,
            self.action_spec
        )
        if not self.action_client.wait_for_server(rospy.Duration(timeout)):
            self.logger.error("{0}.setup() could not connect to the action server at '{1}'".format(self.__class__.__name__, self.action_namespace))
            self.action_client = None
            self.action_server_ok = False
        else:
            self.action_server_ok = True
        return True
    def initialise(self):
        if not self.action_server_ok:
            rospy.logwarn_throttle_identical(5, "No Action Server found for emergency action, will just block the tree!")
            return
        self.feedback_message = "EMERGENCY SURFACING"
        # construct the message
        # an empty/default goal is intentional: the emergency server is
        # expected to surface without any parameters
        self.action_goal = GotoWaypointGoal()
        self.sent_goal = False
    def update(self):
        # Guard clauses: without a working server/client/goal there is
        # nothing to do, so fail and let the tree react.
        if not self.action_server_ok:
            self.feedback_message = "Action Server for emergency action can not be used!"
            rospy.logerr_throttle_identical(5,self.feedback_message)
            return pt.Status.FAILURE
        # if your action client is not valid
        if not self.action_client:
            self.feedback_message = "ActionClient for emergency action is invalid!"
            rospy.logwarn_throttle_identical(5,self.feedback_message)
            return pt.Status.FAILURE
        # if the action_goal is invalid
        if not self.action_goal:
            self.feedback_message = "No action_goal!"
            rospy.logwarn(self.feedback_message)
            return pt.Status.FAILURE
        # if goal hasn't been sent yet
        if not self.sent_goal:
            self.action_goal_handle = self.action_client.send_goal(self.action_goal, feedback_cb=self.feedback_cb)
            self.sent_goal = True
            rospy.loginfo("Sent goal to action server:"+str(self.action_goal))
            self.feedback_message = "Emergency goal sent"
            return pt.Status.RUNNING
        # if the goal was aborted or preempted
        if self.action_client.get_state() in [actionlib_msgs.GoalStatus.ABORTED,
                                              actionlib_msgs.GoalStatus.PREEMPTED]:
            self.feedback_message = "Aborted emergency"
            rospy.loginfo(self.feedback_message)
            return pt.Status.FAILURE
        result = self.action_client.get_result()
        # if the goal was accomplished
        if result:
            self.feedback_message = "Completed emergency"
            rospy.loginfo(self.feedback_message)
            return pt.Status.SUCCESS
        # if we're still trying to accomplish the goal
        return pt.Status.RUNNING
    def feedback_cb(self, msg):
        # Feedback is intentionally ignored during an emergency.
        pass
class A_SetNextPlanAction(pt.behaviour.Behaviour):
    def __init__(self, do_not_visit=False):
        """
        Sets the current plan action to the next one
        SUCCESS if it can set it to something that is not None
        FAILURE otherwise
        if do_not_visit=True, then this action will only get the current wp
        and set it and wont actually advance the plan forward.
        This is useful for when you want to set the current wp right after
        you created a plan.
        """
        self.bb = pt.blackboard.Blackboard()
        super(A_SetNextPlanAction, self).__init__('A_SetNextPlanAction')
        self.do_not_visit = do_not_visit

    def update(self):
        plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
        # Nothing to advance without a plan.
        if plan is None:
            rospy.logwarn_throttle(5, "Mission plan was None!")
            return pt.Status.FAILURE
        # Mark the current waypoint visited unless we only want to peek.
        if not self.do_not_visit:
            plan.visit_wp()
        wp = plan.get_current_wp()
        if wp is None:
            # Either the plan ran out of waypoints or it is complete.
            self.feedback_message = "Next action was None"
            rospy.logwarn_throttle(5, "Mission is complete:{}".format(plan.is_complete()))
            return pt.Status.FAILURE
        rospy.loginfo_throttle_identical(5, "Set CURRENT_PLAN_ACTION {} to: {}".format(self.do_not_visit, str(wp)))
        self.bb.set(bb_enums.CURRENT_PLAN_ACTION, wp)
        return pt.Status.SUCCESS
class A_GotoWaypoint(ptr.actions.ActionClient):
    def __init__(self,
                 action_namespace,
                 goal_tf_frame = 'utm',
                 node_name = "A_GotoWaypoint"):
        """
        Runs an action server that will move the robot to the given waypoint.

        action_namespace: namespace of the GotoWaypoint action server.
        goal_tf_frame: tf frame the waypoints are expected to be given in.
        """
        self.bb = pt.blackboard.Blackboard()
        self.node_name = node_name
        # register this node in the blackboard's list of maneuver actions
        list_of_maneuvers = self.bb.get(bb_enums.MANEUVER_ACTIONS)
        if list_of_maneuvers is None:
            list_of_maneuvers = [self.node_name]
        else:
            list_of_maneuvers.append(self.node_name)
        self.bb.set(bb_enums.MANEUVER_ACTIONS, list_of_maneuvers)
        self.action_goal_handle = None
        # become action client
        ptr.actions.ActionClient.__init__(
            self,
            name = self.node_name,
            action_spec = GotoWaypointAction,
            action_goal = None,
            action_namespace = action_namespace,
            override_feedback_message_on_running = "Moving to waypoint"
        )
        self.action_server_ok = False
        self.goal_tf_frame = goal_tf_frame

    def setup(self, timeout):
        """
        Overwriting the normal ptr action setup to stop it from failing the
        setup step and instead handling this failure in the tree.
        """
        self.logger.debug("%s.setup()" % self.__class__.__name__)
        self.action_client = actionlib.SimpleActionClient(
            self.action_namespace,
            self.action_spec
        )
        if not self.action_client.wait_for_server(rospy.Duration(timeout)):
            self.logger.error("{0}.setup() could not connect to the action server at '{1}'".format(self.__class__.__name__, self.action_namespace))
            self.action_client = None
        else:
            self.action_server_ok = True
        return True

    def initialise(self):
        # bugfix: invalidate any goal left over from a previous run first, so
        # that update() fails cleanly if any of the checks below bail out
        # (a stale goal used to survive an aborted initialise and be re-sent)
        self.action_goal = None
        if not self.action_server_ok:
            rospy.logwarn_throttle(5, "No action server found for A_GotoWaypoint!")
            return
        mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
        if mission_plan is None:
            rospy.logwarn("No mission plan found!")
            return
        wp = mission_plan.get_current_wp()
        if wp is None:
            rospy.loginfo("No wp found to execute! Does the plan have any waypoints that we understand?")
            return
        if wp.tf_frame != self.goal_tf_frame:
            # bugfix: this log line referenced an undefined name `frame` and
            # raised a NameError instead of reporting the mismatch
            rospy.logerr_throttle(5, 'The frame of the waypoint({0}) does not match the expected frame({1}) of the action client!'.format(wp.tf_frame, self.goal_tf_frame))
            return
        if wp.maneuver_id != imc_enums.MANEUVER_GOTO:
            rospy.loginfo("THIS IS A GOTO MANEUVER, WE ARE USING IT FOR SOMETHING ELSE")
        # get the goal tolerance as a dynamic variable from the bb
        goal_tolerance = self.bb.get(bb_enums.WAYPOINT_TOLERANCE)
        # construct the message
        goal = GotoWaypointGoal()
        goal.waypoint_pose.pose.position.x = wp.x
        goal.waypoint_pose.pose.position.y = wp.y
        goal.goal_tolerance = goal_tolerance
        # 0=None, 1=Depth, 2=Altitude in the action
        # thankfully these are the same in IMC and in the Action
        # but Action doesnt have 'height', so treat height as 'no z control'
        if wp.z_unit == imc_enums.Z_HEIGHT:
            wp.z_unit = imc_enums.Z_NONE
        goal.z_control_mode = wp.z_unit
        goal.travel_depth = wp.z
        # 0=None, 1=RPM, 2=speed in the action
        # 0=speed, 1=rpm, 2=percentage in IMC
        if wp.speed_unit == imc_enums.SPEED_UNIT_RPM:
            goal.speed_control_mode = GotoWaypointGoal.SPEED_CONTROL_RPM
            goal.travel_rpm = wp.speed
        elif wp.speed_unit == imc_enums.SPEED_UNIT_MPS:
            goal.speed_control_mode = GotoWaypointGoal.SPEED_CONTROL_SPEED
            goal.travel_speed = wp.speed
        else:
            goal.speed_control_mode = GotoWaypointGoal.SPEED_CONTROL_NONE
            rospy.logwarn_throttle(1, "Speed control of the waypoint action is NONE!")
        self.action_goal = goal
        rospy.loginfo(">>> Goto waypoint action goal initialized:"+str(goal))
        # ensure that we still need to send the goal
        self.sent_goal = False

    def update(self):
        """
        Check only to see whether the underlying action server has
        succeeded, is running, or has cancelled/aborted for some reason and
        map these to the usual behaviour return states.
        """
        if not self.action_server_ok:
            self.feedback_message = "Action Server for gotowp action can not be used!"
            rospy.logerr_throttle_identical(5,self.feedback_message)
            return pt.Status.FAILURE
        # if your action client is not valid
        if not self.action_client:
            self.feedback_message = "ActionClient is invalid! Client:"+str(self.action_client)
            rospy.logerr(self.feedback_message)
            return pt.Status.FAILURE
        # if the action_goal is invalid
        if not self.action_goal:
            self.feedback_message = "No action_goal!"
            rospy.logwarn(self.feedback_message)
            return pt.Status.FAILURE
        # if goal hasn't been sent yet
        if not self.sent_goal:
            self.action_goal_handle = self.action_client.send_goal(self.action_goal, feedback_cb=self.feedback_cb)
            self.sent_goal = True
            rospy.loginfo("Sent goal to action server:"+str(self.action_goal))
            self.feedback_message = "Goal sent"
            return pt.Status.RUNNING
        # if the goal was aborted or preempted
        if self.action_client.get_state() in [actionlib_msgs.GoalStatus.ABORTED,
                                              actionlib_msgs.GoalStatus.PREEMPTED]:
            self.feedback_message = "Aborted goal"
            rospy.loginfo(self.feedback_message)
            return pt.Status.FAILURE
        result = self.action_client.get_result()
        # if the goal was accomplished
        if result is not None and result.reached_waypoint:
            self.feedback_message = "Completed goal"
            rospy.loginfo(self.feedback_message)
            return pt.Status.SUCCESS
        return pt.Status.RUNNING

    def feedback_cb(self, msg):
        # Surface the server's ETA estimate as this behaviour's feedback.
        fb = str(msg.ETA)
        self.feedback_message = "ETA:"+fb
        rospy.loginfo_throttle(5, fb)
class A_UpdateTF(pt.behaviour.Behaviour):
    def __init__(self, utm_link, base_link):
        """
        reads the current translation and orientation from the TF tree
        and puts that into the BB
        utm_link and base_link are tf link names where utm_link is essentially the world coordinates.
        check the neptus-related actions too for more info on utm_link
        """
        super(A_UpdateTF, self).__init__("A_UpdateTF")
        self.bb = pt.blackboard.Blackboard()
        self.utm_link = utm_link
        self.base_link = base_link
        self.listener = tf.TransformListener()
        self.tf_ok = False
        # time.time() of the last successful lookup, for feedback only
        self.last_read_time = None

    def setup(self, timeout):
        try:
            rospy.loginfo_throttle(3, "Waiting for transform from {} to {}...".format(self.utm_link, self.base_link))
            self.listener.waitForTransform(self.utm_link, self.base_link, rospy.Time(), rospy.Duration(timeout))
            rospy.loginfo_throttle(3, "...Got it")
            self.tf_ok = True
        # bugfix: was a bare `except:` which also caught SystemExit /
        # KeyboardInterrupt; narrowed to Exception
        except Exception:
            rospy.logerr_throttle(5, "Could not find from "+self.utm_link+" to "+self.base_link + "... Nothing except safety will be run")
        return True

    def update(self):
        if self.last_read_time is not None:
            time_since = time.time() - self.last_read_time
            self.feedback_message = "Last read:{:.2f}s ago".format(time_since)
        else:
            self.feedback_message = "No msg received ever"
        try:
            (world_trans, world_rot) = self.listener.lookupTransform(self.utm_link,
                                                                     self.base_link,
                                                                     rospy.Time(0))
            self.last_read_time = time.time()
        except (tf.LookupException, tf.ConnectivityException):
            rospy.logerr_throttle_identical(5, "Could not get transform between {} and {}".format(self.utm_link, self.base_link))
            return pt.Status.FAILURE
        # bugfix: was a bare `except:` that hid the actual error; narrowed
        # and the exception is now included in the log message
        except Exception as e:
            rospy.logerr_throttle_identical(5, "Could not do tf lookup for some other reason: {}".format(e))
            return pt.Status.FAILURE
        self.bb.set(bb_enums.WORLD_TRANS, world_trans)
        self.bb.set(bb_enums.WORLD_ROT, world_rot)
        # also create this pointstamped object so that we can transform this
        # easily to w/e other frame is needed later
        ps = PointStamped()
        ps.header.frame_id = self.utm_link
        ps.header.stamp = rospy.Time(0)
        ps.point.x = world_trans[0]
        ps.point.y = world_trans[1]
        ps.point.z = world_trans[2]
        self.bb.set(bb_enums.LOCATION_POINT_STAMPED, ps)
        # the Z component is UP, so invert to get "depth"
        self.bb.set(bb_enums.DEPTH, -world_trans[2])
        return pt.Status.SUCCESS
class A_UpdateNeptusPlanControl(pt.behaviour.Behaviour):
    """Consume Neptus PlanControl messages and translate the GUI's
    start/stop/teleop buttons into blackboard flags."""
    def __init__(self, plan_control_topic):
        super(A_UpdateNeptusPlanControl, self).__init__("A_UpdateNeptusPlanControl")
        self.bb = pt.blackboard.Blackboard()
        self.plan_control_msg = None
        self.plan_control_topic = plan_control_topic
        self.sub = None

    def setup(self, timeout):
        self.sub = rospy.Subscriber(self.plan_control_topic, PlanControl, self.plancontrol_cb)
        return True

    def plancontrol_cb(self, plan_control_msg):
        # Only stash the latest message; it is consumed (and cleared) in update().
        self.plan_control_msg = plan_control_msg

    def update(self):
        msg = self.plan_control_msg
        # Not having received anything since the last tick is fine.
        if msg is None:
            return pt.Status.SUCCESS
        # imc/plan_control(569) fields:
        #   type: [0,1,2,3] req, suc, fail, in prog
        #   op:   [0,1,2,3] start, stop, load, get
        # Neptus buttons map onto these as:
        #   start button:  type:0 op:0 plan_id:"<plan>"             flags:1
        #   stop button:   type:0 op:1 plan_id:''                   flags:1
        #   teleop button: type:0 op:0 plan_id:"teleoperation-mode" flags:0
        msg_type, op, plan_id, flags = msg.type, msg.op, msg.plan_id, msg.flags
        # somehow this happens...
        if plan_id is None:
            plan_id = ''
        # Keep the button checks well-defined and separate for possible
        # future shenanigans.
        is_start = (msg_type == 0 and op == 0 and plan_id != '' and flags == 1)
        is_stop = (msg_type == 0 and op == 1 and plan_id == '' and flags == 1)
        # this string is hardcoded in Neptus, so we hardcode it here too!
        is_teleop = (msg_type == 0 and op == 0 and plan_id == 'teleoperation-mode' and flags == 0)
        if is_start:
            # check if the start was given for our current plan
            current_mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
            self.bb.set(bb_enums.PLAN_IS_GO, True)
            self.bb.set(bb_enums.ENABLE_AUTONOMY, False)
            if current_mission_plan is not None and plan_id == current_mission_plan.plan_id:
                rospy.loginfo("Started plan:{}".format(plan_id))
            elif current_mission_plan is None:
                rospy.logwarn("Start given for plan:{} but we don't have a plan!".format(plan_id))
            else:
                rospy.logwarn("Start given for plan:{} our plan:{}".format(plan_id, current_mission_plan.plan_id))
        if is_stop:
            self.bb.set(bb_enums.PLAN_IS_GO, False)
            self.bb.set(bb_enums.ENABLE_AUTONOMY, False)
        if is_teleop:
            self.bb.set(bb_enums.ENABLE_AUTONOMY, True)
            rospy.logwarn_throttle_identical(10, "AUTONOMOUS MODE")
        # reset it until next message
        self.plan_control_msg = None
        return pt.Status.SUCCESS
class A_UpdateNeptusEstimatedState(pt.behaviour.Behaviour):
    """Publishes the vehicle's pose to Neptus as an EstimatedState message
    plus two NavSatFix messages (gps fix and gps nav data)."""
    def __init__(self,
                 estimated_state_topic,
                 gps_fix_topic,
                 gps_nav_data_topic):
        super(A_UpdateNeptusEstimatedState, self).__init__("A_UpdateNeptusEstimatedState")
        self.bb = pt.blackboard.Blackboard()
        self.estimated_state_topic = estimated_state_topic
        self.estimated_state_pub = None
        # re-used message objects, only their fields change per tick
        self.e_state = EstimatedState()
        self.gps_fix = NavSatFix()
        self.gps_fix_topic = gps_fix_topic
        self.gps_fix_pub = None
        self.gps_nav_data_topic = gps_nav_data_topic
        self.gps_nav_data_pub = None
    def setup(self, timeout):
        self.estimated_state_pub = rospy.Publisher(self.estimated_state_topic, EstimatedState, queue_size=1)
        self.gps_fix_pub = rospy.Publisher(self.gps_fix_topic, NavSatFix, queue_size=1)
        self.gps_nav_data_pub = rospy.Publisher(self.gps_nav_data_topic, NavSatFix, queue_size=1)
        return True
    def update(self):
        lat = self.bb.get(bb_enums.CURRENT_LATITUDE)
        lon = self.bb.get(bb_enums.CURRENT_LONGITUDE)
        depth = self.bb.get(bb_enums.DEPTH)
        world_rot = self.bb.get(bb_enums.WORLD_ROT)
        if depth is None:
            # a missing depth is tolerable, fall back to the surface
            self.feedback_message = "depth was None, using 0"
            depth = 0
        if lat is None or lon is None or world_rot is None:
            rospy.logwarn_throttle_identical(10, "Could not update neptus estimated state because lat/lon/world_rot was None!")
            return pt.Status.SUCCESS
        # Neptus wants radians
        self.e_state.lat = np.radians(lat)
        self.e_state.lon = np.radians(lon)
        self.e_state.depth = depth
        _, _, yaw = tf.transformations.euler_from_quaternion(world_rot)
        # heading convention Neptus expects: pi/2 minus the tf yaw
        self.e_state.psi = np.pi / 2. - yaw
        self.estimated_state_pub.publish(self.e_state)
        # same data as gps fix; the bridge only looks at lat, lon and altitude
        self.gps_fix.latitude = lat
        self.gps_fix.longitude = lon
        self.gps_fix.altitude = -depth
        self.gps_fix.header.seq = int(time.time())
        self.gps_fix_pub.publish(self.gps_fix)
        self.gps_nav_data_pub.publish(self.gps_fix)
        return pt.Status.SUCCESS
class A_UpdateNeptusPlanControlState(pt.behaviour.Behaviour):
    """Publishes a PlanControlState to Neptus every tick: which plan is
    loaded, which maneuver is active and an overall progress percentage."""
    def __init__(self, plan_control_state_topic):
        super(A_UpdateNeptusPlanControlState, self).__init__("A_UpdateNeptusPlanControlState")
        self.bb = pt.blackboard.Blackboard()
        self.plan_control_state_pub = None
        self.plan_control_state_topic = plan_control_state_topic
    def setup(self, timeout):
        # publisher is created here rather than in __init__, following the
        # py_trees setup() convention used by the other behaviours
        self.plan_control_state_pub = rospy.Publisher(self.plan_control_state_topic, PlanControlState, queue_size=1)
        return True
    def update(self):
        """Builds and publishes the progress message; always returns SUCCESS."""
        # construct current progress message for neptus
        msg = PlanControlState()
        tip_name = self.bb.get(bb_enums.TREE_TIP_NAME)
        tip_status = self.bb.get(bb_enums.TREE_TIP_STATUS)
        # this tip_status looks like: "Status.FAILURE"
        # I just wanna get the first letter after dot.
        # NOTE(review): index 7 is hard-coded to the length of "Status." —
        # this breaks silently if the status string format ever changes.
        msg.man_id = tip_name+'('+tip_status[7]+')'
        mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
        if mission_plan is None:
            msg.plan_id = 'No plan'
            msg.plan_progress = 100.0
        elif mission_plan.is_complete():
            msg.plan_id = 'Mission complete'
            msg.plan_progress = 100.0
        else:
            # a plan is loaded and still running: report the active waypoint
            current_wp_index = mission_plan.current_wp_index
            current_man_id = mission_plan.waypoint_man_ids[current_wp_index]
            total = len(mission_plan.waypoints)
            msg.plan_id = str(mission_plan.plan_id)
            if self.bb.get(bb_enums.PLAN_IS_GO):
                # overwrite the tree-tip man_id with the actual maneuver id
                msg.man_id = current_man_id
            plan_progress = (current_wp_index * 100.0) / total # percent float
            msg.plan_progress = plan_progress
        # map the ticking behaviour to an IMC state; the blocked branch also
        # overrides plan/man ids so the operator sees the emergency clearly
        if tip_name in imc_enums.EXECUTING_ACTION_NAMES:
            msg.state = imc_enums.STATE_EXECUTING
        elif tip_name in imc_enums.BLOCKED_ACTION_NAMES:
            msg.state = imc_enums.STATE_BLOCKED
            msg.plan_id = 'SAFETY FALLBACK'
            msg.man_id = 'EMERGENCY'
            msg.plan_progress = 0.0
        else:
            msg.state = imc_enums.STATE_READY
        if self.bb.get(bb_enums.ENABLE_AUTONOMY):
            msg.plan_id += '(AUTONOMOUS)'
        # send message to neptus
        self.plan_control_state_pub.publish(msg)
        return pt.Status.SUCCESS
class A_UpdateNeptusVehicleState(pt.behaviour.Behaviour):
    """Broadcasts a VehicleState so Neptus shows the vehicle as connected
    (maneuvering / error / service) rather than SAM:DISCONNECTED."""
    def __init__(self, vehicle_state_topic):
        super(A_UpdateNeptusVehicleState, self).__init__("A_UpdateNeptusVehicleState")
        self.bb = pt.blackboard.Blackboard()
        self.vehicle_state_topic = vehicle_state_topic
        self.vehicle_state_pub = None
    def setup(self, timeout):
        self.vehicle_state_pub = rospy.Publisher(self.vehicle_state_topic, VehicleState, queue_size=1)
        return True
    def update(self):
        # map the currently ticking behaviour onto an IMC op mode
        vs = VehicleState()
        tip_name = self.bb.get(bb_enums.TREE_TIP_NAME)
        if tip_name in imc_enums.EXECUTING_ACTION_NAMES:
            op_mode = imc_enums.OP_MODE_MANEUVER
        elif tip_name == 'A_EmergencySurface':
            op_mode = imc_enums.OP_MODE_ERROR
        else:
            op_mode = imc_enums.OP_MODE_SERVICE
        vs.op_mode = op_mode
        self.vehicle_state_pub.publish(vs)
        return pt.Status.SUCCESS
class A_UpdateNeptusPlanDB(pt.behaviour.Behaviour):
    """Speaks the Neptus/IMC PlanDB protocol over a single topic:
    answers GET_INFO / GET_STATE requests, accepts SET requests as new
    mission plans, and re-acks the current plan every tick so it stays
    'green' in the Neptus UI."""
    def __init__(self,
                 plandb_topic,
                 utm_link,
                 local_link,
                 latlontoutm_service_name,
                 latlontoutm_service_name_alternative):
        super(A_UpdateNeptusPlanDB, self).__init__("A_UpdateNeptusPlanDB")
        self.bb = pt.blackboard.Blackboard()
        # neptus sends lat/lon, which we convert to utm, which we then convert to local
        self.utm_link = utm_link
        self.local_link = local_link
        self.latlontoutm_service_name = latlontoutm_service_name
        self.latlontoutm_service_name_alternative = latlontoutm_service_name_alternative
        # the message body is largely the same, so we can re-use most of it
        self.plandb_msg = PlanDB()
        self.plandb_msg.type = imc_enums.PLANDB_TYPE_SUCCESS
        self.plandb_msg.op = imc_enums.PLANDB_OP_SET
        self.plandb_pub = None
        self.plandb_sub = None
        self.latest_plandb_msg = None
        self.plandb_topic = plandb_topic
    def setup(self, timeout):
        # the same topic carries both our answers and Neptus' requests
        self.plandb_pub = rospy.Publisher(self.plandb_topic, PlanDB, queue_size=1)
        self.plandb_sub = rospy.Subscriber(self.plandb_topic, PlanDB, callback=self.plandb_cb, queue_size=1)
        return True
    def plandb_cb(self, plandb_msg):
        """
        Stash the latest message; it is consumed by update() on the tree tick.
        As an answer to OUR answer of 'type=success, op=set', neptus sends a
        'type=request, op=get_info'.
        """
        # rospy.loginfo("plancontrol_cb {}".format(plan_control_msg))
        self.latest_plandb_msg = plandb_msg
    def make_plandb_info(self):
        """Summarizes the current mission plan as a PlanDBInformation."""
        current_mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
        plan_info = PlanDBInformation()
        plan_info.plan_id = current_mission_plan.plan_id
        plan_info.md5 = current_mission_plan.plandb_msg.plan_spec_md5
        # /1000. — presumably creation_time is in milliseconds and IMC wants
        # seconds; confirm against MissionPlan
        plan_info.change_time = current_mission_plan.creation_time/1000.0
        return plan_info
    def handle_request_get_info(self, plandb_msg):
        """Answers a REQUEST/GET_INFO with info about the current plan, if any."""
        # we need to respond to this with some info... but what?
        rospy.loginfo_throttle_identical(30, "Got REQUEST GET_INFO planDB msg from Neptus")
        current_mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
        if current_mission_plan is None:
            return
        response = PlanDB()
        response.plan_id = current_mission_plan.plan_id
        response.type = imc_enums.PLANDB_TYPE_SUCCESS
        response.op = imc_enums.PLANDB_OP_GET_INFO
        response.plandb_information = self.make_plandb_info()
        self.plandb_pub.publish(response)
        rospy.loginfo_throttle_identical(30, "Answered GET_INFO for plan:"+str(response.plan_id))
    def handle_request_get_state(self, plandb_msg):
        """Answers a REQUEST/GET_STATE pretending our DB holds exactly one plan."""
        rospy.loginfo_throttle_identical(30, "Got REQUEST GET_STATE planDB msg from Neptus")
        current_mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
        if current_mission_plan is None:
            return
        # https://github.com/LSTS/imcjava/blob/d95fddeab4c439e603cf5e30a32979ad7ace5fbc/src/java/pt/lsts/imc/adapter/PlanDbManager.java#L160
        # See above for an example
        # TODO it seems like we need to keep a planDB ourselves on this side, collect all the plans we
        # received and answer this get_state with data from them all.
        # lets try telling neptus that we just got one plan, maybe that'll be okay?
        # seems alright, but after this message is sent, the plan goes red :/
        response = PlanDB()
        response.plan_id = current_mission_plan.plan_id
        response.type = imc_enums.PLANDB_TYPE_SUCCESS
        response.op = imc_enums.PLANDB_OP_GET_STATE
        response.plandb_state = PlanDBState()
        response.plandb_state.plan_count = 1
        response.plandb_state.plans_info.append(self.make_plandb_info())
        self.plandb_pub.publish(response)
        rospy.loginfo_throttle_identical(30, "Answered GET_STATE for plan:\n"+str(response.plan_id))
    def handle_set_plan(self, plandb_msg):
        """Turns an OP_SET message into a MissionPlan and resets mission flags."""
        # there is a plan we can at least look at
        mission_plan = MissionPlan(plan_frame = self.utm_link,
                                   plandb_msg = plandb_msg,
                                   latlontoutm_service_name = self.latlontoutm_service_name,
                                   latlontoutm_service_name_alternative = self.latlontoutm_service_name_alternative,
                                   coverage_swath = self.bb.get(bb_enums.SWATH),
                                   vehicle_localization_error_growth = self.bb.get(bb_enums.LOCALIZATION_ERROR_GROWTH))
        if mission_plan.no_service:
            self.feedback_message = "MISSION PLAN HAS NO SERVICE"
            rospy.logerr(self.feedback_message)
            return
        # a fresh plan un-finalizes the mission and stops autonomy until a new 'go'
        self.bb.set(bb_enums.MISSION_PLAN_OBJ, mission_plan)
        self.bb.set(bb_enums.ENABLE_AUTONOMY, False)
        self.bb.set(bb_enums.MISSION_FINALIZED, False)
        self.bb.set(bb_enums.PLAN_IS_GO, False)
        rospy.loginfo_throttle_identical(5, "Set the mission plan to:{} and un-finalized the mission.".format(mission_plan))
    def handle_plandb_msg(self):
        """Dispatches the stashed PlanDB message by (type, op)."""
        plandb_msg = self.latest_plandb_msg
        if plandb_msg is None:
            return
        typee = plandb_msg.type
        op = plandb_msg.op
        # request get_info
        if typee == imc_enums.PLANDB_TYPE_REQUEST and op == imc_enums.PLANDB_OP_GET_INFO:
            self.handle_request_get_info(plandb_msg)
        elif typee == imc_enums.PLANDB_TYPE_REQUEST and op == imc_enums.PLANDB_OP_GET_STATE:
            self.handle_request_get_state(plandb_msg)
        elif typee == imc_enums.PLANDB_TYPE_SUCCESS and op == imc_enums.PLANDB_OP_SET:
            self.feedback_message = "Got SUCCESS for plandb set"
        elif typee == imc_enums.PLANDB_TYPE_SUCCESS and op == imc_enums.PLANDB_OP_GET_INFO:
            self.feedback_message = "Got SUCCESS for plandb get info"
        elif typee == imc_enums.PLANDB_TYPE_SUCCESS and op == imc_enums.PLANDB_OP_GET_STATE:
            self.feedback_message = "Got SUCCESS for plandb get state"
        elif op == imc_enums.PLANDB_OP_SET:
            # any non-SUCCESS message with OP_SET is treated as a new plan upload
            self.handle_set_plan(plandb_msg)
        else:
            self.feedback_message = "Got some unhandled planDB message:\n"+str(plandb_msg)
    def respond_set_success(self):
        """Re-acknowledges the current plan (the pre-built SUCCESS/SET message)."""
        current_mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
        if current_mission_plan is None:
            self.feedback_message = "No mission plan obj!"
            return
        plan_id = current_mission_plan.plan_id
        self.plandb_msg.plan_id = plan_id
        self.plandb_pub.publish(self.plandb_msg)
        self.feedback_message = "Answered set success for plan_id:"+str(plan_id)
    def update(self):
        """Acks the current plan, processes at most one request; always SUCCESS."""
        # we just want to tell neptus we got the plan all the time
        # this keeps the thingy green
        self.respond_set_success()
        self.handle_plandb_msg()
        # reset
        self.latest_plandb_msg = None
        return pt.Status.SUCCESS
class A_UpdateMissonForPOI(pt.behaviour.Behaviour):
    """
    Creates a new diamond-shaped mission over a detected POI
    and sets that as the current mission plan.
    NOTE(review): the action is currently DISABLED — update() returns FAILURE
    unconditionally before any of the mission-building code runs (the original
    docstring's "always returns SUCCESS" only applies to the dead code below).
    """
    def __init__(self, utm_link, poi_link, latlontoutm_service_name):
        super(A_UpdateMissonForPOI, self).__init__(name="A_UpdateMissonForPOI")
        self.bb = pt.blackboard.Blackboard()
        self.utm_link = utm_link
        self.poi_link = poi_link
        self.tf_listener = tf.TransformListener()
        self.latlontoutm_service_name = latlontoutm_service_name
        self.poi_link_available = False
    def setup(self, timeout):
        # wait once for the poi->utm transform; failure only disables updates,
        # it does not fail the tree's setup
        try:
            rospy.loginfo_throttle(3, "Waiting for transform from {} to {}...".format(self.poi_link, self.utm_link))
            self.tf_listener.waitForTransform(self.poi_link, self.utm_link, rospy.Time(), rospy.Duration(timeout))
            rospy.loginfo_throttle(3, "...Got it")
            self.poi_link_available = True
        except:
            rospy.logerr_throttle(5, "Could not find tf from:"+self.poi_link+" to:"+self.utm_link+" disabling updates")
        return True
    def update(self):
        #XXX UNTESTED STUFF HERE, RETURN FAILURE TO KEEP PPL
        #XXX FROM USING THIS ACTION
        # everything below this return is intentionally unreachable
        return pt.Status.FAILURE
        if not self.poi_link_available:
            return pt.Status.FAILURE
        poi = self.bb.get(bb_enums.POI_POINT_STAMPED)
        if poi is None:
            return pt.Status.SUCCESS
        poi_local = self.tf_listener.transformPoint(self.utm_link, poi)
        x = poi_local.point.x
        y = poi_local.point.y
        depth = poi.point.z
        # construct the waypoints that we want to go to
        inspection_depth = max(1, depth - 5)
        radius = 10
        # go east,west,north,south,center
        # so we do bunch of fly-overs
        waypoints = [
            (x+radius, y, inspection_depth),
            (x-radius, y, inspection_depth),
            (x, y+radius, inspection_depth),
            (x, y-radius, inspection_depth),
            (x, y, 0)
        ]
        waypoint_man_ids = ['east', 'west', 'north', 'south', 'surface_center']
        # construct a planDB message to be given to the mission_plan
        # we will not fill the plan_spec of this plandb message,
        # and instead call a different constructor of MissionPlan
        # to bypass the lat/lon stuff
        pdb = PlanDB()
        pdb.request_id = 42
        pdb.plan_id = "POI"
        # set it in the tree
        mission_plan = MissionPlan(plan_frame = self.utm_link,
                                   plandb_msg = pdb,
                                   waypoints = waypoints,
                                   waypoint_man_ids=waypoint_man_ids,
                                   latlontoutm_service_name = self.latlontoutm_service_name)
        self.bb.set(bb_enums.MISSION_PLAN_OBJ, mission_plan)
        rospy.loginfo_throttle_identical(5, "Due to POI, set the mission plan to:"+str(mission_plan))
        return pt.Status.SUCCESS
class A_VizPublishPlan(pt.behaviour.Behaviour):
    """Publishes the current plan's waypoints as a PoseArray for
    visualization; an empty array is sent when no plan is loaded."""
    def __init__(self, plan_viz_topic):
        super(A_VizPublishPlan, self).__init__(name="A_VizPublishPlan")
        self.bb = pt.blackboard.Blackboard()
        self.plan_viz_topic = plan_viz_topic
        self.pa_pub = None
    def setup(self, timeout):
        self.pa_pub = rospy.Publisher(self.plan_viz_topic, PoseArray, queue_size=1)
        return True
    def update(self):
        mission = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
        # an empty PoseArray clears the visualization when there is no plan
        pa = mission.get_pose_array(flip_z=True) if mission is not None else PoseArray()
        self.pa_pub.publish(pa)
        return pt.Status.SUCCESS
class A_FollowLeader(ptr.actions.ActionClient):
    """Action-client behaviour that sends a GotoWaypoint goal whose target
    pose is the origin of another tf frame (the 'leader'), so the action
    server drives the robot towards that frame."""
    def __init__(self,
                 action_namespace,
                 leader_link):
        """
        Runs an action server that will move the robot towards another tf link
        """
        self.bb = pt.blackboard.Blackboard()
        # register ourselves in the shared list of maneuvering actions
        list_of_maneuvers = self.bb.get(bb_enums.MANEUVER_ACTIONS)
        if list_of_maneuvers is None:
            list_of_maneuvers = ["A_FollowLeader"]
        else:
            list_of_maneuvers.append("A_FollowLeader")
        self.bb.set(bb_enums.MANEUVER_ACTIONS, list_of_maneuvers)
        self.action_goal_handle = None
        self.leader_link = leader_link
        # become action client
        ptr.actions.ActionClient.__init__(
            self,
            name="A_FollowLeader",
            action_spec=GotoWaypointAction,
            action_goal=None,
            action_namespace = action_namespace,
            override_feedback_message_on_running="Moving towards"+str(leader_link)
        )
        self.action_server_ok = False
    def setup(self, timeout):
        """
        Overwriting the normal ptr action setup to stop it from failing the setup step
        and instead handling this failure in the tree.
        """
        self.logger.debug("%s.setup()" % self.__class__.__name__)
        self.action_client = actionlib.SimpleActionClient(
            self.action_namespace,
            self.action_spec
        )
        if not self.action_client.wait_for_server(rospy.Duration(timeout)):
            self.logger.error("{0}.setup() could not connect to the action server at '{1}'".format(self.__class__.__name__, self.action_namespace))
            self.action_client = None
        else:
            self.action_server_ok = True
        # always report setup success; a missing server surfaces as FAILURE in update()
        return True
    def initialise(self):
        # construct the message
        self.action_goal = GotoWaypointGoal()
        # leave 0,0,0 because we want to go to the frame's center
        self.action_goal.target_pose.header.frame_id = self.leader_link
        rospy.loginfo("Follow action goal initialized")
        # ensure that we still need to send the goal
        self.sent_goal = False
    def update(self):
        """
        Check only to see whether the underlying action server has
        succeeded, is running, or has cancelled/aborted for some reason and
        map these to the usual behaviour return states.
        """
        if not self.action_server_ok:
            self.feedback_message = "Action Server for follow leader action can not be used!"
            rospy.logerr_throttle_identical(5,self.feedback_message)
            return pt.Status.FAILURE
        # if your action client is not valid
        if not self.action_client:
            self.feedback_message = "ActionClient is invalid! Client:"+str(self.action_client)
            rospy.logerr(self.feedback_message)
            return pt.Status.FAILURE
        # if the action_goal is invalid
        if not self.action_goal:
            self.feedback_message = "No action_goal!"
            rospy.logwarn(self.feedback_message)
            return pt.Status.FAILURE
        # if goal hasn't been sent yet
        if not self.sent_goal:
            self.action_goal_handle = self.action_client.send_goal(self.action_goal, feedback_cb=self.feedback_cb)
            self.sent_goal = True
            rospy.loginfo("Sent goal to action server:"+str(self.action_goal))
            self.feedback_message = "Goal sent"
            return pt.Status.RUNNING
        # if the goal was aborted or preempted
        if self.action_client.get_state() in [actionlib_msgs.GoalStatus.ABORTED,
                                              actionlib_msgs.GoalStatus.PREEMPTED]:
            self.feedback_message = "Aborted goal"
            rospy.loginfo(self.feedback_message)
            return pt.Status.FAILURE
        result = self.action_client.get_result()
        # if the goal was accomplished
        if result:
            self.feedback_message = "Completed goal"
            rospy.loginfo(self.feedback_message)
            return pt.Status.SUCCESS
        # goal sent, not finished, not aborted: still on the way
        return pt.Status.RUNNING
    def feedback_cb(self, msg):
        # feedback is ignored; only terminal state matters to this behaviour
        pass
class A_ReadBuoys(pt.behaviour.Behaviour):
    '''
    This action reads the uncertain positions
    (mean and covariance) of buoys from the rostopic
    and mirrors them into the blackboard every tick.
    '''
    def __init__(
        self,
        topic_name,
        buoy_link,
        utm_link,
        latlon_utm_serv,
    ):
        # rostopic name and type (e.g. marker array)
        self.topic_name = topic_name
        # frame IDs for TF
        self.buoy_link = buoy_link
        self.utm_link = utm_link
        # lat/lon to utm service
        self.latlon_utm_serv = latlon_utm_serv
        # blackboard for info
        self.bb = pt.blackboard.Blackboard()
        # become a behaviour
        pt.behaviour.Behaviour.__init__(
            self,
            name="A_ReadBuoys"
        )
        # for coordinate frame transformations
        self.tf_listener = tf.TransformListener()
    def setup(self, timeout):
        # wait for TF transformation; a missing transform is only logged,
        # setup still succeeds
        try:
            rospy.loginfo('Waiting for transform from {} to {}.'.format(
                self.buoy_link,
                self.utm_link
            ))
            self.tf_listener.waitForTransform(
                self.buoy_link,
                self.utm_link,
                rospy.Time(),
                rospy.Duration(timeout)
            )
        except:
            rospy.loginfo('Transform from {} to {} not found.'.format(
                self.buoy_link,
                self.utm_link
            ))
        # subscribe to buoy positions
        self.sub = rospy.Subscriber(
            self.topic_name,
            MarkerArray,
            callback=self.cb,
            queue_size=10
        )
        # self.bb.set(bb_enums.BUOYS, None)
        self.buoys = None
        return True
    def cb(self, msg):
        '''
        This will read the uncertain buoy positions
        from the SLAM backend and sensors.
        But, for now, it just reads the simulator buoys.
        The buoys here are assumed to be in the map frame.
        '''
        # space for bouy positions
        # rospy.loginfo('hello')
        self.buoys = list()
        # loop through visualization markers
        for marker in msg.markers:
            # convert their pose to pose stamped
            pose = PoseStamped(
                header=marker.header,
                pose=marker.pose
            )
            # # transform it from local to UTM frame
            # pose = self.tf_listener.transformPose(
            #     self.utm_link,
            #     pose
            # )
            # add it to the list
            self.buoys.append([
                pose.pose.position.x,
                pose.pose.position.y,
                pose.pose.position.z
            ])
        # make it into a numpy array because why not
        self.buoys = np.array(self.buoys)
        # NOTE(review): the reshape below assumes the marker count is a
        # multiple of 3 (a grid of buoy rows of 3) — the cb will raise on any
        # other count; confirm against the publisher
        self.buoys = self.buoys[np.argsort(self.buoys[:,0])]
        self.buoys = self.buoys.reshape((-1, 3, 3))
        self.buoys = np.sort(self.buoys, axis=1)
        # expose the grid edges by name; 'all' keeps the full (rows, 3, 3) array
        self.buoys = dict(
            front=self.buoys[:,0,:],
            left=self.buoys[0,:,:],
            back=self.buoys[:,-1,:],
            right=self.buoys[-1,:,:],
            all=self.buoys
        )
    def update(self):
        # put the buoy positions in the blackboard
        self.bb.set(bb_enums.BUOYS, self.buoys)
        return pt.Status.SUCCESS
| 36.437677 | 165 | 0.634772 | 50,424 | 0.980058 | 0 | 0 | 0 | 0 | 0 | 0 | 11,645 | 0.226336 |
1da32fba530eac4eeca4097a02c12b5c6d943941 | 464 | py | Python | back-end/src/handler/common/image.py | gfxcc/san11-platform | 1d085d818bc265f5ffa7e8a7279c3b686deed98c | [
"MIT"
] | 2 | 2021-12-23T06:18:35.000Z | 2021-12-23T06:18:39.000Z | back-end/src/handler/common/image.py | gfxcc/san11-platform-back-end | 74f60d201e21396c5c8601ddc404077ebd97871f | [
"MIT"
] | 9 | 2021-03-10T01:54:16.000Z | 2022-03-27T21:38:34.000Z | back-end/src/handler/common/image.py | gfxcc/san11-platform-back-end | 74f60d201e21396c5c8601ddc404077ebd97871f | [
"MIT"
] | null | null | null | from __future__ import annotations
import logging
import os
import os.path
from ..util import gcs
logger = logging.getLogger(os.path.basename(__file__))
class Image:
    """A thin wrapper around a GCS-hosted image identified by its url."""

    def __init__(self, url) -> None:
        self.url = url

    def __str__(self) -> str:
        return self.url

    def delete(self):
        """Removes the backing resource from GCS and logs the deletion."""
        gcs.delete_resource(self.url)
        logger.info(f'{self} is deleted')

    @classmethod
    def from_url(cls, url: str):
        """Alternate constructor, kept for call-site readability."""
        return cls(url)
| 17.846154 | 54 | 0.650862 | 305 | 0.657328 | 0 | 0 | 69 | 0.148707 | 0 | 0 | 20 | 0.043103 |
1da36580d402ceaa2d29765e76d1412b05300439 | 285 | py | Python | diofant/tests/utilities/test_misc.py | rajkk1/diofant | 6b361334569e4ec2e8c7d30dc324387a4ad417c2 | [
"BSD-3-Clause"
] | 57 | 2016-09-13T23:16:26.000Z | 2022-03-29T06:45:51.000Z | diofant/tests/utilities/test_misc.py | rajkk1/diofant | 6b361334569e4ec2e8c7d30dc324387a4ad417c2 | [
"BSD-3-Clause"
] | 402 | 2016-05-11T11:11:47.000Z | 2022-03-31T14:27:02.000Z | diofant/tests/utilities/test_misc.py | rajkk1/diofant | 6b361334569e4ec2e8c7d30dc324387a4ad417c2 | [
"BSD-3-Clause"
] | 20 | 2016-05-11T08:17:37.000Z | 2021-09-10T09:15:51.000Z | from diofant.utilities.decorator import no_attrs_in_subclass
__all__ = ()
def test_no_attrs_in_subclass():
    """The wrapped attribute stays visible on the class itself but is
    hidden from subclasses."""
    class Parent:
        x = 'test'
    Parent.x = no_attrs_in_subclass(Parent, Parent.x)
    class Child(Parent):
        pass
    assert hasattr(Parent, 'x') is True
    assert hasattr(Child, 'x') is False
| 15.833333 | 60 | 0.631579 | 51 | 0.178947 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.042105 |
1da4cb39ba5d9ec1a0b4a7639eec537b2e91f417 | 921 | py | Python | ccc_client/app_repo/cli/upload_image.py | ohsu-comp-bio/ccc_client | 433a7fad3d8e6817362678b783110f38ca81e0a7 | [
"MIT"
] | null | null | null | ccc_client/app_repo/cli/upload_image.py | ohsu-comp-bio/ccc_client | 433a7fad3d8e6817362678b783110f38ca81e0a7 | [
"MIT"
] | null | null | null | ccc_client/app_repo/cli/upload_image.py | ohsu-comp-bio/ccc_client | 433a7fad3d8e6817362678b783110f38ca81e0a7 | [
"MIT"
] | null | null | null | import argparse
from ccc_client.app_repo.AppRepoRunner import AppRepoRunner
from ccc_client.utils import print_API_response
def run(args):
    """Uploads a docker image via the app repo API and prints each response;
    also uploads the tool metadata when one was given."""
    client = AppRepoRunner(args.host, args.port, args.authToken)
    response = client.upload_image(args.imageBlob, args.imageName, args.imageTag)
    print_API_response(response)
    if args.metadata is None:
        return
    response = client.upload_metadata(None, args.metadata)
    print_API_response(response)
# CLI for the `upload_image` sub-command; `run` is attached via set_defaults
# so the dispatcher can invoke `args.runner(args)`.
parser = argparse.ArgumentParser()
parser.set_defaults(runner=run)
parser.add_argument(
    "--imageBlob", "-b",
    type=str,
    help="name of file or path"
)
parser.add_argument(
    "--imageName", "-n",
    type=str,
    help="name of docker image"
)
parser.add_argument(
    "--imageTag", "-t",
    type=str,
    default="latest",
    help="docker image version tag"
)
parser.add_argument(
    "--metadata", "-m", type=str,
    help="tool metadata; can be a filepath or json string"
)
| 23.615385 | 74 | 0.693811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.209555 |
1da6c2497156cd58fc4b35c6c90260b47daa47fa | 1,781 | py | Python | src/simulation/entity.py | rah/optimal-search | 96d46ae1491b105a1ee21dc75e9297337249d466 | [
"MIT"
] | null | null | null | src/simulation/entity.py | rah/optimal-search | 96d46ae1491b105a1ee21dc75e9297337249d466 | [
"MIT"
] | 4 | 2018-04-26T01:49:12.000Z | 2022-01-08T08:12:52.000Z | src/simulation/entity.py | rah/optimal-search | 96d46ae1491b105a1ee21dc75e9297337249d466 | [
"MIT"
] | null | null | null | class Entity(object):
'''
An entity has:
- energy
- position(x,y)
- size(length, width)
An entity may have a parent entity
An entity may have child entities
'''
def __init__(
self,
p,
parent=None,
children=None):
self.p = p
self.energy = p.getfloat("ENTITY", "energy")
self.x_pos = p.getfloat("ENTITY", "x_pos")
self.y_pos = p.getfloat("ENTITY", "y_pos")
self.length = p.getint("ENTITY", "length")
self.width = p.getint("ENTITY", "width")
self.parent = parent
if children is None:
self.children = []
else:
self.children = children
    def add_child(self, entity):
        # Attach `entity` as a child; note it does NOT set entity.parent —
        # that is the caller's responsibility.
        self.children.append(entity)
    def remove_child(self, entity):
        # Detach the first matching child; raises ValueError when not present.
        self.children.remove(entity)
    def remove_self(self):
        # Detach this entity from its parent's child list; no-op for orphans.
        if self.parent is not None:
            self.parent.children.remove(self)
def number_children(self):
if self.children:
return len(self.children)
else:
return 0
def total_energy(self):
'''
returns the sum of all energy
'''
total_energy = self.energy
for child in self.children:
total_energy += child.energy
return total_energy
def set_bounds(self, x, y):
'''
Ensure that x, y are within the bounds of this entity.
Reset x,y so that a torus is formed
'''
if x < 0.0:
x = self.length
if x > self.length:
x = 0.0
if y < 0.0:
y = self.width
if y > self.width:
y = 0.0
return x, y
| 25.442857 | 63 | 0.498596 | 1,779 | 0.998877 | 0 | 0 | 0 | 0 | 0 | 0 | 436 | 0.244806 |
1daa69dd3bb44dba1f878107d4e4e2d32c7a2934 | 43 | py | Python | utils/__init__.py | Lolik-Bolik/The-production-cells-formation-problem | 8c4f5790b92fbca7c9c5c8143c7e70fb3bb8b78b | [
"MIT"
] | 5 | 2020-06-01T18:58:14.000Z | 2020-06-17T04:52:49.000Z | utils/__init__.py | Lolik-Bolik/The-production-cells-formation-problem | 8c4f5790b92fbca7c9c5c8143c7e70fb3bb8b78b | [
"MIT"
] | null | null | null | utils/__init__.py | Lolik-Bolik/The-production-cells-formation-problem | 8c4f5790b92fbca7c9c5c8143c7e70fb3bb8b78b | [
"MIT"
] | null | null | null | from .dataloader import CellsProductionData | 43 | 43 | 0.906977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d517cc6539e57b415b0f04b37c782b97faf26a2f | 4,586 | py | Python | create-kickstart.py | ulzeraj/autobond-autoraid-kickstarter | 935d9857fe31c9a70a4cdda5e872060abf09238d | [
"Unlicense"
] | null | null | null | create-kickstart.py | ulzeraj/autobond-autoraid-kickstarter | 935d9857fe31c9a70a4cdda5e872060abf09238d | [
"Unlicense"
] | null | null | null | create-kickstart.py | ulzeraj/autobond-autoraid-kickstarter | 935d9857fe31c9a70a4cdda5e872060abf09238d | [
"Unlicense"
] | null | null | null | #!/usr/bin/python2.6
#-*- coding: utf-8 -*-
import signal
import subprocess
from glob import glob
from os import listdir
from os.path import basename, dirname
label = 'CentOS_6.9_Final'
def listifaces():
    """Returns the names of all network interfaces except loopback."""
    return [iface for iface in listdir('/sys/class/net/') if iface != 'lo']
def listblocks():
    """Returns the kernel names of all block devices that expose a device node."""
    pattern = '/sys/block/*/device'
    return [basename(dirname(match)) for match in glob(pattern)]
def listlabel(dev):
    """Returns the filesystem LABEL of block device `dev` (e.g. '/dev/sda1')
    as reported by blkid, or None when blkid fails or is missing.
    """
    # argv list instead of shell=True so `dev` is never parsed by a shell
    # (avoids quoting/injection issues with odd device names)
    command = ['/usr/sbin/blkid', '-o', 'value', '-s', 'LABEL', dev]
    try:
        lsblk = subprocess.Popen(command, stdout=subprocess.PIPE)
        output = lsblk.communicate()[0].rstrip()
        return output
    except Exception:
        # was a bare `except:`; keep the silent-None contract but stop
        # swallowing KeyboardInterrupt/SystemExit
        pass
def discoverdisks():
    """Returns [name, removable_flag] pairs for every block device, where
    removable_flag is the '0'/'1' string read from sysfs.
    """
    disklist = []
    for dev in listblocks():
        # close the sysfs handle deterministically (original leaked it)
        with open('/sys/block/{0}/removable'.format(dev)) as removable_file:
            removable = removable_file.readline()
        disklist.append([dev, removable.rstrip()])
    return disklist
def getinternal(disklist):
    """Returns the names of the non-removable (internal) disks from
    [name, removable_flag] pairs."""
    return [name for name, removable in disklist if removable == '0']
def getremovable(disklist):
    """Returns the names of the removable disks from [name, removable_flag] pairs."""
    return [name for name, removable in disklist if removable == '1']
def getinstallmedia(disklist):
    """Returns the first partition (e.g. 'sdb1') of the first disk whose
    first partition carries the install-media `label`; None when no disk
    matches."""
    for name, _ in disklist:
        if listlabel('/dev/{0}1'.format(name)) == label:
            return '{0}1'.format(name)
# Probe the machine at import time and precompute the values interpolated
# into the kickstart template below.
# NOTE(review): this statement rebinds the module-level name `discoverdisks`
# from the function defined above to the function's result (a list of
# [name, removable] pairs). The function is not called again afterwards, so
# the shadowing is harmless here, but the variable deserves a rename.
discoverdisks = discoverdisks()
# first partition of the medium carrying the install label, e.g. 'sdb1'
# (not referenced below — presumably kept for debugging; confirm)
source = getinstallmedia(discoverdisks)
# at most two internal (non-removable) disks, sorted by name, for the RAID1 layout
localdisks = sorted(getinternal(discoverdisks))[:2]
# comma-separated NIC names that become the bond0 slaves
nics = ','.join(listifaces())
# Kickstart template: two-disk RAID1 layout (md0 for /boot, md1/md2 as LVM
# physical volumes), bonded NICs, and a fixed package set. The .format()
# slots are: {0} = comma-separated NIC list, {1} = comma-separated internal
# disk list, {2} = first internal disk, {3} = second internal disk.
# NOTE(review): the network line passes --bootproto twice (static, then
# dhcp); the second one presumably wins — confirm against anaconda docs.
kickstart = """lang en_US.UTF-8
keyboard us
network --bootproto=static --device=bond0 --bootproto=dhcp --bondopts=miimon=100,mode=active-backup --bondslaves="{0}"
firewall --enabled --ssh
timezone --utc America/Sao_Paulo
zerombr yes
clearpart --drives="{1}" --all --initlabel
bootloader --location=mbr --driveorder="{1}" --append="crashkernel=auto rhgb quiet"
# Please remember to change this. In case you don't the password encrypted bellow is "cheekibreeki".
rootpw --iscrypted $6$JDAL2eOJcBzAkykb$o9v9XAVC2i9YLyMGWEyG60SO2vXSDO.C42CoI/M5Ai/UCVOoWD6SH1sd9e7ImZJj/rx1aljJShdVjKHJgRa8s/
authconfig --enableshadow --passalgo=sha512
selinux --enabled
skipx
# Disk proposal bellow. You should customize it to your needs.
part raid.0 --size=512 --ondisk {2} --asprimary
part raid.1 --size=512 --ondisk {3} --asprimary
part raid.2 --size=40000 --ondisk {2} --asprimary
part raid.3 --size=40000 --ondisk {3} --asprimary
part raid.4 --size=10000 --ondisk {2} --asprimary --grow
part raid.5 --size=10000 --ondisk {3} --asprimary --grow
raid /boot --fstype xfs --level=RAID1 --device=md0 raid.0 raid.1
raid pv.1 --fstype "physical volume (LVM)" --level=RAID1 --device=md1 raid.2 raid.3
raid pv.2 --fstype "physical volume (LVM)" --level=RAID1 --device=md2 raid.4 raid.5
volgroup system --pesize=32768 pv.1
volgroup data --pesize=32768 pv.2
logvol / --fstype xfs --name=root --vgname=system --size=4096 --fsoptions="noatime,nodiratime"
logvol /usr --fstype xfs --name=usr --vgname=system --size=8192 --fsoptions="noatime,nodiratime,nodev"
logvol /var --fstype xfs --name=var --vgname=system --size=4096 --fsoptions="noatime,nodiratime,nodev,nosuid"
logvol /var/log --fstype xfs --name=varlog --vgname=system --size=4096 --fsoptions="noatime,nodiratime,nodev,nosuid,noexec"
logvol /tmp --fstype xfs --name=tmp --vgname=system --size=4096 --fsoptions="noatime,nodiratime,nodev,nosuid"
logvol /opt --fstype xfs --name=opt --vgname=system --size=512 --fsoptions="noatime,nodiratime,nodev,nosuid"
logvol /srv --fstype xfs --name=srv --vgname=system --size=5120 --fsoptions="noatime,nodiratime,nodev,nosuid,noexec"
logvol swap --fstype swap --name=swap --vgname=system --size=4096
logvol /home --fstype xfs --name=home --vgname=data --size=512 --fsoptions="noatime,nodiratime,nodev,nosuid,noexec"
%packages
@base
@console-internet
@core
@debugging
@directory-client
@hardware-monitoring
@java-platform
@large-systems
@network-file-system-client
@performance
@perl-runtime
@portuguese-support
@server-platform
@server-policy
@workstation-policy
pax
python-dmidecode
oddjob
sgpio
device-mapper-persistent-data
samba-winbind
certmonger
pam_krb5
krb5-workstation
perl-DBD-SQLite
dos2unix
ca-certificates
dhcp
nfs-utils
ipa-client
tcpdump
expect
%post""".format(nics, ','.join(localdisks), localdisks[0], localdisks[1])
if __name__ == '__main__':
    # Write the generated kickstart; `with` guarantees the handle is flushed
    # and closed even when write() raises (the original leaked it on error).
    with open('/tmp/autogen.ks', 'w+') as incks:
        incks.write(kickstart)
| 30.986486 | 126 | 0.703445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,954 | 0.644134 |
d518b9809b1e54d82285859972d83bf60fe776bf | 6,023 | py | Python | src/kalman_filter.py | Ashwin-Rajesh/Kalman_filter_carla | 75887655043070842bb0d292bf511e8ddc8fd40c | [
"MIT"
] | 1 | 2021-04-10T09:01:08.000Z | 2021-04-10T09:01:08.000Z | src/kalman_filter.py | Ashwin-Rajesh/Kalman_filter_carla | 75887655043070842bb0d292bf511e8ddc8fd40c | [
"MIT"
] | 1 | 2021-11-22T10:42:11.000Z | 2021-11-22T10:42:11.000Z | src/kalman_filter.py | Ashwin-Rajesh/Kalman_filter_carla | 75887655043070842bb0d292bf511e8ddc8fd40c | [
"MIT"
] | 3 | 2021-04-06T08:41:23.000Z | 2021-09-24T09:50:10.000Z | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 Ashwin Rajesh
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
class kalman_filter:
    """Extended Kalman filter for planar (2-D) vehicle tracking.

    State (5x1 column vector): [x_pos, y_pos, yaw, x_vel, y_vel].
    Control input (3x1): [x_accel, y_accel, yaw_vel], accelerations given in
    the body (local) frame.
    Measurement (2x1): [x_pos, y_pos].

    ``update`` performs the EKF prediction step, ``measure`` the correction
    step.  Every intermediate state/covariance is appended to ``states`` /
    ``covars`` as ``[value, time, tag]`` with tag 0 = prediction,
    1 = correction.
    """
    # State : A column vector with [x_pos, y_pos, yaw, x_vel, y_vel]
    def __init__(self, init_state, init_time, accel_var, yaw_var, meas_var):
        """Initialise the filter.

        init_state : iterable of 5 numbers, the initial state.
        init_time  : timestamp associated with the initial state.
        accel_var  : variance of the acceleration input noise (x and y).
        yaw_var    : variance of the yaw-rate input noise.
        meas_var   : variance of the position measurement noise (x and y).
        """
        self.state = np.asarray(init_state).reshape(5,1)
        self.prev_time = init_time
        # Covariance starts at zero: the initial state is taken as exact.
        self.covar = np.zeros((5,5))
        # Process (input) noise covariance.
        self.Q = np.diag([accel_var, accel_var, yaw_var])
        # Measurement noise covariance.
        self.R = np.diag([meas_var, meas_var])
        # Trajectory history: [state/covar, time, 0=predict | 1=correct].
        self.states = []
        self.covars = []
    # Input : A column vector with [x_accel, y_accel, yaw_vel]
    def update(self, inp, time):
        """EKF prediction step.

        Propagates state and covariance from ``self.prev_time`` to ``time``
        using the control input ``inp`` (body-frame accelerations plus yaw
        rate).  Does not return anything; mutates ``self.state`` and
        ``self.covar`` and records them in the trajectory lists.
        """
        inp = np.asarray(inp).reshape(3,1)
        dt = time - self.prev_time
        # Transition matrix (constant-velocity kinematics):
        #
        # | 1 0 0 dt 0  |
        # | 0 1 0 0  dt |
        # | 0 0 1 0  0  |
        # | 0 0 0 1  0  |
        # | 0 0 0 0  1  |
        #
        A = np.asarray([\
            [1, 0, 0, dt,0], \
            [0, 1, 0, 0, dt],\
            [0, 0, 1, 0, 0], \
            [0, 0, 0, 1, 0], \
            [0, 0, 0, 0, 1] \
            ])
        # Input influence matrix (integrates accelerations into velocity,
        # yaw rate into yaw):
        #
        # | 0  0  0  |
        # | 0  0  0  |
        # | 0  0  dt |
        # | dt 0  0  |
        # | 0  dt 0  |
        #
        B = np.asarray([\
            [0, 0, 0], \
            [0, 0, 0], \
            [0, 0, dt],\
            [dt,0, 0], \
            [0, dt,0], \
            ])
        # Rotate the body-frame accelerations into the global frame using
        # the current yaw estimate.
        yaw = self.state[2][0]
        accel_xl = inp[0][0]
        accel_yl = inp[1][0]
        accel_xg = accel_xl * np.cos(yaw) - accel_yl * np.sin(yaw)
        accel_yg = accel_xl * np.sin(yaw) + accel_yl * np.cos(yaw)
        # Partial derivatives of the (non-linear) velocity update with
        # respect to yaw and to the two acceleration inputs — entries of the
        # Jacobians used for the covariance propagation below.
        dxvel_dyaw = -dt * (inp[0][0] * np.sin(self.state[2][0]) + inp[1][0] * np.cos(self.state[2][0]))
        dyvel_dyaw = dt * (inp[0][0] * np.cos(self.state[2][0]) - inp[1][0] * np.sin(self.state[2][0]))
        dxvel_din1 = dt * np.cos(self.state[2][0])
        dxvel_din2 = -dt * np.sin(self.state[2][0])
        dyvel_din1 = dt * np.sin(self.state[2][0])
        dyvel_din2 = dt * np.cos(self.state[2][0])
        # Globally-framed input vector.
        g_inp = np.asarray([accel_xg, accel_yg, inp[2][0]]).reshape(3,1)
        # State updation with input
        self.state = A.dot(self.state) + B.dot(g_inp)
        # Wrap yaw back into (-pi, pi].
        if(self.state[2][0] > np.pi):
            self.state[2][0] = self.state[2][0] - 2 * np.pi
        elif(self.state[2][0] < -np.pi):
            self.state[2][0] = self.state[2][0] + 2 * np.pi
        # Jacobians of the motion model: A w.r.t. the state, B w.r.t. the
        # input (linearised around the pre-update yaw).
        A = np.asarray([\
            [1, 0, 0, dt,0], \
            [0, 1, 0, 0, dt],\
            [0, 0, 1, 0, 0], \
            [0, 0, dxvel_dyaw, 1, 0], \
            [0, 0, dyvel_dyaw, 0, 1] \
            ])
        B = np.asarray([\
            [0, 0, 0], \
            [0, 0, 0], \
            [0, 0, dt],\
            [dxvel_din1, dxvel_din2, 0], \
            [dyvel_din1, dyvel_din2, 0], \
            ])
        # Covariance update
        self.covar = A.dot(self.covar.dot(A.T)) + B.dot(self.Q.dot(B.T))
        # Append to trajectory
        self.states.append([self.state, time, 0])
        self.covars.append([self.covar, time, 0])
        # Update previous time
        self.prev_time = time
    def measure(self, measurement, time):
        """EKF correction step with a position measurement [x_pos, y_pos]."""
        # How to find expected measurement from state?  H projects the state
        # onto the measured quantities (position only).
        H = np.asarray([\
            [1, 0, 0, 0, 0], \
            [0, 1, 0, 0, 0], \
            ])
        measurement = np.asarray(measurement).reshape(2,1)
        # Error of measurement from expected measurement (innovation V),
        # innovation covariance S and Kalman gain K.
        V = measurement - H.dot(self.state)
        S = H.dot(self.covar.dot(H.T)) + self.R
        K = self.covar.dot(H.T.dot(np.linalg.inv(S)))
        self.state = self.state + K.dot(V)
        self.covar = self.covar - K.dot(S.dot(K.T))
        # Append to trajectory
        self.states.append([self.state, time, 1])
        self.covars.append([self.covar, time, 1])
    # Return position
    def get_pos(self):
        """Return the most recent trajectory entry: [state, time, tag]."""
        return (self.states[len(self.states)-1])
| 35.429412 | 122 | 0.492612 | 4,869 | 0.808401 | 0 | 0 | 0 | 0 | 0 | 0 | 2,294 | 0.380873 |
d519202436d106e402d5a165429654cca28794e3 | 2,096 | py | Python | fileMover.py | ioawnen/fileMover2 | 4f5746e1d3261793403ef7d95c388792f58c3028 | [
"MIT"
] | null | null | null | fileMover.py | ioawnen/fileMover2 | 4f5746e1d3261793403ef7d95c388792f58c3028 | [
"MIT"
] | null | null | null | fileMover.py | ioawnen/fileMover2 | 4f5746e1d3261793403ef7d95c388792f58c3028 | [
"MIT"
] | null | null | null | import re
import sre_constants
from fileIO import *
from moveTasks import MoveTasks, MoveTaskIface
import logging
def get_matching_files(move_task: MoveTaskIface) -> list:
    """Walk the task's search path and return (directory, filename) pairs
    whose filename matches the task's regular expression.

    An invalid regular expression is logged and yields an empty result
    instead of raising.
    """
    found = []
    try:
        compiled = re.compile(move_task.filename_regex)
        for root, dirs, files in get_all_files(move_task.search_path):
            logging.out("Found {0} files in {1} subdirectories".format(len(files), len(dirs)), 3)
            found.extend((root, name) for name in files if re.search(compiled, name))
    except sre_constants.error as exc:
        logging.out("Error occurred while matching '{0}'. Skipping. Error: '{1}'"
                    .format(move_task.filename_regex, exc), 0)
    return found
def perform_check():
    """Run every configured move task once: for each task, find all files
    matching its pattern under the search path and move them to the task's
    save path."""
    tasks = MoveTasks().get_move_tasks()
    logging.out("Found {0} Move Task(s)".format(len(tasks)), 2)
    for raw_task in tasks:
        task = MoveTaskIface(raw_task)
        logging.out("Working task: {0} {1} -> {2}"
                    .format(task.search_path, task.filename_regex, task.save_path), 2)
        # Move every file that matches this task's filename pattern.
        for directory, filename in get_matching_files(task):
            move_file(directory, task.save_path, filename)
def main():
    """Entry point: execute all move tasks once, report completion, exit."""
    perform_check()
    print("\nTasks Completed, exiting....")
    exit()
# Script entry point.
if __name__ == '__main__':
    main()
| 35.525424 | 101 | 0.510496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 869 | 0.414599 |
d519270c80775a7bacb99ae959f7231648e44d40 | 222 | py | Python | ckan_cloud_operator/drivers/kubectl/driver.py | MuhammadIsmailShahzad/ckan-cloud-operator | 35a4ca88c4908d81d1040a21fca8904e77c4cded | [
"MIT"
] | 14 | 2019-11-18T12:01:03.000Z | 2021-09-15T15:29:50.000Z | ckan_cloud_operator/drivers/kubectl/driver.py | MuhammadIsmailShahzad/ckan-cloud-operator | 35a4ca88c4908d81d1040a21fca8904e77c4cded | [
"MIT"
] | 52 | 2019-09-09T14:22:41.000Z | 2021-09-29T08:29:24.000Z | ckan_cloud_operator/drivers/kubectl/driver.py | MuhammadIsmailShahzad/ckan-cloud-operator | 35a4ca88c4908d81d1040a21fca8904e77c4cded | [
"MIT"
] | 8 | 2019-10-05T12:46:25.000Z | 2021-09-15T15:13:05.000Z | from ckan_cloud_operator import kubectl
def get(what, *args, required=True, namespace=None, get_cmd=None, **kwargs):
    """Thin pass-through to :func:`kubectl.get`, forwarding every argument
    unchanged."""
    result = kubectl.get(what, *args, required=required, namespace=namespace,
                         get_cmd=get_cmd, **kwargs)
    return result
| 37 | 102 | 0.765766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d51abf3de14a0e2363d262ce1c07a37057407501 | 2,121 | py | Python | wafextras/lyx2tex.py | tjhunter/phd-thesis-tjhunter | 8238e156b5dba9940bdda2a46cfffb62699f364d | [
"Apache-2.0"
] | 1 | 2018-03-25T11:36:21.000Z | 2018-03-25T11:36:21.000Z | wafextras/lyx2tex.py | tjhunter/phd-thesis-tjhunter | 8238e156b5dba9940bdda2a46cfffb62699f364d | [
"Apache-2.0"
] | null | null | null | wafextras/lyx2tex.py | tjhunter/phd-thesis-tjhunter | 8238e156b5dba9940bdda2a46cfffb62699f364d | [
"Apache-2.0"
] | null | null | null | """ Converts some lyx files to the latex format.
Note: everything in the file is thrown away until a section or the workd "stopskip" is found.
This way, all the preamble added by lyx is removed.
"""
from waflib import Logs
from waflib import TaskGen,Task
from waflib import Utils
from waflib.Configure import conf
def postprocess_lyx(src, tgt):
    # Strip the LyX-generated LaTeX preamble: copy `src` to `tgt`, discarding
    # every line until "stopskip", a \section or a \chapter is seen, and then
    # dropping any document-level command (\documentclass, \usepackage, ...).
    # Returns 0 on completion.
    # NOTE: this module uses Python 2 print statements (waf py2 runtime).
    Logs.debug("post-processing %s into %s" % (src,tgt))
    f_src = open(src,'r')
    f_tgt = open(tgt,'w')
    # Document-level commands to always drop, even after keeping has started.
    toks = ['\\documentclass','\\usepackage','\\begin{document}','\\end{document}','\\geometry','\\PassOptionsToPackage']
    keep = False
    for l in f_src:
        # Start keeping once a section/chapter or the "stopskip" marker appears.
        this_keep = ("stopskip" in l) or ("\\section" in l) or ("\\chapter" in l)
        if this_keep:
            print "start to keep"
        keep = keep or this_keep
        # Skip this particular line if it starts with any preamble token.
        local_skip = False
        for tok in toks:
            local_skip = local_skip or l.startswith(tok)
        local_keep = False if local_skip else keep
        if local_keep:
            f_tgt.write(l)
    f_src.close()
    f_tgt.close()
    return 0
def process_lyx(task):
    # Waf task body: copy the .lyx input to a *_tmp.lyx working copy, export
    # it to pdflatex with LyX, then post-process the generated .tex into the
    # task's output.  Returns the first non-zero command status, or the
    # post-processing result.
    input0 = task.inputs[0]
    src = input0.abspath()
    input1 = input0.change_ext("_tmp.lyx")
    output0 =task.outputs[0]
    tgt = output0.abspath()
    print "processing lyx file %s" % src
    t = task.exec_command("cp %s %s" % (input0.abspath(), input1.abspath()))
    if t != 0:
        return t
    # LyX writes the exported .tex next to the temporary copy.
    t = task.exec_command("%s --export pdflatex %s" % (task.env.LYX, input1.abspath()))
    if t != 0:
        return t
    t = postprocess_lyx(input1.change_ext(".tex").abspath(),output0.abspath())
    return t
class PostprocessLyx(Task.Task):
    """Waf task wrapping :func:`postprocess_lyx` (input 0 -> output 0)."""
    def run(self):
        return postprocess_lyx(self.inputs[0].abspath(),self.outputs[0].abspath())
@conf
def lyx2tex(bld, lyx_file):
    # Build-context helper: for each .lyx file, chain three tasks:
    #   copy x.lyx -> x_tmp.lyx, LyX-export to x_tmp.tex, then post-process
    #   into the final x.tex (stripping the LyX preamble).
    lyx_files = Utils.to_list(lyx_file)
    for a in lyx_files:
        b = a.change_ext("_tmp.lyx")
        c = a.change_ext("_tmp.tex")
        d = a.change_ext(".tex")
        bld(rule="cp ${SRC} ${TGT}",source=a,target=b)
        tsk0 = bld(rule="${LYX} --export pdflatex ${SRC}",source=b,target=c)
        # Attach the post-processing step to the export task's generator.
        tsk = tsk0.create_task("PostprocessLyx")
        tsk.set_inputs(c)
        tsk.set_outputs(d)
def configure(conf):
    # Waf configure step: locate the `lyx` executable and store it as LYX.
    conf.find_program('lyx',var='LYX')
| 31.191176 | 119 | 0.666195 | 163 | 0.076851 | 0 | 0 | 404 | 0.190476 | 0 | 0 | 591 | 0.278642 |
d51cc9225f7dcd43ef79699c1d6c59de3a5d91bf | 492 | py | Python | monero_glue/xmr/core/pycompat.py | ph4r05/monero-agent | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | [
"MIT"
] | 20 | 2018-04-05T22:06:10.000Z | 2021-09-18T10:43:44.000Z | monero_glue/xmr/core/pycompat.py | ph4r05/monero-agent | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | [
"MIT"
] | null | null | null | monero_glue/xmr/core/pycompat.py | ph4r05/monero-agent | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | [
"MIT"
] | 5 | 2018-08-06T15:06:04.000Z | 2021-07-16T01:58:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Dusan Klinec, ph4r05, 2018
import operator
import sys
# Useful for very coarse version differentiation.
PY3 = sys.version_info[0] == 3
if PY3:
    # Python 3: indexing bytes yields ints, and bytes() accepts an iterable
    # of ints, so the stdlib operators serve directly.
    indexbytes = operator.getitem
    intlist2bytes = bytes
    int2byte = operator.methodcaller("to_bytes", 1, "big")
else:
    # Python 2 fallbacks: byte strings are str, so conversions go via chr/ord.
    int2byte = chr
    range = xrange
    def indexbytes(buf, i):
        # Return the byte at index i as an int (py2 indexing gives a 1-char str).
        return ord(buf[i])
    def intlist2bytes(l):
        # Build a byte string from an iterable of ints.
        return b"".join(chr(c) for c in l)
| 19.68 | 58 | 0.648374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.29878 |
d51eac1b5ab841692ba733e2c183650a99fc9aed | 2,324 | py | Python | train.py | liaojh1998/RNW | 412764400a4a555fb8245ab51047019429d1141f | [
"MIT"
] | null | null | null | train.py | liaojh1998/RNW | 412764400a4a555fb8245ab51047019429d1141f | [
"MIT"
] | null | null | null | train.py | liaojh1998/RNW | 412764400a4a555fb8245ab51047019429d1141f | [
"MIT"
] | null | null | null | import os.path as osp
from argparse import ArgumentParser
from mmcv import Config
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.utils.data import DataLoader
from datasets import build_dataset
from models import MODELS
import torch
def parse_args():
    """Parse the command line for DDP training.

    Positional arguments: the config name and the GPU count.  Optional:
    ``--work_dir`` (checkpoint directory, default ``checkpoints``) and
    ``--seed`` (RNG seed, default 1024).  Returns the parsed namespace.
    """
    cli = ArgumentParser(description='Training with DDP.')
    cli.add_argument('config', type=str)
    cli.add_argument('gpus', type=int)
    cli.add_argument('--work_dir', type=str, default='checkpoints')
    cli.add_argument('--seed', type=int, default=1024)
    return cli.parse_args()
def main():
    """Train the configured model with PyTorch Lightning DDP.

    Loads ``configs/<name>.yaml``, seeds all RNGs, builds the dataset/loader
    and model, and fits with per-interval checkpointing under
    ``<work_dir>/<config>``.
    """
    torch.set_default_dtype(torch.float32)
    # parse args
    args = parse_args()
    # parse cfg
    cfg = Config.fromfile(osp.join(f'configs/{args.config}.yaml'))
    # show information
    print(f'Now training with {args.config}...')
    # configure seed
    seed_everything(args.seed)
    # prepare data loader
    dataset = build_dataset(cfg.dataset)
    loader = DataLoader(dataset, cfg.imgs_per_gpu, shuffle=True, num_workers=cfg.workers_per_gpu, drop_last=True)
    # The 'rnw' model needs a handle back to the dataset inside its config.
    if cfg.model.name == 'rnw':
        cfg.data_link = dataset
    # define model
    model = MODELS.build(name=cfg.model.name, option=cfg)
    # define trainer
    work_dir = osp.join(args.work_dir, args.config)
    # save checkpoint every 'cfg.checkpoint_epoch_interval' epochs
    checkpoint_callback = ModelCheckpoint(dirpath=work_dir,
                                          save_weights_only=True,
                                          save_top_k=-1,
                                          filename='checkpoint_{epoch}',
                                          every_n_epochs=cfg.checkpoint_epoch_interval)
    trainer = Trainer(accelerator='ddp',
                      default_root_dir=work_dir,
                      gpus=args.gpus,
                      num_nodes=1,
                      max_epochs=cfg.total_epochs,
                      callbacks=[checkpoint_callback],
                      auto_scale_batch_size="power")
    # training
    trainer.fit(model, loader)
# Script entry point.
if __name__ == '__main__':
    main()
| 30.578947 | 113 | 0.596816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 360 | 0.154905 |
d51ec3243c5401e7f40b444326beab4b487e1f76 | 11,327 | py | Python | Audio_record_and_classifier_framework/helpers.py | stanFurrer/Multimodal-solution-for-grasp-stability-prediction | b7d07a217e2a4846f3fe782fe7c3f4942f3299b3 | [
"MIT"
] | null | null | null | Audio_record_and_classifier_framework/helpers.py | stanFurrer/Multimodal-solution-for-grasp-stability-prediction | b7d07a217e2a4846f3fe782fe7c3f4942f3299b3 | [
"MIT"
] | null | null | null | Audio_record_and_classifier_framework/helpers.py | stanFurrer/Multimodal-solution-for-grasp-stability-prediction | b7d07a217e2a4846f3fe782fe7c3f4942f3299b3 | [
"MIT"
] | null | null | null | """ This Script contain the different function used in the framework
part1. Data processing
part2. Prediction and analisys
part3. Plotting
"""
import numpy as np
import librosa
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import pickle
import time
import struct
""" Data processing """
def get_mel_spectrogram(file_path, mfcc_max_padding=0, n_fft=2048, hop_length=512, n_mels=128):
    """Extract a normalized log-mel spectrogram from an audio file with librosa.

    :param file_path: Path to the audio file.
    :param mfcc_max_padding: If > 0, zero-pad the time axis (centred) so the
      result has at least this many frames.
    :param n_fft, hop_length: Accepted for interface compatibility but NOT
      forwarded to librosa (library defaults apply) — kept to avoid breaking
      existing callers.
    :param n_mels: Number of mel bands.
    :return: A ``(n_mels, frames)`` array normalized to [-1, 1], or ``None``
      on any parsing error.
    """
    try:
        # Load audio file
        y, sr = librosa.load(file_path)
        # Normalize audio data between -1 and 1
        normalized_y = librosa.util.normalize(y)
        # Generate mel scaled filterbanks
        mel = librosa.feature.melspectrogram(normalized_y, sr=sr, n_mels=n_mels)
        # Convert sound intensity to log amplitude:
        mel_db = librosa.amplitude_to_db(abs(mel))
        # Normalize between -1 and 1
        normalized_mel = librosa.util.normalize(mel_db)
        # Zero-pad the time axis if the clip is shorter than requested.
        # BUGFIX: the original test used bitwise '&', which (by operator
        # precedence) parsed as a chained comparison ignoring `shape`, so
        # clips longer than the padding crashed np.pad with negative widths.
        shape = normalized_mel.shape[1]
        if mfcc_max_padding > 0 and shape < mfcc_max_padding:
            xDiff = mfcc_max_padding - shape
            xLeft = xDiff // 2
            xRight = xDiff - xLeft
            normalized_mel = np.pad(normalized_mel, pad_width=((0, 0), (xLeft, xRight)), mode='constant')
    except Exception as e:
        print("Error parsing wavefile: ", e)
        return None
    return normalized_mel
def get_mfcc(file_path, mfcc_max_padding=0, n_mfcc=40, robots_noise=None, noise_amp=1):
    """Extract normalized MFCC coefficients from an audio file with librosa.

    :param file_path: Path to the audio file (loaded at its native rate).
    :param mfcc_max_padding: If > 0, right-pad the time axis with zeros so
      the result has at least this many frames.
    :param n_mfcc: Number of MFCC coefficients to compute.
    :param robots_noise: Optional path to a noise recording mixed into the
      signal before feature extraction.
    :param noise_amp: Weight of the noise in the mix; the sum is divided by
      ``noise_amp + 1`` to keep the amplitude bounded.
    :return: An ``(n_mfcc, frames)`` array normalized to [-1, 1], or ``None``
      on any parsing error.
    """
    try:
        # Load audio at its native sampling rate.
        y, sr = librosa.load(file_path, sr=None)
        if robots_noise is not None:
            # NOTE(review): the noise file is loaded at librosa's default
            # rate while `y` keeps its native rate, and the two signals must
            # be the same length for this sum — confirm inputs match.
            y_n, _ = librosa.load(robots_noise)
            y = (y + noise_amp * y_n) / (noise_amp + 1)
        # Normalize audio data between -1 and 1
        normalized_y = librosa.util.normalize(y)
        # Compute MFCC coefficients
        mfcc = librosa.feature.mfcc(y=normalized_y, sr=sr, n_mfcc=n_mfcc)
        # Normalize MFCC between -1 and 1
        normalized_mfcc = librosa.util.normalize(mfcc)
        # Right-pad the time axis with zeros if required.
        shape = normalized_mfcc.shape[1]
        if (shape < mfcc_max_padding):
            pad_width = mfcc_max_padding - shape
            normalized_mfcc = np.pad(normalized_mfcc,
                                     pad_width=((0, 0), (0, pad_width)),
                                     mode='constant',
                                     constant_values=(0,))
    except Exception as e:
        print("Error parsing wavefile: ", e)
        return None
    return normalized_mfcc
def add_padding(features, mfcc_max_padding=174):
    """Zero-pad each 2-D feature matrix along its time axis (columns) so it
    is at least ``mfcc_max_padding`` frames wide.

    Padding is split between left and right (extra frame goes right); wider
    matrices are passed through unchanged.  Returns a new list.
    """
    padded = []
    for feature in features:
        width = len(feature[0])
        if width < mfcc_max_padding:
            missing = mfcc_max_padding - width
            left = missing // 2
            feature = np.pad(feature, pad_width=((0, 0), (left, missing - left)),
                             mode='constant')
        padded.append(feature)
    return padded
def scale(X, x_min, x_max, axis=0):
    """Min-max rescale ``X`` into [x_min, x_max] along ``axis``.

    Slices with zero range (constant values) map to ``x_min`` rather than
    dividing by zero.
    """
    low = X.min(axis=axis)
    spread = X.max(axis=axis) - low
    spread[spread == 0] = 1
    return x_min + (X - low) * (x_max - x_min) / spread
def save_split_distributions(test_split_idx, train_split_idx, file_path=None):
    """Pickle the train/test split indices to ``file_path``.

    :param test_split_idx: Indices of the test split.
    :param train_split_idx: Indices of the training split.
    :param file_path: Destination file; if ``None``, nothing is saved.
    :return: The file path on success, ``False`` when no path was given.

    BUGFIX: the original referenced undefined names (``path``, ``false`` and
    ``file``), so it raised NameError on both the no-path branch and on
    success.
    """
    if file_path is None:
        print("You must enter a file path to save the splits")
        return False
    # Create split dictionary
    split = {}
    split['test_split_idx'] = test_split_idx
    split['train_split_idx'] = train_split_idx
    # 'with' guarantees the handle is closed even if pickling fails.
    with open(file_path, 'wb') as file_pi:
        pickle.dump(split, file_pi)
    return file_path
def load_split_distributions(file_path):
    """Load a split dictionary written by :func:`save_split_distributions`.

    :param file_path: Path of the pickled split file.
    :return: ``[test_split_idx, train_split_idx]``.

    FIX: the original opened the file without ever closing it; a context
    manager now releases the handle deterministically.
    """
    with open(file_path, 'rb') as file:
        data = pickle.load(file)
    return [data['test_split_idx'], data['train_split_idx']]
def find_dupes(array):
    """Return the number of distinct values that occur more than once in
    ``array``."""
    counts = {}
    for value in array:
        counts[value] = counts.get(value, 0) + 1
    return sum(1 for n in counts.values() if n > 1)
def read_header(filename):
    """Parse a RIFF/WAVE header and return (num_channels, sample_rate, bit_depth).

    Assumes the canonical layout with the ``fmt `` chunk immediately after
    the 12-byte RIFF descriptor, as written by common encoders.

    FIX: the original never closed the file handle; a context manager now
    closes it deterministically.
    """
    with open(filename, "rb") as wave:
        wave.read(12)          # skip the RIFF chunk descriptor
        fmat = wave.read(36)   # fmt chunk header + fields
    # Field offsets within `fmat` (i.e. file offsets 12..47):
    #   10:12 -> channel count, 12:16 -> sample rate, 22:24 -> bits/sample.
    num_channels = struct.unpack('<H', fmat[10:12])[0]
    sample_rate = struct.unpack("<I", fmat[12:16])[0]
    bit_depth = struct.unpack("<H", fmat[22:24])[0]
    return (num_channels, sample_rate, bit_depth)
def play_dataset_sample(dataset_row, audio_path):
    """Print a dataset row's audio properties and return an IPython audio
    player for the referenced file.

    :param dataset_row: A single-row DataFrame slice with 'fold', 'file',
      'class', 'sample_rate', 'bit_depth' and 'duration' columns.
    :param audio_path: Root directory containing the per-fold audio folders.
    :return: An ``IPython.display.Audio`` widget for the file.
    """
    # Local import: IPython is only available (and only needed) in notebooks.
    # The original referenced `IP` without any import in this module.
    import IPython as IP

    row = dataset_row.iloc[0]
    # The original computed file_path twice from identical inputs; once suffices.
    file_path = os.path.join(audio_path, row['fold'], row['file'])
    print("Class:", row['class'])
    print("File:", file_path)
    print("Sample rate:", row['sample_rate'])
    print("Bit depth:", row['bit_depth'])
    print("Duration {} seconds".format(row['duration']))
    # Sound preview
    return IP.display.Audio(file_path)
"""
Prediction and analisys
"""
def evaluate_model(model, X_train, y_train, X_test, y_test):
    """Evaluate a Keras-style model on both splits.

    Returns the ``(train_score, test_score)`` pair, each being whatever
    ``model.evaluate`` returns (typically ``[loss, accuracy]``).
    """
    return (model.evaluate(X_train, y_train, verbose=0),
            model.evaluate(X_test, y_test, verbose=0))
def model_evaluation_report(model, X_train, y_train, X_test, y_test, calc_normal=True):
    """Print a loss/accuracy table for the train and test splits; when
    ``calc_normal`` is set, also print the train-vs-test loss difference as a
    percentage of the larger loss (an overfitting indicator)."""
    dash = '-' * 38
    # Compute scores
    train_score, test_score = evaluate_model(model, X_train, y_train, X_test, y_test)
    # Print Train vs Test report
    print('{:<10s}{:>14s}{:>14s}'.format("", "LOSS", "ACCURACY"))
    print(dash)
    print('{:<10s}{:>14.4f}{:>14.4f}'.format( "Training:", train_score[0], 100 * train_score[1]))
    print('{:<10s}{:>14.4f}{:>14.4f}'.format( "Test:", test_score[0], 100 * test_score[1]))
    # Calculate and report normalized error difference?
    if (calc_normal):
        max_err = max(train_score[0], test_score[0])
        error_diff = max_err - min(train_score[0], test_score[0])
        normal_diff = error_diff * 100 / max_err
        print('{:<10s}{:>13.2f}{:>1s}'.format("Normal diff ", normal_diff, ""))
def acc_per_class(np_probs_array):
    """Given a (classes x classes) confusion-matrix array, return the
    per-class accuracy percentages (diagonal count over row total)."""
    accuracies = []
    for row_idx, row in enumerate(np_probs_array):
        correct = row[row_idx].astype(int)
        total = row.sum().astype(int)
        accuracies.append((correct / total) * 100)
    return accuracies
"""
Plotting
"""
def plot_train_history(history, x_ticks_vertical=False):
    """Plot the loss and accuracy curves of a Keras ``History`` object.

    Two figures are drawn: train/test loss (with the minima marked) and
    train/test accuracy (with the maxima marked).  ``x_ticks_vertical``
    rotates the epoch tick labels for long runs.
    """
    history = history.history
    # min loss / max accs
    min_loss = min(history['loss'])
    min_val_loss = min(history['val_loss'])
    max_accuracy = max(history['accuracy'])
    max_val_accuracy = max(history['val_accuracy'])
    # x pos for loss / acc min/max
    min_loss_x = history['loss'].index(min_loss)
    min_val_loss_x = history['val_loss'].index(min_val_loss)
    max_accuracy_x = history['accuracy'].index(max_accuracy)
    max_val_accuracy_x = history['val_accuracy'].index(max_val_accuracy)
    # summarize history for loss, display min
    plt.figure(figsize=(16,8))
    plt.plot(history['loss'], color="#1f77b4", alpha=0.7)
    plt.plot(history['val_loss'], color="#ff7f0e", linestyle="--")
    # Mark the best (lowest) train and validation loss points.
    plt.plot(min_loss_x, min_loss, marker='o', markersize=3, color="#1f77b4", alpha=0.7, label='Inline label')
    plt.plot(min_val_loss_x, min_val_loss, marker='o', markersize=3, color="#ff7f0e", alpha=7, label='Inline label')
    plt.title('Model loss', fontsize=20)
    plt.ylabel('Loss', fontsize=16)
    plt.xlabel('Epoch', fontsize=16)
    plt.legend(['Train',
                'Test',
                ('%.3f' % min_loss),
                ('%.3f' % min_val_loss)],
               loc='upper right',
               fancybox=True,
               framealpha=0.9,
               shadow=True,
               borderpad=1)
    if (x_ticks_vertical):
        plt.xticks(np.arange(0, len(history['loss']), 5.0), rotation='vertical')
    else:
        plt.xticks(np.arange(0, len(history['loss']), 5.0))
    plt.show()
    # summarize history for accuracy, display max
    plt.figure(figsize=(16,6))
    plt.plot(history['accuracy'], alpha=0.7)
    plt.plot(history['val_accuracy'], linestyle="--")
    # Mark the best (highest) train and validation accuracy points.
    plt.plot(max_accuracy_x, max_accuracy, marker='o', markersize=3, color="#1f77b4", alpha=7)
    plt.plot(max_val_accuracy_x, max_val_accuracy, marker='o', markersize=3, color="orange", alpha=7)
    plt.title('Model accuracy', fontsize=20)
    plt.ylabel('Accuracy', fontsize=16)
    plt.xlabel('Epoch', fontsize=16)
    plt.legend(['Train',
                'Test',
                ('%.2f' % max_accuracy),
                ('%.2f' % max_val_accuracy)],
               loc='upper left',
               fancybox=True,
               framealpha=0.9,
               shadow=True,
               borderpad=1)
    plt.figure(num=1, figsize=(10, 6))
    if (x_ticks_vertical):
        plt.xticks(np.arange(0, len(history['accuracy']), 5.0), rotation='vertical')
    else:
        plt.xticks(np.arange(0, len(history['accuracy']), 5.0))
    plt.show()
def compute_confusion_matrix(y_true,
                             y_pred,
                             classes,
                             normalize=False):
    """Compute the confusion matrix for the given labels via sklearn; when
    ``normalize`` is set, divide each row by its total so entries become
    per-class fractions.  ``classes`` is accepted for interface parity with
    :func:`plot_confusion_matrix` but is not used here."""
    matrix = metrics.confusion_matrix(y_true, y_pred)
    if not normalize:
        return matrix
    return matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis]
def plot_confusion_matrix(cm,
                          classes,
                          normalized=False,
                          title=None,
                          cmap=plt.cm.Blues,
                          size=(10,10)):
    """Plot a confusion matrix as a heat map with per-cell annotations.

    ``normalized`` only controls the number format of the annotations
    ('.2f' vs 'd'); it does not normalize ``cm`` itself.
    """
    fig, ax = plt.subplots(figsize=size)
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalized else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # White text on dark cells, black on light ones, for contrast.
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
plt.show() | 33.71131 | 116 | 0.611636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,627 | 0.231924 |
d51ed2677db13fc8507ae2a63e4d95395e17a141 | 15,067 | py | Python | pdfimage/jb2.py | MatthewDaws/PDFImage | c5bcdbc324a28271fb9fa006989270ef0d1ea0d5 | [
"MIT"
] | 3 | 2019-06-11T14:12:15.000Z | 2022-02-17T10:57:20.000Z | pdfimage/jb2.py | MatthewDaws/PDFImage | c5bcdbc324a28271fb9fa006989270ef0d1ea0d5 | [
"MIT"
] | null | null | null | pdfimage/jb2.py | MatthewDaws/PDFImage | c5bcdbc324a28271fb9fa006989270ef0d1ea0d5 | [
"MIT"
] | null | null | null | """
jb2.py
~~~~~~
Use JBIG2, and an external compressor, for black and white images.
"""
import os, sys, subprocess, struct, zipfile, random
from . import pdf_image
from . import pdf_write
from . import pdf
import PIL.Image as _PILImage
_default_jbig2_exe = os.path.join(os.path.abspath(".."), "agl-jbig2enc", "jbig2.exe")
class JBIG2Compressor():
    """Use an external compressor to compress using the JBIG2 standard.

    :param jbig2_exe_path: The path to the "jbig2.exe" executable, or `None`
      to use the default.
    :param oversample: Can be 1, 2 or 4.  Upsample by this amount before
      making black/white.
    """
    def __init__(self, jbig2_exe_path=None, oversample=2):
        if jbig2_exe_path is None:
            jbig2_exe_path = _default_jbig2_exe
        self._jbig2_exe_path = jbig2_exe_path
        self._upsample = oversample
    def call(self, args):
        """Run the external command, capturing stdout/stderr."""
        return subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    def encode(self, files):
        """Compress `files` with symbol coding (-s) into PDF-ready parts (-p).

        Will generate `output.sym` and `output.0000`, `output.0001` etc.
        in the current directory."""
        args = [self._jbig2_exe_path, "-s", "-p", "-v"]
        # Map the oversampling factor onto the encoder's -2/-4 flags.
        if self._upsample == 1:
            pass
        elif self._upsample == 2:
            args += ["-2"]
        elif self._upsample == 4:
            args += ["-4"]
        else:
            raise ValueError("{} is not supported for over-sampling".format(self._upsample))
        result = self.call(args + list(files))
        assert result.returncode == 0
        return result
class JBIG2CompressorToZip():
    """A higher-level version of :class:`JBIG2Compressor` which takes care of
    temporary output directories, and zipping the result.

    :param output_filename: The filename to write the ZIP file to.
    :param jbig2_exe_path: The path to the "jbig2.exe" executable, or `None`
      to use the default.
    :param input_directory: The directory to find input files in, or `None`
      for the current directory.
    :param temporary_directory: The directory to write temporary files to, or
      `None` to auto-generate one (and delete it at the end).
    :param oversample: Can be 1, 2 or 4.  Upsample by this amount before
      making black/white.
    :param split: Should we ask `jbig2.exe` to attempt to split out PNG files
      of graphics?  If so, `oversample==1` seems to be the only setting which
      works!
    """
    def __init__(self, output_filename, jbig2_exe_path=None, input_directory=None,
            temporary_directory=None, oversample=2, split=False):
        if jbig2_exe_path is None:
            jbig2_exe_path = _default_jbig2_exe
        self._jbig2_exe_path = os.path.abspath(jbig2_exe_path)
        self._in_dir = input_directory
        self._temp_dir = temporary_directory
        self._out_file = os.path.abspath(output_filename)
        self._upsample = oversample
        self._split = split
    def _random_dir_name(self):
        # 8 random lowercase letters: collision-unlikely scratch dir name.
        return "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(8))
    def _call(self, args):
        # Run the external encoder, capturing stdout/stderr.
        return subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    def _cleanup(self):
        # Undo _make_temp_dir: either return to the saved directory (when the
        # caller supplied the temp dir) or best-effort delete the generated
        # scratch directory and its contents.
        if self._old_directory is not None:
            os.chdir(self._old_directory)
            return
        files = list(os.listdir())
        for f in files:
            try:
                os.remove(f)
            except:
                pass
        os.chdir("..")
        try:
            os.rmdir(self._temp_dir)
        except:
            pass
    def _make_temp_dir(self):
        # Enter the working directory the encoder will write into.  When no
        # temp dir was supplied, generate one; _old_directory remembers where
        # to return to (None signals "we own the dir and must delete it").
        if self._temp_dir is not None:
            self._old_directory = os.path.abspath(os.curdir)
            os.chdir(self._temp_dir)
        else:
            self._old_directory = None
            self._temp_dir = self._random_dir_name()
            os.mkdir(self._temp_dir)
            os.chdir(self._temp_dir)
    def _write_zip_file(self):
        # Pack every file the encoder produced in the current (temp)
        # directory into the output ZIP, then clean up the temp directory.
        zf = zipfile.ZipFile(self._out_file, "w")
        try:
            files = list(os.listdir())
            for f in files:
                with open(f, "rb") as file:
                    data = file.read()
                with zf.open(f, "w") as file:
                    file.write(data)
        finally:
            zf.close()
        self._cleanup()
    def encode(self, files, threshold=None):
        """Encode the files, all to be found in the input directory.

        :param files: The files to encode.
        :param threshold: If not `None`, then the level, between 0 and 255, to
          use when converting to 1 bpp.
        """
        if self._in_dir is not None:
            files = [os.path.join(self._in_dir, x) for x in files]
        # Absolute paths: the encoder runs from inside the temp directory.
        files = [os.path.abspath(f) for f in files]
        self._make_temp_dir()
        args = [self._jbig2_exe_path, "-s", "-p", "-v"]
        if self._split:
            args.append("-S")
        # Map the oversampling factor onto the encoder's -2/-4 flags.
        if self._upsample == 1:
            pass
        elif self._upsample == 2:
            args += ["-2"]
        elif self._upsample == 4:
            args += ["-4"]
        else:
            raise ValueError("{} is not supported for over-sampling".format(self._upsample))
        if threshold is not None:
            args += ["-T", str(int(threshold))]
        result = self._call(args + list(files))
        if not result.returncode == 0:
            self._cleanup()
            raise Exception("Failed to compress files", result)
        self._write_zip_file()
class ImageFacade():
    """Lightweight stand-in for a PIL image: instances have `width`, `height`
    and `mode` attributes assigned after construction (see
    :meth:`JBIG2Image.load_jbig2`)."""
    pass
class JBIG2Image(pdf_image.PDFImage):
    """Assemble a single jbig2 output file into a PDF file."""
    def __init__(self, jbig2globals_object, file, proc_set_object, dpi=1):
        # `file` may be a filename or an open binary file object.
        self._file = file
        super().__init__(self._image(), proc_set_object, dpi)
        self._jbig2globals_object = jbig2globals_object
    @staticmethod
    def read_file(file):
        """Read binary data from a file or filename."""
        if isinstance(file, str):
            with open(file, "rb") as f:
                return f.read()
        return file.read()
    def _read_file(self):
        # Cache the raw bytes: the file object can only be read once.
        if not hasattr(self, "_file_cache"):
            self._file_cache = self.read_file(self._file)
        return self._file_cache
    @staticmethod
    def load_jbig2(data):
        """Parse the page header of a jbig2 file: bytes 11..26 hold four
        big-endian uint32s (width, height, xres, yres).  Returns an
        :class:`ImageFacade` in 1-bit mode with those dimensions."""
        (width, height, xres, yres) = struct.unpack('>IIII', data[11:27])
        image = ImageFacade()
        image.width = width
        image.height = height
        image.mode = "1"
        return image
    def _image(self):
        # Facade image built from the embedded jbig2 page header.
        return self.load_jbig2(self._read_file())
    def _get_filtered_data(self, image):
        # Emit the raw jbig2 bytes with the PDF JBIG2Decode filter, pointing
        # at the shared symbol-dictionary globals object.
        params = {"JBIG2Globals" : self._jbig2globals_object}
        data = self._read_file()
        return "JBIG2Decode", data, params
class JBIG2PNGMultiImage(pdf_image.PDFMultipleImage):
    """Combine a jbig2 image (base layer) with a png image for graphics
    (top layer)."""
    def __init__(self, jbig2globals_object, file, proc_set_object, dpi=1):
        self._jb2_data = JBIG2Image.read_file(file)
        self._jb2png_proc_set_object = proc_set_object
        self._jb2_image = JBIG2Image.load_jbig2(self._jb2_data)
        super().__init__(self._jb2_image, proc_set_object, dpi)
        self._jbig2globals_object = jbig2globals_object
    def _get_filtered_data(self, image):
        # Base layer: raw jbig2 bytes with the JBIG2Decode filter.
        params = {"JBIG2Globals" : self._jbig2globals_object}
        return "JBIG2Decode", self._jb2_data, params
    def _get_top_filtered_data(self, image):
        # Top layer: delegate PNG encoding to the pdf_image PNG writer.
        png_image = pdf_image.PNGImage(image, self._jb2png_proc_set_object)
        return png_image._get_filtered_data(image)
    @property
    def image_size(self):
        """Size of the image given by the JBIG2 layer."""
        im = self._jb2_image
        return im.width, im.height
class JBIG2Output():
    """Bundle of page objects and supporting PDF objects produced when
    converting JBIG2 encoder output into PDF structures."""
    def __init__(self, pages, objects):
        self._page_list = pages
        self._object_list = objects
    @property
    def pages(self):
        """Iterable of page objects."""
        return self._page_list
    @property
    def objects(self):
        """An iterable of objects to add to the PDF file."""
        return self._object_list
    def add_to_pdf_writer(self, pdf_writer):
        """Convenience method to add directly to a :class:`pdf_write.PDFWriter`
        instance."""
        for entry in self._page_list:
            pdf_writer.add_page(entry)
        for entry in self._object_list:
            pdf_writer.add_pdf_object(entry)
class JBIG2Images():
    """Assemble the compressed JBIG2 files into a PDF document.
    The ZIP file should have been generated by :class:`JBIG2CompressorToZip`.

    :param zipfilename: The ZIP file to look at for data.
    :param dpi: The scaling to apply to each page.
    """
    def __init__(self, zipfilename, dpi=1):
        self._objects = []
        self._dpi = dpi
        self._zipfilename = zipfilename
    def _make_result(self):
        # Build and cache the JBIG2Output: load the shared globals (.sym)
        # object, compile each page, and append the shared objects so they
        # are written out with the pages.
        zf = zipfile.ZipFile(self._zipfilename, "r")
        try:
            self._add_globals(zf)
            self._proc_set_object = pdf_write.ProcedureSet().object()
            self._result = self._compile_pages(zf)
            self._result.objects.append(self._jb2_globals)
            self._result.objects.append(self._proc_set_object)
        finally:
            zf.close()
    @property
    def parts(self):
        """The assembled :class:`JBIG2Output` (built lazily on first access)."""
        if not hasattr(self, "_result"):
            self._make_result()
        return self._result
    def _compile_pages(self, zf):
        # Pages are named with numeric extensions .0000, .0001, ...; iterate
        # until a page number has no matching entry.
        page_number = 0
        pages = []
        objects = []
        while True:
            ending = ".{:04}".format(page_number)
            choices = [x for x in zf.filelist if x.filename.endswith(ending)]
            if len(choices) == 0:
                break
            with zf.open(choices[0]) as file:
                parts = JBIG2Image(self._jb2_globals, file, self._proc_set_object, self._dpi)()
            pages.append(parts.page)
            objects.extend(parts.objects)
            page_number += 1
        return JBIG2Output(pages, objects)
    def _add_globals(self, zf):
        # Wrap the first *.sym entry (the encoder's shared symbol dictionary)
        # in a PDF stream object used as JBIG2Globals by every page.
        for zfile in zf.filelist:
            if zfile.filename.endswith(".sym"):
                with zf.open(zfile) as f:
                    data = f.read()
                stream = pdf.PDFStream([(pdf.PDFName("Length"), pdf.PDFNumeric(len(data)))], data)
                self._jb2_globals = pdf.PDFObject(stream)
                return
        raise ValueError("Could not find a symbol file.")
class JBIG2MultiImages(JBIG2Images):
    """As :class:`JBIG2Images` but supports blending in a PNG file which has
    been automatically produced by the external compressor.

    The input should be a ZIP file produced by :class:`JBIG2CompressorToZip`
    with `oversample=1` and `split=True`.
    """
    def __init__(self, zipfilename, dpi=1):
        super().__init__(zipfilename, dpi)
    def _check_and_get_png(self, zf, basename):
        # Return the companion "<page>.png" as a PIL image, or None when the
        # encoder emitted no graphics layer for this page.
        try:
            with zf.open(basename + ".png") as file:
                return _PILImage.open(file)
        except KeyError:
            return None
    def _compile_pages(self, zf):
        # Same page loop as the base class, but when a page has a companion
        # PNG the two layers are composed with JBIG2PNGMultiImage.
        page_number = 0
        pages = []
        objects = []
        while True:
            ending = ".{:04}".format(page_number)
            choices = [x for x in zf.filelist if x.filename.endswith(ending)]
            if len(choices) == 0:
                break
            png_image = self._check_and_get_png(zf, choices[0].filename)
            with zf.open(choices[0]) as file:
                if png_image is None:
                    parts = JBIG2Image(self._jb2_globals, file, self._proc_set_object, self._dpi)()
                else:
                    multi_image = JBIG2PNGMultiImage(self._jb2_globals, file, self._proc_set_object, self._dpi)
                    # Overlay at the origin, scaled by dpi; the final tuple is
                    # a colour argument — presumably white — TODO confirm
                    # against PDFMultipleImage.add_top_image.
                    multi_image.add_top_image(png_image, (0,0),
                        (png_image.width / self._dpi, png_image.height / self._dpi), (255,255)*3)
                    parts = multi_image()
            pages.append(parts.page)
            objects.extend(parts.objects)
            page_number += 1
        return JBIG2Output(pages, objects)
class JBIG2ManualMultiImages(JBIG2Images):
"""As :class:`JBIG2MultiImages` but with the extracted PNG image(s) chosen
by hand.
The ZIP file should have been generated by :class:`JBIG2CompressorToZip`.
:param zipfilename: The ZIP file to look at for data.
:param dpi: The scaling to apply to each page.
"""
def __init__(self, zipfilename, dpi=1):
super().__init__(zipfilename, dpi)
self._page_rectangles = {}
def add_png_section(self, page_number, page_image, rectangles):
"""Overlay the given page with one or more rectangular extracts from
the given image. For ease, we work witht the usual, :mod:`PIL`,
coordinate system, with `(0,0)` as the top-left corner.
:param page_number: Starting from 0, the page number to adjust.
:param page_image: A :class:`PIL.Image` image to extract rectangles
from.
:param rectangles: An iterable of tuples `(xmin, ymin, xmax, ymax)`
determining a rectangle `xmin <= x < xmax` and `ymin <= y < ymax`.
"""
self._page_rectangles[page_number] = (page_image, list(rectangles))
def _to_parts(self, filename, zf, page_number):
with zf.open(filename) as file:
if page_number not in self._page_rectangles:
return JBIG2Image(self._jb2_globals, file, self._proc_set_object, self._dpi)()
multi_image = JBIG2PNGMultiImage(self._jb2_globals, file, self._proc_set_object, self._dpi)
png_image, rectangles = self._page_rectangles[page_number]
scale_to_page = multi_image.image_size[0] / png_image.size[0]
height_scale = multi_image.image_size[1] / png_image.size[1]
if abs(scale_to_page - height_scale) > 1e-6:
raise ValueError("JBIG2 image and PNG image of different aspect ratios")
for xmin, ymin, xmax, ymax in rectangles:
png_part = png_image.crop((xmin, ymin, xmax, ymax))
xmin, xmax = xmin * scale_to_page, xmax * scale_to_page
ymin, ymax = ymin * scale_to_page, ymax * scale_to_page
x1, x2 = xmin / self._dpi, xmax / self._dpi
y1, y2 = ymin / self._dpi, ymax / self._dpi
page_height = multi_image.image_size[1] / self._dpi
multi_image.add_top_image(png_part, (x1, page_height - y2), (x2 - x1, y2 - y1))
return multi_image()
def _compile_pages(self, zf):
page_number = 0
pages = []
objects = []
while True:
ending = ".{:04}".format(page_number)
choices = [x for x in zf.filelist if x.filename.endswith(ending)]
if len(choices) == 0:
break
parts = self._to_parts(choices[0], zf, page_number)
pages.append(parts.page)
objects.extend(parts.objects)
page_number += 1
return JBIG2Output(pages, objects)
| 37.387097 | 113 | 0.605761 | 14,711 | 0.976372 | 0 | 0 | 1,006 | 0.066768 | 0 | 0 | 3,769 | 0.250149 |
d52024545b971d12235af9a15abe55f96fe6aa8a | 1,581 | py | Python | python/mpopt/ct/cmdline/jug.py | vislearn/libmpopt | 11c9e99bedc7fb5dd2e11bff69c60d4ce974f525 | [
"MIT"
] | 1 | 2021-03-23T06:45:42.000Z | 2021-03-23T06:45:42.000Z | python/mpopt/ct/cmdline/jug.py | vislearn/libmpopt | 11c9e99bedc7fb5dd2e11bff69c60d4ce974f525 | [
"MIT"
] | 1 | 2022-01-18T03:17:09.000Z | 2022-01-18T03:17:09.000Z | python/mpopt/ct/cmdline/jug.py | vislearn/libmpopt | 11c9e99bedc7fb5dd2e11bff69c60d4ce974f525 | [
"MIT"
] | 2 | 2021-03-03T14:01:52.000Z | 2022-01-18T02:45:55.000Z | #!/usr/bin/env python3
import argparse
import sys
from mpopt import ct, utils
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='ct_jug', description='Optimizer for *.jug cell tracking models.')
parser.add_argument('-B', '--batch-size', type=int, default=ct.DEFAULT_BATCH_SIZE)
parser.add_argument('-b', '--max-batches', type=int, default=ct.DEFAULT_MAX_BATCHES)
parser.add_argument('-o', '--output', default=None, help='Specifies the output file.')
parser.add_argument('--ilp', choices=('standard', 'decomposed'), help='Solves the ILP after reparametrizing.')
parser.add_argument('input_filename', metavar='INPUT', help='Specifies the *.jug input file.')
args = parser.parse_args()
with utils.smart_open(args.input_filename, 'rt') as f:
model, bimap = ct.convert_jug_to_ct(ct.parse_jug_model(f))
tracker = ct.construct_tracker(model)
tracker.run(args.batch_size, args.max_batches)
if args.ilp:
if args.ilp == 'standard':
gurobi = ct.GurobiStandardModel(model)
gurobi.construct()
gurobi.update_upper_bound(tracker)
else:
gurobi = ct.GurobiDecomposedModel(model, tracker)
gurobi.construct()
gurobi.update_upper_bound()
gurobi.run()
primals = gurobi.get_primals()
else:
primals = ct.extract_primals_from_tracker(model, tracker)
print('final solution:', primals.evaluate())
if args.output:
with open(args.output, 'w') as f:
ct.format_jug_primals(primals, bimap, f)
| 37.642857 | 114 | 0.667932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 320 | 0.202404 |
d520c0da5ba138e741aa09521a01f1f65620aa03 | 1,436 | py | Python | setup.py | CybercentreCanada/assemblyline-v4-p2compat | 96aa268f672f8d226eb9f4234d2e97c1a39b7b66 | [
"MIT"
] | null | null | null | setup.py | CybercentreCanada/assemblyline-v4-p2compat | 96aa268f672f8d226eb9f4234d2e97c1a39b7b66 | [
"MIT"
] | 1 | 2020-09-09T13:26:52.000Z | 2020-09-09T13:26:52.000Z | setup.py | CybercentreCanada/assemblyline-v4-p2compat | 96aa268f672f8d226eb9f4234d2e97c1a39b7b66 | [
"MIT"
] | 2 | 2020-09-08T16:59:57.000Z | 2020-09-09T12:08:07.000Z | import os
from setuptools import setup, find_packages
# For development and local builds use this version number, but for real builds replace it
# with the tag found in the environment
package_version = "4.0.0.dev0"
if 'BITBUCKET_TAG' in os.environ:
package_version = os.environ['BITBUCKET_TAG'].lstrip('v')
elif 'BUILD_SOURCEBRANCH' in os.environ:
full_tag_prefix = 'refs/tags/v'
package_version = os.environ['BUILD_SOURCEBRANCH'][len(full_tag_prefix):]
setup(
name="assemblyline_v4_p2compat",
version=package_version,
description="Assemblyline 4 python2 service compatibility layer",
long_description="This package provides common functionalities for python2 only services.",
url="https://bitbucket.org/cse-assemblyline/assemblyline_v4_p2compat/",
author="CCCS Assemblyline development team",
author_email="assemblyline@cyber.gc.ca",
license="MIT",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7'
],
keywords="assemblyline malware gc canada cse-cst cse cst cyber cccs",
packages=find_packages(exclude=['test/*']),
install_requires=[
'PyYAML',
'netifaces',
'easydict',
'chardet'
],
package_data={
'': []
}
)
| 34.190476 | 95 | 0.685237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 811 | 0.564763 |
d5220e82bf5048293d79285d2100fa6124ce5c82 | 695 | py | Python | Arrays and Strings/LongestSubstringWithoutRepeating.py | dileeppandey/hello-interview | 78f6cf4e2da4106fd07f4bd86247026396075c69 | [
"MIT"
] | null | null | null | Arrays and Strings/LongestSubstringWithoutRepeating.py | dileeppandey/hello-interview | 78f6cf4e2da4106fd07f4bd86247026396075c69 | [
"MIT"
] | null | null | null | Arrays and Strings/LongestSubstringWithoutRepeating.py | dileeppandey/hello-interview | 78f6cf4e2da4106fd07f4bd86247026396075c69 | [
"MIT"
] | 1 | 2020-02-12T16:57:46.000Z | 2020-02-12T16:57:46.000Z | """
https://leetcode.com/problems/longest-substring-without-repeating-characters/
Given a string, find the length of the longest substring without repeating characters.
"""
class Solution:
def lengthOfLongestSubstring(self, s):
longest = 0
longest_substr = ""
for ch in s:
if ch not in longest_substr:
longest_substr += ch
else:
if len(longest_substr) > longest:
longest = len(longest_substr)
longest_substr = longest_substr[(longest_substr.find(ch)+1):] + ch
return max(longest, len(longest_substr))
s=Solution()
print(s.lengthOfLongestSubstring("umvejcuuk"))
| 27.8 | 86 | 0.630216 | 456 | 0.656115 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.269065 |
d522a2438ec803dddc6ee2c08ebc2b5bae1d14b5 | 388 | py | Python | core/src/trezor/messages/TxAck.py | Kayuii/trezor-crypto | 6556616681a4e2d7e18817e8692d4f6e041dee01 | [
"MIT"
] | null | null | null | core/src/trezor/messages/TxAck.py | Kayuii/trezor-crypto | 6556616681a4e2d7e18817e8692d4f6e041dee01 | [
"MIT"
] | 1 | 2019-02-08T00:22:42.000Z | 2019-02-13T09:41:54.000Z | core/src/trezor/messages/TxAck.py | Kayuii/trezor-crypto | 6556616681a4e2d7e18817e8692d4f6e041dee01 | [
"MIT"
] | 2 | 2019-02-07T23:57:09.000Z | 2020-10-21T07:07:27.000Z | # Automatically generated by pb2py
# fmt: off
import protobuf as p
from .TransactionType import TransactionType
class TxAck(p.MessageType):
MESSAGE_WIRE_TYPE = 22
def __init__(
self,
tx: TransactionType = None,
) -> None:
self.tx = tx
@classmethod
def get_fields(cls):
return {
1: ('tx', TransactionType, 0),
}
| 17.636364 | 44 | 0.600515 | 272 | 0.701031 | 0 | 0 | 107 | 0.275773 | 0 | 0 | 48 | 0.123711 |
d52368f2b3c16c6294cb4e60b4f69e820a0d92d4 | 12,476 | py | Python | balrog/__main__.py | samhorsfield96/ggCaller | 01a32b85e3221e9e3d3552095c439dfc89cd70b6 | [
"MIT"
] | 15 | 2020-08-26T10:31:37.000Z | 2022-03-03T15:42:04.000Z | balrog/__main__.py | samhorsfield96/ggCaller | 01a32b85e3221e9e3d3552095c439dfc89cd70b6 | [
"MIT"
] | null | null | null | balrog/__main__.py | samhorsfield96/ggCaller | 01a32b85e3221e9e3d3552095c439dfc89cd70b6 | [
"MIT"
] | null | null | null | import os
import tarfile
import time
import pickle
import numpy as np
from Bio.Seq import Seq
from scipy.special import expit
from scipy.special import logit
import torch
import torch.nn.functional as F
""" Get directories for model and seengenes """
module_dir = os.path.dirname(os.path.realpath(__file__))
model_dir = os.path.join(module_dir, "balrog_models")
""" Print what the program is doing."""
verbose = True
""" Use kmer prefilter to increase gene sensitivity.
May not play nice with very high GC genomes."""
protein_kmer_filter = False
""" Nucleotide to amino acid translation table. 11 for most bacteria/archaea.
4 for Mycoplasma/Spiroplasma."""
translation_table = 11
# translation_table = 4
""" Batch size for the temporal convolutional network used to score genes.
Small batches and big batches slow down the model. Very big batches may crash the
GPU. """
gene_batch_size = 200
TIS_batch_size = 1000
""" All following are internal parameters. Change at your own risk."""
weight_gene_prob = 0.9746869839852076
weight_TIS_prob = 0.25380288790532707
score_threshold = 0.47256101519707244
weight_ATG = 0.84249804151264
weight_GTG = 0.7083689705744909
weight_TTG = 0.7512400826652517
unidirectional_penalty_per_base = 3.895921717182765 # 3' 5' overlap
convergent_penalty_per_base = 4.603432608883688 # 3' 3' overlap
divergent_penalty_per_base = 3.3830814940689975 # 5' 5' overlap
k_seengene = 10
multimer_threshold = 2
nuc_encode = {"A": 0,
"T": 1,
"G": 2,
"C": 3,
"N": 0,
"M": 0,
"R": 0,
"Y": 0,
"W": 0,
"K": 0}
start_enc = {"ATG": 0,
"GTG": 1,
"TTG": 2}
aa_table = {"L": 1,
"V": 2,
"I": 3,
"M": 4,
"C": 5,
"A": 6,
"G": 7,
"S": 8,
"T": 9,
"P": 10,
"F": 11,
"Y": 12,
"W": 13,
"E": 14,
"D": 15,
"N": 16,
"Q": 17,
"K": 18,
"R": 19,
"H": 20,
"*": 0,
"X": 0}
# generate ORF sequences from coordinates
# @profile
def generate_sequence(graph_vector, nodelist, node_coords, overlap):
sequence = ""
for i in range(0, len(nodelist)):
id = nodelist[i]
coords = node_coords[i]
# calculate strand based on value of node (if negative, strand is false)
strand = True if id >= 0 else False
if strand:
unitig_seq = graph_vector[abs(id) - 1].seq
else:
unitig_seq = str(Seq(graph_vector[abs(id) - 1].seq).reverse_complement())
if len(sequence) == 0:
substring = unitig_seq[coords[0]:(coords[1] + 1)]
else:
if coords[1] >= overlap:
substring = unitig_seq[overlap:(coords[1] + 1)]
sequence += substring
return sequence
#@profile
def tokenize_aa_seq(aa_seq):
""" Convert amino acid letters to integers."""
tokenized = torch.tensor([aa_table[aa] for aa in aa_seq])
return tokenized
#@profile
def get_ORF_info(ORF_vector, graph, overlap):
ORF_seq_list = []
TIS_seqs = []
# iterate over list of ORFs
for ORFNodeVector in ORF_vector:
# need to determine ORF sequences from paths
ORF_nodelist = ORFNodeVector[0]
ORF_node_coords = ORFNodeVector[1]
TIS_nodelist = ORFNodeVector[3]
TIS_node_coords = ORFNodeVector[4]
# generate ORF_seq, as well as upstream and downstream TIS seq
ORF_seq = graph.generate_sequence(ORF_nodelist, ORF_node_coords, overlap)
upstream_TIS_seq = graph.generate_sequence(TIS_nodelist, TIS_node_coords, overlap)
downstream_TIS_seq = ORF_seq[0:19]
# generate Seq class for translation
seq = Seq(ORF_seq)
# translate once per frame, then slice. Note, do not include start or stop codons
aa = str(seq[3:-3].translate(table=translation_table, to_stop=False))
ORF_seq_list.append(aa)
TIS_seqs.append((upstream_TIS_seq, downstream_TIS_seq))
# convert amino acids into integers
ORF_seq_enc = [tokenize_aa_seq(x) for x in ORF_seq_list]
return ORF_seq_enc, TIS_seqs
#@profile
def predict(model, X):
model.eval()
with torch.no_grad():
if torch.cuda.device_count() > 0:
X_enc = F.one_hot(X, 21).permute(0, 2, 1).float().cuda()
probs = expit(model(X_enc).cpu())
del X_enc
torch.cuda.empty_cache()
else:
X_enc = F.one_hot(X, 21).permute(0, 2, 1).float()
probs = expit(model(X_enc).cpu())
return probs
#@profile
def predict_tis(model_tis, X):
model_tis.eval()
with torch.no_grad():
if torch.cuda.device_count() > 0:
X_enc = F.one_hot(X, 4).permute(0, 2, 1).float().cuda()
else:
X_enc = F.one_hot(X, 4).permute(0, 2, 1).float()
probs = expit(model_tis(X_enc).cpu())
return probs
#@profile
def kmerize(seq, k):
kmerset = set()
for i in range(len(seq) - k + 1):
kmer = tuple(seq[i: i + k].tolist())
kmerset.add(kmer)
return kmerset
def load_kmer_model():
# check if directory exists. If not, unzip file
if not os.path.exists(model_dir):
tar = tarfile.open(model_dir + ".tar.gz", mode="r:gz")
tar.extractall(module_dir)
tar.close()
"""Load k-mer filters"""
genexa_kmer_path = os.path.join(model_dir, "10mer_thresh2_minusARF_all.pkl")
with open(genexa_kmer_path, "rb") as f:
aa_kmer_set = pickle.load(f)
return aa_kmer_set
def load_gene_models():
# check if directory exists. If not, unzip file
if not os.path.exists(model_dir):
tar = tarfile.open(model_dir + ".tar.gz", mode="r:gz")
tar.extractall(module_dir)
tar.close()
torch.hub.set_dir(model_dir)
# print("Loading convolutional model...")
if torch.cuda.device_count() > 0:
# print("GPU detected...")
model = torch.hub.load(model_dir, "geneTCN", source='local').cuda()
model_tis = torch.hub.load(model_dir, "tisTCN", source='local').cuda()
time.sleep(0.5)
else:
# print("No GPU detected, using CPU...")
model = torch.hub.load(model_dir, "geneTCN", source='local')
model_tis = torch.hub.load(model_dir, "tisTCN", source='local')
time.sleep(0.5)
return (model, model_tis)
#@profile
def score_genes(ORF_vector, graph_vector, minimum_ORF_score, overlap, model, model_tis, aa_kmer_set):
# get sequences and coordinates of ORFs
# print("Finding and translating open reading frames...")
ORF_seq_enc, TIS_seqs = get_ORF_info(ORF_vector, graph_vector, overlap)
# seengene check
if protein_kmer_filter:
seengene = []
for s in ORF_seq_enc:
kmerset = kmerize(s, k_seengene)
# s = [x in aa_kmer_set for x in kmerset]
s = np.isin(list(kmerset), aa_kmer_set)
seen = np.count_nonzero(s) >= multimer_threshold
seengene.append(seen)
# score
# print("Scoring ORFs with temporal convolutional network...")
# sort by length to minimize impact of batch padding
ORF_lengths = np.asarray([len(x) for x in ORF_seq_enc])
length_idx = np.argsort(ORF_lengths)
ORF_seq_sorted = [ORF_seq_enc[i] for i in length_idx]
# pad to allow creation of batch matrix
prob_list = []
for i in range(0, len(ORF_seq_sorted), gene_batch_size):
batch = ORF_seq_sorted[i:i + gene_batch_size]
seq_lengths = torch.LongTensor(list(map(len, batch)))
seq_tensor = torch.zeros((len(batch), seq_lengths.max())).long()
for idx, (seq, seqlen) in enumerate(zip(batch, seq_lengths)):
seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
pred_all = predict(model, seq_tensor)
pred = []
for j, length in enumerate(seq_lengths):
subseq = pred_all[j, 0, 0:int(length)]
predprob = float(expit(torch.mean(logit(subseq))))
pred.append(predprob)
prob_list.extend(pred)
prob_arr = np.asarray(prob_list, dtype=float)
# unsort
unsort_idx = np.argsort(length_idx)
ORF_prob = prob_arr[unsort_idx]
# recombine ORFs
idx = 0
ORF_gene_score = [None] * len(ORF_seq_enc)
for k, coord in enumerate(ORF_gene_score):
ORF_gene_score[k] = float(ORF_prob[idx])
idx += 1
# print("Scoring translation initiation sites...")
# extract nucleotide sequence surrounding potential start codons
ORF_TIS_seq_flat = []
ORF_TIS_seq_idx = []
ORF_TIS_prob = [None] * len(TIS_seqs)
ORF_start_codon = [None] * len(ORF_seq_enc)
for i, TIS in enumerate(TIS_seqs):
# unpack tuple. Note, downsteam includes start codon, which needs to be removed
upstream, downstream = TIS
if len(upstream) == 16:
TIS_seq = torch.tensor([nuc_encode[c] for c in (upstream + downstream[3:])[::-1]],
dtype=int) # model scores 3' to 5' direction
ORF_TIS_seq_flat.append(TIS_seq)
ORF_TIS_seq_idx.append(i)
else:
ORF_TIS_prob[i] = 0.5
# encode start codon
start_codon = start_enc[downstream[0:3]]
ORF_start_codon[i] = start_codon
# batch score TIS
TIS_prob_list = []
for i in range(0, len(ORF_TIS_seq_flat), TIS_batch_size):
batch = ORF_TIS_seq_flat[i:i + TIS_batch_size]
TIS_stacked = torch.stack(batch)
pred = predict_tis(model_tis, TIS_stacked)
TIS_prob_list.extend(pred)
y_pred_TIS = np.asarray(TIS_prob_list, dtype=float)
# reindex batched scores
for i, prob in enumerate(y_pred_TIS):
idx = ORF_TIS_seq_idx[i]
ORF_TIS_prob[idx] = float(prob)
# combine all info into single score for each ORF
if protein_kmer_filter:
ORF_score_flat = []
for i, geneprob in enumerate(ORF_gene_score):
if not geneprob:
ORF_score_flat.append(None)
continue
seengene_idx = 0
# calculate length by multiplying number of amino acids by 3, then adding 6 for start and stop
length = (len(ORF_seq_enc[i]) * 3) + 6
TIS_prob = ORF_TIS_prob[i]
start_codon = ORF_start_codon[i]
ATG = start_codon == 0
GTG = start_codon == 1
TTG = start_codon == 2
combprob = geneprob * weight_gene_prob \
+ TIS_prob * weight_TIS_prob \
+ ATG * weight_ATG \
+ GTG * weight_GTG \
+ TTG * weight_TTG
maxprob = weight_gene_prob + weight_TIS_prob + max(weight_ATG, weight_TTG, weight_GTG)
probthresh = score_threshold * maxprob
score = (combprob - probthresh) * length + 1e6 * seengene[seengene_idx]
seengene_idx += 1
ORF_score_flat.append(score)
else:
ORF_score_flat = []
for i, geneprob in enumerate(ORF_gene_score):
if not geneprob:
ORF_score_flat.append(None)
continue
# calculate length by multiplying number of amino acids by 3, then adding 6 for start and stop
length = len(ORF_seq_enc[i]) * 3
TIS_prob = ORF_TIS_prob[i]
start_codon = ORF_start_codon[i]
ATG = start_codon == 0
GTG = start_codon == 1
TTG = start_codon == 2
combprob = geneprob * weight_gene_prob \
+ TIS_prob * weight_TIS_prob \
+ ATG * weight_ATG \
+ GTG * weight_GTG \
+ TTG * weight_TTG
maxprob = weight_gene_prob + weight_TIS_prob + max(weight_ATG, weight_TTG, weight_GTG)
probthresh = score_threshold * maxprob
score = (combprob - probthresh) * length
ORF_score_flat.append(score)
# update initial dictionary, removing low scoring ORFs and create score mapping score within a tuple
ORF_score_dict = {}
for i, score in enumerate(ORF_score_flat):
# if score greater than minimum, add to the ORF_score_dict
if score >= minimum_ORF_score:
ORF_score_dict[i] = score
return ORF_score_dict
| 32.321244 | 106 | 0.605162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,609 | 0.209122 |
d52452bbbdfe7f2a5739d5992e91cfe848ffbc23 | 154 | py | Python | toal/annotators/AbstractAnnotator.py | Bhaskers-Blu-Org1/text-oriented-active-learning | facfb40673a59e43391b7bdb508e612dff1988d9 | [
"MIT"
] | 4 | 2020-10-23T14:42:30.000Z | 2021-06-10T13:29:04.000Z | toal/annotators/AbstractAnnotator.py | Bhaskers-Blu-Org1/text-oriented-active-learning | facfb40673a59e43391b7bdb508e612dff1988d9 | [
"MIT"
] | null | null | null | toal/annotators/AbstractAnnotator.py | Bhaskers-Blu-Org1/text-oriented-active-learning | facfb40673a59e43391b7bdb508e612dff1988d9 | [
"MIT"
] | 1 | 2020-07-30T10:35:09.000Z | 2020-07-30T10:35:09.000Z | import abc
class AbstractAnnotator(abc.ABC):
@abc.abstractmethod
def annotate(self, unlab_index, unlabeled):
raise NotImplementedError() | 22 | 47 | 0.733766 | 142 | 0.922078 | 0 | 0 | 103 | 0.668831 | 0 | 0 | 0 | 0 |
d52485559a1125bc8acdb1957ce05811a8034901 | 1,046 | py | Python | code/HHV2020_07/Adafruit_Trinket_Neopixel_Strip_Cycle/main.py | gowenrw/BSidesDFW_2020_HHV | accfe2d4ba91f8899c3d4dca75c13c220e224960 | [
"MIT"
] | null | null | null | code/HHV2020_07/Adafruit_Trinket_Neopixel_Strip_Cycle/main.py | gowenrw/BSidesDFW_2020_HHV | accfe2d4ba91f8899c3d4dca75c13c220e224960 | [
"MIT"
] | null | null | null | code/HHV2020_07/Adafruit_Trinket_Neopixel_Strip_Cycle/main.py | gowenrw/BSidesDFW_2020_HHV | accfe2d4ba91f8899c3d4dca75c13c220e224960 | [
"MIT"
] | null | null | null | import board, time
import neopixel
# Define Neopixels
LED7_PIN = board.D0 # pin that the NeoPixel is connected to
# Most Neopixels have a color order of GRB or GRBW some use RGB
LED7_ORDER = neopixel.GRB # pixel color channel order
# Create NeoPixel object
LED6 = neopixel.NeoPixel(board.D1, 1, pixel_order=neopixel.RGB)
LED7 = neopixel.NeoPixel(LED7_PIN, 3, pixel_order=LED7_ORDER)
# Turn down brightness to 30%
LED7.brightness = 0.3
# Function to color cycle NeoPixels
def wheel(pos):
if (pos < 0) or (pos > 255):
return (0, 0, 0)
if (pos < 85):
return (int(pos * 3), int(255 - (pos*3)), 0)
elif (pos < 170):
pos -= 85
return (int(255 - pos*3), 0, int(pos*3))
else:
pos -= 170
return (0, int(pos*3), int(255 - pos*3))
# Iteration Var
i = 0
### MAIN LOOP ###
while True:
LED6[0] = (0, 0, 0) # turn off 8mm to focus on strip
LED7[0] = wheel(i & 255)
LED7[1] = wheel(i & 255)
LED7[2] = wheel(i & 255)
time.sleep(0.05)
i = (i+1) % 256 # run from 0 to 255
| 29.055556 | 63 | 0.614723 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 318 | 0.304015 |
d52c443ce49f74f8bcfed2e21fe8f2f3f85e5084 | 263 | py | Python | abc/abc163/abc163c-1.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z | abc/abc163/abc163c-1.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | abc/abc163/abc163c-1.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | N = int(input())
A = list(map(int, input().split()))
d = {}
for i in range(N - 1):
if A[i] in d:
d[A[i]].append(i + 2)
else:
d[A[i]] = [i + 2]
for i in range(1, N + 1):
if i in d:
print(len(d[i]))
else:
print(0)
| 15.470588 | 35 | 0.422053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |