| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
791A.py
|
blurryface92/CodeForces
| 1
|
12775351
|
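# Limak's weight triples each year while Bob's doubles; count the years until Limak is strictly heavier than Bob.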
n = input()
split = n.split()
limak = int(split[0])
bob = int(split[-1])
years = 0
while True:
limak*=3
bob*=2
years+=1
if limak>bob:
break
print(years)
| 3.0625
| 3
|
tools/homer/makeUCSCfile.py
|
globusgenomics/galaxy
| 1
|
12775352
|
#!/usr/bin/python
CHUNK_SIZE = 2**20 #1mb
import argparse, os, shutil, subprocess, sys, tempfile, shlex, vcf, pysam
parser = argparse.ArgumentParser(description='')
parser.add_argument ( '--input', dest='input', help='the bed file')
parser.add_argument ( '-o', dest='output', help='output log file' )
def execute( cmd, output="" ):
tmp_dir = tempfile.mkdtemp()
try:
err = open(tmp_dir+"/errorLog", 'a')
if output != "":
out = open(output, 'w')
else:
out = subprocess.PIPE
process = subprocess.Popen( args=shlex.split(cmd), stdout=out, stderr=err )
process.wait()
err.close()
if out != subprocess.PIPE:
out.close()
except Exception as e:
sys.stderr.write("problem doing : %s\n" %(cmd))
sys.stderr.write( '%s\n\n' % str(e) )
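# Build the HOMER makeUCSCfile command, run it in a temporary directory, stream its stderr, and copy the resulting bedgraph.gz to the requested output path.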
def __main__():
args = parser.parse_args()
if args.input is not None:
cmd="makeUCSCfile %s -o bedgraph" % args.input
# cmd="makeUCSCfile %s -o %s" % (args.input, args.output)
print("cmd: %s" % cmd)
tmp_dir = tempfile.mkdtemp()
stdout = tempfile.NamedTemporaryFile( prefix="makeucscfile-stdout-", dir=tmp_dir )
stderr = tempfile.NamedTemporaryFile( prefix="makeucscfile-stderr-", dir=tmp_dir )
proc = subprocess.Popen( args=cmd, stdout=stdout, stderr=stderr, shell=True, cwd=tmp_dir)
return_code = proc.wait()
if return_code:
stderr_target = sys.stderr
else:
stderr_target = sys.stdout
stderr.flush()
stderr.seek(0)
while True:
chunk = stderr.read( CHUNK_SIZE )
if chunk:
stderr_target.write( chunk )
else:
break
##shutil.copy("%s/
stderr.close()
stdout.close()
shutil.copy('%s/bedgraph.gz' % tmp_dir, args.output)
if __name__=="__main__":
__main__()
| 2.390625
| 2
|
ztag/annotations/FtpKebi.py
|
justinbastress/ztag
| 107
|
12775353
|
import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag import protocols
import ztag.test
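# Annotation for Kebi FTP Server banners: tags the product name and extracts the version number when present.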
class FtpKebi(Annotation):
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
impl_re = re.compile("^220- Kebi FTP Server", re.IGNORECASE)
version_re = re.compile(r"\(Version (\d+(?:\.\d+)*)\)", re.IGNORECASE)
def process(self, obj, meta):
banner = obj["banner"]
if self.impl_re.search(banner):
meta.local_metadata.product = "Kebi Ftpd"
match = self.version_re.search(banner)
if match:
meta.local_metadata.version = match.group(1)
return meta
""" Tests
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 SINN \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Easy FTP\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server (Version 2.0.0)\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6\\xbf\\xa1 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"220- Kebi FTP Server ( \\xb1\\xfa\\xba\\xf1 FTP \\xbc\\xad\\xb9\\xf6 )\r\n220- Written by <NAME> - http://www.webkebi.com\r\n220 Kebi FTP \\xbc\\xad\\xb9\\xf6 \\xc1\\xa2\\xbc\\xd3\\xc0\\xbb \\xc8\\xaf\\xbf\\xb5\\xc7\\xd5\\xb4\\xcf\\xb4\\xd9. !\r\n"
"""
| 2.046875
| 2
|
FusionIIIT/applications/online_cms/admin.py
|
paras11agarwal/FusionIIIT
| 0
|
12775354
|
from django.contrib import admin
from .models import (Assignment, CourseDocuments, CourseVideo, Forum,
ForumReply, Quiz, QuizQuestion, QuizResult, StudentAnswer,
StudentAssignment)
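# Register the online CMS models so they can be managed through the Django admin site.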
admin.site.register(CourseDocuments)
admin.site.register(CourseVideo)
admin.site.register(Quiz)
admin.site.register(QuizQuestion)
admin.site.register(StudentAnswer)
admin.site.register(Assignment)
admin.site.register(StudentAssignment)
admin.site.register(QuizResult)
admin.site.register(Forum)
admin.site.register(ForumReply)
| 1.484375
| 1
|
test.py
|
puiterwijk/rpm-head-signing
| 1
|
12775355
|
from tempfile import mkdtemp
import hashlib
from shutil import rmtree, copy
import os
import os.path
import subprocess
import struct
import sys
import unittest
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed
from cryptography.hazmat.primitives.hashes import Hash, SHA1
import cryptography.hazmat.primitives.serialization as crypto_serialization
import cryptography.hazmat.primitives.hashes as crypto_hashes
import cryptography.hazmat.primitives.asymmetric.ec as crypto_ec
from cryptography.x509 import load_der_x509_certificate
import xattr
import rpm_head_signing
class TestRpmHeadSigning(unittest.TestCase):
pkg_numbers = ['1', '2']
@classmethod
def setUpClass(cls):
cls.asset_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'test_assets',
)
def setUp(self):
self.tmpdir = mkdtemp(prefix='test-rpm_head_signing-', dir=os.path.abspath('.'))
def tearDown(self):
rmtree(self.tmpdir)
self.tmpdir = None
def compare_files(self, asset_name, tmp_name):
with open(os.path.join(self.asset_dir, asset_name), 'rb') as asset_file:
with open(os.path.join(self.tmpdir, tmp_name), 'rb') as tmp_file:
self.assertEqual(
asset_file.read(),
tmp_file.read(),
"Asset file %s is different from tmp file %s" % (asset_name, tmp_name),
)
def test_extract(self):
rpm_head_signing.extract_header(
os.path.join(self.asset_dir, 'testpkg-1.noarch.rpm'),
os.path.join(self.tmpdir, 'testpkg-1.noarch.rpm.hdr.tmp'),
os.path.join(self.tmpdir, 'digests.out.tmp'),
)
rpm_head_signing.extract_header(
os.path.join(self.asset_dir, 'testpkg-2.noarch.rpm'),
os.path.join(self.tmpdir, 'testpkg-2.noarch.rpm.hdr.tmp'),
os.path.join(self.tmpdir, 'digests.out.tmp'),
)
self.compare_files("testpkg-1.noarch.rpm.hdr", "testpkg-1.noarch.rpm.hdr.tmp")
self.compare_files("testpkg-2.noarch.rpm.hdr", "testpkg-2.noarch.rpm.hdr.tmp")
self.compare_files("digests.out", "digests.out.tmp")
def test_insert_no_ima(self):
copy(
os.path.join(self.asset_dir, 'gpgkey.asc'),
os.path.join(self.tmpdir, 'gpgkey.key'),
)
for pkg in self.pkg_numbers:
copy(
os.path.join(self.asset_dir, "testpkg-%s.noarch.rpm" % pkg),
os.path.join(self.tmpdir, "testpkg-%s.noarch.rpm" % pkg),
)
res = subprocess.check_output(
[
'rpm',
'--define', '%%_keyringpath %s' % self.tmpdir,
'--define', '%%_keyringpath %s' % self.tmpdir,
'-Kv',
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
],
)
self.assertTrue(b'SHA1 digest: OK' in res)
self.assertFalse(b'Header V3 RSA' in res)
rpm_head_signing.insert_signature(
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
os.path.join(self.asset_dir, 'testpkg-%s.noarch.rpm.hdr.sig' % pkg)
)
res = subprocess.check_output(
[
'rpm',
'--define', '%%_keyringpath %s' % self.tmpdir,
'--define', '%%_keyringpath %s' % self.tmpdir,
'-Kvvvvvvvv',
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
],
)
self.assertTrue(b'SHA1 digest: OK' in res)
self.assertTrue(b'Header V3 RSA' in res)
self.assertTrue(b'15f712be: ok' in res.lower())
def test_insert_ima(self):
self._ima_insertion_test(None)
def test_insert_ima_valgrind(self):
valgrind_logfile = os.environ.get(
'VALGRIND_LOG_FILE',
'%s/valgrind.log' % self.tmpdir,
)
self._ima_insertion_test(
[
'valgrind',
'--tool=memcheck',
'--track-fds=yes',
'--leak-check=full',
'--track-origins=yes',
'--log-file=%s' % valgrind_logfile,
'--',
sys.executable,
'test_insert.py',
]
)
with open(valgrind_logfile, 'r') as logfile:
log = logfile.read()
if os.environ.get('PRINT_VALGRIND_LOG'):
print('---- START OF VALGRIND LOG ----')
print(log)
print('---- END OF VALGRIND LOG ----')
if 'insertlib.c' in log:
raise Exception("insertlib.c found in the Valgrind log")
def _ima_insertion_test(self, insert_command):
copy(
os.path.join(self.asset_dir, 'gpgkey.asc'),
os.path.join(self.tmpdir, 'gpgkey.key'),
)
for pkg in self.pkg_numbers:
copy(
os.path.join(self.asset_dir, "testpkg-%s.noarch.rpm" % pkg),
os.path.join(self.tmpdir, "testpkg-%s.noarch.rpm" % pkg),
)
res = subprocess.check_output(
[
'rpm',
'--define', '%%_keyringpath %s' % self.tmpdir,
'--define', '%%_keyringpath %s' % self.tmpdir,
'-Kv',
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
],
)
self.assertTrue(b'SHA1 digest: OK' in res)
self.assertFalse(b'Header V3 RSA' in res)
if insert_command is None:
rpm_head_signing.insert_signature(
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
os.path.join(self.asset_dir, 'testpkg-%s.noarch.rpm.hdr.sig' % pkg),
ima_presigned_path=os.path.join(self.asset_dir, 'digests.out.signed'),
)
else:
subprocess.check_call(
insert_command + [
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
os.path.join(self.asset_dir, 'testpkg-%s.noarch.rpm.hdr.sig' % pkg),
os.path.join(self.asset_dir, 'digests.out.signed'),
]
)
res = subprocess.check_output(
[
'rpm',
'--define', '%%_keyringpath %s' % self.tmpdir,
'--define', '%%_keyringpath %s' % self.tmpdir,
'-Kvvvv',
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
],
)
self.assertTrue(b'SHA1 digest: OK' in res)
self.assertTrue(b'Header V3 RSA' in res)
self.assertTrue(b'15f712be: ok' in res.lower())
extracted_dir = os.path.join(self.tmpdir, 'testpkg-%s.noarch.extracted' % pkg)
os.mkdir(extracted_dir)
rpm_head_signing.extract_rpm_with_filesigs(
os.path.join(self.tmpdir, 'testpkg-%s.noarch.rpm' % pkg),
extracted_dir,
)
with open(os.path.join(self.asset_dir, 'imacert.der'), 'rb') as f:
cert = load_der_x509_certificate(f.read(), backend=default_backend())
pubkey = cert.public_key()
evmctl_help = subprocess.check_output(['evmctl', '--help'])
for (where, dnames, fnames) in os.walk(extracted_dir):
for fname in fnames:
# Always run the manual evmctl check.
alternative_evmctl_check(
os.path.join(where, fname),
pubkey,
)
if b'--xattr-user' in evmctl_help:
subprocess.check_call(
[
'evmctl',
'-v',
'--key', os.path.join(self.asset_dir, 'imacert.der'),
'ima_verify',
'--xattr-user',
os.path.join(where, fname),
],
)
else:
if not os.environ.get('ONLY_ALTERNATIVE_EVMCTL_CHECK'):
raise Exception("Can't test evmctl")
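# Fallback check used when evmctl is too old: manually parse the IMA signature from the user.ima xattr and verify it against the certificate's public key.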
def alternative_evmctl_check(file_path, pubkey):
# In RHEL7, evmctl is too old, so we won't be able to run the
# evmctl check
ima_sig = bytearray(xattr.getxattr(file_path, 'user.ima'))
if ima_sig[0] != 3:
raise Exception("IMA signature has wrong prefix (%s)" % ima_sig[0])
if ima_sig[1] != 2:
raise Exception("IMA signature has wrong version (%s)" % ima_sig[1])
algo_id = ima_sig[2]
if algo_id == 7: # SHA224
hasher = hashlib.sha224()
crypto_algo = crypto_hashes.SHA224()
elif algo_id == 4: # SHA256
hasher = hashlib.sha256()
crypto_algo = crypto_hashes.SHA256()
elif algo_id == 5: # SHA384
hasher = hashlib.sha384()
crypto_algo = crypto_hashes.SHA384()
elif algo_id == 6: # SHA512
hasher = hashlib.sha512()
crypto_algo = crypto_hashes.SHA512()
else:
raise Exception("IMA signature has invalid algo: %d" % algo_id)
crypto_algo = Prehashed(crypto_algo)
if sys.version_info.major == 3:
# X962 is only supported on Cryptography 2.5+
# We are a bit lazy and just check for py3 instead of checking this more carefully
# Check the Key ID
key_id = ima_sig[3:7]
keybytes = pubkey.public_bytes(
crypto_serialization.Encoding.X962,
crypto_serialization.PublicFormat.UncompressedPoint,
)
keybytes_digester = Hash(SHA1())
keybytes_digester.update(keybytes)
keybytes_digest = keybytes_digester.finalize()
correct_keyid = keybytes_digest[-4:]
if correct_keyid != key_id:
raise Exception("IMA signature has invalid key ID: %s != %s" % (correct_keyid, key_id))
# Check the signature itself
(sig_size,) = struct.unpack('>H', ima_sig[7:9])
sig = ima_sig[9:]
if len(sig) != sig_size:
raise Exception("IMA signature size invalid: %d != %d" % (len(sig), sig_size))
with open(file_path, 'rb') as f:
hasher.update(f.read())
file_digest = hasher.digest()
pubkey.verify(
bytes(sig),
bytes(file_digest),
crypto_ec.ECDSA(crypto_algo),
)
if __name__ == '__main__':
unittest.main()
| 2.25
| 2
|
notebooks/converted_notebooks/Frederick_ipts.py
|
mabrahamdevops/python_notebooks
| 0
|
12775356
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] run_control={"frozen": false, "read_only": false}
# [](https://neutronimaging.pages.ornl.gov/tutorial/notebooks/frederick_ipts/)
# + [markdown] run_control={"frozen": false, "read_only": false}
# <img src='__docs/__all/notebook_rules.png' />
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Select IPTS
# + run_control={"frozen": false, "read_only": false}
from __code.frederick_ipts import FrederickIpts
from __code.ui_builder import UiBuilder
o_builder = UiBuilder(ui_name = 'ui_file_metadata_display.ui')
from __code.file_metadata_display import Interface
from __code import system
system.System.select_working_dir()
from __code.__all import custom_style
custom_style.style()
# -
# %gui qt
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Select Files
# + run_control={"frozen": false, "read_only": false}
o_fred = FrederickIpts(working_dir = system.System.get_working_dir())
o_fred.select_files()
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Display images
# + run_control={"frozen": false, "read_only": false}
o_gui = Interface(exp_dict=o_fred.exp_dict)
o_gui.show()
# + run_control={"frozen": false, "read_only": false}
| 1.679688
| 2
|
app/models.py
|
khiranouchi/Okemowd2
| 0
|
12775357
|
from django.db import models
class Genre(models.Model):
# id primary_key [made automatically]
name = models.CharField(max_length=16)
def __str__(self):
return self.name
class KeyLevel(models.Model):
# id primary_key [made automatically]
rank = models.IntegerField(unique=True)
name = models.CharField(max_length=16)
def __str__(self):
return self.name
class Song(models.Model):
# id primary_key [made automatically]
name = models.CharField(max_length=64)
name_ruby = models.CharField(max_length=128)
artist = models.CharField(max_length=32, null=True)
genre = models.ForeignKey(Genre, models.SET_NULL, db_index=True, null=True)
key_level = models.ForeignKey(KeyLevel, models.SET_NULL, db_index=True, null=True)
key_min = models.IntegerField(null=True) # 0 corresponds to tone lowlowA
key_freq_min = models.IntegerField(null=True) # "
key_freq_max = models.IntegerField(null=True) # "
key_max = models.IntegerField(null=True) # "
rank = models.IntegerField(null=True) # 1,2,3,others
link = models.TextField(null=True)
note = models.TextField(null=True)
check = models.BooleanField(default=False)
def values(self):
return [self.name, self.name_ruby, self.artist, self.genre_id, self.key_level_id,
self.key_min, self.key_freq_min, self.key_freq_max, self.key_max, self.rank, self.link,
self.note, self.check]
@staticmethod
def number_of_fields():
return 13 # 13 is the number of fields!!!
| 2.453125
| 2
|
uwhpsc/codes/python/debugdemo1.py
|
philipwangdk/HPC
| 0
|
12775358
|
"""
$UWHPSC/codes/python/debugdemo1.py
Debugging demo using pdb. Original code.
"""
x = 3
y = -22.
def f(z):
x = z+10
return x
y = f(x)
print "x = ",x
print "y = ",y
| 2.453125
| 2
|
arduCryoFridgeCLI.py
|
emilyychenn/arduinoCryoFridge
| 0
|
12775359
|
"""
Usage:
arduCryoFridgeCLI.py [--port=<USBportname>] configure [--ontime=<ontime>] [--offtime=<offtime>]
arduCryoFridgeCLI.py [--port=<USBportname>] switch [--on | --off] [--now | --delay=<delay>]
arduCryoFridgeCLI.py [--port=<USBportname>] (-s | --status)
arduCryoFridgeCLI.py [--port=<USBportname>] -q
arduCryoFridgeCLI.py -h | --help
Options:
--port=<USBportname> Specify USB port: done before running other commands.
--ontime=<ontime> Duration of ontime minutes.
--offtime=<offtime> Duration of offtime minutes.
--delay=<delay> Start on/off cycle in delay [default: 0] minutes.
-s --status Read out and report PT410 status.
-q Query program version + version run on the arduino.
-h --help Show this screen.
"""
from docopt import docopt
import serial
import serial.tools.list_ports
baud = 9600
programVersion = 1.0
# will try to autodetect port first, if no port detected, will prompt user to input a port
# doesn't work with third-party Arduino knockoffs (in which case, user specifies port)
def autodetect():
ports = serial.tools.list_ports.comports()
connected = False
print("Available ports: ")
for port, desc, hwid in sorted(ports):
print("{}: {} [{}]".format(port, desc, hwid))
if desc == "USB2.0-Serial":
try:
ser = serial.Serial(port, baud)
print("Connected to: " + port + '\n')
connected = True
return ser
except Exception as e:
print("\nCouldn't open port: " + str(e))
ser = None
if not(connected):
print("No likely serial port found. Use command '--port=<USBportname>' to manually specify a port.")
if __name__ == "__main__":
args = docopt(__doc__) # docopt saves arguments and options as key:value pairs in a dictionary
print(args)
if args['--port'] == None:
ser = autodetect()
else:
ser = serial.Serial(args['--port'], baud)
if args['configure'] == True:
if args['--ontime'] != None:
ontime = args['--ontime']
print("Ontime = " + ontime)
ser.readline() # waits until arduino prints "UNO is ready!"
ser.write(('A'+ str(ontime)).encode())
elif args['--offtime'] != None:
offtime = args['--offtime']
print("Offtime = " + offtime)
ser.readline()
ser.write(('B'+ str(offtime)).encode())
elif args['switch'] == True:
if args['--on'] == True:
if args['--now'] == True:
print("switch compressor on NOW")
ser.readline()
ser.write('G'.encode())
else:
delay = args['--delay']
print("delay turning on by " + str(delay) + " minutes")
ser.readline()
ser.write(('Z'+str(delay)).encode())
elif args['--off'] == True:
if args['--now'] == True:
print("switch compressor off NOW")
ser.readline()
ser.write('X'.encode())
else:
delay = args['--delay']
print("delay turning off by " + str(delay) + " minutes")
ser.readline()
ser.write(('Z'+str(delay)).encode())
print(ser.readline())
elif args['--status'] != False:
print("PT410 status: ")
ser.readline()
ser.write('S'.encode())
LEDStatus = ser.readline()
print(LEDStatus)
button1Status = ser.readline()
print(button1Status)
button2Status = ser.readline()
print(button2Status)
button3Status = ser.readline()
print(button3Status)
elif args['-q'] != False:
print("Python program version: " + str(programVersion))
ser.readline()
ser.write('Q'.encode())
arduinoProgramVersion = ser.readline()
print(str(arduinoProgramVersion))
else:
print('nothing left to do')
| 3.03125
| 3
|
2019/day11.py
|
coingraham/adventofcode
| 5
|
12775360
|
import aoc_common as ac
import numpy as np
from aocd.models import Puzzle
puzzle = Puzzle(year=2019, day=11)
ram = [int(x) for x in puzzle.input_data.split(",")]
pointer = 0
relative_base = 0
painting = {(0, 0): 0}
coord = (0, 0)
# color = 0  # Part One
color = 1  # Part Two
direction = "N"
our_computer = ac.full_intcode_computer(ram, pointer, relative_base, locals())
while True:
try:
new_color = next(our_computer)
d_color = next(our_computer)
painting[coord] = new_color
coord, direction = ac.robot_turner(coord, direction, d_color)
if coord in painting:
color = painting[coord]
else:
color = 0
except StopIteration:
break
# print(len(painting.keys()))
x = []
y = []
z = []
for k, v in painting.items():
x.append(int(k[0]))
y.append(int(k[1]))
z.append(int(v))
min_x = abs(min(x))
min_y = abs(min(y))
x = [i + min_x for i in x]
y = [j + min_y for j in y]
message = np.zeros([6, 43])
message[y, x] = z
# message = np.where(message == 0, " ","■")
ac.screen(painting)
# print(np.array2string(np.flipud(message), max_line_width=np.inf))
| 2.875
| 3
|
app.py
|
jschmidtnj/FizzBuzz
| 0
|
12775361
|
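# Classic FizzBuzz: for 1..upto (100), print "Fizz" for multiples of fizz (3), "Buzz" for multiples of buzz (5), "FizzBuzz" for both, otherwise the number.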
fizz = 3
buzz = 5
upto = 100
for n in range(1,(upto + 1)):
if n % fizz == 0:
if n % buzz == 0:
print("FizzBuzz")
else:
print("Fizz")
elif n % buzz == 0:
print("Buzz")
else:
print(n)
| 3.875
| 4
|
library/pip_dep_generator.py
|
DanielOjalvo/execview
| 0
|
12775362
|
#!/usr/bin/env python3
'''
test script for collecting module dependencies used
'''
import re, os, isoparser
import itertools
import tempfile
import subprocess
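# Run a shell command, capturing stdout and stderr in temporary files; returns (return code, decoded stdout, raw stderr).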
def exec_cmd(cmd_str):
stdout_tmp = tempfile.TemporaryFile()
stderr_tmp = tempfile.TemporaryFile()
stdout_str = ""
stderr_str = ""
p = subprocess.Popen(cmd_str, stdout = stdout_tmp, stderr = stderr_tmp, shell=True)
p.wait()
stdout_tmp.seek(0)
stderr_tmp.seek(0)
retcode = p.returncode
stdout_str = stdout_tmp.read()
stderr_str = stderr_tmp.read()
return (retcode, stdout_str.decode("utf-8"), stderr_str)
if __name__ == "__main__":
_, result, _ = exec_cmd("grep 'import' *")
import_patterns = [":import (.*)\n", ":from (.*) import .*\n"]
pip_dependencies = []
for pattern in import_patterns:
found = re.findall(pattern, result)
if found:
# strip whitespace, and remove regex cases where string contains .* (not really importing)
found = [element.strip() for element in found if '.*' not in element and not element.startswith('.')]
pip_dependencies.extend(found)
# split import statements that pull in several modules separated by commas (e.g. "import os, sys")
pip_dependencies = list(itertools.chain.from_iterable([module.split(',') if ',' in module else [module] for module in pip_dependencies]))
# remove duplicate modules
pip_dependencies = list(dict.fromkeys(pip_dependencies))
print(pip_dependencies)
| 2.5625
| 3
|
main/coin-change/coin-change.py
|
EliahKagan/old-practice-snapshot
| 0
|
12775363
|
#!/usr/bin/env python3
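# Count the ways to make the given total from unlimited coins of each denomination, using top-down recursion with memoization.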
def count_combos(coins, total):
len_coins = len(coins)
memo = {}
def count(tot, i):
if tot == 0:
return 1
if i == len_coins:
return 0
subproblem = (tot, i)
try:
return memo[subproblem]
except KeyError:
j = i + 1
subtotals = range(0, total + 1, coins[i])
combos = sum(count(tot - subtot, j) for subtot in subtotals)
memo[subproblem] = combos
return combos
return count(total, 0)
def read_record():
return map(int, input().split())
total, _ = read_record() # don't need m
coins = list(read_record())
print(count_combos(coins, total))
| 3.8125
| 4
|
pca-server/src/pca/pca-aws-file-drop-trigger.py
|
Harsh15021992/amazon-transcribe-post-call-analytics
| 8
|
12775364
|
"""
This python function is triggered when a new audio file is dropped into the S3 bucket that has
been configured for audio ingestion. It will ensure that no Transcribe job already exists for this
filename, and will then trigger the main Step Functions workflow to process this file.
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import json
import urllib.parse
import boto3
import pcaconfiguration as cf
def lambda_handler(event, context):
# Load our configuration
cf.loadConfiguration()
# Get the object from the event and validate it exists
s3 = boto3.client("s3")
bucket = event['Records'][0]['s3']['bucket']['name']
key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
try:
response = s3.get_object(Bucket=bucket, Key=key)
except Exception as e:
print(e)
raise Exception(
'Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(
key, bucket))
# Check a Transcribe job isn't in progress for this file-name
jobName = cf.generateJobName(key)
try:
# If it exists (i.e. doesn't raise an exception) then we may want to delete it
transcribe = boto3.client('transcribe')
currentJobStatus = transcribe.get_transcription_job(TranscriptionJobName=jobName)["TranscriptionJob"]["TranscriptionJobStatus"]
except Exception as e:
# Job didn't already exist - no problem here
currentJobStatus = ""
# If there's a job already running then the input file may have been copied - quit
if (currentJobStatus == "IN_PROGRESS") or (currentJobStatus == "QUEUED"):
# Throw an exception if this is the case
raise Exception(
'A Transcription job named \'{}\' is already in progress - cannot continue.'.format(jobName))
# Now find our Step Function
ourStepFunction = cf.appConfig[cf.COMP_SFN_NAME]
sfnClient = boto3.client('stepfunctions')
sfnMachinesResult = sfnClient.list_state_machines(maxResults=1000)
sfnArnList = list(filter(lambda x: x["stateMachineArn"].endswith(ourStepFunction), sfnMachinesResult["stateMachines"]))
if sfnArnList == []:
# Doesn't exist
raise Exception(
'Cannot find configured Step Function \'{}\' in the AWS account in this region - cannot begin workflow.'.format(ourStepFunction))
sfnArn = sfnArnList[0]['stateMachineArn']
# Decide what language this should be transcribed in - leave it blank to trigger auto-detection
if cf.isAutoLanguageDetectionSet():
transcribeLanguage = ""
else:
transcribeLanguage = cf.appConfig[cf.CONF_TRANSCRIBE_LANG][0]
# Trigger a new Step Function execution
parameters = '{\n \"bucket\": \"' + bucket + '\",\n' +\
' \"key\": \"' + key + '\",\n' +\
' \"langCode\": \"' + transcribeLanguage + '\"\n' +\
'}'
sfnClient.start_execution(stateMachineArn = sfnArn, input = parameters)
# Everything was successful
return {
'statusCode': 200,
'body': json.dumps('Post-call analytics workflow for file ' + key + ' successfully started.')
}
# Main entrypoint
if __name__ == "__main__":
event = {
"Records": [
{
"s3": {
"s3SchemaVersion": "1.0",
"configurationId": "eca58aa9-dd2b-4405-94d5-d5fba7fd0a16",
"bucket": {
"name": "ajk-call-analytics-demo",
"ownerIdentity": {
"principalId": "A39I0T5T4Z0PZJ"
},
"arn": "arn:aws:s3:::ajk-call-analytics-demo"
},
"object": {
"key": "audio/example-call.wav",
"size": 963023,
"eTag": "8588ee73ae57d72c072f4bc401627724",
"sequencer": "005E99B1F567D61004"
}
}
}
]
}
lambda_handler(event, "")
| 2.90625
| 3
|
front_end/load/process_result/BenchmarkResult.py
|
arnaudsjs/YCSB-1
| 0
|
12775365
|
#!/bin/python
from load.process_result.Measurement import Measurement
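# Parses a YCSB benchmark output file, collecting overall throughput and per-operation latency measurements (insert, update, read, scan, delete).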
class BenchmarkResult:
def __init__(self, pathToFile):
self.throughput = -1;
self.insertResults = Measurement()
self.updateResults = Measurement();
self.readResults = Measurement();
self.deleteResults = Measurement();
self.scanResults = Measurement();
self.processFile(pathToFile);
def processFile(self, pathToFile):
linesOfFile = self.getLinesOfFile(pathToFile);
for line in linesOfFile:
self.processLine(line);
def processLine(self, line):
segmentId = self.getSegmentId(line);
if segmentId is None or segmentId == 'CLEAN':
return;
if segmentId == 'OVERALL':
self.processOverallSection(line);
latency = self.getLatencyMeasurement(line);
if latency is None:
return;
if segmentId == 'INSERT':
self.insertResults.add(latency);
if segmentId == 'UPDATE':
self.updateResults.add(latency);
if segmentId == 'READ':
self.readResults.add(latency);
if segmentId == 'SCAN':
self.scanResults.add(latency);
if segmentId == 'DELETE':
self.deleteResults.add(latency);
def getLatencyMeasurement(self, line):
splittedLine = line.split(',');
if len(splittedLine) != 3:
raise Exception('Illegal measurement: ' + line);
timepoint = splittedLine[1].strip(' \n');
latency = splittedLine[2].strip(' \n');
if not self.isInteger(timepoint) or int(timepoint) <= 300000:
return None;
return float(latency);
def isInteger(self, toCheck):
try:
int(toCheck);
return True;
except ValueError:
return False;
def processOverallSection(self, line):
splittedLine = line.split(',');
if len(splittedLine) != 3:
raise Exception('Illegal throughput section: ' + line);
if line.find('Throughput(ops/sec)') == -1:
return;
if self.throughput != -1:
raise Exception('throughput already set');
resultPart = splittedLine[2].strip(' ');
self.throughput = float(resultPart);
def getSegmentId(self, line):
if line[0] != '[':
return None;
endSegmentId = line.find(']');
if endSegmentId == -1:
raise Exception('Illegal segment constructionId: ' + line);
return line[1:endSegmentId];
def getLinesOfFile(self, pathToFile):
f = open(pathToFile);
lines = f.readlines();
f.close();
return lines;
def hasThroughput(self):
return (self.throughput != -1);
def getThroughput(self):
if not self.hasThroughput():
raise Exception('BenchmarkResult has no throughput');
return self.throughput;
def hasAverageInsertLatency(self):
return self.insertResults.hasMeasurement();
def getAverageInsertLatency(self):
if not self.hasAverageInsertLatency():
raise Exception('BenchmarkResult has no average insert latency');
return self.insertResults.getAverageLatency();
def hasAverageUpdateLatency(self):
return self.updateResults.hasMeasurement();
def getAverageUpdateLatency(self):
if not self.hasAverageUpdateLatency():
raise Exception('BenchmarkResult has no average update latency');
return self.updateResults.getAverageLatency();
def hasAverageReadLatency(self):
return self.readResults.hasMeasurement();
def getAverageReadLatency(self):
if not self.hasAverageReadLatency():
raise Exception('BenchmarkResult has no average read latency');
return self.readResults.getAverageLatency();
def hasAverageScanLatency(self):
return self.scanResults.hasMeasurement();
def getAverageScanLatency(self):
if not self.hasAverageScanLatency():
raise Exception('BenchmarkResult has no average scan latency');
return self.scanResults.getAverageLatency();
def hasAverageDeleteLatency(self):
return self.deleteResults.hasMeasurement();
def getAverageDeleteLatency(self):
if not self.hasAverageDeleteLatency():
raise Exception('BenchmarkResult has no average delete latency');
return self.deleteResults.getAverageLatency();
| 2.53125
| 3
|
python/jimmy_plot/clk_tuner.py
|
JimmyZhang12/predict-T
| 0
|
12775366
|
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import plot_voltage
import pdn_params as pdn
from cython.sim_pdn import sim_throttling_wrapper
TEST_LIST_spec=[
"429.mcf",
"433.milc",
"435.gromacs",
"436.cactusADM",
"437.leslie3d",
"444.namd",
"445.gobmk",
"453.povray",
"454.calculix",
"456.hmmer",
"458.sjeng",
"459.GemsFDTD",
"462.libquantum",
"464.h264ref",
# "470.lbm",
"471.omnetpp",
"473.astar",
"481.wrf", \
"482.sphinx3", \
]
def sim_throttling(power, pwr_throttle):
print("Sim throttling...")
THROTTLE_DUR = pdn.THROTTLE_DUR
LEADTIME= pdn.LEADTIME
VDC = pdn.VDC
THRES = pdn.THRES
L = pdn.L
C = pdn.C
R = pdn.R
CLK = pdn.CLK
CLK_THROTTLE = pdn.CLK_THROTTLE
voltage, ve_cycles, power = sim_throttling_wrapper(power, pwr_throttle, THRES, L, C, R, VDC, CLK, CLK_THROTTLE, LEADTIME, THROTTLE_DUR)
plot_voltage.print_power(voltage, power, ve_cycles)
return voltage, power, ve_cycles
def run(print_stats=False):
VDC = pdn.VDC
THRES = pdn.THRES
L = pdn.L
C = pdn.C
R = pdn.R
CLK = pdn.CLK
THR_CLK = pdn.CLK_THROTTLE
HOME = os.environ['HOME']
#get power scaling constants
dirs = ["/home/jimmy/output_10_9/gem5_out/482.sphinx3_5_1000000_DESKTOP_HarvardPowerPredictor_4000000000",
"/home/jimmy/output_10_9/gem5_out/482.sphinx3_5_1000000_DESKTOP_HarvardPowerPredictor_2000000000"]
power = [plot_voltage.get_data(d, 'power.bin', np.single) for d in dirs]
(static_scale, dyn_scale) = plot_voltage.get_pwr_scaling(power[0],power[1],4E9,2E9)
d = "/home/jimmy/output_10_14/gem5_out/482.sphinx3_20_1000000_DESKTOP_HarvardPowerPredictor_fastforwardtest"
orig_data = plot_voltage.get_voltage(d, np.single, THRES, L, C, R, VDC, CLK, 0, static_scale, dyn_scale)
np.set_printoptions(threshold=np.inf)
thr_data = plot_voltage.get_voltage(d, np.single, THRES, L, C, R, VDC, CLK, THR_CLK-CLK, static_scale, dyn_scale)
mit_data = sim_throttling(orig_data[1], thr_data[1])
power_data = [orig_data[1],thr_data[1], mit_data[1]]
volt_data = [orig_data[0],thr_data[0], mit_data[0]]
#transform 2ghz to 4ghz
volt_test = np.copy(thr_data[0][0:100000])
volt_test = volt_test - 0.005
plot_voltage.plot([orig_data[0],thr_data[0], volt_test],
orig_data[2],
'10_14_mit_test',
labels=["fullclk","throttle", "test"])
if __name__ == "__main__":
run(True)
| 2.21875
| 2
|
before/0124/11021.py
|
Kwak-JunYoung/154Algoritm-5weeks
| 3
|
12775367
|
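# Read n pairs of integers, then print each sum in "Case #i: sum" format.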
ans = []
n = int(input())
for i in range(n):
a, b = list(map(int, input().split()))
ans.append(a + b)
for i in range(len(ans)):
print("Case #{}: {}".format(i+1, ans[i]))
| 2.921875
| 3
|
era/apps/user/decorators.py
|
doctorzeb8/django-era
| 1
|
12775368
|
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect
from era.utils.functools import unidec, omit
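# Restrict a view to users whose role appears in the 'allow' keyword argument; otherwise raise PermissionDenied.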
@unidec
def role_required(method, req, *args, **kw):
if req.user.role in kw.get('allow', []):
return method(req, *args, **omit(kw, 'allow'))
raise PermissionDenied()
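# Only let anonymous users through; authenticated users are redirected (and optionally logged out first when 'logout' is passed).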
@unidec
def anonymous_required(method, req, *args, **kw):
if req.user.is_authenticated():
if kw.get('logout', False):
auth.logout(req)
return redirect(req.get_full_path())
else:
return redirect('/')
return method(req, *args, **omit(kw, 'logout'))
| 1.867188
| 2
|
pageobjects/optionhandler.py
|
charleshamel73/robot-pageobjects
| 1
|
12775369
|
from _metaflyweight import MetaFlyWeight
from context import Context
from exceptions import VarFileImportErrorError
from robot.libraries.BuiltIn import BuiltIn
import os
import re
import imp
class OptionHandler(object):
"""
This class is a Flyweight for the options
Example:
OptionHandler(LoginPage)
To get options use:
OptionHandler(LoginPage).get("ice cream",None)
"""
# from automationpages import Page
__metaclass__ = MetaFlyWeight
_page_instance = None
_opts = {}
def __init__(self, page_inst):
if not self._initialized:
self._opts = {}
self._page_instance = page_inst
self._populate_opts()
def _populate_opts(self):
"""
Pulls environment from PO_ environment file
"""
self._opts.update(self._get_opts_from_var_file())
self._opts.update(self._get_opts_from_env_vars())
if Context.in_robot():
self._opts.update(self._get_opts_from_robot())
self._update_opts_from_inherited_classes()
def _get_opts_from_robot(self):
"""
Pulls environment from PO_ environment file
"""
ret = {}
robot_vars = BuiltIn().get_variables()
for var, val in robot_vars.iteritems():
ret[self._normalize(var)] = val
return ret
def _get_opts_from_var_file(self):
"""
Pulls environment from PO_ environment file
"""
ret = {}
var_file_path = os.environ.get("PO_VAR_FILE", None)
if var_file_path:
abs_var_file_path = os.path.abspath(var_file_path)
try:
vars_mod = imp.load_source("vars", abs_var_file_path)
except (ImportError, IOError), e:
raise VarFileImportErrorError(
"Couldn't import variable file: %s. Ensure it exists and is importable." % var_file_path)
var_file_attrs = vars_mod.__dict__
for vars_mod_attr_name in var_file_attrs:
if not vars_mod_attr_name.startswith("_"):
vars_file_var_value = var_file_attrs[vars_mod_attr_name]
ret[self._normalize(vars_mod_attr_name)] = vars_file_var_value
return ret
def _get_opts_from_env_vars(self):
"""
Pulls environment from PO_ environment vars in the local machine
"""
ret = {}
for env_varname in os.environ:
if env_varname.startswith("PO_") and env_varname.isupper():
varname = env_varname[3:]
ret[self._normalize(varname)] = os.environ.get(env_varname)
return ret
def _update_opts_from_inherited_classes(self):
"""
Using the given Page object class, we create a cumulative options value based on its parent pages
"""
list_of_classes = self._page_instance._get_parent_pages(top_to_bottom=True)
self.temp_options = {}
for clazz in list_of_classes:
if hasattr(clazz, "options"):
self.temp_options.update(clazz.options)
self._opts.update(self.temp_options)
def _normalize(self, opts):
"""
Convert an option keyname to lower-cased robot format, or convert
all the keys in a dictionary to robot format.
"""
if isinstance(opts, str) or isinstance(opts,unicode):
name = opts.lower()
rmatch = re.search("\$\{(.+)\}", name)
return rmatch.group(1) if rmatch else name
else:
# We're dealing with a dict
return {self._normalize(key): val for (key, val) in opts.iteritems()}
def get(self, name, default=None):
"""
Gets an option value given an option name
:param name: The name of the option to get
:type name: str
:param default: the value to return if none is found
:type default: any
:return: the value of the attribute or default if not found
"""
ret = default
try:
if Context.in_robot():
ret = self._opts[self._normalize(name)]
else:
ret = self._opts[self._normalize(name.replace(" ", "_"))]
except KeyError:
pass
return ret
| 2.515625
| 3
|
tests/unittests/commands/test_cmd_cs_beacon.py
|
f5devcentral/f5-cli
| 13
|
12775370
|
""" Test Beacon command """
import json
from f5sdk.cs import ManagementClient
from f5sdk.cs.beacon.insights import InsightsClient
from f5sdk.cs.beacon.declare import DeclareClient
from f5sdk.cs.beacon.token import TokenClient
from f5cli.config import AuthConfigurationClient
from f5cli.commands.cmd_cs import cli
from ...global_test_imports import pytest, CliRunner
# Test Constants
MOCK_CONFIG_CLIENT_READ_AUTH_RETURN_VALUE = {
'user': 'test_user',
'password': '<PASSWORD>'
}
class TestCommandBeacon(object):
""" Test Class: command beacon """
@classmethod
def setup_class(cls):
""" Setup func """
cls.runner = CliRunner()
@classmethod
def teardown_class(cls):
""" Teardown func """
@staticmethod
@pytest.fixture
def config_client_read_auth_fixture(mocker):
""" PyTest fixture mocking AuthConfigurationClient's read_auth method """
mock_config_client_read_auth = mocker.patch.object(
AuthConfigurationClient, "read_auth")
mock_config_client_read_auth.return_value = MOCK_CONFIG_CLIENT_READ_AUTH_RETURN_VALUE
return mock_config_client_read_auth
@staticmethod
@pytest.fixture
def mgmt_client_fixture(mocker):
""" PyTest fixture returning mocked Cloud Services Management Client """
mock_management_client = mocker.patch.object(ManagementClient, '__init__')
mock_management_client.return_value = None
return mock_management_client
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_insights_list(self, mocker):
""" List all configured beacon insights
Given
- The Insights Client returns a successful response
When
- User executes a 'list'
Then
- The 'list' command returns a successful response
"""
mock_response = {
'foo': 'bar'
}
mocker.patch.object(
InsightsClient, "list", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'insights', 'list'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_insights_create(self, mocker):
""" Creating a beacon insight
Given
- The Insights Client returns a successful response
When
- User executes a 'create' with a declaration
Then
- The 'create' command returns a successful response
and creates an insight
"""
mock_response = {
'title': 'foo',
'description': 'blah'
}
mocker.patch.object(
InsightsClient, "create", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'insights', 'create',
'--declaration', './test/fake_declaration.json'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_insights_update(self, mocker):
""" Updating a beacon insight
Given
- The Insights Client returns a successful response
When
- User executes a 'update' with a declaration with the same name
Then
- The 'update' command returns a successful response
and updates the specified insight
"""
mock_response = {
'title': 'foo',
'description': 'blah2'
}
mocker.patch.object(
InsightsClient, "create", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'insights', 'update',
'--declaration', './test/fake_declaration.json'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_insights_delete(self, mocker):
""" Deleting a beacon insight
Given
- The Insights Client returns a successful response
When
- User executes a 'delete' with the name of the insight to be deleted
Then
- The 'delete' command returns a successful response
and delete the specified insight
"""
mocker.patch.object(
InsightsClient, "delete", return_value={})
result = self.runner.invoke(cli, [
'beacon', 'insights', 'delete', '--name', 'foo', '--auto-approve'])
assert result.output == json.dumps(
{'message': 'Insight deleted successfully'},
indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_insights_show(self, mocker):
""" Show a beacon insight
Given
- The Insights Client returns a successful response
When
- User executes a 'show' with a name of the insight
Then
- The 'show' command returns requested insight
"""
mock_response = {
'title': 'foo',
'description': 'blah'
}
mocker.patch.object(
InsightsClient, "show", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'insights', 'show', '--name', 'foo'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_declare_show(self, mocker):
""" Show a beacon declaration
Given
- The Declare Client returns a mocked response
When
- User executes a 'show'
Then
- The 'show' command returns the mocked response
"""
mock_response = {'foo': 'bar'}
mocker.patch.object(
DeclareClient, "create", return_value=mock_response
)
result = self.runner.invoke(cli, ['beacon', 'declare', 'show'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_declare_create(self, mocker):
""" Create/update a beacon declaration
Given
- The Declare Client returns a mocked response
When
- User executes a 'create'
Then
- The 'create' command returns the mocked response
"""
mock_response = {'foo': 'bar'}
mocker.patch.object(
DeclareClient, "create", return_value=mock_response
)
result = self.runner.invoke(
cli, ['beacon', 'declare', 'create', '--declaration', './foo.json']
)
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_token_create(self, mocker):
""" Creating a beacon token
Given
- The Token Client returns a successful response
When
- User executes a 'create' with a declaration
Then
- The 'create' command returns a successful response
and creates an token
"""
mock_response = {
'title': 'foo',
'description': 'blah'
}
mocker.patch.object(
TokenClient, "create", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'token', 'create',
'--declaration', './test/fake_declaration.json'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_token_delete(self, mocker):
""" Deleting a beacon token
Given
- The Token Client returns a successful response
When
- User executes a 'delete' with the name of the token to be deleted
Then
- The 'delete' command returns a successful response
and delete the specified token
"""
mocker.patch.object(
TokenClient, "delete", return_value={})
result = self.runner.invoke(cli, [
'beacon', 'token', 'delete', '--name', 'foo', '--auto-approve'])
assert result.output == json.dumps(
{'message': 'Token deleted successfully'},
indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_token_show(self, mocker):
""" Show a beacon token
Given
- The Token Client returns a successful response
When
- User executes a 'show' with a name of the token
Then
- The 'show' command returns requested token
"""
mock_response = {
'title': 'foo',
'description': 'blah'
}
mocker.patch.object(
TokenClient, "show", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'token', 'show', '--name', 'foo'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
@pytest.mark.usefixtures("config_client_read_auth_fixture")
@pytest.mark.usefixtures("mgmt_client_fixture")
def test_cmd_beacon_token_list(self, mocker):
""" List all configured beacon token
Given
- The Token Client returns a successful response
When
- User executes a 'list'
Then
- The 'list' command returns a successful response
"""
mock_response = {
'foo': 'bar'
}
mocker.patch.object(
TokenClient, "list", return_value=mock_response)
result = self.runner.invoke(cli, ['beacon', 'token', 'list'])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
| 2.125
| 2
|
phfnbutils/store.py
|
phfaist/phfnbutils
| 0
|
12775371
|
import os
import os.path
import sys
import logging
logger = logging.getLogger(__name__)
import numpy as np
import inspect
import datetime
import hashlib
import functools
import h5py
import filelock
import multiprocessing
import itertools
import random
from tqdm.auto import tqdm
#
# utilities for my hdf5 datasets
#
def _normalize_attribute_value_string(v):
# NOTE: Only ASCII strings allowed in string values.
return v.encode('ascii')
class _Hdf5GroupProxyObject:
def __init__(self, grp):
self.grp = grp
def get(self, key, default, *, _default_action=None):
if key in self.grp:
obj = self.grp[key]
if isinstance(obj, h5py.Group):
return _Hdf5GroupProxyObject(self.grp[key])
if isinstance(obj, h5py.Dataset):
return obj[()]
raise ValueError("Can't interface object value {!r}".format(obj))
if key in self.grp.attrs:
return self._unpack_attr_val(self.grp.attrs[key])
if _default_action:
return _default_action()
return default
def keys(self):
return itertools.chain(self.grp.keys(), self.grp.attrs.keys())
def keys_children(self):
return self.grp.keys()
def keys_attrs(self):
return self.grp.attrs.keys()
def all_attrs(self):
return dict([(k, self._unpack_attr_val(v)) for (k,v) in self.grp.attrs.items()])
def __getitem__(self, key):
def keyerror():
raise KeyError("No key {} in hdf5 group {!r} or its attributes"
.format(key, self.grp))
return self.get(key, None, _default_action=keyerror)
def _unpack_attr_val(self, att_val):
return _unpack_attr_val(att_val) # call global method
def value_equals(self, key, test_value):
val = self.get(key, None)
if val is None:
return (test_value is None)
if isinstance(val, np.ndarray) or isinstance(test_value, np.ndarray):
return np.all(val == test_value)
if _normalize_attribute_value_global(val, keep_float=False) \
!= _normalize_attribute_value_global(test_value, keep_float=False):
return False
return True
def __repr__(self):
return '_Hdf5GroupProxyObject('+repr(self.grp)+')'
def __str__(self):
ds = {k: str(v) for k, v in self.all_attrs().items() }
for k in self.keys_children():
v = self.grp[k]
ds[k] = '<{}>'.format(type(v).__name__)
return ('HDF5 group {' +
', '.join('{}: {}'.format(k,vstr) for k,vstr in ds.items()) + '}')
def hdf5_group(self):
"""
Return the group object in the HDF5 data structure, giving you direct access
to the :py:mod:`h5py` API in case you need it.
"""
return self.grp
def hdf5_key(self):
"""
Return the key in the HDF5 data structure where this group is located.
"""
return self.grp.name
def _unpack_attr_val(att_val):
if isinstance(att_val, bytes):
return att_val.decode('ascii')
#if isinstance(att_val, np.ndarray) and att_val.size == 1:
# # if it's a scalar, return the bare scalar and not an ndarray
# return att_val[()]
return att_val
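# Coerce a Python value into something storable as an HDF5 attribute: ASCII bytes for strings/dates/timedeltas, plain int/float for numbers.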
def _normalize_attribute_value_global(
value, *,
normalize_string=_normalize_attribute_value_string,
keep_float=True
):
t = type(value)
if value is None:
return ""
if isinstance(value, str):
return _normalize_attribute_value_string(value)
if isinstance(value, bytes):
# bytes and str are treated the same, as ASCII strings. For storage
# of raw binary data you'll want to store a dataset of some kind
# e.g. with numpy.
return value
if isinstance(value, int) or np.issubdtype(t, np.integer):
return int(value)
if isinstance(value, float) or np.issubdtype(t, np.floating):
if keep_float:
return value
else:
return _normalize_attribute_value_string( '{:0.8g}'.format(value) )
if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
return _normalize_attribute_value_string(value.isoformat())
if isinstance(value, (datetime.timedelta,)):
return _normalize_attribute_value_string("total_seconds={:.06g}"
.format(value.total_seconds()))
raise ValueError("Cannot encode {!r} for HDF5 attribute storage, unknown type"
.format(value))
class Hdf5StoreResultsAccessor:
"""
TODO: Doc.....
Note: must be used in a context manager!
"""
def __init__(self, filename, *, realm='results'):
super().__init__()
self.filename = filename
self.realm = realm
self._lock_file_name = os.path.join(
os.path.dirname(filename),
'.' + os.path.basename(filename) + '.py_lock'
)
self._filelock = None
self._store = None
self.store_value_filters = []
def __enter__(self):
self._filelock = filelock.FileLock(self._lock_file_name)
self._filelock.acquire()
try:
self._store = h5py.File(self.filename, 'a')
except Exception:
self._filelock.release()
raise
return self
def __exit__(self, type, value, traceback):
try:
if self._store is not None:
self._store.close()
self._store = None
finally:
if self._filelock is not None:
self._filelock.release()
self._filelock = None
def iterate_results(self, *, predicate=None, **kwargs):
if self.realm not in self._store:
# no results registered yet, nothing to yield
return
grp_results = self._store[self.realm]
predicate_attrs = None
if predicate is not None:
sig = inspect.signature(predicate)
predicate_attrs = list( sig.parameters.keys() )
def want_this(grpiface):
for k,v in kwargs.items():
if not grpiface.value_equals(k, v):
return False
if predicate is not None:
return predicate(**{k: _unpack_attr_val(grpiface.get(k, None)) for k in predicate_attrs})
return True
for key in grp_results.keys():
grp = grp_results[key]
grpiface = _Hdf5GroupProxyObject(grp)
if want_this(grpiface):
yield grpiface
def attribute_values(self, attribute_name, *, include_none=False):
if self.realm not in self._store:
return set()
grp_results = self._store[self.realm]
return set(
_unpack_attr_val(attval)
for attval in (
grp.attrs.get(attribute_name, None)
for grp in (grp_results[key] for key in grp_results.keys())
)
if include_none or attval is not None
)
# vals = set()
# for key in grp_results.keys():
# grp = grp_results[key]
# this_val = _unpack_attr_val(grp.attrs[attribute_name])
# if this_val not in vals:
# vals.append(this_val)
# return vals
def has_result(self, attributes):
key = self._store_key(attributes)
if key in self._store:
return True
return False
def get_result(self, attributes):
key = self._store_key(attributes)
if key in self._store:
grp = self._store[key]
return _Hdf5GroupProxyObject(grp)
return None
def store_result(self, attributes, value, *, forbid_overwrite=False, info=None):
key = self._store_key(attributes)
if key in self._store:
if forbid_overwrite:
raise ValueError("key {!r} already exists in {}, not overwriting"
.format(key, self.realm))
logger.debug("Overwriting key %r in %s", key, self.realm)
del self._store[key]
grp = self._store.create_group(key)
for k, v in attributes.items():
grp.attrs[k] = self._normalize_attribute_value(v)
for filt in self.store_value_filters:
value = filt(value)
has_error = self._store_result_dict_value(grp, value)
# only raise errors *after* having written everything to disk, in case
# that computation was very time-costly to obtain and our poor user
# would otherwise lose all their hard-obtained results
if has_error is not None:
raise has_error
if info:
for k, v in info.items():
grp.attrs[k] = self._normalize_attribute_value(v)
def _store_result_dict_value(self, grp, value):
has_error = None
for k, v in value.items():
if k.startswith('_'):
continue
try:
for filt in self.store_value_filters:
v = filt(v)
if v is None:
continue
if isinstance(v, dict):
newgrp = grp.create_group(k)
has_error = self._store_result_dict_value(newgrp, v)
elif isinstance(v, (np.ndarray, int, float)) \
or np.issubdtype(np.dtype(type(v)), np.integer) \
or np.issubdtype(np.dtype(type(v)), np.floating):
# Pass on any numpy array as is to h5py. Also store floats
# and ints directly
dset = grp.create_dataset(k, data=v)
elif isinstance(v, str):
# difficult to support strings in HDF5 -- see
# https://docs.h5py.org/en/stable/strings.html
#
# we use " np.void(utf8 bytes) " stored in an attribute as
# it looks like it's the safest. NOTE: You need to access
# the string via result['string_field'].tobytes().decode('utf-8')
grp.attrs[k] = np.void(v.encode('utf-8'))
logger.warning("Storing string as UTF-8 opaque bytes for field ‘%s’. Use "
"“result['%s'].tobytes().decode('utf-8')” when reading "
"out the string.", k, k)
elif isinstance(v, bytes):
# store raw bytes
grp.attrs[k] = np.void(v)
logger.warning("Storing bytes as opaque type for field ‘%s’. Use "
"“result['%s'].tobytes()” when reading "
"out the bytes again.", k, k)
elif isinstance(v, (datetime.date, datetime.time, datetime.datetime)):
grp.attrs[k] = v.isoformat().encode('ascii')
elif isinstance(v, (datetime.timedelta,)):
grp.attrs[k] = ("timedelta(seconds={:.06g})"
.format(v.total_seconds())).encode('ascii')
else:
has_error = ValueError("Can't save object {!r}, unknown type".format(v))
# continue saving other stuff
except Exception as e:
has_error = e
return has_error
def delete_result(self, attributes, *, dry_run=False):
key = self._store_key(attributes)
if key not in self._store:
raise ValueError("No such key for attributes {!r}".format(attributes))
if dry_run:
logger.info("Delete results %r, key=%r (dry run)", attributes, key)
else:
del self._store[key]
logger.info("Deleted results %r, key=%r", attributes, key)
def delete_results(self, *, dry_run=False, **kwargs):
keys_to_delete = []
for it in self.iterate_results(**kwargs):
keys_to_delete.append(it.hdf5_key())
for key in keys_to_delete:
if dry_run:
logger.info("Delete results %r (dry run)", key)
def _do_get_result(key):
# use "self" outside inner class
return _Hdf5GroupProxyObject(self._store[key])
class get_all_attrs_str:
def __str__(self):
return repr(_do_get_result(key).all_attrs())
logger.debug("with properties: %r -> %s", key, get_all_attrs_str())
else:
del self._store[key]
logger.info("Deleted results %r", key)
def update_keys(self, attribute_names, *, add_default_keys=None, dry_run=False):
"""
Checks that all result storage keys are up-to-date. If you introduce a new
kwarg attribute in the storage, you can set that attribute on all
existing results by providing a default value via `add_default_keys`.
- `attribute_names` is a list or tuple of attribute names to consider when
composing the storage key.
- `add_default_keys` is a dictionary of new attribute names and values
to set to records that don't have that attribute set
"""
rename_keys = [] # [ (oldkey,newkey), ... ]
set_attributes = {} # { newkey: {attribute1: value1 ...}, ... }
if add_default_keys is None:
add_default_keys = {}
grp_results = self._store[self.realm]
for key in grp_results.keys():
grp = grp_results[key]
these_attributes = {}
this_set_attributes = {}
for k in attribute_names:
att_value = None
if k in grp.attrs:
att_value = grp.attrs[k]
else:
if k in add_default_keys:
att_value = add_default_keys[k]
this_set_attributes[k] = att_value
else:
att_value = None
these_attributes[k] = att_value
# also take note of any default attributes to set that are not part
# of the results-identifying attributes
for k, v in ((akey, aval,)
for akey, aval in add_default_keys.items()
if akey not in attribute_names):
if k not in grp.attrs:
this_set_attributes[k] = v
newkey = self._store_key(these_attributes, hash_only=True)
if newkey != key:
logger.debug("Will rename {} -> {}".format(key, newkey))
rename_keys.append( (key, newkey) )
if this_set_attributes:
logger.debug("Will set attributes on newkey {}: {!r}"
.format(newkey, this_set_attributes))
set_attributes[newkey] = this_set_attributes
if not rename_keys and not set_attributes:
logger.debug("All keys and attributes are up-to-date.")
return
logger.debug("Finished inspecting keys, proceeding to updates ... ")
for oldkey, newkey in rename_keys:
if dry_run:
logger.info("\tgrp_results.move({!r}, {!r})".format(oldkey, newkey))
else:
grp_results.move(oldkey, newkey)
for newkey, attrib in set_attributes.items():
grp = grp_results[newkey] if not dry_run else None
for ak, av in attrib.items():
if dry_run:
logger.info("\tresults({!r}).attrs[{!r}] = {!r}".format(newkey, ak, av))
else:
grp.attrs[ak] = self._normalize_attribute_value(av)
logger.debug("Keys and attributes renamed successfully.")
def _normalize_attribute_value(self, value, **kwargs):
return _normalize_attribute_value_global(value, **kwargs)
def _store_key(self, attributes, *, hash_only=False):
m = hashlib.sha1()
stuff = "\n".join(
"{key}={value}\n".format(
key=k,
value=repr(self._normalize_attribute_value(attributes[k], keep_float=False))
)
for k in sorted(attributes.keys())
)
m.update( stuff.encode('ascii') )
the_hash = m.hexdigest()
if hash_only:
return the_hash
return '{}/{}'.format(self.realm, the_hash)
class NoResultException(Exception):
pass
class MultipleResults:
def __init__(self, results=None):
# results = [
# ({attrs1...}, {infoattrs1...}, <result1>),
# ({attrs2...}, {infoattrs2...}, <result2>),
# ...
# ]
# attrsN are merged with "global" attributes (items
# in attrsN take precedence)
if results is not None:
self.results = results #[ (attrs, info, result) for (attrs, info, result) in results ]
else:
self.results = []
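# A minimal usage sketch (attribute names and values are hypothetical); each
# result is recorded together with the attributes that identify it and any
# extra info, so that ComputeAndStore can store them all afterwards:
#
#     mr = MultipleResults()
#     mr.append_result({'method': 'exact'}, {}, {'value': 1.0})
#     mr.append_result({'method': 'approx'}, {'note': 'rough'}, {'value': 0.98})
#     # mr.results now holds two (attributes, info, result) tuples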
def append_result(self, attrs, info, result):
# if result is itself a MultipleResults instance, merge results.
if isinstance(result, MultipleResults):
for res_attrs, res_info_v, res in result.results:
try:
the_res_attrs = dict(attrs)
the_res_attrs.update(**res_attrs)
the_res_info = dict(info)
if res_info_v:
the_res_info.update(**res_info_v)
self.results.append( (the_res_attrs, the_res_info, res,) )
except Exception as e:
logger.warning(
f"Couldn't save result {attrs}, {res_attrs}; "
f"[info {info}, {res_info_v}] [result {res}]: {e}"
)
else:
self.results.append( (attrs, info, result) )
class _ShowValueShort:
def __init__(self, value, process_value=None):
self.value = value
self.process_value = process_value
def _processed_value(self):
if self.process_value is not None:
return self.process_value(self.value)
else:
return self.value
def __str__(self):
return _showvalue(self._processed_value())
def __repr__(self):
return repr(self._processed_value())
def _showvalue(value, short=False):
if isinstance(value, dict) and not short:
return '{' + ",".join(
"{}={}".format(k, _showvalue(v, short=True))
for k,v in value.items()
) + '}'
if short and isinstance(value, (np.ndarray,)):
# print short version of ndarray
with np.printoptions(precision=4,threshold=8,linewidth=9999,):
return str(value)
if isinstance(value, (float,)) or np.issubdtype(type(value), np.floating):
return "%.4g"%(value)
if value is None or isinstance(value, (int, bool, str, bytes)):
return str(value)
return '<{}>'.format(value.__class__.__name__)
def _call_with_accepted_kwargs(fun, kwargs):
sig = inspect.signature(fun)
fun_args = set( sig.parameters.keys() )
return fun(**{k: v
for k, v in kwargs.items()
if k in fun_args})
class FnComputer:
decode_inputargs = None
fixed_attributes = None
multiple_attribute_values = None
info = None
force_recompute = False
skip_store = False
def __call__(self):
raise RuntimeError("You need to reimplement the __call__() function")
class ComputeAndStore:
"""
Wraps a function `fn` that computes something potentially expensive with the
necessary code to perform the computation only if it doesn't already exist
in the data storage described by `store_filename` and `realm` and designed
to be managed by a :py:class:`HDF5StoreResultsAccessor`.
To determine whether the computation must be run, and to store the result
after the computation if it was carried out, the attributes that
characterize the associated result in the
:py:class:`HDF5StoreResultsAccessor` are determined as follows (for use with
:py:meth:`HDF5StoreResultsAccessor.has_result()` and
:py:meth:`HDF5StoreResultsAccessor.store_result()`). The function's named
arguments are considered as attributes, and they are merged with the given
attribute dictionary `fixed_attributes`.
The return value of the function (usually a dictionary) is then stored using
an :py:class:`HDF5StoreResultsAccessor` instance in the given filename and realm,
with the associated attributes. The function may also return an instance of
:py:class:`MultipleResults`—see more on this topic below.
The `info` argument can be a dictionary of values to store alongside with
the result, but that do not contribute to the identification of the result
instance (see :py:meth:`HDF5StoreResultsAccessor.store_result()`'s `info=` keyword
argument).
It is possible to "decode" some arguments of `fn()` if you would like the
attribute value in the store file to have a different format or
representation as the value actually passed on to `fn()`. Use the
`decode_inputargs()` for this purpose. It is given the tuple of input
arguments as-is (without any 'multiple-attributes' arguments—see below), and
is supposed to return the arguments to send to `fn()` instead (either as a
tuple or as a kwargs dictionary). If a tuple is returned, it must preserve
the order and number of the arguments.
The results storage file `store_filename` is accessed with a
:py:class:`HDF5StoreResultsAccessor` instance. The instance is only created
momentarily to check whether the results exist in the storage, and again if
necessary to store the result into the cache. In this way multiple
instances of this function can run in different processes without locking
out the results storage file.
Messages are logged to the given `logger` instance (see python's
:py:mod:`logging` mechanism), or to a default logger.
**Computing functions with multiple attribute values in one function
call:**
Sometimes we want to compute multiple result objects in one go, especially
if they share some common intermediate steps. In such cases, the function
should return a :py:class:`MultipleResults` instance that collects the
different result objects along with their different attributes values. The
attributes specified in each object in `MultipleResults` are merged with the
function's arguments and with the `fixed_attributes`.
When the function returns multiple result objects, then `ComputeAndStore`
needs additional information in order to determine if a computation needs to
run, and if so, which of those multiple results need to be computed. Use
the `multiple_attribute_values` field to this effect. This field should be
a list of dictionaries, or a dictionary containing a list in one of its
values, that specify any additional attribute(s) and the values associated
with the results that the function is expected to return. These values are
used to check the existence of the result objects in the store.
If the function accepts a keyword argument associated with a "multiple
result attributes", then a list of all the values that we need to compute
(i.e., that are not in the store) is provided to the function via that
keyword argument. If multiple such arguments are accepted, then all these
keyword arguments `kw1`, `kw2`, ... are given a list of the same length,
such that `{kw1=kw1[j], kw2=kw2[j], ...}` for `j=0,1,...` describe the
result objects that need to be computed.
"""
def __init__(self, fn, store_filename, *,
realm=None,
fixed_attributes=None,
info=None,
decode_inputargs=None,
multiple_attribute_values=None,
force_recompute=None,
skip_store=None,
logger=None):
self.fn = fn
if isinstance(fn, FnComputer):
self.fn_name = fn.__class__.__name__
fn_sig = inspect.signature(fn.__call__)
else:
self.fn_name = fn.__name__
fn_sig = inspect.signature(fn)
self.fn_arg_names = list( fn_sig.parameters.keys() )
self.store_filename = store_filename
self.realm = realm
self.fixed_attributes = {}
if getattr(fn, 'fixed_attributes', None) is not None:
self.fixed_attributes.update(fn.fixed_attributes)
if fixed_attributes is not None:
self.fixed_attributes.update(fixed_attributes)
self.info = {}
if getattr(fn, 'info', None) is not None:
self.info.update(fn.info)
if info is not None:
self.info.update(info)
self.decode_inputargs = None
if getattr(fn, 'decode_inputargs', None) is not None:
self.decode_inputargs = fn.decode_inputargs
if decode_inputargs is not None:
if self.decode_inputargs is not None:
raise ValueError("decode_inputargs=... specified both in FnComputer class "
"and as argument to ComputeAndStore()")
self.decode_inputargs = decode_inputargs
self.multiple_attribute_values = None
if getattr(fn, 'multiple_attribute_values', None) is not None:
self.multiple_attribute_values = fn.multiple_attribute_values
if multiple_attribute_values is not None:
if self.multiple_attribute_values is not None:
raise ValueError("multiple_attribute_values=... specified both in FnComputer "
"class and as argument to ComputeAndStore()")
self.multiple_attribute_values = multiple_attribute_values
if self.multiple_attribute_values is None:
self.multiple_attribute_values = []
# go through multiple_attribute_values, and replace dictionary-of-list
# by list-of-dictionaries, i.e. {'a': [1, 2]} -> [{'a': 1}, {'a': 2}]
self.multiple_attribute_values = \
flatten_attribute_value_lists(self.multiple_attribute_values)
self.multiple_attribute_all_keys = \
list(set( itertools.chain.from_iterable(
d.keys() for d in self.multiple_attribute_values
) ))
#print(f"{self.multiple_attribute_values=}")
self.fn_attribute_names = [k for k in self.fn_arg_names
if k not in self.multiple_attribute_all_keys ]
self.force_recompute = False
if hasattr(fn, 'force_recompute'):
self.force_recompute = fn.force_recompute
if force_recompute is not None:
self.force_recompute = self.force_recompute or force_recompute
self.skip_store = False
if hasattr(fn, 'skip_store'):
self.skip_store = fn.skip_store
if skip_store is not None:
self.skip_store = self.skip_store and skip_store
if logger is None:
self.logger = logging.getLogger(__name__ + '.ComputeAndStore')
else:
self.logger = logger
def _prepare_inputargs_as_kwargs(self, inputargs):
decoded_inputargs = inputargs
if self.decode_inputargs is not None:
decoded_inputargs = self.decode_inputargs(inputargs)
if isinstance(decoded_inputargs, dict):
kwargs = decoded_inputargs
else:
if len(decoded_inputargs) != len(self.fn_attribute_names):
raise ValueError("Can't match (decoded) input arguments %r to "
"function parameters %r"
% (decoded_inputargs, self.fn_attribute_names))
kwargs = dict(zip(self.fn_attribute_names, decoded_inputargs))
return kwargs
def __call__(self, inputargs):
return self.call_with_inputs( [inputargs] )
def call_with_inputs(self, list_of_inputargs):
logger = self.logger
import phfnbutils # TimeThis
if self.skip_store:
# offer friendly warning to make sure the user didn't forget to
# unset skip_store before a very long computation
logger.warning("`skip_store` is set to True, results will not be stored at the end!")
# we might have to decode the inputargs, in case they have attribute
# values encoded in some way (e.g. dependent attributes zipped together)
kwargs = None
list_of_kwargs = [ self._prepare_inputargs_as_kwargs(inputargs)
for inputargs in list_of_inputargs ]
list_of_kwargs_and_attributes = [
(kwargs, dict(self.fixed_attributes, **kwargs))
for kwargs in list_of_kwargs
]
#logger.debug("requested %s(%r)", self.fn_name,
# _ShowValueShort(list_of_kwargs_and_attributes, lambda x: [y[1] for y in x]))
with self._get_store() as store:
# def is_need_to_recompute(attributes):
# if self.force_recompute:
# return True
# return not store.has_result(attributes)
#
# def which_attributes_need_recompute
list_of_kwargs_and_attributes_and_multiattribs = []
for kwargs, attributes in list_of_kwargs_and_attributes:
multiple_attribute_values = self.multiple_attribute_values
if not multiple_attribute_values:
multiple_attribute_values = [ {} ]
# here we use multiple_attribute_values also for functions that
# don't explicitly have any multiple_attribute_values. In
# those cases an empty list means that there is nothing to
# compute, and a list containing only an empty dictionary means
# that we should compute that function.
if not self.force_recompute:
multiple_attribute_values = [
m
for m in multiple_attribute_values
if not store.has_result(dict(attributes, **m))
]
if not multiple_attribute_values:
# nothing to compute even for non-multiple-attributed
# functions, see comment above
logger.debug("Results for %s [%s] already present, not repeating computation",
_ShowValueShort(attributes),
_ShowValueShort(self.multiple_attribute_values))
continue
multiattribkwargs = {
k: [m.get(k, None) for m in multiple_attribute_values]
for k in self.multiple_attribute_all_keys
}
list_of_kwargs_and_attributes_and_multiattribs.append(
(kwargs, attributes, multiattribkwargs)
)
# if not self.multiple_attribute_values:
# if is_need_to_recompute(attributes):
# def have_all_necessary_results_in_store():
# if not self.multiple_attribute_values:
# return store.has_result(attributes)
# return
# if not self.force_recompute and have_all_necessary_results_in_store():
# logger.debug("Results for %s already present, not repeating computation",
# _ShowValueShort(attributes))
# else:
# new_list_of_kwargs_and_attributes.append( (kwargs,attributes,) )
if not list_of_kwargs_and_attributes_and_multiattribs:
logger.debug("There's nothing to compute.")
return
all_results = MultipleResults()
for kwargs, attributes, multiattribkwargs \
in list_of_kwargs_and_attributes_and_multiattribs:
logger.info("computing for attributes = %s [with multi-attributes = %s]",
_ShowValueShort(attributes), _ShowValueShort(multiattribkwargs))
run_kwargs = dict(kwargs, **{k: v for (k,v) in multiattribkwargs.items()
if k in self.fn_arg_names})
tr = {}
result = None
try:
with phfnbutils.TimeThis(tr, silent=True):
# call the function that actually computes the result
result = self.fn(**run_kwargs)
except NoResultException as e:
logger.warning(
"No result (NoResultException): %s [for %s after %s seconds]",
e, _ShowValueShort(attributes), tr['timethisresult'].dt,
)
return False
except Exception as e:
logger.error("Exception while computing result!", exc_info=True)
return False
dt = tr['timethisresult'].dt
if result is None:
logger.warning("No result (returned None) for %s, after %s seconds",
_ShowValueShort(attributes), dt)
return False
logger.debug("result: %s", _ShowValueShort(result))
logger.info("Got result for %s [runtime: %s seconds]",
_ShowValueShort(attributes), dt)
the_info = {}
for info_k, info_v in self.info.items():
if callable(info_v):
info_v = _call_with_accepted_kwargs(info_v, attributes)
the_info[info_k] = info_v
the_info.update(timethisresult=dt)
all_results.append_result(attributes, the_info, result)
# store results
if not self.skip_store:
with self._get_store() as store:
for attributes, the_info, result in all_results.results:
store.store_result(attributes, result, info=the_info)
# signal to caller that we've computed (a) new result(s) -- but this
# return value is probably ignored anyways
return True
def _get_store(self):
store_kwargs = {}
if self.realm is not None:
store_kwargs.update(realm=self.realm)
return Hdf5StoreResultsAccessor(self.store_filename, **store_kwargs)
def flatten_attribute_value_lists(alist):
# {'a': [1, 2]} -> [{'a': 1}, {'a': 2}] for all keys in all listed dictionaries
if isinstance(alist, dict):
alist = [alist]
need_another_loop = True
while need_another_loop:
#print(f"Looping to flatten attribute value lists, {alist=}")
newalist = []
need_another_loop = False
for a in alist:
#print(f"Inspecting {a=}")
assert isinstance(a, dict) # should be dict here
k, v = next( ((k, v) for (k,v) in a.items() if isinstance(v, list)),
(None,None) )
if k is not None:
#print(f"Expanding {k=}: {v=}")
need_another_loop = True
# expand list value into list of dictionaries with each value
def _updated_k_with_vitem(vitem):
d = dict(a)
d[k] = vitem
return d
expanded = [
_updated_k_with_vitem(vitem)
for vitem in v
]
#print(f"{expanded=}") # DEBUG
newalist += expanded
else:
newalist += [a] # ok, keep this dict as is
alist = newalist
return newalist
| 1.945313
| 2
|
world.py
|
ano0002/Level-One-Again
| 0
|
12775372
|
from ursina import *
from objects import ThreeD_Button,Laser
class Level(Entity):
def __init__(self,base,next,player):
super().__init__(model = None,position = (0,0,0))
self.base = base
self.base[0].set_level(self)
for elem in self.base :
elem.parent = self
self.player = player
self.followers = next
for elem in self.followers:
if type(elem) not in (list,tuple) :
elem.disable()
else :
for sub_elem in elem :
sub_elem.disable()
def next(self):
if len(self.followers)>0 :
if type(self.followers[0]) not in (list,tuple) :
self.followers[0].enable()
self.base.append(self.followers[0])
else :
for elem in self.followers[0] :
elem.enable()
self.base.append(elem)
self.followers.pop(0)
for elem in self.base:
if type(elem) == ThreeD_Button:
if not elem.state :
elem.toggle()
else :
scene.clear()
bg = Entity(parent=camera.ui, model='quad', scale_x=camera.aspect_ratio, color=color.black, z=1)
bg.scale *= 400
win = Text(parent = camera.ui,text=f'You Won using {self.player.deaths} lives !!', origin=(0,0), color=color.clear,scale =2)
win.animate_color(color.white,duration = 1)
create = Text(parent = camera.ui,text='Made by ano002 (Spike Model by NufNuf)', origin=(0,1.7), color=color.clear,scale =1)
create.animate_color(color.white,duration = 1)
| 2.5
| 2
|
src/model.py
|
Awagi/wiki-update-tracker
| 0
|
12775373
|
from tracker import (
GitFile,
TranslationGitFile,
GitPatch,
TranslationTrack,
ToCreateTranslationTrack,
ToInitTranslationTrack,
ToUpdateTranslationTrack,
UpToDateTranslationTrack,
OrphanTranslationTrack,
Status
)
from pathlib import Path
import os.path
from github_utils import (
file_url,
raw_file_url,
compare_url
)
class GitFileModel:
"""
A model describing a git file.
"""
def __init__(self, git_file):
"""
Builds the model for templating after the given git file.
:param tracker.GitFile git_file: git file to build the model from
:raise ValueError: when git_file is not an instance of GitFile
"""
if isinstance(git_file, GitFile):
self.path = git_file.path.as_posix()
self.filename = git_file.path.name
self.directory = git_file.path.parent.as_posix()
self.no_trace = git_file.no_trace
if git_file.no_trace:
self.commit = None
else:
self.commit = git_file.commit.hexsha
self.new_file = git_file.new_file
self.copied_file = git_file.copied_file
self.renamed_file = git_file.renamed_file
if git_file.rename_from:
self.rename_from = git_file.rename_from.as_posix()
else:
self.rename_from = None
if git_file.rename_to:
self.rename_to = git_file.rename_to.as_posix()
else:
self.rename_to = None
self.deleted_file = git_file.deleted_file
if isinstance(git_file, TranslationGitFile):
self.lang_tag = git_file.lang_tag
self.language = git_file.language
else:
raise ValueError("git_file is not an instance of GitFile")
class GitPatchModel:
"""
A model describing a git patch.
"""
def __init__(self, git_patch):
"""
Builds the model for templating after the given git patch.
:param tracker.GitPatch git_patch: git patch to build the model from
:raise ValueError: when git_patch is not an instance of GitPatch
"""
if isinstance(git_patch, GitPatch):
self.diff = git_patch.diff
self.additions = git_patch.additions
self.deletions = git_patch.deletions
self.changes = git_patch.changes
else:
raise ValueError("git_patch is not an instance of GitPatch")
class TranslationTrackModel:
"""
A model describing a translation track as an interface for templates to use.
"""
def __init__(self, track):
"""
Builds the model for templating after the given track.
:param tracker.TranslationTrack track: the track
:raise ValueError: when track is not an instance of TranslationTrack
"""
if isinstance(track, TranslationTrack):
self.translation = GitFileModel(track.translation)
self.original = GitFileModel(track.original)
self.status = track.status
if isinstance(track, ToCreateTranslationTrack):
self.missing_lines = track.missing_lines
elif isinstance(track, ToInitTranslationTrack):
self.missing_lines = track.missing_lines
elif isinstance(track, ToUpdateTranslationTrack):
self.base_original = GitFileModel(track.base_original)
self.patch = GitPatchModel(track.patch)
self.to_rename = track.to_rename
elif isinstance(track, UpToDateTranslationTrack):
pass
elif isinstance(track, OrphanTranslationTrack):
self.deleted = track.deleted
self.surplus_lines = track.surplus_lines
else:
raise ValueError("track is not an instance of TranslationTrack")
class Template:
"""
Represents a template, e.g. "{t.translation.language} translation needs to be done here: {translation_url}" for a Github instruction, where ``t`` is a TranslationTrackModel instance.
:var str template: the template itself
:var bool empty: whether the template is an empty string; an empty template generally tells an updater not to process it
"""
def __init__(self, template=""):
"""
Template is a str with unformatted tags of a ``t`` object representing a ``TranslationTrackModel`` instance, and more args depending on the context (e.g URLs for Github).
Creating an empty template would generally mean to an updater that it should not process it.
:param str template: unformatted template, with ``format``-type tags using ``t``, instance of ``TranslationTrackModel``
:raise TypeError: when template is not a str
"""
if not isinstance(template, str):
raise TypeError("template is not str")
self.template = template
self.empty = len(self.template) == 0
def special_args(self, track, **kwargs):
"""
Defines special arguments for the template from a track, when necessary.
Override this method to provide special args when required in a certain context.
:param tracker.TranslationTrack track: the track, base of template
:param kwargs: other provided values for subclasses of this template when necessary
:return: kwargs for template formatting
:rtype: dict
"""
return {}
def format(self, t, **kwargs):
"""
Format the template using the translation track given, resources for the template to be built.
:param tracker.TranslationTrack t: a translation track or a subclass
:param **kwargs: other parameters to pass when formatting specific template, defined in special_args of subclass
:return: the formatted message
:rtype: str
:raise ValueError: when t is not a TranslationTrack instance
"""
if not isinstance(t, TranslationTrack):
raise ValueError("t is not a TranslationTrack instance")
data = TranslationTrackModel(t)
return self.template.format(t=data, **self.special_args(t, **kwargs))
class StubTemplate(Template):
"""
Represents a template for content of stub files.
"""
def special_args(self, track):
"""
Sets special argument ``translation_to_original_path``, relative path to original file from translation parent directory.
:param tracker.TranslationTrack track: the track, base of template
:return: kwargs for template formatting
:rtype: dict
"""
return {
"translation_to_original_path": Path(os.path.relpath(track.original.path, track.translation.path.parent)).as_posix()
}
class GithubTemplate(Template):
"""
Represents a template for Github Issues and Projects.
"""
def special_args(self, track, repo):
"""
Sets special arguments:
- ``original_url``, Github URL to original file (using commit rev). Only with To Create, To Initialize, To Update and Up-To-Date tracks.
- ``raw_original_url``, Github URL to raw original file (using commit rev). Only with To Create, To Initialize, To Update and Up-To-Date tracks.
- ``translation_url``, Github URL to translation file (using branch rev). Only with To Initialize, To Update, Up-To-Date and Orphan tracks.
- ``raw_translation_url``, Github URL to raw translation file (using commit rev). Only with To Initialize, To Update, Up-To-Date and Orphan tracks.
- ``base_original_url``, Github URL to base original file (using commit rev). Only with To Update tracks.
- ``raw_base_original_url``, Github URL to raw base original file (using commit rev). Only with To Update tracks.
- ``compare_url``, Github URL to Github comparison (using base_original and original commit rev). Only with To Update tracks.
:param tracker.TranslationTrack track: the track, base of template
:param github.Repository.Repository repo: the github repo for URL building purpose
:return: kwargs for template formatting
:rtype: dict
"""
args = {}
if isinstance(track, (ToCreateTranslationTrack, ToInitTranslationTrack, ToUpdateTranslationTrack, UpToDateTranslationTrack)):
args["original_url"] = file_url(repo.full_name, track.original.commit.hexsha, track.original.path.as_posix())
args["raw_original_url"] = raw_file_url(repo.full_name, track.original.commit.hexsha, track.original.path.as_posix())
if isinstance(track, (ToInitTranslationTrack, ToUpdateTranslationTrack, UpToDateTranslationTrack, OrphanTranslationTrack)):
args["translation_url"] = file_url(repo.full_name, track.branch, track.translation.path.as_posix())
args["raw_translation_url"] = raw_file_url(repo.full_name, track.translation.commit.hexsha, track.translation.path.as_posix())
if isinstance(track, ToUpdateTranslationTrack):
args["base_original_url"] = file_url(repo.full_name, track.base_original.commit.hexsha, track.base_original.path.as_posix()),
args["raw_base_original_url"] = raw_file_url(repo.full_name, track.base_original.commit.hexsha, track.base_original.path.as_posix()),
args["compare_url"] = compare_url(repo.full_name, track.base_original.commit.hexsha, track.original.commit.hexsha)
return args
def format(self, t, repo):
return super().format(t, repo=repo)
class GithubTemplater:
"""
Github Templates handler:
- maps ``GithubTemplate`` instances to ``tracker.Status``
- formats the corresponding template from a ``tracker.TranslationTrack``, according to its status attribute
"""
def __init__(self):
self.map = {}
def __setitem__(self, status, template):
"""
Maps a template to a status. Templates can't be empty.
:param tracker.Status status: the status to map the template to
:param GithubTemplate template: the template to map to the status
:raise TypeError: when status is not an instance of tracker.Status
:raise TypeError: when template is not an instance of model.GithubTemplate
:raise AttributeError: when template is empty
"""
if not isinstance(template, GithubTemplate):
raise TypeError("template is not an instance of GithubTemplate")
if not isinstance(status, Status):
raise TypeError("status is not an instance of Status")
if template.empty:
raise AttributeError("template can't be empty")
self.map[status] = template
def __contains__(self, status):
"""
Tells whether a template is mapped to the given status.
:param tracker.Status status: the status
:return: True if status is key of a template, False otherwise
:rtype: bool
"""
return status in self.map
def __getitem__(self, status):
"""
Gets the template mapped to the given status.
:param tracker.Status status: the status
:return: the corresponding template, or None if status is not key of a template
:rtype: GithubTemplate
"""
if status in self:
return self.map[status]
else:
return None
def format(self, track, repo):
"""
Gets the formatted template using the given translation track and corresponding to its status attribute.
:param tracker.TranslationTrack track: the track used as input to format the template
:param github.Repository.Repository repo: the repo input for the GitHub template
:return: the formatted template, or None if status is not mapped to a template
:rtype: str
:raise TypeError: when track is not an instance of tracker.TranslationTrack
"""
if not isinstance(track, TranslationTrack):
raise TypeError("track is not an instance of TranslationTrack")
if track.status in self:
return self.map[track.status].format(track, repo)
else:
return None
| 2.328125
| 2
|
sp_api/auth/credentials.py
|
lionsdigitalsolutions/python-amazon-sp-api
| 213
|
12775374
|
import os
class Credentials:
def __init__(self, refresh_token, credentials):
self.client_id = credentials.lwa_app_id
self.client_secret = credentials.lwa_client_secret
self.refresh_token = refresh_token or credentials.refresh_token
| 1.914063
| 2
|
mathproblem/graph_transforms.py
|
matthewcpp/mathproblem
| 0
|
12775375
|
from .problem import Problem
from .trig_defs import RightAngleTrigFunction
from enum import Enum
from typing import List
import random
class TransformationType(Enum):
VerticalTranslation = 1
HorizontalTranslation = 2
VerticalStretchCompression = 3
HorizontalStretchCompression = 4
class GraphTransformProblem(Problem):
def __init__(self):
Problem.__init__(self)
def __repr__(self):
return str.format("Graph Transform Problem ({}): {}", self.level, self.prompt)
def _get_trig_func_text(trig_func: RightAngleTrigFunction) -> str:
if trig_func == RightAngleTrigFunction.Sin:
return "sin"
else:
return "cos"
class GraphTransformData:
def __init__(self):
self.trig_func: RightAngleTrigFunction = RightAngleTrigFunction.Sin
self.hints: List[str] = []
self.answer: str = ""
self.vert_translation_mod: str = ""
self.horiz_translation_mod: str = ""
self.vert_stretch_mod: str = ""
self.horiz_stretch_mod: str = ""
def _append_to_answer(self, val: str):
if len(self.answer) > 0:
self.answer += ";"
self.answer += val
def add_horiz_translation(self):
val = random.randint(0, 3)
horiz_translation = "π"
if val > 1:
horiz_translation = "{}{}".format(val, horiz_translation)
if random.randint(0, 1) == 1:
self.horiz_translation_mod = " + {}".format(horiz_translation)
hint_text = horiz_translation
else:
self.horiz_translation_mod = " - {}".format(horiz_translation)
hint_text = "-{}".format(horiz_translation)
self.hints.append("Observe horizontal translation: {}".format(hint_text))
self._append_to_answer("ht")
def add_vertical_translation(self):
val = random.randint(1, 3)
if random.randint(0, 1) == 1:
self.vert_translation_mod = " + {}".format(val)
hint_text = val
else:
self.vert_translation_mod = " - {}".format(val)
hint_text = "-{}".format(val)
self.hints.append("Observe vertical translation: {}".format(hint_text))
self._append_to_answer("vt")
def add_vertical_stretch(self):
val = random.randint(2, 5)
if random.randint(0, 1) == 1:
val *= -1
self.vert_stretch_mod = str(val)
self.hints.append("Observe vertical stretch: {}".format(self.vert_stretch_mod))
self._append_to_answer("vs")
def add_horiz_stretch(self):
val = random.randint(2, 5)
if random.randint(0, 1) == 1:
val *= -1
self.horiz_stretch_mod = str(val)
self.hints.append("Observe horizontal stretch: {}".format(self.horiz_stretch_mod))
self._append_to_answer("hs")
def get_prompt(self):
x = "x"
if self.horiz_stretch_mod != "":
x = "{}/{}".format(x, self.horiz_stretch_mod)
if self.horiz_translation_mod != "":
x = "{}{}".format(x, self.horiz_translation_mod)
x = "{}({})".format(self.trig_func.name, x)
if self.vert_stretch_mod != "":
x = "{}{}".format(self.vert_stretch_mod, x)
if self.vert_translation_mod != "":
x = "{}{}".format(x, self.vert_translation_mod)
return x
def generate_graph_transform_problem(level: int = 1):
graph_data = GraphTransformData()
graph_data.trig_func = RightAngleTrigFunction(random.randint(1, 2))
# pick some random transforms to add to the graph
transforms = list(range(4))
random.shuffle(transforms)
for i in range(level):
xform = transforms.pop()
if xform == 0 and graph_data.horiz_translation_mod == "":
graph_data.add_horiz_stretch()
elif xform == 1 and graph_data.horiz_stretch_mod == "":
graph_data.add_horiz_translation()
elif xform == 2:
graph_data.add_horiz_stretch()
else:
graph_data.add_vertical_stretch()
problem = GraphTransformProblem()
problem.prompt = graph_data.get_prompt()
problem.steps = graph_data.hints
problem.level = level
problem.answer = graph_data.answer
return problem
| 2.953125
| 3
|
test/test_website_project_instance_api.py
|
hyperonecom/h1-client-python
| 0
|
12775376
|
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import unittest
import h1
from h1.api.website_project_instance_api import WebsiteProjectInstanceApi # noqa: E501
class TestWebsiteProjectInstanceApi(unittest.TestCase):
"""WebsiteProjectInstanceApi unit test stubs"""
def setUp(self):
self.api = WebsiteProjectInstanceApi() # noqa: E501
def tearDown(self):
pass
def test_website_project_instance_connect_get(self):
"""Test case for website_project_instance_connect_get
Get website/instance.connect # noqa: E501
"""
pass
def test_website_project_instance_connect_list(self):
"""Test case for website_project_instance_connect_list
List website/instance.connect # noqa: E501
"""
pass
def test_website_project_instance_create(self):
"""Test case for website_project_instance_create
Create website/instance # noqa: E501
"""
pass
def test_website_project_instance_credential_create(self):
"""Test case for website_project_instance_credential_create
Create website/instance.credential # noqa: E501
"""
pass
def test_website_project_instance_credential_delete(self):
"""Test case for website_project_instance_credential_delete
Delete website/instance.credential # noqa: E501
"""
pass
def test_website_project_instance_credential_get(self):
"""Test case for website_project_instance_credential_get
Get website/instance.credential # noqa: E501
"""
pass
def test_website_project_instance_credential_list(self):
"""Test case for website_project_instance_credential_list
List website/instance.credential # noqa: E501
"""
pass
def test_website_project_instance_credential_patch(self):
"""Test case for website_project_instance_credential_patch
Update website/instance.credential # noqa: E501
"""
pass
def test_website_project_instance_delete(self):
"""Test case for website_project_instance_delete
Delete website/instance # noqa: E501
"""
pass
def test_website_project_instance_domain_create(self):
"""Test case for website_project_instance_domain_create
Create website/instance.domain # noqa: E501
"""
pass
def test_website_project_instance_domain_delete(self):
"""Test case for website_project_instance_domain_delete
Delete website/instance.domain # noqa: E501
"""
pass
def test_website_project_instance_domain_get(self):
"""Test case for website_project_instance_domain_get
Get website/instance.domain # noqa: E501
"""
pass
def test_website_project_instance_domain_list(self):
"""Test case for website_project_instance_domain_list
List website/instance.domain # noqa: E501
"""
pass
def test_website_project_instance_env_create(self):
"""Test case for website_project_instance_env_create
Create website/instance.env # noqa: E501
"""
pass
def test_website_project_instance_env_delete(self):
"""Test case for website_project_instance_env_delete
Delete website/instance.env # noqa: E501
"""
pass
def test_website_project_instance_env_get(self):
"""Test case for website_project_instance_env_get
Get website/instance.env # noqa: E501
"""
pass
def test_website_project_instance_env_list(self):
"""Test case for website_project_instance_env_list
List website/instance.env # noqa: E501
"""
pass
def test_website_project_instance_event_get(self):
"""Test case for website_project_instance_event_get
Get website/instance.event # noqa: E501
"""
pass
def test_website_project_instance_event_list(self):
"""Test case for website_project_instance_event_list
List website/instance.event # noqa: E501
"""
pass
def test_website_project_instance_get(self):
"""Test case for website_project_instance_get
Get website/instance # noqa: E501
"""
pass
def test_website_project_instance_link_create(self):
"""Test case for website_project_instance_link_create
Create website/instance.link # noqa: E501
"""
pass
def test_website_project_instance_link_delete(self):
"""Test case for website_project_instance_link_delete
Delete website/instance.link # noqa: E501
"""
pass
def test_website_project_instance_link_get(self):
"""Test case for website_project_instance_link_get
Get website/instance.link # noqa: E501
"""
pass
def test_website_project_instance_link_list(self):
"""Test case for website_project_instance_link_list
List website/instance.link # noqa: E501
"""
pass
def test_website_project_instance_list(self):
"""Test case for website_project_instance_list
List website/instance # noqa: E501
"""
pass
def test_website_project_instance_log_get(self):
"""Test case for website_project_instance_log_get
Get website/instance.log # noqa: E501
"""
pass
def test_website_project_instance_log_list(self):
"""Test case for website_project_instance_log_list
List website/instance.log # noqa: E501
"""
pass
def test_website_project_instance_log_read(self):
"""Test case for website_project_instance_log_read
Read website/instance.log # noqa: E501
"""
pass
def test_website_project_instance_metric_get(self):
"""Test case for website_project_instance_metric_get
Get website/instance.metric # noqa: E501
"""
pass
def test_website_project_instance_metric_list(self):
"""Test case for website_project_instance_metric_list
List website/instance.metric # noqa: E501
"""
pass
def test_website_project_instance_metric_point_list(self):
"""Test case for website_project_instance_metric_point_list
List website/instance.point # noqa: E501
"""
pass
def test_website_project_instance_restart(self):
"""Test case for website_project_instance_restart
Restart website/instance # noqa: E501
"""
pass
def test_website_project_instance_service_get(self):
"""Test case for website_project_instance_service_get
Get website/instance.service # noqa: E501
"""
pass
def test_website_project_instance_service_list(self):
"""Test case for website_project_instance_service_list
List website/instance.service # noqa: E501
"""
pass
def test_website_project_instance_sideapp_get(self):
"""Test case for website_project_instance_sideapp_get
Get website/instance.sideapp # noqa: E501
"""
pass
def test_website_project_instance_sideapp_list(self):
"""Test case for website_project_instance_sideapp_list
List website/instance.sideapp # noqa: E501
"""
pass
def test_website_project_instance_sideapp_open(self):
"""Test case for website_project_instance_sideapp_open
Open website/instance.sideapp # noqa: E501
"""
pass
def test_website_project_instance_snapshot_create(self):
"""Test case for website_project_instance_snapshot_create
Create website/instance.snapshot # noqa: E501
"""
pass
def test_website_project_instance_snapshot_delete(self):
"""Test case for website_project_instance_snapshot_delete
Delete website/instance.snapshot # noqa: E501
"""
pass
def test_website_project_instance_snapshot_download(self):
"""Test case for website_project_instance_snapshot_download
Download website/instance.snapshot # noqa: E501
"""
pass
def test_website_project_instance_snapshot_get(self):
"""Test case for website_project_instance_snapshot_get
Get website/instance.snapshot # noqa: E501
"""
pass
def test_website_project_instance_snapshot_list(self):
"""Test case for website_project_instance_snapshot_list
List website/instance.snapshot # noqa: E501
"""
pass
def test_website_project_instance_start(self):
"""Test case for website_project_instance_start
Start website/instance # noqa: E501
"""
pass
def test_website_project_instance_stop(self):
"""Test case for website_project_instance_stop
Stop website/instance # noqa: E501
"""
pass
def test_website_project_instance_tag_create(self):
"""Test case for website_project_instance_tag_create
Create website/instance.tag # noqa: E501
"""
pass
def test_website_project_instance_tag_delete(self):
"""Test case for website_project_instance_tag_delete
Delete website/instance.tag # noqa: E501
"""
pass
def test_website_project_instance_tag_get(self):
"""Test case for website_project_instance_tag_get
Get website/instance.tag # noqa: E501
"""
pass
def test_website_project_instance_tag_list(self):
"""Test case for website_project_instance_tag_list
List website/instance.tag # noqa: E501
"""
pass
def test_website_project_instance_tag_put(self):
"""Test case for website_project_instance_tag_put
Replace website/instance.tag # noqa: E501
"""
pass
def test_website_project_instance_transfer(self):
"""Test case for website_project_instance_transfer
Transfer website/instance # noqa: E501
"""
pass
def test_website_project_instance_update(self):
"""Test case for website_project_instance_update
Update website/instance # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 2.234375
| 2
|
coach/SHL/client.py
|
LSTM-Kirigaya/NUAA-guandan
| 0
|
12775377
|
<filename>coach/SHL/client.py
# -*- coding: utf-8 -*-
# @Time : 2020/10/1 16:30
# @Author : <NAME>
# @File : client_pjh3.py
# @Description:
import json
from ws4py.client.threadedclient import WebSocketClient
from clients.state import State
from coach.SHL.action import Action_shl
class Main(WebSocketClient):
def __init__(self, url, render=True):
super().__init__(url)
self.state = State(render)  # state used when executing actions
self.action = Action_shl(render)  # algorithm that chooses actions
self.rounds = 0  # number of rounds played
self.my_pos = None  # current position
self.cards_num = dict()
self.curRank = None  # current rank
def opened(self):
pass
def closed(self, code, reason=None):
print("Closed down", code, reason)
def received_message(self, message):
message = json.loads(str(message))  # parse the received message into a Python dict
self.state.parse(message)  # let the state object parse the game state
if message["stage"] == "beginning":
self.my_pos = message['myPos']
self.action.initcards(message)
if message["type"] == "act":
self.curRank = message['curRank']
if message["stage"] == "episodeOver": # 小局结束,回合数清零
self.rounds = 0
if "actionList" in message: # 需要做出动作选择时调用动作对象进行解析
self.rounds += 1 # 回合数加1
# print("handcards:", message["handCards"])
# parse the current state (server message, agents' remaining card counts, current round number, position)
if message["stage"] == "tribute":
act_index = 0
else:
self.action.initcards(message)
act_index = self.action.parse(msg=message,
rounds=self.rounds,
my_pos=self.my_pos)
self.send(json.dumps({"actIndex": act_index}))
| 2.15625
| 2
|
data/rotation/main.py
|
Wanzaz/projects
| 3
|
12775378
|
<filename>data/rotation/main.py
from tests import tests, test
"""
You are given a list of numbers, obtained by rotating a sorted list an unknown number of times. Write a function to determine the minimum number of times the original sorted list was rotated to obtain the given list. Your function should have the worst-case complexity of O(log N), where N is the length of the list. You can assume that all the numbers in the list are unique.
Example: The list [5, 6, 9, 0, 2, 3, 4] was obtained by rotating the sorted list [0, 2, 3, 4, 5, 6, 9] 3 times.
We define "rotating a list" as removing the last element of the list and adding it before the first element. E.g. rotating the list [3, 2, 4, 1] produces [1, 3, 2, 4].
"Sorted list" refers to a list where the elements are arranged in the increasing order e.g. [1, 3, 5, 7].
"""
# DESCRIPTION: Given a rotated sorted list that was rotated some unknown number of times, we need to find the minimum number of times it was rotated.
# check for each number in the list whether it is smaller than the number that comes before it
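# A quick check of the two implementations below against the example from the
# problem statement: count_rotations_linear([5, 6, 9, 0, 2, 3, 4]) and
# count_rotations_binary([5, 6, 9, 0, 2, 3, 4]) both return 3, since index 3
# holds the first element (0) that is smaller than its predecessor (9).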
def count_rotations_linear(nums):
position = 1
while position < len(nums):
if position > 0 and nums[position] < nums[position-1]:
return position
position += 1
return 0
def count_rotations_binary(nums):
lo = 0
hi = len(nums) - 1
while lo <= hi:
mid = (lo + hi) // 2
mid_number = nums[mid]
print("lo:", lo, ", hi:", hi, ", mid:", mid, ", mid_number:", mid_number)
if mid >= lo and mid_number < nums[mid - 1]:
# The middle position is the answer
return mid
elif mid_number < nums[hi]:
# Answer lies in the left half
hi = mid - 1
else:
# Answer lies in the right half
lo = mid + 1
return 0
nums0 = test['input']['nums']
output0 = test['input']['nums']
result0 = count_rotations_linear(nums0)
result1 = count_rotations_binary(nums0)
print(result0)
print(result1)
# print(tests)
| 4.3125
| 4
|
lambdas/firehose-transform/index.py
|
craigbailey-dev/aws-codebuild-event-logs
| 0
|
12775379
|
<reponame>craigbailey-dev/aws-codebuild-event-logs<filename>lambdas/firehose-transform/index.py
import json
import base64
import traceback
from datetime import datetime
def handler(event, context):
output = []
for record in event["records"]:
try:
# Base64 decode record data and JSON parse data
entry = base64.b64decode(record["data"]).decode("utf-8")
parsed_entry = json.loads(entry)
payload = { key.replace("-", "_"): value for key, value in parsed_entry["detail"].items() }
# Gather fields outside of 'detail' property and add them to payload
payload["event_version"] = parsed_entry["version"]
payload["timestamp"] = parsed_entry["time"]
# Set 'additional_information' field to JSON string
payload["additional_information"] = json.dumps(payload["additional_information"])
# Add new line to payload string, Base64 encode payload and return transformed record
decoded_data = json.dumps(payload) + "\n"
encoded_data = base64.b64encode(decoded_data.encode("utf-8")).decode("utf-8")
output.append({
"recordId": record["recordId"],
"result": "Ok",
"data": encoded_data,
})
except:
# If an error occurs, print error and return record as having failed processing
traceback.print_exc()
output.append({
"recordId": record["recordId"],
"result": "ProcessingFailed",
"data": record["data"],
})
return {
"records": output
}
| 2.3125
| 2
|
PythonSolutions/ItertoolsPermutations.py
|
MohamedMetwalli5/HackerRank_solutions
| 37
|
12775380
|
<reponame>MohamedMetwalli5/HackerRank_solutions
import itertools as it
x = input()
s = x.split(" ")[0]
k = int(x.split(" ")[1])
temp = list(it.permutations(s,k))
result = []
for item in temp:
result.append("".join(item))
list.sort(result)
for item in result:
print(item)
| 3.65625
| 4
|
tests/run_examples.py
|
DavidMetzIMT/pyEIT
| 0
|
12775381
|
import os
import subprocess
folder = r"./examples"
example = [
"eit_dynamic_bp.py",
"eit_dynamic_greit.py",
"eit_dynamic_jac.py",
"eit_dynamic_jac3d.py",
"eit_dynamic_stack.py",
"eit_dynamic_svd.py",
"eit_sensitivity2d.py",
"eit_static_GN_3D.py",
"eit_static_jac.py",
"fem_forward2d.py",
"fem_forward3d.py",
"mesh_distmesh2d.py",
"mesh_distmesh3d.py",
"mesh_intro2d.py",
"mesh_multi_shell.py",
"paper_eit2016b.py",
"softx/figure01.py",
"softx/figure02.py",
"softx/figure02b.py",
"softx/figure03.py",
]
list_ex = ""
index = {}
for i, file in enumerate(example):
list_ex = f"{list_ex}Example #{i}: {file}\r\n"
index[f"{i}"] = i
def run():
ans = input(f"List of all examples:\r\n{list_ex} Run all examples? (y)/n or #: ")
all = ans in ["Y", "y"]
if not all and ans in list(index.keys()):
_run_ex(example[index[ans]])
return
for ex in example:
next = True
if not all:
ans = input(f"Run example '{ex}'? (y)/n:")
next = ans not in ["N", "n"]
if not next:
continue
_run_ex(ex)
def _run_ex(ex_):
path = os.path.join(folder, ex_)
cmd = f"python {path}"
print(f"runs >> {cmd}")
subprocess.call(cmd, shell=True)
if __name__ == "__main__":
""""""
run()
| 2.390625
| 2
|
examples/server_delete.py
|
sulidi-maimaitiming/cyberwatch_api_toolbox
| 10
|
12775382
|
<filename>examples/server_delete.py<gh_stars>1-10
'''Delete a Server'''
import os
from configparser import ConfigParser
from cbw_api_toolbox.cbw_api import CBWApi
CONF = ConfigParser()
CONF.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'api.conf'))
CLIENT = CBWApi(CONF.get('cyberwatch', 'url'), CONF.get('cyberwatch', 'api_key'), CONF.get('cyberwatch', 'secret_key'))
CLIENT.ping()
SERVER_ID = ''  # Id of the server you wish to delete
RESULT = CLIENT.delete_server(SERVER_ID)
if RESULT:
print('Successful deletion')
else:
print('Deletion failed')
| 2.984375
| 3
|
tests.py
|
qiang123/regal
| 432
|
12775383
|
from unittest import TestCase
from regal import BaseInfo
from regal.grouping import GroupAlgorithm
from regal.check_interface import AlgorithmABC
# Run Method: python -m unittest -v tests.py
class TestBaseInfoInitial(TestCase):
def test_empty_info(self):
ab = BaseInfo('', '', '')
with self.assertRaises(AttributeError):
ab.grouping()
def test_empty_info_version_host_isdict(self):
ab = BaseInfo({}, '', '')
self.assertIsNotNone(ab.grouping())
def test_info_errortype(self):
ab = BaseInfo({}, '1', 'sds')
self.assertIsNotNone(ab.grouping())
class TestGroupingResult(TestCase):
ver = {
'ver1': '1.1.1.1,2.2.2.2,3.3.3.3,4.4.4.4,5.1.1.1,6.2.2.2,7.3.3.3,8.4.4.4'}
combine_num = 4
def test_combine_num(self):
ab = BaseInfo(
self.ver,
self.combine_num
)
instance_combine_num = ab.grouping().result[0][1]
self.assertEqual(len(instance_combine_num[1:-1][0]), self.combine_num)
def test_schedule_num(self):
schedule_num = 2
ab = BaseInfo(self.ver, self.combine_num, schedule_num)
instance_combine_num = ab.grouping().result[0][1]
self.assertEqual(len(instance_combine_num[0][0].split(',')), schedule_num)
class TestInstance(TestCase):
def test_algorithm_instance(self):
self.assertIsInstance(GroupAlgorithm(), AlgorithmABC)
| 2.546875
| 3
|
core/pandajob/utils.py
|
kiae-grid/panda-bigmon-core
| 0
|
12775384
|
<filename>core/pandajob/utils.py
"""
pandajob.utils
"""
import pytz
import re
from datetime import datetime, timedelta
from django.conf import settings
from django.db.models import Q, Count
from ..common.settings import defaultDatetimeFormat
from ..common.models import JediJobRetryHistory
from ..common.models import Users
from ..resource.models import Schedconfig
from .models import Jobsactive4, Jobsdefined4, Jobswaiting4, \
Jobsarchived4, Jobsarchived
homeCloud = {}
statelist = [ 'defined', 'waiting', 'assigned', 'activated', 'sent', \
'running', 'holding', 'finished', 'failed', 'cancelled', \
'transferring', 'starting', 'pending' ]
sitestatelist = [ 'assigned', 'activated', 'sent', 'starting', 'running', \
'holding', 'transferring', 'finished', 'failed', 'cancelled' ]
viewParams = {}
VOLIST = [ 'atlas', 'bigpanda', 'htcondor', 'lsst' ]
VONAME = { 'atlas' : 'ATLAS', \
'bigpanda' : 'BigPanDA', \
'htcondor' : 'HTCondor', \
'lsst' : 'LSST', \
'' : '' \
}
VOMODE = ' '
standard_fields = [ 'processingtype', 'computingsite', 'destinationse', \
'jobstatus', 'prodsourcelabel', 'produsername', \
'jeditaskid', 'taskid', 'workinggroup', 'transformation', \
'vo', 'cloud']
standard_sitefields = [ 'region', 'gocname', 'status', 'tier', \
'comment_field', 'cloud' ]
standard_taskfields = [ 'tasktype', 'status', 'corecount', 'taskpriority', \
'username', 'transuses', 'transpath', 'workinggroup', \
'processingtype', 'cloud', ]
LAST_N_HOURS_MAX = 0
#JOB_LIMIT = 0
JOB_LIMIT = 1000
def setupHomeCloud():
global homeCloud
if len(homeCloud) > 0:
return
sites = Schedconfig.objects.filter().exclude(cloud='CMS').values()
for site in sites:
homeCloud[site['siteid']] = site['cloud']
def cleanJobList(jobs, mode='drop'):
for job in jobs:
if not job['produsername']:
if job['produserid']:
job['produsername'] = job['produserid']
else:
job['produsername'] = 'Unknown'
if job['transformation']: job['transformation'] = job['transformation'].split('/')[-1]
if job['jobstatus'] == 'failed':
job['errorinfo'] = errorInfo(job, nchars=50)
else:
job['errorinfo'] = ''
job['jobinfo'] = ''
if isEventService(job): job['jobinfo'] = 'Event service job'
if mode == 'nodrop': return jobs
## If the list is for a particular JEDI task, filter out the jobs superseded by retries
taskids = {}
for job in jobs:
if 'jeditaskid' in job: taskids[job['jeditaskid']] = 1
droplist = []
if len(taskids) == 1:
for task in taskids:
retryquery = {}
retryquery['jeditaskid'] = task
retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').values()
newjobs = []
for job in jobs:
dropJob = 0
pandaid = job['pandaid']
for retry in retries:
if retry['oldpandaid'] == pandaid and retry['newpandaid'] != pandaid:
## there is a retry for this job. Drop it.
print 'dropping', pandaid
dropJob = retry['newpandaid']
if dropJob == 0:
newjobs.append(job)
else:
droplist.append({ 'pandaid' : pandaid, 'newpandaid' : dropJob })
droplist = sorted(droplist, key=lambda x:-x['pandaid'])
jobs = newjobs
jobs = sorted(jobs, key=lambda x:-x['pandaid'])
return jobs
def cleanTaskList(tasks):
for task in tasks:
if task['transpath']: task['transpath'] = task['transpath'].split('/')[-1]
return tasks
def siteSummaryDict(sites):
""" Return a dictionary summarizing the field values for the chosen most interesting fields """
global standard_sitefields
sumd = {}
sumd['category'] = {}
sumd['category']['test'] = 0
sumd['category']['production'] = 0
sumd['category']['analysis'] = 0
sumd['category']['multicloud'] = 0
for site in sites:
for f in standard_sitefields:
if f in site:
if not f in sumd: sumd[f] = {}
if not site[f] in sumd[f]: sumd[f][site[f]] = 0
sumd[f][site[f]] += 1
isProd = True
if site['siteid'].find('ANALY') >= 0:
isProd = False
sumd['category']['analysis'] += 1
if site['siteid'].lower().find('test') >= 0:
isProd = False
sumd['category']['test'] += 1
if (site['multicloud'] is not None) and (site['multicloud'] != 'None') and (re.match('[A-Z]+', site['multicloud'])):
sumd['category']['multicloud'] += 1
if isProd: sumd['category']['production'] += 1
if VOMODE != 'atlas': del sumd['cloud']
## convert to ordered lists
suml = []
for f in sumd:
itemd = {}
itemd['field'] = f
iteml = []
kys = sumd[f].keys()
kys.sort()
for ky in kys:
iteml.append({ 'kname' : ky, 'kvalue' : sumd[f][ky] })
itemd['list'] = iteml
suml.append(itemd)
suml = sorted(suml, key=lambda x:x['field'])
return suml
def userSummaryDict(jobs):
""" Return a dictionary summarizing the field values for the chosen most interesting fields """
sumd = {}
for job in jobs:
if 'produsername' in job and job['produsername'] != None:
user = job['produsername'].lower()
else:
user = 'Unknown'
if not user in sumd:
sumd[user] = {}
for state in statelist:
sumd[user][state] = 0
sumd[user]['name'] = job['produsername']
sumd[user]['cputime'] = 0
sumd[user]['njobs'] = 0
for state in statelist:
sumd[user]['n' + state] = 0
sumd[user]['nsites'] = 0
sumd[user]['sites'] = {}
sumd[user]['nclouds'] = 0
sumd[user]['clouds'] = {}
sumd[user]['nqueued'] = 0
# sumd[user]['latest'] = timezone.now() - timedelta(hours=2400)
sumd[user]['latest'] = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(hours=2400)
sumd[user]['pandaid'] = 0
cloud = job['cloud']
site = job['computingsite']
cpu = float(job['cpuconsumptiontime']) / 1.
state = job['jobstatus']
if job['modificationtime'] > sumd[user]['latest']: sumd[user]['latest'] = job['modificationtime']
if job['pandaid'] > sumd[user]['pandaid']: sumd[user]['pandaid'] = job['pandaid']
sumd[user]['cputime'] += cpu
sumd[user]['njobs'] += 1
sumd[user]['n' + state] += 1
if not site in sumd[user]['sites']: sumd[user]['sites'][site] = 0
sumd[user]['sites'][site] += 1
if not site in sumd[user]['clouds']: sumd[user]['clouds'][cloud] = 0
sumd[user]['clouds'][cloud] += 1
for user in sumd:
sumd[user]['nsites'] = len(sumd[user]['sites'])
sumd[user]['nclouds'] = len(sumd[user]['clouds'])
sumd[user]['nqueued'] = sumd[user]['ndefined'] + sumd[user]['nwaiting'] + sumd[user]['nassigned'] + sumd[user]['nactivated']
sumd[user]['cputime'] = "%d" % float(sumd[user]['cputime'])
## convert to list ordered by username
ukeys = sumd.keys()
ukeys.sort()
suml = []
for u in ukeys:
uitem = {}
uitem['name'] = u
uitem['latest'] = sumd[u]['pandaid']
uitem['dict'] = sumd[u]
suml.append(uitem)
suml = sorted(suml, key=lambda x:-x['latest'])
return suml
def errorInfo(job, nchars=300):
errtxt = ''
if int(job['brokerageerrorcode']) != 0:
errtxt += 'Brokerage error %s: %s <br>' % (job['brokerageerrorcode'], job['brokerageerrordiag'])
if int(job['ddmerrorcode']) != 0:
errtxt += 'DDM error %s: %s <br>' % (job['ddmerrorcode'], job['ddmerrordiag'])
if int(job['exeerrorcode']) != 0:
errtxt += 'Executable error %s: %s <br>' % (job['exeerrorcode'], job['exeerrordiag'])
if int(job['jobdispatchererrorcode']) != 0:
errtxt += 'Dispatcher error %s: %s <br>' % (job['jobdispatchererrorcode'], job['jobdispatchererrordiag'])
if int(job['piloterrorcode']) != 0:
errtxt += 'Pilot error %s: %s <br>' % (job['piloterrorcode'], job['piloterrordiag'])
if int(job['superrorcode']) != 0:
errtxt += 'Sup error %s: %s <br>' % (job['superrorcode'], job['superrordiag'])
if int(job['taskbuffererrorcode']) != 0:
errtxt += 'Task buffer error %s: %s <br>' % (job['taskbuffererrorcode'], job['taskbuffererrordiag'])
if job['transexitcode'] != '' and job['transexitcode'] is not None and int(job['transexitcode']) > 0:
errtxt += 'Transformation exit code %s' % job['transexitcode']
if len(errtxt) > nchars:
ret = errtxt[:nchars] + '...'
else:
ret = errtxt[:nchars]
return ret
def isEventService(job):
if job is not None and 'specialhandling' in job \
and job['specialhandling'] is not None \
and job['specialhandling'].find('eventservice') >= 0:
return True
else:
return False
def siteSummary(query):
summary = []
summary.extend(Jobsactive4.objects.filter(**query).values('cloud', 'computingsite', 'jobstatus').annotate(Count('jobstatus')).order_by('cloud', 'computingsite', 'jobstatus'))
summary.extend(Jobsarchived4.objects.filter(**query).values('cloud', 'computingsite', 'jobstatus').annotate(Count('jobstatus')).order_by('cloud', 'computingsite', 'jobstatus'))
return summary
def voSummary(query):
summary = []
summary.extend(Jobsactive4.objects.filter(**query).values('vo', 'jobstatus').annotate(Count('jobstatus')))
summary.extend(Jobsarchived4.objects.filter(**query).values('vo', 'jobstatus').annotate(Count('jobstatus')))
return summary
def wnSummary(query):
summary = []
summary.extend(Jobsactive4.objects.filter(**query).values('cloud', 'computingsite', 'modificationhost', 'jobstatus').annotate(Count('jobstatus')))
summary.extend(Jobsarchived4.objects.filter(**query).values('cloud', 'computingsite', 'modificationhost', 'jobstatus').annotate(Count('jobstatus')))
return summary
def jobStateSummary(jobs):
global statelist
statecount = {}
for state in statelist:
statecount[state] = 0
for job in jobs:
statecount[job['jobstatus']] += 1
return statecount
def jobSummary(query):
""" Not in use. Cannot take account of rerun jobs. """
summary = []
summary.extend(Jobsdefined4.objects.filter(**query).values('jobstatus')\
.annotate(Count('jobstatus')).order_by('jobstatus'))
summary.extend(Jobswaiting4.objects.filter(**query).values('jobstatus')\
.annotate(Count('jobstatus')).order_by('jobstatus'))
summary.extend(Jobsactive4.objects.filter(**query).values('jobstatus')\
.annotate(Count('jobstatus')).order_by('jobstatus'))
summary.extend(Jobsarchived4.objects.filter(**query).values('jobstatus')\
.annotate(Count('jobstatus')).order_by('jobstatus'))
summary.extend(Jobsarchived.objects.filter(**query).values('jobstatus')\
.annotate(Count('jobstatus')).order_by('jobstatus'))
jobstates = []
global statelist
for state in statelist:
statecount = {}
statecount['name'] = state
statecount['count'] = 0
for rec in summary:
if rec['jobstatus'] == state:
                # sum the counts coming from the different job tables
                statecount['count'] += rec['jobstatus__count']
jobstates.append(statecount)
return jobstates
def jobSummary2(query):
jobs = []
jobs.extend(Jobsdefined4.objects.filter(**query).values('pandaid', 'jobstatus', 'jeditaskid'))
jobs.extend(Jobswaiting4.objects.filter(**query).values('pandaid', 'jobstatus', 'jeditaskid'))
jobs.extend(Jobsactive4.objects.filter(**query).values('pandaid', 'jobstatus', 'jeditaskid'))
jobs.extend(Jobsarchived4.objects.filter(**query).values('pandaid', 'jobstatus', 'jeditaskid'))
jobs.extend(Jobsarchived.objects.filter(**query).values('pandaid', 'jobstatus', 'jeditaskid'))
## If the list is for a particular JEDI task, filter out the jobs superseded by retries
taskids = {}
for job in jobs:
if 'jeditaskid' in job: taskids[job['jeditaskid']] = 1
droplist = []
if len(taskids) == 1:
for task in taskids:
retryquery = {}
retryquery['jeditaskid'] = task
retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').values()
newjobs = []
for job in jobs:
dropJob = 0
pandaid = job['pandaid']
for retry in retries:
if retry['oldpandaid'] == pandaid and retry['newpandaid'] != pandaid:
## there is a retry for this job. Drop it.
print 'dropping', pandaid
dropJob = retry['newpandaid']
if dropJob == 0:
newjobs.append(job)
else:
droplist.append({ 'pandaid' : pandaid, 'newpandaid' : dropJob })
droplist = sorted(droplist, key=lambda x:-x['pandaid'])
jobs = newjobs
jobstates = []
global statelist
for state in statelist:
statecount = {}
statecount['name'] = state
statecount['count'] = 0
for job in jobs:
if job['jobstatus'] == state:
statecount['count'] += 1
continue
jobstates.append(statecount)
return jobstates
| 1.695313
| 2
|
tests/test_extensions_parser.py
|
fokion/google_drive_extractor
| 2
|
12775385
|
<gh_stars>1-10
import os
import unittest
from extensions_parser import ExtensionsParser
class ExtensionsParserTest(unittest.TestCase):
def test_parse(self):
extensions = set()
extensions.update([".json", ".ai"])
parsed_extensions = ExtensionsParser.parse(os.path.join(os.getcwd(), "extensions.conf"))
print(parsed_extensions)
self.assertSetEqual(extensions,parsed_extensions)
if __name__ == '__main__':
unittest.main()
| 2.796875
| 3
|
insert.py
|
emmacunningham/court-reminder
| 2
|
12775386
|
import sys
from storage.models import Database
if len(sys.argv) != 2:
print("Usage: python insert.py <file> # file should contain one A Number per line.")
sys.exit(1)
alien_numbers = [line.strip().replace('-', '') for line in open(sys.argv[1])]
db = Database()
db.create_table() # checks if already exists
db.upload_new_requests(alien_numbers)
| 2.90625
| 3
|
lwn.py
|
coderanger/cfp-scraper
| 16
|
12775387
|
import re
from datetime import date, datetime, time
import dateparser
import pytz
import requests
from bs4 import BeautifulSoup
import sessionize
def get(url):
res = requests.get(url)
return BeautifulSoup(res.text, 'html.parser')
def parse_page(root):
for evt_elm in root.select('.CalMEvent a'):
col_index = len(evt_elm.find_parent('td').find_previous_siblings('td'))
date_row = evt_elm.find_parent('tr').find_previous_sibling(lambda elm: elm.name == 'tr' and elm.select('.CalMDate'))
day = date_row.find_all('td')[col_index].text
yield {
'short_name' : evt_elm.text,
'url': evt_elm['href'],
'name': evt_elm['title'],
'day': day
}
def find_pages():
start = date.today()
for i in range(12):
new_month = start.month + i
new_year = start.year
if new_month > 12:
new_month -= 12
new_year += 1
yield f'https://lwn.net/Calendar/Monthly/cfp/{new_year}-{new_month:02d}/', date(new_year, new_month, 1)
def parse_pages():
for url, base_date in find_pages():
for evt in parse_page(get(url)):
evt['date'] = base_date.replace(day=int(evt['day']))
yield evt
def format_page(raw_evt):
    md = re.search(r'^([^(]+) \(([^)]+)\)$', raw_evt['name'])
    if md is None:
        # name does not match the expected "Conference Name (Location)" pattern;
        # scrape() already skips events for which format_page() returns None
        return None
    name, location = md.group(1, 2)
return {
'Conference Name': name,
'Conference URL': raw_evt['url'],
'Location': location,
'CFP URL': raw_evt['url'],
'CFP End Date': datetime.combine(raw_evt['date'], time()),
}
def scrape():
for raw_evt in parse_pages():
evt = format_page(raw_evt)
if evt is None:
continue
if 'papercall.io' in evt['CFP URL']:
continue
if 'events.linuxfoundation.org' in evt['CFP URL']:
continue
if 'sessionize.com' in evt['CFP URL']:
s = sessionize.parse_event(evt['CFP URL'])
if s:
evt.update(s)
yield evt
if __name__ == '__main__':
for e in scrape():
print(e)
| 2.828125
| 3
|
subsystems/elevator.py
|
FRCTeam279/2019mule
| 0
|
12775388
|
<reponame>FRCTeam279/2019mule
import math
import wpilib
from wpilib.command.subsystem import Subsystem
from wpilib import SmartDashboard
from commands.elevatormoveup import ElevatorMoveUp
from commands.elevatormovedown import ElevatorMoveDown
from commands.elevatorteleopdefault import ElevatorTeleopDefault
import subsystems
import robotmap
"""
The elevator raises and lowers the grabber mechanisms
it uses a forklift style lift with three bars that slide against each other
It is powered with a 775 motor, a speed controller, an encoder to read the height,
and a limit switch at the bottom to stop it from over moving.
The motor pulls a string to lift the system. use the encoder to monitor the distance
(or discuss another potentiometer system like last year)
I need to add speed controller operatios, and encoder code to measure the height.
"""
class Elevator(Subsystem):
def __init__(self):
print('Elevator: init called')
super().__init__('Elevator')
self.logPrefix = "Elevator: "
self.btmLimitSwitch = wpilib.DigitalInput(robotmap.elevator.btmLimitSwitchPort)
self.elevatorSpdCtrl = wpilib.VictorSP(robotmap.elevator.motorPort) # or could be talon
#reconfigure these ports in robotmap later
self.elevatorEncoder = wpilib.Encoder(robotmap.elevator.encAPort, robotmap.elevator.encBPort, robotmap.elevator.encReverse, robotmap.elevator.encType)
self.elevatorEncoder.setDistancePerPulse(robotmap.elevator.inchesPerTick)
#self.elevatorLastSpeedSet = 0.0
# ------------------------------------------------------------------------------------------------------------------
def initDefaultCommand(self):
self.setDefaultCommand(ElevatorTeleopDefault()) #change
print("{}Default command set to ElevatorTeleopDefault".format(self.logPrefix))
def stopElevator(self):
self.elevatorSpdCtrl.set(0.0)
def holdElevator(self):
        if self.btmLimitSwitch.get():
self.elevatorSpdCtrl.set(0.0)
#self.elevatorLastSpeedSet = 0.0 #this is an example from last years code
else:
self.elevatorSpdCtrl.set(robotmap.elevator.holdSpeed) #Add holdSpeed to robotmap
#self.elevatorLastSpeedSet = robotmap.elevator.holdSpeed
# -----------------------------------------------------------------------------
def rawMove(self, speed):
self.elevatorSpdCtrl.set(speed)
def move(self, speed):
btmLimit = self.btmLimitSwitch.get()
dist = self.elevatorEncoder.get()*robotmap.elevator.inchesPerTick
topLimit = dist >= robotmap.elevator.maxHeight
if (btmLimit and speed <= 0.0):
self.elevatorSpdCtrl.set(0)
elif (topLimit and speed > 0.0):
self.elevatorSpdCtrl.set(robotmap.elevator.holdSpeed)
else:
if speed > 0:
self.elevatorSpdCtrl.set(robotmap.elevator.holdSpeed + abs(robotmap.elevator.scaleSpdUp*speed))
else:
self.elevatorSpdCtrl.set(robotmap.elevator.holdSpeed - abs(robotmap.elevator.scaleSpdDown*speed))
self.elevatorLastSpeedSet = speed
"""
def elevatorMoveUp(self, speed):
self.elevatorSpdCtrl.set(speed)
#self.elevatorLastSpeedSet = speed
def elevatorMoveDown(self, speed):
if not self.btmLimitSwitch:
self.elevatorSpdCtrl.set(speed)
#self.elevatorLastSpeedSet = speed
else:
self.elevatorSpdCtrl.set(0.0)
#self.elevatorLastSpeedSet = 0.0
"""
def resetEncoders(self):
self.elevatorEncoder.reset()
| 3.03125
| 3
|
python/AmcCarrierCore/AppTop/_TopLevel.py
|
slaclab/amc-carrier-core
| 1
|
12775389
|
#-----------------------------------------------------------------------------
# Title : PyRogue AMC Carrier Cryo Demo Board Application
#-----------------------------------------------------------------------------
# File : AppCore.py
# Created : 2017-04-03
#-----------------------------------------------------------------------------
# Description:
# PyRogue AMC Carrier Cryo Demo Board Application
#
# Network Interfaces:
# UDP_SRV_XVC_IDX_C => 2542, -- Xilinx XVC
# UDP_SRV_SRPV0_IDX_C => 8192, -- Legacy SRPv0 register access (still used for remote FPGA reprogramming)
# UDP_SRV_RSSI0_IDX_C => 8193, -- Legacy Non-interleaved RSSI for Register access and ASYNC messages
# UDP_SRV_RSSI1_IDX_C => 8194, -- Legacy Non-interleaved RSSI for bulk data transfer
# UDP_SRV_BP_MGS_IDX_C => 8195, -- Backplane Messaging
# UDP_SRV_TIMING_IDX_C => 8197, -- Timing ASYNC Messaging
# UDP_SRV_RSSI_ILEAVE_IDX_C => 8198); -- Interleaved RSSI
#-----------------------------------------------------------------------------
# This file is part of the rogue software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
# import pyrogue.interfaces.simulation
# import pyrogue.protocols
# import pyrogue.utilities.fileio
import AmcCarrierCore as amccCore
from AmcCarrierCore.AppTop._AppTop import AppTop
class TopLevel(pr.Device):
def __init__( self,
name = 'FpgaTopLevel',
description = 'Container for FPGA Top-Level',
# JESD Parameters
numRxLanes = [0,0],
numTxLanes = [0,0],
enJesdDrp = False,
# Signal Generator Parameters
numSigGen = [0,0],
sizeSigGen = [0,0],
modeSigGen = [False,False],
# General Parameters
enablePwrI2C = False,
enableBsa = False,
enableMps = False,
numWaveformBuffers = 4,
expand = True,
enableTpgMini = True,
**kwargs):
super().__init__(name=name, description=description, expand=expand, **kwargs)
self._numRxLanes = numRxLanes
self._numTxLanes = numTxLanes
self._numWaveformBuffers = numWaveformBuffers
# Add devices
self.add(amccCore.AmcCarrierCore(
offset = 0x00000000,
enablePwrI2C = enablePwrI2C,
enableBsa = enableBsa,
enableMps = enableMps,
numWaveformBuffers= numWaveformBuffers,
enableTpgMini = enableTpgMini,
))
self.add(AppTop(
offset = 0x80000000,
numRxLanes = numRxLanes,
numTxLanes = numTxLanes,
enJesdDrp = enJesdDrp,
numSigGen = numSigGen,
sizeSigGen = sizeSigGen,
modeSigGen = modeSigGen,
numWaveformBuffers = numWaveformBuffers,
expand = True
))
# Define SW trigger command
@self.command(description="Software Trigger for DAQ MUX",)
def SwDaqMuxTrig():
for i in range(2):
self.AppTop.DaqMuxV2[i].TriggerDaq.call()
def writeBlocks(self, **kwargs):
super().writeBlocks(**kwargs)
# Retire any in-flight transactions before starting
self._root.checkBlocks(recurse=True)
# Calculate the BsaWaveformEngine buffer sizes
size = [[0]*self._numWaveformBuffers,[0]*self._numWaveformBuffers]
for i in range(2):
if ((self._numRxLanes[i] > 0) or (self._numTxLanes[i] > 0)):
for j in range(self._numWaveformBuffers):
waveBuff = self.AmcCarrierCore.AmcCarrierBsa.BsaWaveformEngine[i].WaveformEngineBuffers
if ( (waveBuff.Enabled[j].get() > 0) and (waveBuff.EndAddr[j].get() > waveBuff.StartAddr[j].get()) ):
size[i][j] = waveBuff.EndAddr[j].get() - waveBuff.StartAddr[j].get()
        # Calculate the minimum enabled buffer size for each bay
minSize = [size[0][0],size[1][0]]
for i in range(2):
if ((self._numRxLanes[i] > 0) or (self._numTxLanes[i] > 0)):
for j in range(self._numWaveformBuffers):
if ( size[i][j]<minSize[i] ):
minSize[i] = size[i][j]
# Set the DAQ MUX buffer sizes to match the BsaWaveformEngine buffer sizes
for i in range(2):
if ((self._numRxLanes[i] > 0) or (self._numTxLanes[i] > 0)):
# Convert from bytes to words
minSize[i] = minSize[i] >> 2
# Set the DAQ MUX buffer sizes
self.AppTop.DaqMuxV2[i].DataBufferSize.set(minSize[i])
self.checkBlocks(recurse=True)
| 1.390625
| 1
|
FASTAExtractPlugin.py
|
movingpictures83/FASTAExtract
| 0
|
12775390
|
<gh_stars>0
import PyPluMA
class FASTAExtractPlugin:
def input(self, filename):
params = open(filename, 'r')
self.parameters = dict()
for line in params:
contents = line.strip().split('\t')
self.parameters[contents[0]] = contents[1]
self.fasta = PyPluMA.prefix()+"/"+self.parameters["fasta"]
self.start = int(self.parameters["start"]) #Assuming indexing is from 1
self.end = int(self.parameters["end"]) #Assuming indexing is from 1
def run(self):
fastafile = open(self.fasta, 'r')
self.header = fastafile.readline().strip()
self.header += " extracted by PluMA, region "+str(self.start)+"-"+str(self.end)
DNA = ''
for line in fastafile:
DNA += line.strip()
self.region = DNA[self.start-1:self.end]
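        # Worked example (hypothetical input): with start=2 and end=4, the sequence
        # 'ACGTG' yields 'CGT', because the 1-based inclusive range [2, 4] maps to
        # the Python slice DNA[1:4].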
def output(self, filename):
outfile = open(filename, 'w')
outfile.write(self.header+"\n")
outfile.write(self.region)
| 2.75
| 3
|
textrank.py
|
VivekPandey0001/TextRank
| 0
|
12775391
|
'''
import numpy as np
import pandas as pd
import nltk
nltk.download('punkt') # one time execution
import re
we_df = pd.read_hdf('mini.h5', start = 0, stop = 100) # (362891, 300)
pi(we_df.shape)
words = we_df.index
pi(words)
pi(words[50000])
pi(we_df.iloc[50000])
mes = 'This is some demo text, which has some spe$hial charecters! And numbers 10, also mixed with text, like - numb3r and number34. Just for testing. #peace_out!'
def get_text_vector(text):
re.findall(r'[a-zA-Z]+', )
'''
# python textrank.py
# textrank (using conceptnet word ventors/embeddings and cosinesimilarity)
import numpy as np
import pandas as pd
'''
import time
from sklearn.metrics import confusion_matrix
import json
import re
'''
cnnb_df = pd.read_hdf('mini.h5')
# cnnb_df = cnnb_df/59 # not req. (takes ~1.3sec)
def pi(a, b = None):
if b:
print('\n', b, a, '\n', type(a))
else:
print('\n', a, '\n', type(a))
'''
mes = 'This is some demo text, which has some spe$hial characters! And numbers 10, also mixed with text, like - numb3r and number34. Just for testing. #peace_out!'
#words = ['This', 'is', 'some', 'demo', 'text', 'which', 'has', 'some', 'spe', 'hial', 'characters', 'And', 'numbers', '10', 'also', 'mixed', 'with', 'text', 'like', 'numb', 'r', 'and', 'number', 'Just', 'for', 'testing', 'peace_out']
mes2 = 'demo text, which only has plain characters and no numbers, also not mixed with text, like - numb3r and number34. Just for testing.'
#vec = text_to_vec(list(map(lambda x: x.lower(), words)))
words = re.findall(r'[a-zA-Z]+', mes.lower())
words2 = re.findall(r'[a-zA-Z]+', mes2.lower())
#pi(words)
vec = text_to_vec(words)
vec2 = text_to_vec(words2)
sim = get_cosine_similarity(vec, vec2)
pi(sim)
pi(keyerror_list)
'''
# Read data
df = pd.read_csv('demo_articles.csv')
df.head()
df['article_text'][0]
# Form sentences
from nltk.tokenize import sent_tokenize
sentences = []
for s in df['article_text']:
sentences.append(sent_tokenize(s))
sentences = [y for x in sentences for y in x] # flatten list / 2d to 1d / combine
# Text preprocessing
# remove punctuations, numbers and special characters
clean_sentences = pd.Series(sentences).str.replace("[^a-zA-Z]", " ")
# make alphabets lowercase
clean_sentences = [s.lower() for s in clean_sentences]
import nltk
#nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
print(stop_words)
print(len(stop_words))
# function to remove stopwords
def remove_stopwords(sen):
sen_new = " ".join([i for i in sen if i not in stop_words])
return sen_new
# remove stopwords from the sentences
clean_sentences = [remove_stopwords(r.split()) for r in clean_sentences]
# Vector Representation of Sentences
# Form vector from text
keyerror_list = []
def word_to_vec(word):
vec = pd.Series(np.zeros(shape=(300)))
try:
wuri = '/c/en/' + word
vec = cnnb_df.loc[wuri]
except KeyError:
keyerror_list.append(wuri)
return vec
sentence_vectors = []
for i in clean_sentences:
if len(i) != 0:
#v = sum([word_embeddings.get(w, np.zeros((100,))) for w in i.split()]) / (len(i.split())+0.001)
v = sum([word_to_vec(word) for word in i.split()]) / (len(i.split())+0.001)
else:
v = pd.Series(np.zeros(shape=(300)))
sentence_vectors.append(v)
# Similarity Matrix Preparation
# similarity matrix
sim_mat = np.zeros([len(sentences), len(sentences)])
'''
from sklearn.metrics.pairwise import cosine_similarity
'''
# Vector comparision
def get_cosine_similarity(vec1, vec2):
# =a.b/|a||b| =dot_prod/vec_mag
try:
return sum(vec1 * vec2) / ( pow(sum(vec1*vec1), 0.5) * pow(sum(vec2*vec2), 0.5) )
except ZeroDivisionError:
return 0
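'''
# Illustrative check of get_cosine_similarity with made-up vectors (kept in a
# commented-out block, like the other scratch snippets in this file):
a = pd.Series([1.0, 0.0, 1.0])
b = pd.Series([1.0, 1.0, 0.0])
print(get_cosine_similarity(a, b))  # (1*1 + 0*1 + 1*0) / (sqrt(2)*sqrt(2)) = 0.5
'''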
for i in range(len(sentences)):
for j in range(len(sentences)):
if i != j:
#sim_mat[i][j] = cosine_similarity(sentence_vectors[i].reshape(1,300), sentence_vectors[j].reshape(1,300))[0,0]
sim_mat[i][j] = get_cosine_similarity(sentence_vectors[i], sentence_vectors[j])
'''
sim_mat[i][j] = get_cosine_similarity(sentence_vectors[i], sentence_vectors[j])
__main__:3: RuntimeWarning: invalid value encountered in double_scalars
'''
# Applying PageRank Algorithm
import networkx as nx
nx_graph = nx.from_numpy_array(sim_mat)
scores = nx.pagerank(nx_graph, max_iter=100) # default max_iter is 100
# Summary Extraction
ranked_sentences = sorted(((scores[i],s) for i,s in enumerate(sentences)), reverse=True)
# Extract top 10 sentences as the summary
for i in range(10):
print(ranked_sentences[i][1])
| 2.609375
| 3
|
inkscape-laser-cutter-engraver-master/makerwelt_raster_mcl1000.py
|
ilnanny/Inkscape-addons
| 3
|
12775392
|
<reponame>ilnanny/Inkscape-addons
'''
# ----------------------------------------------------------------------------
# Maintained by Maker-Welt (https://github.com/guiEmotiv/inkscape-laser-cutter-engraver)
# Designed to run on Ramps 1.4 + Marlin firmware on a MCL1000.
# Based on raster2gcode.py gcode inkscape extension
# Based on a script by 305engineering
#
# Copyright (C) 2017 gsoto, <EMAIL>
# based on raster2laser_gcode.py (C) 2014 305engineering...
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ----------------------------------------------------------------------------
'''
import sys
import os
import re
sys.path.append('/usr/share/inkscape/extensions')
sys.path.append('/Applications/Inkscape.app/Contents/Resources/extensions')
import subprocess
import math
import inkex
import png
import array
class GcodeExport(inkex.Effect):
    ######## Called from _main()
def __init__(self):
"""init the effetc library and get options from gui"""
inkex.Effect.__init__(self)
        # Image export options
self.OptionParser.add_option("-d", "--directory", action="store", type="string", dest="directory",
default="/home/", help="Directory for files") ####check_dir
self.OptionParser.add_option("-f", "--filename", action="store", type="string", dest="filename", default="-1.0",
help="File name")
self.OptionParser.add_option("", "--add-numeric-suffix-to-filename", action="store", type="inkbool",
dest="add_numeric_suffix_to_filename", default=True,
help="Add numeric suffix to filename")
self.OptionParser.add_option("", "--bg_color", action="store", type="string", dest="bg_color", default="",
help="")
self.OptionParser.add_option("", "--resolution", action="store", type="int", dest="resolution", default="5",
help="") # Usare il valore su float(xy)/resolution e un case per i DPI dell export
        # How to convert to grayscale
self.OptionParser.add_option("", "--grayscale_type", action="store", type="int", dest="grayscale_type",
default="1", help="")
        # Black-and-white conversion mode
self.OptionParser.add_option("", "--conversion_type", action="store", type="int", dest="conversion_type",
default="1", help="")
        # Mode options
self.OptionParser.add_option("", "--BW_threshold", action="store", type="int", dest="BW_threshold",
default="128", help="")
self.OptionParser.add_option("", "--grayscale_resolution", action="store", type="int",
dest="grayscale_resolution", default="1", help="")
        # Speed for black (laser on) and travel moves
self.OptionParser.add_option("", "--speed_ON", action="store", type="int", dest="speed_ON", default="200",
help="")
# Mirror Y
self.OptionParser.add_option("", "--flip_y", action="store", type="inkbool", dest="flip_y", default=False,
help="")
# Homing
self.OptionParser.add_option("", "--homing", action="store", type="int", dest="homing", default="1", help="")
# Commands
self.OptionParser.add_option("", "--laseron", action="store", type="string", dest="laseron", default="M106",
help="")
self.OptionParser.add_option("", "--laseroff", action="store", type="string", dest="laseroff", default="M107",
help="")
        # Preview = B/W image only
self.OptionParser.add_option("", "--preview_only", action="store", type="inkbool", dest="preview_only",
default=False, help="")
# inkex.errormsg("BLA BLA BLA Messaggio da visualizzare") #DEBUG
    ######## Called from __init__()
    ######## This is where all the work happens
def effect(self):
current_file = self.args[-1]
bg_color = self.options.bg_color
        ## Implement check_dir
if (os.path.isdir(self.options.directory)) == True:
            ## CODE FOR WHEN THE DIRECTORY EXISTS
            # inkex.errormsg("OK") #DEBUG
            # Add a suffix to the filename so existing files are not overwritten
if self.options.add_numeric_suffix_to_filename :
dir_list = os.listdir(self.options.directory) #List di tutti i file nella directory di lavoro
temp_name = self.options.filename
max_n = 0
for s in dir_list :
r = re.match(r"^%s_0*(\d+)%s$"%(re.escape(temp_name),'original.png' ), s)
if r :
max_n = max(max_n,int(r.group(1)))
self.options.filename = temp_name + "_" + ( "0"*(4-len(str(max_n+1))) + str(max_n+1) )
            # generate the file paths to use
suffix = ""
if self.options.conversion_type == 1:
suffix = "_umbralFijoBN_" + str(self.options.BW_threshold) + "_"
elif self.options.conversion_type == 2:
suffix = "_umbralAleaBN_"
elif self.options.conversion_type == 3:
suffix = "_Semitonos_"
elif self.options.conversion_type == 4:
suffix = "_filaSemito_"
elif self.options.conversion_type == 5:
suffix = "_colSemito_"
else:
if self.options.grayscale_resolution == 1:
suffix = "_Gray_256_"
elif self.options.grayscale_resolution == 2:
suffix = "_Gray_128_"
elif self.options.grayscale_resolution == 4:
suffix = "_Gray_64_"
elif self.options.grayscale_resolution == 8:
suffix = "_Gray_32_"
elif self.options.grayscale_resolution == 16:
suffix = "_Gray_16_"
elif self.options.grayscale_resolution == 32:
suffix = "_Gray_8_"
else:
suffix = "_Gray_"
pos_file_png_exported = os.path.join(self.options.directory, self.options.filename + "original.png")
pos_file_png_BW = os.path.join(self.options.directory, self.options.filename + suffix + "generado.png")
pos_file_gcode = os.path.join(self.options.directory, self.options.filename + suffix + "gcode.gcode")
            # Export the image to PNG
self.exportPage(pos_file_png_exported, current_file, bg_color)
            # TO DO
            # Process the PNG image to generate the Gcode file
self.PNGtoGcode(pos_file_png_exported, pos_file_png_BW, pos_file_gcode)
else:
inkex.errormsg("El directorio no exite! Porfavor especifique el correcto directorio!")
    ######## EXPORT THE IMAGE TO PNG
    ######## Called from effect()
def exportPage(self, pos_file_png_exported, current_file, bg_color):
        ######## PNG FILE CREATION ########
        # Create the image in the folder given by "pos_file_png_exported"
        # -d 127 = 127 DPI resolution => 5 pixel/mm, 1 pixel = 0.2 mm
###command="inkscape -C -e \"%s\" -b\"%s\" %s -d 127" % (pos_file_png_exported,bg_color,current_file)
if self.options.resolution == 1:
DPI = 25.4
elif self.options.resolution == 2:
DPI = 50.8
elif self.options.resolution == 5:
DPI = 127
elif self.options.resolution == 10:
DPI = 254
elif self.options.resolution == 20:
DPI = 508
else:
DPI = 1270
command = "inkscape -C -e \"%s\" -b\"%s\" %s -d %s" % (
pos_file_png_exported, bg_color, current_file, DPI) # Comando da linea di comando per esportare in PNG
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return_code = p.wait()
f = p.stdout
err = p.stderr
    ######## CREATE THE B/W IMAGE AND THEN GENERATE THE GCODE
    ######## Called from effect()
def PNGtoGcode(self, pos_file_png_exported, pos_file_png_BW, pos_file_gcode):
        ######## GENERATE THE GRAYSCALE IMAGE ########
        # Scan the image and turn it into a matrix made of lists
reader = png.Reader(pos_file_png_exported) # File PNG generato
w, h, pixels, metadata = reader.read_flat()
matrice = [[255 for i in range(w)] for j in range(h)] # List al posto di un array
        # Write a new 8-bit grayscale image
        # copy pixel by pixel
if self.options.grayscale_type == 1:
# 0.21R + 0.71G + 0.07B
for y in range(h): # y varia da 0 a h-1
for x in range(w): # x varia da 0 a w-1
pixel_position = (x + y * w) * 4 if metadata['alpha'] else (x + y * w) * 3
matrice[y][x] = int(pixels[pixel_position] * 0.21 + pixels[(pixel_position + 1)] * 0.71 + pixels[
(pixel_position + 2)] * 0.07)
elif self.options.grayscale_type == 2:
# (R+G+B)/3
for y in range(h): # y varia da 0 a h-1
for x in range(w): # x varia da 0 a w-1
pixel_position = (x + y * w) * 4 if metadata['alpha'] else (x + y * w) * 3
matrice[y][x] = int(
(pixels[pixel_position] + pixels[(pixel_position + 1)] + pixels[(pixel_position + 2)]) / 3)
elif self.options.grayscale_type == 3:
# R
for y in range(h): # y varia da 0 a h-1
for x in range(w): # x varia da 0 a w-1
pixel_position = (x + y * w) * 4 if metadata['alpha'] else (x + y * w) * 3
matrice[y][x] = int(pixels[pixel_position])
elif self.options.grayscale_type == 4:
# G
for y in range(h): # y varia da 0 a h-1
for x in range(w): # x varia da 0 a w-1
pixel_position = (x + y * w) * 4 if metadata['alpha'] else (x + y * w) * 3
matrice[y][x] = int(pixels[(pixel_position + 1)])
elif self.options.grayscale_type == 5:
# B
for y in range(h): # y varia da 0 a h-1
for x in range(w): # x varia da 0 a w-1
pixel_position = (x + y * w) * 4 if metadata['alpha'] else (x + y * w) * 3
matrice[y][x] = int(pixels[(pixel_position + 2)])
elif self.options.grayscale_type == 6:
# Max Color
for y in range(h): # y varia da 0 a h-1
for x in range(w): # x varia da 0 a w-1
pixel_position = (x + y * w) * 4 if metadata['alpha'] else (x + y * w) * 3
list_RGB = pixels[pixel_position], pixels[(pixel_position + 1)], pixels[(pixel_position + 2)]
matrice[y][x] = int(max(list_RGB))
else:
# Min Color
for y in range(h): # y varia da 0 a h-1
for x in range(w): # x varia da 0 a w-1
pixel_position = (x + y * w) * 4 if metadata['alpha'] else (x + y * w) * 3
list_RGB = pixels[pixel_position], pixels[(pixel_position + 1)], pixels[(pixel_position + 2)]
matrice[y][x] = int(min(list_RGB))
        #### Now matrice contains the grayscale image
        ######## GENERATE THE BLACK-AND-WHITE IMAGE ########
        # Scan matrice and generate matrice_BN
B = 255
N = 0
matrice_BN = [[255 for i in range(w)] for j in range(h)]
if self.options.conversion_type == 1:
# B/W fixed threshold
soglia = self.options.BW_threshold
for y in range(h):
for x in range(w):
if matrice[y][x] >= soglia:
matrice_BN[y][x] = B
else:
matrice_BN[y][x] = N
elif self.options.conversion_type == 2:
# B/W random threshold
from random import randint
for y in range(h):
for x in range(w):
soglia = randint(20, 235)
if matrice[y][x] >= soglia:
matrice_BN[y][x] = B
else:
matrice_BN[y][x] = N
elif self.options.conversion_type == 3:
# Halftone
Step1 = [[B, B, B, B, B], [B, B, B, B, B], [B, B, N, B, B], [B, B, B, B, B], [B, B, B, B, B]]
Step2 = [[B, B, B, B, B], [B, B, N, B, B], [B, N, N, N, B], [B, B, N, B, B], [B, B, B, B, B]]
Step3 = [[B, B, N, B, B], [B, N, N, N, B], [N, N, N, N, N], [B, N, N, N, B], [B, B, N, B, B]]
Step4 = [[B, N, N, N, B], [N, N, N, N, N], [N, N, N, N, N], [N, N, N, N, N], [B, N, N, N, B]]
for y in range(h / 5):
for x in range(w / 5):
media = 0
for y2 in range(5):
for x2 in range(5):
media += matrice[y * 5 + y2][x * 5 + x2]
media = media / 25
for y3 in range(5):
for x3 in range(5):
if media >= 250 and media <= 255:
matrice_BN[y * 5 + y3][x * 5 + x3] = B
if media >= 190 and media < 250:
matrice_BN[y * 5 + y3][x * 5 + x3] = Step1[y3][x3]
if media >= 130 and media < 190:
matrice_BN[y * 5 + y3][x * 5 + x3] = Step2[y3][x3]
if media >= 70 and media < 130:
matrice_BN[y * 5 + y3][x * 5 + x3] = Step3[y3][x3]
if media >= 10 and media < 70:
matrice_BN[y * 5 + y3][x * 5 + x3] = Step4[y3][x3]
if media >= 0 and media < 10:
matrice_BN[y * 5 + y3][x * 5 + x3] = N
elif self.options.conversion_type == 4:
# Halftone row
Step1r = [B, B, N, B, B]
Step2r = [B, N, N, B, B]
Step3r = [B, N, N, N, B]
Step4r = [N, N, N, N, B]
for y in range(h):
for x in range(w / 5):
media = 0
for x2 in range(5):
media += matrice[y][x * 5 + x2]
media = media / 5
for x3 in range(5):
if media >= 250 and media <= 255:
matrice_BN[y][x * 5 + x3] = B
if media >= 190 and media < 250:
matrice_BN[y][x * 5 + x3] = Step1r[x3]
if media >= 130 and media < 190:
matrice_BN[y][x * 5 + x3] = Step2r[x3]
if media >= 70 and media < 130:
matrice_BN[y][x * 5 + x3] = Step3r[x3]
if media >= 10 and media < 70:
matrice_BN[y][x * 5 + x3] = Step4r[x3]
if media >= 0 and media < 10:
matrice_BN[y][x * 5 + x3] = N
elif self.options.conversion_type == 5:
# Halftone column
Step1c = [B, B, N, B, B]
Step2c = [B, N, N, B, B]
Step3c = [B, N, N, N, B]
Step4c = [N, N, N, N, B]
for y in range(h / 5):
for x in range(w):
media = 0
for y2 in range(5):
media += matrice[y * 5 + y2][x]
media = media / 5
for y3 in range(5):
if media >= 250 and media <= 255:
matrice_BN[y * 5 + y3][x] = B
if media >= 190 and media < 250:
matrice_BN[y * 5 + y3][x] = Step1c[y3]
if media >= 130 and media < 190:
matrice_BN[y * 5 + y3][x] = Step2c[y3]
if media >= 70 and media < 130:
matrice_BN[y * 5 + y3][x] = Step3c[y3]
if media >= 10 and media < 70:
matrice_BN[y * 5 + y3][x] = Step4c[y3]
if media >= 0 and media < 10:
matrice_BN[y * 5 + y3][x] = N
else:
# Grayscale
if self.options.grayscale_resolution == 1:
matrice_BN = matrice
else:
for y in range(h):
for x in range(w):
if matrice[y][x] <= 1:
matrice_BN[y][x] == 0
if matrice[y][x] >= 254:
matrice_BN[y][x] == 255
if matrice[y][x] > 1 and matrice[y][x] < 254:
matrice_BN[y][x] = (matrice[y][
x] // self.options.grayscale_resolution) * self.options.grayscale_resolution
        #### Now matrice_BN contains the image in white (255) and black (0)
        #### SAVE THE BLACK-AND-WHITE IMAGE ####
file_img_BN = open(pos_file_png_BW, 'wb') # Creo il file
Costruttore_img = png.Writer(w, h, greyscale=True, bitdepth=8) # Impostazione del file immagine
Costruttore_img.write(file_img_BN, matrice_BN) # Costruttore del file immagine
file_img_BN.close() # Chiudo il file
        #### GENERATE THE GCODE FILE ####
if self.options.preview_only == False: # Genero Gcode solo se devo
if self.options.flip_y == False: # Inverto asse Y solo se flip_y = False
                # -> Cartesian coordinates (False) vs "screen" coordinates (True)
matrice_BN.reverse()
Laser_ON = False
F_G01 = self.options.speed_ON
Scala = self.options.resolution
file_gcode = open(pos_file_gcode, 'w') # Creo il file
            # Standard initial Gcode setup
# HOMING
file_gcode.write('M117 Inicializando...!!\n')
file_gcode.write('G28\n')
file_gcode.write('G21\n')
file_gcode.write('G90\n')
file_gcode.write('G92\n')
file_gcode.write('M117 Listo para iniciar...\n')
file_gcode.write('M300 P1200 S150\n')
            # Gcode generation
            # widen the matrix so the whole image is processed
for y in range(h):
matrice_BN[y].append(B)
w = w + 1
if self.options.conversion_type != 6:
for y in range(h):
if y % 2 == 0:
for x in range(w):
if matrice_BN[y][x] == N:
if Laser_ON == False:
# file_gcode.write('G00 X' + str(float(x)/Scala) + ' Y' + str(float(y)/Scala) + ' F' + str(F_G00) + '\n')
file_gcode.write('G00 X' + str(float(x) / Scala) + ' Y' + str(
float(y) / Scala) + '\n') # tolto il Feed sul G00
file_gcode.write(self.options.laseron + '\n')
Laser_ON = True
if Laser_ON == True: # DEVO evitare di uscire dalla matrice
if x == w - 1:
file_gcode.write(
'G01 X' + str(float(x) / Scala) + ' Y' + str(float(y) / Scala) + ' F' + str(
F_G01) + '\n')
file_gcode.write(self.options.laseroff + '\n')
Laser_ON = False
else:
if matrice_BN[y][x + 1] != N:
file_gcode.write('G01 X' + str(float(x) / Scala) + ' Y' + str(
float(y) / Scala) + ' F' + str(F_G01) + '\n')
file_gcode.write(self.options.laseroff + '\n')
Laser_ON = False
else:
for x in reversed(range(w)):
if matrice_BN[y][x] == N:
if Laser_ON == False:
# file_gcode.write('G00 X' + str(float(x)/Scala) + ' Y' + str(float(y)/Scala) + ' F' + str(F_G00) + '\n')
file_gcode.write('G00 X' + str(float(x) / Scala) + ' Y' + str(
float(y) / Scala) + '\n') # tolto il Feed sul G00
file_gcode.write(self.options.laseron + '\n')
Laser_ON = True
if Laser_ON == True: # DEVO evitare di uscire dalla matrice
if x == 0:
file_gcode.write(
'G01 X' + str(float(x) / Scala) + ' Y' + str(float(y) / Scala) + ' F' + str(
F_G01) + '\n')
file_gcode.write(self.options.laseroff + '\n')
Laser_ON = False
else:
if matrice_BN[y][x - 1] != N:
file_gcode.write('G01 X' + str(float(x) / Scala) + ' Y' + str(
float(y) / Scala) + ' F' + str(F_G01) + '\n')
file_gcode.write(self.options.laseroff + '\n')
Laser_ON = False
else: ##SCALA DI GRIGI
for y in range(h):
if y % 2 == 0:
for x in range(w):
if matrice_BN[y][x] != B:
if Laser_ON == False:
file_gcode.write(
'G00 X' + str(float(x) / Scala) + ' Y' + str(float(y) / Scala) + '\n')
file_gcode.write(
self.options.laseron + ' ' + ' S' + str(255 - matrice_BN[y][x]) + '\n')
Laser_ON = True
if Laser_ON == True: # DEVO evitare di uscire dalla matrice
if x == w - 1: # controllo fine riga
file_gcode.write(
'G01 X' + str(float(x) / Scala) + ' Y' + str(float(y) / Scala) + ' F' + str(
F_G01) + '\n')
file_gcode.write(self.options.laseroff + '\n')
Laser_ON = False
else:
if matrice_BN[y][x + 1] == B:
file_gcode.write('G01 X' + str(float(x + 1) / Scala) + ' Y' + str(
float(y) / Scala) + ' F' + str(F_G01) + '\n')
file_gcode.write(self.options.laseroff + '\n')
Laser_ON = False
elif matrice_BN[y][x] != matrice_BN[y][x + 1]:
file_gcode.write('G01 X' + str(float(x + 1) / Scala) + ' Y' + str(
float(y) / Scala) + ' F' + str(F_G01) + '\n')
file_gcode.write(self.options.laseron + ' ' + ' S' + str(
255 - matrice_BN[y][x + 1]) + '\n')
else:
for x in reversed(range(w)):
if matrice_BN[y][x] != B:
if Laser_ON == False:
file_gcode.write(
'G00 X' + str(float(x + 1) / Scala) + ' Y' + str(float(y) / Scala) + '\n')
file_gcode.write(
self.options.laseron + ' ' + ' S' + str(255 - matrice_BN[y][x]) + '\n')
Laser_ON = True
if Laser_ON == True: # DEVO evitare di uscire dalla matrice
if x == 0: # controllo fine riga ritorno
file_gcode.write(
'G01 X' + str(float(x) / Scala) + ' Y' + str(float(y) / Scala) + ' F' + str(
F_G01) + '\n')
file_gcode.write(self.options.laseroff + '\n')
Laser_ON = False
else:
if matrice_BN[y][x - 1] == B:
file_gcode.write('G01 X' + str(float(x) / Scala) + ' Y' + str(
float(y) / Scala) + ' F' + str(F_G01) + '\n')
file_gcode.write(self.options.laseroff + '\n')
Laser_ON = False
elif matrice_BN[y][x] != matrice_BN[y][x - 1]:
file_gcode.write('G01 X' + str(float(x) / Scala) + ' Y' + str(
float(y) / Scala) + ' F' + str(F_G01) + '\n')
file_gcode.write(self.options.laseron + ' ' + ' S' + str(
255 - matrice_BN[y][x - 1]) + '\n')
            # Standard final Gcode commands
file_gcode.write('G00 X0 Y0; home\n')
# HOMING
file_gcode.write('G28\n')
file_gcode.write('M18\n')
file_gcode.write('M117 Trabajo completado!!\n')
file_gcode.write('M300 P1200 S150\n')
file_gcode.close() # Chiudo il file
######## ######## ######## ######## ######## ######## ######## ######## ########
def _main():
e = GcodeExport()
e.affect()
exit()
if __name__ == "__main__":
_main()
| 1.765625
| 2
|
paaws/cli/instance.py
|
gkope/paaws
| 1
|
12775393
|
"""
Usage:
paaws instance detail [ --instance-id=<instance_id> ] [ --name=<app_name> --process=<process> --platform=<platform> --env=<env> ] --region=<region>
paaws instance list [ --instance-ids=<instance_ids> ] [ --name=<app_name> ] [ --process=<process> ] [ --platform=<platform> ] [ --env=<env> ] --region=<region>
paaws instance launch --name=<app_name> --process=<process> --platform=<platform> --env=<env> --instance-class=<instance_class> --region=<region> [ --zone=<zone> ] [ --public ]
paaws instance destroy --name=<app_name> [ --process=<process> --platform=<platform> --env=<env> ] --region=<region>
The most commonly used paaws instance commands are:
launch
destroy
list
detail
"""
from __future__ import print_function
from paaws.config import Config
from paaws.ec2 import Instance, get_instances_data
from paaws.helpers.parsers import to_table
def instance(args):
if args['launch']:
instance = Instance(
name=args['--name'],
process=args['--process'],
platform=Config.get_default_config(space='paaws', key='platform') if args['--platform'] is None else args['--platform'],
env=Config.get_default_config(space='paaws', key='env') if args['--env'] is None else args['--env'],
region=Config.get_default_config(space='paaws', key='region') if args['--region'] is None else args['--region'],
instance_class=args['--instance-class'],
public=args['--public'],
zone=args['--zone']
)
instance = instance.launch()
instance_id = instance.id
instance_data = get_instances_data(
region=Config.get_default_config(space='paaws', key='region') if args['--region'] is None else args['--region'],
instance_ids=[ instance_id ],
list_instances=False,
name=args['--name'],
process=args['--process'],
platform=args['--platform'],
env=args['--env']
)
print(to_table(instance_data))
elif args['destroy']:
instance = Instance(
region=Config.get_default_config(space='paaws', key='region') if args['--region'] is None else args['--region'],
name=args['--name'],
process=args['--process'],
platform=args['--platform'],
env=args['--env'],
)
print(instance.destroy())
elif args['detail']:
instance_data = get_instances_data(
region=Config.get_default_config(space='paaws', key='region') if args['--region'] is None else args['--region'],
instance_ids=[ args['--instance-id'] ] if args['--instance-id'] is not None else [],
list_instances=False,
name=args['--name'],
process=args['--process'],
platform=args['--platform'],
env=args['--env']
)
print(to_table(instance_data))
elif args['list']:
instance_data = get_instances_data(
region=Config.get_default_config(space='paaws', key='region') if args['--region'] is None else args['--region'],
instance_ids=args['--instance-ids'].split(" ") if args['--instance-ids'] is not None else [],
list_instances=True,
name=args['--name'],
process=args['--process'],
platform=args['--platform'],
env=args['--env']
)
print(to_table(instance_data))
else:
pass
| 2.890625
| 3
|
wagtail_references/migrations/0001_initial.py
|
cividi/wagtail_references
| 4
|
12775394
|
<reponame>cividi/wagtail_references
# Generated by Django 2.1.4 on 2018-12-19 11:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.models
import wagtail.search.index
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0040_page_draft_title'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Reference',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.CharField(help_text='A short key to cite the reference by. Determined from the BibTeX entry key. Must be unique.', max_length=255, unique=True, verbose_name='slug')),
('bibtex', models.TextField(help_text='The reference, in bibtex format.')),
('bibtype', models.CharField(default='article', help_text='The entry type, detected from the BibTeX entry.', max_length=255, verbose_name='Bibliography entry type')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('collection', models.ForeignKey(default=wagtail.core.models.get_root_collection_id, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Collection', verbose_name='collection')),
('created_by_user', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='created by user')),
],
options={
'verbose_name': 'BibTeX Reference',
'verbose_name_plural': 'BibTeX References',
},
bases=(wagtail.search.index.Indexed, models.Model),
),
]
| 1.710938
| 2
|
constants.py
|
shamedgh/confine-
| 0
|
12775395
|
<reponame>shamedgh/confine-
SYSDIGERR = -1
NOPROCESS = -2
NOFUNCS = -3
NOATTACH = -4
CONSTOP = -5
HSTOPS = -6
HLOGLEN = -7
HNOKILL = -8
HNORUN = -9
CACHE = ".cache"
LIBFILENAME = "libs.out"
LANGFILENAME = ".lang.cache"
BINLISTCACHE = ".binlist.cache"
LIBLISTCACHE = ".liblist.cache"
BINTOLIBCACHE = ".bintolib.cache"
TOOLNAME = "CONFINE"
SECCOMPCPROG = "seccomp"
DOCKERENTRYSCRIPT = "docker-entrypoint.sh"
DOCKERENTRYSCRIPTMODIFIED = "docker-entrypoint.wseccomp.sh"
ERRTOMSG = dict()
ERRTOMSG[SYSDIGERR] = "There was an error running sysdig, please make sure it is installed and the script has enough privileges to run it"
ERRTOMSG[NOPROCESS] = "Sysdig was not able to identify any processes. This causes our dynamic analysis to fail and the static analysis cannot analyze anything"
ERRTOMSG[NOFUNCS] = "No imported functions could be extracted from any of the binaries and libraries required by the container"
ERRTOMSG[NOATTACH] = "The container did not run in attached mode"
ERRTOMSG[CONSTOP] = "The container got killed after being launched in attach mode"
ERRTOMSG[HSTOPS] = "The hardened container stops running. Probably due to a problem in generating the SECCOMP profile and prohibiting access to a required system call"
ERRTOMSG[HLOGLEN] = "While the container has been hardened successfully, the log length doesn't match the original log length, which was run without any SECCOMP profile"
ERRTOMSG[HNOKILL] = "The container has been hardened successfully, but we could not kill it afterwards. This usually means that the container has died. If so, the generated profile has a problem"
ERRTOMSG[HNORUN] = "The hardened container does not run at all. The generated SECCOMP profile has a problem"
| 1.804688
| 2
|
textSmartEditor.py
|
hydrogen602/betterTextEditor
| 3
|
12775396
|
import time
import sys
from textEditor import TextEditor
from core import curses
# import completion
class TextSmartEditor(TextEditor):
'''
option-o to write out
option-q to quit
'''
def __init__(self):
super(TextSmartEditor, self).__init__()
self.marginRight = self.width // 2
def updateDim(self):
super(TextSmartEditor, self).updateDim()
self.width -= self.marginRight
def checkErrors(self):
code = '\n'.join(self.lines)
try:
exec(code)
except Exception as e:
return e # type(e).__name__, e.__traceback__.tb_lineno
return
def updateScreen(self, endLine=True):
super(TextSmartEditor, self).updateScreen(endLine=endLine)
start = self.width + self.getMargin()
for y in range(self.height):
# space available = (self.marginRight - 2)
# msg = str(completion.understandLine(self.lines[y + self.scrollY]))
msg = str(self.checkErrors())
msg = msg[:self.marginRight - 2]
text = '| ' + msg
self.window.addstr(y, start, text, curses.color_pair(0))
if not endLine:
return
#self.print(, self.height - 1, resetX=True, fullLine=True)
msg1 = '<< option-q to quit >>'
msg2 = '<< option-o to save >>'
buf = '-' * ((self.width + self.marginLeft + self.marginRight) - len(msg1) - len(msg2) - 1)
if buf == '':
raise Exception('Make your window bigger (wider)')
text = msg1 + buf + msg2
self.window.addstr(self.height, 0, text, curses.color_pair(0)) # 16
if __name__ == '__main__':
if len(sys.argv) != 2:
print('usage: TextEditor file')
sys.exit(1)
# if not os.path.isfile(sys.argv[1]):
# print('error: file not found')
# sys.exit(1)
with TextSmartEditor() as m:
m.load(sys.argv[1])
m.run()
| 2.671875
| 3
|
oase-root/libs/commonlibs/mail/mail_common.py
|
wreathvine/oase-remove-file-test
| 9
|
12775397
|
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[Overview]
  Library for the mail driver
"""
import traceback
from web_app.models.models import MailTemplate
from web_app.models.mail_models import MailDriver
from web_app.models.mail_models import MailActionHistory
from libs.backyardlibs.action_driver.mail.mail_driver import mailManager
from libs.commonlibs.oase_logger import OaseLogger
from libs.commonlibs.common import DriverCommon
logger = OaseLogger.get_instance()
def check_dt_action_params(params, act_info, conditions, *args, **kwargs):
"""
    Check the action parameters.
    Return a list of error messages.
"""
message_list = []
to_list = []
to_info = kwargs['to_info'] if 'to_info' in kwargs else {}
pre_flg = kwargs['pre_flg'] if 'pre_flg' in kwargs else False
for param in params:
param = param.strip()
        # Retrieve the parameter information
check_info = mailManager.analysis_parameters(params)
        # MAIL_NAME check
message_list = check_dt_action_params_mail_name(check_info, act_info,message_list)
        # MAIL_TEMPLATE check
result = check_dt_action_params_mail_template(check_info, pre_flg, act_info, to_list, to_info, message_list)
message_list = result[0]
to_list = result[1]
to_info = result[2]
        # Check whether any mail destination is set (MAIL_TO, MAIL_CC, MAIL_BCC)
message_list = check_dt_action_params_mail_to_list(check_info, conditions, to_list, message_list)
return message_list
def check_dt_action_params_mail_name(check_info, act_info, message_list):
"""
    Check MAIL_NAME.
    Return a list of error messages.
"""
mail_name = check_info['MAIL_NAME']
if mail_name is None:
logger.logic_log('LOSM00008', check_info)
message_list.append({'id': 'MOSJA03113', 'param': 'MAIL_NAME'})
else:
        # Check that the MAIL_NAME value is a registered driver display name
if 'drv_name' not in act_info:
act_info['drv_name'] = {}
if mail_name not in act_info['drv_name']:
rcnt = MailDriver.objects.filter(mail_disp_name=mail_name).count()
act_info['drv_name'][mail_name] = True if rcnt > 0 else False
if not act_info['drv_name'][mail_name]:
logger.logic_log('LOSM00009', check_info)
message_list.append({'id': 'MOSJA03117', 'param': None})
return message_list
def check_dt_action_params_mail_template(check_info, pre_flg, act_info, to_list, to_info, message_list):
"""
    Check MAIL_TEMPLATE.
    Return the list of error messages, the list of destinations, and the
    template-name to destination mapping.
"""
template = check_info['MAIL_TEMPLATE']
if template is None:
logger.logic_log('LOSM00010', check_info)
message_list.append({'id': 'MOSJA03113', 'param': 'MAIL_TEMPLATE'})
elif template == '':
if not pre_flg:
logger.logic_log('LOSM00011', check_info)
message_list.append({'id': 'MOSJA03118', 'param': None})
else:
        # Check that the MAIL_TEMPLATE value is a registered mail template name
result = is_dt_action_params_mail_template(act_info, template, to_list, to_info)
if result:
to_list.extend(to_info[template])
else:
logger.logic_log('LOSM00011', check_info)
message_list.append({'id': 'MOSJA03118', 'param': None})
return message_list, to_list, to_info
def check_dt_action_params_mail_to_list(check_info, conditions, to_list, message_list):
"""
    Check whether any mail destination is configured.
    Return a list of error messages.
"""
    # MAIL_TO check
mail_to = check_info['MAIL_TO']
if mail_to is None:
logger.logic_log('LOSM00012', check_info)
message_list.append({'id': 'MOSJA03114', 'param': 'MAIL_TO'})
elif not DriverCommon.has_right_reserved_value(conditions, mail_to):
logger.logic_log('LOSM00023', mail_to)
message_list.append({'id': 'MOSJA03137', 'param': 'MAIL_TO'})
elif mail_to != '':
to_list.append(mail_to)
    # MAIL_CC check
mail_cc = check_info['MAIL_CC']
if mail_cc is None:
logger.logic_log('LOSM00013', check_info)
message_list.append({'id': 'MOSJA03114', 'param': 'MAIL_CC'})
elif not DriverCommon.has_right_reserved_value(conditions, mail_cc):
logger.logic_log('LOSM00023', mail_cc)
message_list.append({'id': 'MOSJA03137', 'param': 'MAIL_CC'})
    # MAIL_BCC check
mail_bcc = check_info['MAIL_BCC']
if mail_bcc is None:
logger.logic_log('LOSM00014', check_info)
message_list.append({'id': 'MOSJA03114', 'param': 'MAIL_BCC'})
elif not DriverCommon.has_right_reserved_value(conditions, mail_bcc):
logger.logic_log('LOSM00023', mail_bcc)
message_list.append({'id': 'MOSJA03137', 'param': 'MAIL_BCC'})
    # Check that at least one mail destination is set
if len(to_list) <= 0:
logger.logic_log('LOSM00015', check_info)
message_list.append({'id': 'MOSJA03119', 'param': None})
return message_list
def is_dt_action_params_mail_template(act_info, template, to_list, to_info):
"""
    Check whether the given name is a registered mail template name.
    Return True if the mail template name is registered.
"""
if 'tmp_name' not in act_info:
act_info['tmp_name'] = {}
if template not in act_info['tmp_name']:
rset = list(MailTemplate.objects.filter(
mail_template_name=template,
).values_list(
'destination', flat=True)
)
for r in rset:
if r:
to_list.append(r)
else:
if template not in to_info:
to_info[template] = to_list
act_info['tmp_name'][template] = True if len(rset) > 0 else False
return act_info['tmp_name'][template]
def get_history_data(action_his_id):
"""
    [Overview]
      Retrieve the mail action history for the given action_his_id
    [Arguments]
      action_his_id: int
    [Return value]
      result: dict  information to be shown in the action details
"""
result = {}
try:
history = MailActionHistory.objects.get(action_his_id=action_his_id)
result['MOSJA13029'] = history.mail_template_name
result['MOSJA13030'] = history.mail_address
except MailActionHistory.DoesNotExist:
logger.system_log('LOSE00000', action_his_id, traceback.format_exc())
finally:
return result
| 1.867188
| 2
|
tests/__init__.py
|
prajmus/pypi_changes
| 24
|
12775398
|
<filename>tests/__init__.py
from __future__ import annotations
import sys
from pathlib import Path
from typing import Callable
from unittest.mock import MagicMock
if sys.version_info >= (3, 8): # pragma: no cover (py38+)
from importlib.metadata import PathDistribution
else: # pragma: no cover (<py38)
from importlib_metadata import PathDistribution
MakeDist = Callable[[Path, str, str], MagicMock]
__all__ = [
"PathDistribution",
"MakeDist",
]
| 2
| 2
|
Woo2Bing.py
|
yoelsher/Woo2Bing
| 0
|
12775399
|
<reponame>yoelsher/Woo2Bing<filename>Woo2Bing.py
__author__ = 'www.yoelsher.com'
# Grab all data from xml
#import xml.etree.ElementTree as etree
from lxml import etree
import csv,re
inputFileName = 'funk120415.xml'
outputFileName = 'funk120415.txt'
dbFileName = 'funkierb_1C.csv'
brand = 'Funkier Bike'
sellerName = '<NAME>'
websiteURL = 'https://www.funkierbikeusa.com'
uploadURL = websiteURL + '/wp-content/uploads/'
oFile = open(outputFileName,'w',newline='')
feedWriter = csv.writer(oFile,delimiter='\t')
feedWriter.writerow(['id','title','brand','link','price','description','image_link'])
tree = etree.parse(inputFileName)
root = tree.getroot()
channel = root.find('channel')
# Parse items and save
counter = 1
for item in channel.findall('item'):
counter = counter + 1
# Check if Published, else skip
status = item.find(etree.QName(item.nsmap['wp'],'status')).text
if (status != 'publish'):
continue
# Get Title
title = item.find('title').text
title= re.sub('\u2019','\'',title)
title = title.encode("ascii")
# Get URL to Item
link = item.find('link').text
# Change to https if http
link = re.sub('http://','https://',link)
# Get ID
postID = item.find(etree.QName(item.nsmap['wp'],'post_id')).text
# Get Description
description = item.find(etree.QName(item.nsmap['excerpt'] , 'encoded')).text.strip()
# Remove HTML tags
description = re.sub('<[^<]+?>', '', description)
description = re.sub('\n','. ',description)
description = re.sub('\t','',description)
description = re.sub('\xa0','',description)
description = re.sub('-','',description)
description = re.sub('\u2022','',description)
description = description.strip().encode("ascii").decode("utf-8")
# If description starts with ' ' remove it
description = description.strip();
if not (len(description)):
description = 'No description available.';
# Get Price
for postmeta in item.findall(etree.QName(item.nsmap['wp'],'postmeta')):
for metakey in postmeta.findall(etree.QName(item.nsmap['wp'],'meta_key')):
if (metakey.text=='_price'):
for element in postmeta.iter():
if (element.tag == etree.QName(item.nsmap['wp'],'meta_value')):
price = element.text
# Get Thumbnail ID
for postmeta in item.findall(etree.QName(item.nsmap['wp'],'postmeta')):
for metakey in postmeta.findall(etree.QName(item.nsmap['wp'],'meta_key')):
if (metakey.text=='_thumbnail_id'):
for element in postmeta.iter():
if (element.tag == etree.QName(item.nsmap['wp'],'meta_value')):
imageIDs = element.text.split(',')
# Generate image url
imageURL = ''
with open(dbFileName) as dbFile:
r = csv.reader(dbFile, delimiter=";")
for row in r:
# print(row[1])
for imageID in imageIDs:
if (row[1] == imageID) and (row[2]=='_wp_attached_file'):
imageURL = uploadURL + row[3]
break
# Generate the CSV
feedWriter = csv.writer(oFile,delimiter='\t')
# Make sure no duplicates
postIDWithB = ' ('+postID+')'
postIDWithB = postIDWithB.encode("utf8")
title = title + postIDWithB
title = title.decode("utf-8")
data = [postID,title,brand,link,price,description,imageURL]
feedWriter.writerow(data)
oFile.close()
print('Done')
| 2.671875
| 3
|
otcextensions/tests/functional/osclient/dcaas/v2/test_connection.py
|
gtema/python-otcextensions
| 10
|
12775400
|
<reponame>gtema/python-otcextensions
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from openstackclient.tests.functional import base
class TestDirectConnection(base.TestCase):
UUID = uuid.uuid4().hex[:8]
DC_NAME = "direct_connect-" + UUID
BANDWIDTH = 50
PORT_TYPE = "1G"
PROVIDER = "OTC"
LOCATION = "Biere"
def setUp(self):
super(TestDirectConnection, self).setUp()
def tearDown(self):
try:
            if getattr(self, 'DC_ID', None):  # skip cleanup if creation never happened
self._delete_direct_connection()
finally:
super(TestDirectConnection, self).tearDown()
def _create_direct_connection(self):
json_output = json.loads(self.openstack(
'dcaas connection create'
' {port_type}'
' {bandwidth}'
' {location}'
' {provider}'
' --name {name} -f json'.format(
name=self.DC_NAME,
port_type=self.PORT_TYPE,
bandwidth=self.BANDWIDTH,
location=self.LOCATION,
provider=self.PROVIDER
)
))
self.DC_ID = json_output['id']
return json_output
def _delete_direct_connection(self):
self.openstack('dcaas connection delete ' + self.DC_ID)
def test_create_direct_connection(self):
json_output = self._create_direct_connection()
self.assertIsNotNone(json_output)
self.assertEqual(json_output['id'], self.DC_ID)
def test_list_direct_connection(self):
self._create_direct_connection()
json_output = json.loads(self.openstack(
'dcaas connection list -f json'
))
self.assertIsNotNone(json_output)
def test_list_filter_direct_connection(self):
self._create_direct_connection()
json_output = json.loads(self.openstack(
'dcaas connection list '
'--name {name} '
'--bandwidth {bandwidth} '
'--port_type {port_type} '
'--location {location} '
'--provider {provider} -f json'.format(
name=self.DC_NAME,
bandwidth=self.BANDWIDTH,
port_type=self.PORT_TYPE,
location=self.LOCATION,
provider=self.PROVIDER
)
))
self.assertIsNotNone(json_output)
def test_find_direct_connection(self):
self._create_direct_connection()
json_output = json.loads(self.openstack(
'dcaas connection show {id} -f json'.format(id=self.DC_ID)
))
self.assertIsNotNone(json_output)
def test_update_direct_connection(self):
self._create_direct_connection()
connection_name = self.DC_NAME + '-updated'
json_output = json.loads(self.openstack(
'dcaas connection update {id} --name {name} -f json'.format(
id=self.DC_ID,
name=connection_name
)
))
self.assertEqual(json_output['id'], self.DC_ID)
self.assertEqual(json_output['name'], connection_name)
| 2.046875
| 2
|
memcload/__main__.py
|
stkrizh/otus
| 1
|
12775401
|
<gh_stars>1-10
import glob
import gzip
import logging
import multiprocessing as mp
import os
import sys
import time
from collections import Counter
from functools import partial
from optparse import OptionParser
from pathlib import Path
from typing import List
import memcache
from . import appsinstalled_pb2
from .types import AppsInstalled, ProcessingStatus
NORMAL_ERR_RATE = 0.01
MEMCACHE_RETRY_NUMBER = 3
MEMCACHE_RETRY_TIMEOUT_SECONDS = 1
MEMCACHE_SOCKET_TIMEOUT_SECONDS = 3
def dot_rename(path):
head, fn = os.path.split(path)
# atomic in most cases
os.rename(path, os.path.join(head, "." + fn))
def insert_appsinstalled(
memcache_client: memcache.Client,
appsinstalled: AppsInstalled,
dry_run: bool = False,
) -> bool:
ua = appsinstalled_pb2.UserApps()
ua.lat = appsinstalled.lat
ua.lon = appsinstalled.lon
key = "%s:%s" % (appsinstalled.dev_type, appsinstalled.dev_id)
ua.apps.extend(appsinstalled.apps)
packed = ua.SerializeToString()
if dry_run:
logging.debug("%s -> %s" % (key, str(ua).replace("\n", " ")))
return True
for _ in range(MEMCACHE_RETRY_NUMBER):
try:
# Use a tuple as key to write to specific Memcached server
# https://github.com/linsomniac/python-memcached/blob/bad41222379102e3f18f6f2f7be3ee608de6fbff/memcache.py#L698
success: bool = memcache_client.set(
(appsinstalled.dev_type.value, key), packed
)
except Exception as e:
logging.exception(f"Cannot write to Memcache: {e}")
return False
if success:
return True
time.sleep(MEMCACHE_RETRY_TIMEOUT_SECONDS)
logging.error("Cannot write to Memcache. Server is down")
return False
def process_line(
raw_line: bytes, memcache_client: memcache.Client, dry: bool
) -> ProcessingStatus:
line = raw_line.decode("utf-8").strip()
if not line:
return ProcessingStatus.SKIP
try:
appsinstalled = AppsInstalled.from_raw(line)
except ValueError as e:
logging.error(f"Cannot parse line: {e}")
return ProcessingStatus.ERROR
ok: bool = insert_appsinstalled(memcache_client, appsinstalled, dry)
if not ok:
return ProcessingStatus.ERROR
return ProcessingStatus.OK
def process_file(fn: str, memcache_addresses: List[str], dry: bool) -> str:
worker = mp.current_process()
logging.info(f"[{worker.name}] Processing {fn}")
memcache_client = memcache.Client(
memcache_addresses,
        socket_timeout=MEMCACHE_SOCKET_TIMEOUT_SECONDS,
dead_retry=MEMCACHE_RETRY_TIMEOUT_SECONDS,
)
with gzip.open(fn) as fd:
job = partial(process_line, memcache_client=memcache_client, dry=dry)
statuses = Counter(map(job, fd))
ok = statuses[ProcessingStatus.OK]
errors = statuses[ProcessingStatus.ERROR]
processed = ok + errors
err_rate = float(errors) / processed if processed else 1.0
if err_rate < NORMAL_ERR_RATE:
logging.info(
f"[{worker.name}] [{fn}] Acceptable error rate: {err_rate}."
f" Successfull load"
)
else:
logging.error(
f"[{worker.name}] [{fn}] High error rate: "
f"{err_rate} > {NORMAL_ERR_RATE}. Failed load"
)
return fn
def main(options):
""" Entry point
"""
memcache_addresses: List[str] = [
options.idfa,
options.gaid,
options.adid,
options.dvid,
]
job = partial(
process_file, memcache_addresses=memcache_addresses, dry=options.dry
)
files = sorted(
glob.glob(options.pattern), key=lambda file: Path(file).name
)
with mp.Pool() as pool:
for processed_file in pool.imap(job, files):
worker = mp.current_process()
logging.info(f"[{worker.name}] Renaming {processed_file}")
dot_rename(processed_file)
if __name__ == "__main__":
op = OptionParser()
op.add_option("-l", "--log", action="store", default=None)
op.add_option("--dry", action="store_true", default=False)
op.add_option("--loglevel", action="store", default="INFO")
op.add_option(
"--pattern", action="store", default="/data/appsinstalled/*.tsv.gz"
)
op.add_option("--idfa", action="store", default="127.0.0.1:33013")
op.add_option("--gaid", action="store", default="127.0.0.1:33014")
op.add_option("--adid", action="store", default="127.0.0.1:33015")
op.add_option("--dvid", action="store", default="127.0.0.1:33016")
(opts, args) = op.parse_args()
logging.basicConfig(
filename=opts.log,
level=getattr(logging, opts.loglevel, logging.INFO),
format="[%(asctime)s] %(levelname).1s %(message)s",
datefmt="%Y.%m.%d %H:%M:%S",
)
logging.info("Memc loader started with options: %s" % opts)
try:
main(opts)
except Exception as e:
logging.exception("Unexpected error: %s" % e)
sys.exit(1)
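# Example invocation as a module (a sketch; the glob pattern and addresses shown
# are simply the defaults declared above):
#   python -m memcload --dry --pattern "/data/appsinstalled/*.tsv.gz" \
#       --idfa 127.0.0.1:33013 --gaid 127.0.0.1:33014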
| 2.046875
| 2
|
train.py
|
mekomlusa/neural-network-genetic-algorithm
| 1
|
12775402
|
<reponame>mekomlusa/neural-network-genetic-algorithm
"""
Utility used by the Network class to actually train.
Based on:
https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
"""
from keras.datasets import mnist, cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
#from keras.utils.np_utils import to_categorical
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, TensorBoard
from keras.optimizers import SGD
from keras.regularizers import l1_l2
import math
from keras import backend as K
# Helper: Early stopping.
early_stopper = EarlyStopping(patience=5)
def get_cifar10(bs):
"""Retrieve the CIFAR dataset and process the data."""
# Set defaults.
nb_classes = 10
batch_size = bs
# Get the data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
input_shape = x_train.shape[1:]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, nb_classes)
y_test = to_categorical(y_test, nb_classes)
return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def get_mnist(bs):
"""Retrieve the MNIST dataset and process the data."""
# Set defaults.
nb_classes = 10
batch_size = bs
# input image dimensions
img_rows, img_cols = 28, 28
# Get the data.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = to_categorical(y_train, nb_classes)
y_test = to_categorical(y_test, nb_classes)
return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)
def compile_model(network, nb_classes, input_shape):
"""Compile a sequential model.
Args:
network (dict): the parameters of the network
Returns:
a compiled network.
"""
# Get our network parameters.
filter_size = network['filter_size']
l1_penalty = network['l1_penalty']
l2_penalty = network['l2_penalty']
learning_rate = network['learning_rate']
conv_layer_count = network['conv_layer_count']
filters_per_conv = network['filters_per_conv']
hidden_layer_count = network['hidden_layer_count']
units_per_hidden = network['units_per_hidden']
model = Sequential()
# Add each layer.
# Arrange conv layers first.
if conv_layer_count > 0:
for _ in range(conv_layer_count):
# Need input shape for first layer.
if len(model.layers) == 0:
model.add(Conv2D(filters_per_conv, filter_size, activation='relu', input_shape=input_shape, kernel_regularizer=l1_l2(l1=l1_penalty,l2=l2_penalty)))
model.add(MaxPooling2D(pool_size=(2, 2))) # hard-coded maxpooling
elif model.layers[-1].output_shape[1] > filter_size[1] and model.layers[-1].output_shape[1] > 2:
# valid, can subtract
model.add(Conv2D(filters_per_conv, filter_size, activation='relu', kernel_regularizer=l1_l2(l1=l1_penalty,l2=l2_penalty)))
model.add(MaxPooling2D(pool_size=(2, 2))) # hard-coded maxpooling
model.add(Flatten())
# Then get hidden layers.
if hidden_layer_count > 0:
for _ in range(hidden_layer_count):
if len(model.layers) == 0:
# Need to add a flatten layer here
model.add(Flatten())
model.add(Dense(units_per_hidden, activation='relu', input_shape=input_shape, kernel_regularizer=l1_l2(l1=l1_penalty,l2=l2_penalty)))
else:
model.add(Dense(units_per_hidden, activation='relu', kernel_regularizer=l1_l2(l1=l1_penalty,l2=l2_penalty)))
# Output layer.
model.add(Dense(nb_classes, activation='softmax'))
#print(model.summary())
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=learning_rate, momentum=0.9),
metrics=['accuracy'])
return model
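# A minimal sketch of the `network` dict that compile_model() and train_and_score()
# expect; the values below are illustrative assumptions, not tuned hyperparameters:
#
#   network = {
#       'filter_size': (3, 3),
#       'l1_penalty': 0.0,
#       'l2_penalty': 1e-4,
#       'learning_rate': 0.01,
#       'conv_layer_count': 2,
#       'filters_per_conv': 32,
#       'hidden_layer_count': 1,
#       'units_per_hidden': 128,
#       'batch_size': 64,  # consumed by get_mnist() / get_cifar10()
#   }
#   model = compile_model(network, nb_classes=10, input_shape=(32, 32, 3))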
def train_and_score(network, dataset):
"""Train the model, return test loss.
Args:
network (dict): the parameters of the network
dataset (str): Dataset to use for training/evaluating
"""
if dataset == 'cifar10':
nb_classes, batch_size, input_shape, x_train, \
x_test, y_train, y_test = get_cifar10(network['batch_size'])
elif dataset == 'mnist':
nb_classes, batch_size, input_shape, x_train, \
x_test, y_train, y_test = get_mnist(network['batch_size'])
model = compile_model(network, nb_classes, input_shape)
tbCallBack = TensorBoard(log_dir='./Graph/CIFAR10_RS', histogram_freq=0, write_graph=True, write_images=True)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=50, # per paper
verbose=0,
validation_data=(x_test, y_test),
callbacks=[early_stopper, tbCallBack])
score = model.evaluate(x_test, y_test, verbose=0)
return score[1] # 1 is accuracy. 0 is loss.
def train_and_score_TB(network, dataset, iteration, current_network_count, dataset_TB_folder_name):
"""Train the model, return test loss.
Special cases for tensorboard for multiple runs.
Args:
network (dict): the parameters of the network
dataset (str): Dataset to use for training/evaluating
iteration (int): Count of the current iteration.
current_network_count (int): Count of the current network.
dataset_TB_folder_name (str): Name of the parent folder that holds the multiple run tensorboard result.
"""
if dataset == 'cifar10':
nb_classes, batch_size, input_shape, x_train, \
x_test, y_train, y_test = get_cifar10(network['batch_size'])
elif dataset == 'mnist':
nb_classes, batch_size, input_shape, x_train, \
x_test, y_train, y_test = get_mnist(network['batch_size'])
model = compile_model(network, nb_classes, input_shape)
tbCallBack = TensorBoard(log_dir='./Graph/'+dataset_TB_folder_name+'/Run'+str(iteration)+'/Model'+str(current_network_count)+'/', histogram_freq=0, write_graph=True, write_images=True)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=50, # per paper
verbose=0,
validation_data=(x_test, y_test),
callbacks=[early_stopper, tbCallBack])
score = model.evaluate(x_test, y_test, verbose=0)
return score[1] # 1 is accuracy. 0 is loss.
| 3.375
| 3
|
chempy/properties/tests/test_water_density_tanaka_2001.py
|
matecsaj/chempy
| 0
|
12775403
|
<filename>chempy/properties/tests/test_water_density_tanaka_2001.py
import warnings
from chempy.units import allclose
from ..water_density_tanaka_2001 import water_density
def test_water_density():
warnings.filterwarnings("error")
assert abs(water_density(273.15 + 0) - 999.8395) < 0.004
assert abs(water_density(273.15 + 4) - 999.9720) < 0.003
assert abs(water_density(273.15 + 10) - 999.7026) < 0.0003
assert abs(water_density(273.15 + 15) - 999.1026) < 0.0001
assert abs(water_density(273.15 + 20) - 998.2071) < 0.0005
assert abs(water_density(273.15 + 22) - 997.7735) < 0.0007
assert abs(water_density(273.15 + 25) - 997.0479) < 0.0009
assert abs(water_density(273.15 + 30) - 995.6502) < 0.0016
assert abs(water_density(273.15 + 40) - 992.2) < 0.02
try:
water_density(1)
except UserWarning:
pass # good warning raised
else:
raise
warnings.resetwarnings()
try:
import quantities as pq
import numpy as np
unit = pq.kg/pq.m**3
assert allclose(water_density(298.15*pq.K, units=pq),
997.047021671824*unit, atol=1e-8*unit)
assert allclose(water_density(np.linspace(297, 299)*pq.K, units=pq),
997*unit, rtol=1e-3, atol=1e-3*unit)
except ImportError:
pass
| 2.359375
| 2
|
program_synthesis/models/modules/attention.py
|
sunblaze-ucb/SED
| 6
|
12775404
|
import numpy as np
import torch
import torch.nn.init as init
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from .layer_norm import LayerNorm
def maybe_mask(attn, attn_mask):
if attn_mask is not None:
assert attn_mask.size() == attn.size(), \
'Attention mask shape {} mismatch ' \
'with Attention logit tensor shape ' \
'{}.'.format(attn_mask.size(), attn.size())
attn.data.masked_fill_(attn_mask, -float('inf'))
class DotProductAttention(nn.Module):
def __init__(self, num_units, num_mem_units, num_heads):
super(DotProductAttention, self).__init__()
self.linear_ins = [
nn.Linear(num_units, num_mem_units, bias=False) for _ in range(num_heads)]
self.linear_outs = [nn.Linear(
num_mem_units + 2 * num_units, num_units, bias=False) for _ in range(num_heads)]
for i, x in enumerate(self.linear_ins + self.linear_outs):
setattr(self, 'param_%s' % i, x)
self.num_heads = num_heads
def forward(self, query, context, attn_mask=None):
"""Apply attention.
query: batch x dim
context: batch x length x dim
"""
input_ = query
for i in range(self.num_heads):
query_proj = self.linear_ins[i](
input_).unsqueeze(2) # batch x dim x 1
attn = torch.bmm(context, query_proj).squeeze(2) # batch x length
maybe_mask(attn, attn_mask)
attn = F.softmax(attn, dim=1)
wc = torch.bmm(attn.unsqueeze(1), context).squeeze(1) # batch x dim
wc = torch.cat([wc, input_, query], 1) # batch x 2dim
wc = self.linear_outs[i](wc)
wc = torch.tanh(wc)
input_ = wc
return wc, attn
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, dim, attn_dropout=0.1):
super(ScaledDotProductAttention, self).__init__()
self.temper = np.power(dim, 0.5)
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=-1)
def forward(self, q, k, v, attn_mask=None):
attn = torch.bmm(q, k.transpose(1, 2)) / self.temper
maybe_mask(attn, attn_mask)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
class RepeatLinear(nn.Module):
def __init__(self, repeat, feature_dim, dim):
super(RepeatLinear, self).__init__()
self.repeat = repeat
self.layer = nn.Parameter(torch.FloatTensor(repeat, feature_dim, dim))
self.output_dim = dim
init.xavier_normal(self.layer)
def forward(self, x):
_, dim1, dim2 = x.size()
if self.repeat > 1:
out = x.repeat(self.repeat, 1, 1).view(self.repeat, -1, dim2)
else:
out = x.view(1, -1, dim2)
return torch.bmm(out, self.layer).view(-1, dim1, self.output_dim)
class MultiHeadAttention(nn.Module):
def __init__(
self, num_heads, num_units, query_dim, key_dim, value_dim,
dropout_p=0.1):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.num_units = num_units
assert query_dim == key_dim
self.query_dim = query_dim
self.key_dim = key_dim
self.value_dim = value_dim
self.query_layer = RepeatLinear(num_heads, num_units, query_dim)
self.key_layer = RepeatLinear(num_heads, num_units, key_dim)
self.value_layer = RepeatLinear(num_heads, num_units, value_dim)
self.attention = ScaledDotProductAttention(num_units)
self.proj = nn.Linear(num_heads * value_dim, num_units)
self.dropout = nn.Dropout(dropout_p)
self.layer_norm = LayerNorm(num_units)
def forward(self, query, keys, values, attn_mask=None):
# query shape: batch x num queries x num units
# keys shape: batch x num kv x num units
# values shape: batch x num kv x num units
# batch * heads x num queries x query_dim
Q = self.query_layer(query)
# batch * heads x num kv x key_dim (= query_dim)
K = self.key_layer(keys)
# batch * heads x num kv x value_dim
V = self.value_layer(values)
# outputs: batch * heads x num queries x value_dim
# attns: batch * heads x num queries x num kv
outputs, attns = self.attention(
Q, K, V, attn_mask=attn_mask.repeat(self.num_heads, 1, 1) if attn_mask is not None else None)
# TODO: transpose or unfold?
bsz = query.size(0)
# batch x num queries x num_heads * value_dim
outputs = torch.cat(torch.split(outputs, bsz, dim=0), dim=-1)
# batch x num queries x num_units
outputs = self.proj(outputs)
outputs = self.dropout(outputs)
return self.layer_norm(outputs + query), attns
class SimpleMultiHeadAttention(MultiHeadAttention):
def __init__(self, num_heads, num_units, dropout_p=0.1):
assert num_units % num_heads == 0
        dim = num_units // num_heads  # integer division keeps the per-head dim an int
super(SimpleMultiHeadAttention, self).__init__(
num_heads, num_units, dim, dim, dim, dropout_p)
def forward(self, query, values, attn_mask=None):
if query.dim() == 2:
query = query.unsqueeze(1)
outputs, attns = super(SimpleMultiHeadAttention, self).forward(
query, values, values, attn_mask)
if query.dim() == 2:
outputs = outputs.squeeze(1)
return outputs, attns
class SimpleSDPAttention(ScaledDotProductAttention):
def __init__(self, query_dim, values_dim, dropout_p=0.0):
super(SimpleSDPAttention, self).__init__(values_dim, dropout_p)
self.query_proj = nn.Linear(query_dim, values_dim)
def forward(self, query, values, attn_mask=None):
# query shape: batch x query dim
# values shape: batch x num values x values dim
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1)
output, attn = super(SimpleSDPAttention, self).forward(
self.query_proj(query).unsqueeze(1), values, values, attn_mask)
output = output.squeeze(1)
return output, attn
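# Minimal usage sketch for SimpleSDPAttention (tensor sizes are illustrative,
# chosen only to show the expected shapes):
#
#   attn = SimpleSDPAttention(query_dim=128, values_dim=256)
#   query = torch.randn(4, 128)        # batch x query dim
#   values = torch.randn(4, 10, 256)   # batch x num values x values dim
#   output, weights = attn(query, values)
#   # output: 4 x 256, weights: 4 x 1 x 10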
| 2.453125
| 2
|
BookClub/tests/views/forum_views/test_edit_post_view.py
|
amir-rahim/BookClubSocialNetwork
| 4
|
12775405
|
"""Unit testing of the Edit Post view"""
from django.test import TestCase, tag
from django.urls import reverse
from BookClub.models import User, ForumPost, Club
from BookClub.tests.helpers import reverse_with_next
@tag('views', 'forum', 'edit_post')
class EditPostViewTestCase(TestCase):
"""Tests of the Edit Posts view."""
fixtures = [
'BookClub/tests/fixtures/default_users.json',
'BookClub/tests/fixtures/default_clubs.json',
'BookClub/tests/fixtures/default_memberships.json',
'BookClub/tests/fixtures/default_forum.json',
'BookClub/tests/fixtures/default_posts.json',
]
def setUp(self):
self.user = User.objects.get(username="johndoe")
self.non_user = User.objects.get(pk=7)
self.club = Club.objects.get(pk=1)
self.my_post = ForumPost.objects.get(pk=1)
self.other_post = ForumPost.objects.get(pk=2)
self.club_post = ForumPost.objects.get(pk=4)
self.my_url = reverse('edit_forum_post', kwargs={'post_id': self.my_post.id})
self.other_url = reverse('edit_forum_post', kwargs={'post_id': self.other_post.id})
self.club_url = reverse('edit_forum_post', kwargs={'club_url_name': self.club.club_url_name, 'post_id': self.club_post.id})
self.edit = {
"content": "HELLO, HOW DO YOU DO!",
}
def test_edit_post_url(self):
self.assertEqual(self.my_url, '/forum/'+str(self.my_post.pk)+'/edit/')
def test_edit_other_post_url(self):
self.assertEqual(self.other_url, '/forum/'+str(self.other_post.pk)+'/edit/')
def test_edit_club_post_url(self):
self.assertEqual(self.club_url, '/club/'+str(self.club.club_url_name)+'/forum/'+str(self.club_post.id)+'/edit/')
def test_redirect_when_not_logged_in(self):
redirect_url = reverse_with_next('login', self.my_url)
response = self.client.post(self.my_url, self.edit, follow=True)
self.assertRedirects(response, redirect_url,
status_code=302, target_status_code=200, fetch_redirect_response=True
)
self.assertTemplateUsed(response, 'authentication/login.html')
def test_redirect_club_when_not_logged_in(self):
redirect_url = reverse_with_next('login', self.club_url)
response = self.client.post(self.club_url, self.edit, follow=True)
self.assertRedirects(response, redirect_url,
status_code=302, target_status_code=200, fetch_redirect_response=True
)
self.assertTemplateUsed(response, 'authentication/login.html')
def test_redirect_when_not_creator(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
redirect_url = reverse('global_forum')
response = self.client.post(self.other_url, self.edit, follow=True)
self.assertRedirects(response, redirect_url,
status_code=302, target_status_code=200, fetch_redirect_response=True
)
def test_redirect_non_existing_id(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
url = reverse('edit_forum_post', kwargs={'post_id': 555})
redirect_url = reverse('global_forum')
response = self.client.post(url, self.edit, follow=True)
self.assertRedirects(response, redirect_url,
status_code=302, target_status_code=200, fetch_redirect_response=True
)
def test_redirect_club_non_existing_id(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
url = reverse('edit_forum_post', kwargs={'club_url_name': self.club.club_url_name, 'post_id': 555})
redirect_url = reverse('club_forum', kwargs={'club_url_name': self.club.club_url_name})
response = self.client.post(url, self.edit, follow=True)
self.assertRedirects(response, redirect_url,
status_code=302, target_status_code=200, fetch_redirect_response=True
)
def test_edit_post_when_not_logged_in(self):
redirect_url = reverse_with_next('login', self.my_url)
response = self.client.post(self.my_url, self.edit, follow=True)
post = ForumPost.objects.get(pk=1)
self.assertEqual(post.content, "Lorem Ipsum is simply dummy text of the printing and typesetting industry. "
"Lorem Ipsum has been the industrial standard dummy text ever since the 1500s, "
"when an unknown printer took a galley of type and scrambled it to make a type "
"specimen book.")
def test_edit_post_when_not_creator(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
response = self.client.post(self.other_url, self.edit, follow=True)
post = ForumPost.objects.get(pk=2)
self.assertEqual(post.content, "Contrary to popular belief, Lorem Ipsum is not simply random text. It has "
"roots in a piece of classical Latin literature from 45 BC, making it over "
"2000 years old.")
def test_edit_club_post_when_non_member(self):
self.client.login(username=self.non_user.username, password="<PASSWORD>")
response = self.client.post(self.club_url, self.edit, follow=True)
post = ForumPost.objects.get(pk=4)
self.assertEqual(post.content, "... qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit...")
def test_edit_post_when_creator(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
response = self.client.post(self.my_url, self.edit, follow=True)
post = ForumPost.objects.get(pk=1)
self.assertEqual(post.content, "HELLO, HOW DO YOU DO!")
def test_post_details_show(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
response = self.client.get(self.my_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'forum/edit_forum_post.html')
self.assertContains(response, "Lorem Ipsum")
self.assertContains(response, "Lorem Ipsum is simply dummy text of the printing and typesetting industry. "
"Lorem Ipsum has been the industrial standard dummy text ever since the "
"1500s, when an unknown printer took a galley of type and scrambled it to make "
"a type specimen book.")
self.assertContains(response, "Posted by: johndoe")
def test_club_post_details_show(self):
self.client.login(username=self.user.username, password="<PASSWORD>")
response = self.client.get(self.club_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'forum/edit_forum_post.html')
self.assertContains(response, "Latin Quota")
self.assertContains(response, "... qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit...")
self.assertContains(response, "Posted by: johndoe")
| 2.890625
| 3
|
avr-libc/tests/simulate/readcore.py
|
avr-rust/avr-libc
| 9
|
12775406
|
#! /usr/bin/env python
# Copyright (c) 2008, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the copyright holders nor the names of
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The ATmega128 simulations return an error code string in "external
# memory" at address 0x2000 upon failure. If runtest.sh is run with
# option -s, it will abort the simulation, and leave the file
# core_avr_dump.core where this script can read the error code string
# from. (The simulations on smaller AVRs don't generate this string
# in order to not bloat their code beyond the available ROM size by
# including sprintf().)
# If an argument is given to the script, it is used as the name of the
# simulavr core dump file to read. Otherwise, the simulavr default
# name "core_avr_dump.core" is used.
# $Id: readcore.py 1647 2008-03-19 22:45:15Z joerg_wunsch $
# Enum implementation, from Python recipe:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/413486
# Author: <NAME>
def Enum(*names):
##assert names, "Empty enums are not supported" # <- Don't like empty enums? Uncomment!
class EnumClass(object):
__slots__ = names
def __iter__(self): return iter(constants)
def __len__(self): return len(constants)
def __getitem__(self, i): return constants[i]
def __repr__(self): return 'Enum' + str(names)
def __str__(self): return 'enum ' + str(constants)
class EnumValue(object):
__slots__ = ('__value')
def __init__(self, value): self.__value = value
Value = property(lambda self: self.__value)
EnumType = property(lambda self: EnumType)
def __hash__(self): return hash(self.__value)
def __cmp__(self, other):
# C fans might want to remove the following assertion
# to make all enums comparable by ordinal value {;))
assert self.EnumType is other.EnumType, "Only values from the same enum are comparable"
return cmp(self.__value, other.__value)
def __invert__(self): return constants[maximum - self.__value]
def __nonzero__(self): return bool(self.__value)
def __repr__(self): return str(names[self.__value])
maximum = len(names) - 1
constants = [None] * len(names)
for i, each in enumerate(names):
val = EnumValue(i)
setattr(EnumClass, each, val)
constants[i] = val
constants = tuple(constants)
EnumType = EnumClass()
return EnumType
# end Enum recipe
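# Illustrative use of the recipe (Python 2 semantics, mirroring pstateClass below):
#   State = Enum('Stopped', 'Running')
#   State.Stopped < State.Running   # True: values compare by declaration order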
import re, sys
# Start of CPU register dump
regmagic = re.compile('^General Purpose Register Dump')
# Location of exit code is r24/r25
r24magic = re.compile('r24=(..) +r25=(..)')
# Start of external SRAM dump
srammagic = re.compile('^External SRAM Memory Dump:')
# Start of error code string at address 0x2000
startaddr = re.compile('^2000 :')
# Pattern to detect repeated lines
repline = re.compile('-- last line repeats --')
# Turn one line from the memory dump into an ASCII string.
# Stops processing upon encountering a NUL character.
# Returns a tuple consisting of the string and a condition
# code that is 1 when processing has been terminated by
# detecting NUL, 0 when reaching end of line without seeing
# NUL.
def asciiize(s):
rv = ''
a = s.split()
for iascii in a[2:]:
i = int(iascii, 16)
if i == 0:
return (rv, 1)
if i == 10 or (i >= 32 and i < 127):
rv += chr(i)
else:
# Non-printable character, not supposed to happen
rv += '?'
return (rv, 0)
# Calculate exitcode from r24/r25 hex values
def exitcode(r24, r25):
i24 = int(r24, 16)
i25 = int(r25, 16)
return i25 * 256 + i24
# Start of main
try:
corename = sys.argv[1]
except IndexError:
corename = 'core_avr_dump.core'
core = open(corename)
# Our result string
s = ''
# Exit code
ec = -1
# Parser state.
pstateClass = Enum('Done', 'StartAddr', 'SRAMfound', 'GotExitCode',
'FoundCPUregs', 'Starting')
pstate = pstateClass.Starting
oline = ''
while pstate > pstateClass.Done:
l = core.readline()
if l == '':
# EOF encountered
break
if pstate == pstateClass.Starting:
if regmagic.match(l):
pstate = pstateClass.FoundCPUregs
continue
elif pstate == pstateClass.FoundCPUregs:
matchobj = r24magic.match(l)
if matchobj != None:
ec = exitcode(matchobj.group(1), matchobj.group(2))
pstate = pstateClass.GotExitCode
continue
elif pstate == pstateClass.GotExitCode:
if srammagic.match(l):
pstate = pstateClass.SRAMfound
continue
elif pstate == pstateClass.SRAMfound or pstate == pstateClass.StartAddr:
if repline.match(l):
l = oline
if pstate == pstateClass.SRAMfound:
if startaddr.match(l):
pstate = pstateClass.StartAddr
else:
continue
(part, condcode) = asciiize(l)
s += part
if condcode == 1:
pstate = pstateClass.Done
oline = l
core.close()
print("Exit code: %d" % ec)
if s != '':
print("Message string:")
print(s)
else:
print("No message string found.")
| 1.460938
| 1
|
src/music-album.py
|
elanworld/music-album
| 0
|
12775407
|
# Compose a rhythm-synced photo-album video from images and music
from typing import Tuple, Union, Any
import moviepy.editor
from moviepy.video.fx.speedx import speedx
import wave
import numpy as np
import re
from progressbar import *
from common import python_box
from common import gui
import psutil
import time
import math
import moviepy.audio.fx.all
class FfmpegPlugin:
def __init__(self):
self.t = time.time()
self.ffmpeg = "ffmpeg"
def __del__(self):
print("use time:", time.time() - self.t)
def video2audio(self, directory):
f_lst = python_box.dir_list(directory, "mp4$")
for file in f_lst:
wav = re.sub("mp4", "", file) + "wav"
print(file, wav)
cmd = "%s -y -i '%s' '%s'" % (self.ffmpeg, file, wav)
print(cmd)
os.system(cmd)
def audio_split(self, directory):
f_lst = python_box.dir_list(directory, "mp3$")
for file in f_lst:
seconds = 0
while 1:
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
start = ("%01d:%02d:%02d" % (h, m, s))
end = "0:0:07"
seconds += 7
print(file)
mp4 = file
mp4_split = re.sub(".mp3", "", file) + "_%d.pcm" % seconds
cmd = "{ffmpeg} -y -ss {start} -t {end} -i {mp4} -acodec pcm_s16le -f s16le -ac 1 -ar 16000 {mp4_split}".format(
ffmpeg=self.ffmpeg, start=start, end=end, mp4_split=mp4_split, mp4=mp4)
print(cmd)
os.system(cmd)
size = os.path.getsize(mp4_split)
if size == 0:
break
def video_split(self, file):
mp4 = file
mp4_split = re.sub(".mp4", "", file) + "_split.mp4"
start = "0:0:9"
end = "0:4:49"
print(file)
cmd = '''{ffmpeg} -y -ss {start} -t {end} -i "{mp4}" -vcodec copy -acodec copy "{mp4_split}"'''.format(
ffmpeg=self.ffmpeg, start=start, end=end, mp4_split=mp4_split, mp4=mp4)
print(cmd)
os.system(cmd)
def video_concat(self, dir):
os.chdir(dir)
f_lst = []
for file in python_box.dir_list(dir, "mp4"):
file = "file '{}'".format(file)
f_lst.append(file)
videoInfo = dir + "/videoInfo.txt"
python_box.write_file(f_lst, videoInfo)
cmd = '''{} -f concat -i {} -c copy {}output.mp4'''.format(self.ffmpeg, videoInfo, dir + "/")
print(cmd)
os.chdir(dir)
os.system(cmd)
os.remove(videoInfo)
def imageSequence(directory, target):
# 只支持相同尺寸图片合成视频
clip = moviepy.editor.ImageSequenceClip(directory, fps=10)
clip.write_videofile(target)
def movie_concat(directory):  # concatenation stutters and repeats frames at the joins
outPath = directory + "/concatVideo.mp4"
f_lst = python_box.dir_list(directory, "mp4")
videoClips = []
for file in f_lst:
videoClip = moviepy.editor.VideoFileClip(file)
videoClips.append(videoClip)
videoClip = moviepy.editor.concatenate_videoclips(videoClips)
videoClip.write_videofile(outPath)
def clip_speed_change(clip, speed, ta, tb):
"""
    Adjust the playback speed of a segment.
    The changed segment keeps its original time span.
    :param clip:
    :param speed:
    :param ta: start time
    :param tb: end time
:return:
"""
tb = ta + (tb - ta) * speed
if tb <= clip.duration:
speed_lambda = lambda c: speedx(c, speed)
try:
clip = clip.subfx(speed_lambda, ta, tb)
            # If this raises, closing all Python processes or upgrading the library resolves it
except Exception as e:
print(e)
return clip
def num_speed(numpy_arr, n):
new_numpy_arr = np.array([])
for speed in numpy_arr:
if speed > 1:
new_speed = 1 + (speed - 1) * n
else:
if n <= 1:
new_speed = (1 - (1 - speed) * n)
if n > 1:
new_speed = speed / n
new_numpy_arr = np.append(new_numpy_arr, new_speed)
return new_numpy_arr
def get_current_index(np_array: np.ndarray, value):
"""
    Return the index of the last element in the sorted array that is <= value.
:param np_array:
:param value:
:return:
"""
index = np.where(np_array <= value)
if len(index) > 0:
if len(index[0]) > 0:
return index[0][len(index[0]) - 1]
return len(np_array) - 1
def compute_time_line(np_time: np.ndarray, np_speed: np.ndarray, clips: list, audio_duration) -> list:
"""
    Iteratively search for per-clip durations so the total length approaches audio_duration.
:param np_time:
:param np_speed:
:param clips:
:param audio_duration:
:return:
"""
default_var = audio_duration / len(clips)
change_var = 0.01
durations = []
while True:
durations.clear()
for _ in clips:
like_index = get_current_index(np_time, sum(durations))
clip_duration = 1.0 / np_speed[like_index]
clip_duration = clip_duration * default_var
durations.append(clip_duration)
total = sum(durations)
if total > audio_duration:
default_var *= 1 - change_var
if total <= audio_duration:
default_var *= 1 + change_var
got = math.fabs(total - audio_duration) < 1
if got:
break
else:
change_var *= 0.8
if len(sys.argv) >= 3 and sys.argv[2] == "plot":
from common import tools
data = []
for i in durations:
data.append(1 / i)
tools.plot_list(data)
return durations
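# Toy example (illustrative values): with a constant speed profile of 1.0 every clip
# settles near the even split audio_duration / len(clips), e.g.
#   compute_time_line(np.array([0.0, 1.0, 2.0]), np.array([1.0, 1.0, 1.0]),
#                     ['a.jpg', 'b.jpg'], audio_duration=4.0)
# returns durations summing to ~4.0 (here [2.0, 2.0]).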
class MovieLib(FfmpegPlugin):
def __init__(self):
super().__init__()
self.image_list = []
self.audio_lst = []
self.imageVideo = None
self.audio_file = None
self.speed_video_file = None
self.temp_videos = []
        # sensitivity to speed changes
self.sens = 0.6
self.change_speed_time = 0.8
self.audio_leader = True
def set_out(self, directory):
dir_ = os.path.split(directory)[0]
self.imageVideo = os.path.join(dir_, "pic2video.mp4")
self.audio_file = os.path.join(dir_, "pic2video.wav")
self.speed_video_file = os.path.join(dir_, f"{os.path.basename(dir_)}.mp4")
def add_bgm(self, audio_dir):
self.audio_lst.append(audio_dir)
def add_pic(self, pic_dir):
self.image_list.extend(sorted(python_box.dir_list(pic_dir, "jpg", walk=True)))
if not self.speed_video_file:
self.set_out(pic_dir)
def audio2data(self, audio):
f = wave.open(audio, 'rb')
params = f.getparams()
nchannels, sampwidth, self.framerate, nframes = params[:4]
strData = f.readframes(nframes)
f.close()
        waveData = np.frombuffer(strData, dtype=np.short).copy()  # writable copy; np.fromstring is deprecated
waveData.shape = -1, 2
waveData = waveData.T
waveData = waveData[0]
audioTime = np.arange(0, nframes) * (1.0 / self.framerate)
if len(sys.argv) >= 3 and sys.argv[2] == "plot":
from common import tools
tools.plot_list(waveData, audioTime)
np.abs(waveData, out=waveData)
return audioTime, waveData
def frame2speed(self, audioTime: list, wave_data: list, f_duration=None) -> Tuple[
np.ndarray, Union[Union[float, int], Any]]:
"""
根据帧获取音频速度
:param f_duration:
:param audioTime:
:param wave_data:
:return:
"""
np_time = np.array([])
np_speed = np.array([])
        # sample key frames
f = 0
if f_duration is None:
f_duration = int(self.framerate * 0.5)
while f <= len(audioTime) - 1:
t = audioTime[f]
speed = np.mean(wave_data[f:f + f_duration])
f += f_duration
np_time = np.append(np_time, t)
np_speed = np.append(np_speed, speed)
        # adjust speed sensitivity and clamp outliers
np_speed = np_speed / np.mean(np_speed)
np_speed = np.where(np_speed >= 8, 8, np_speed)
np_speed = np.where(np_speed <= 0.2, 0.2, np_speed)
np_speed = np.where(np_speed >= 1, np_speed * self.sens, np_speed)
np_speed = np.where(np_speed < 1, np_speed / self.sens, np_speed)
np_speed = np_speed / np.mean(np_speed)
return np_time, np_speed
def video_speed_with_audio(self):
        # Match the video speed to the audio rhythm; suited to videos of repeated images or constant pace
sys.setrecursionlimit(10000000)
video = moviepy.editor.VideoFileClip(self.imageVideo)
video.audio.write_audiofile(self.audio_file)
audioTime, wave_data = self.audio2data(self.audio_file)
np_time, np_speed = self.frame2speed(audioTime, wave_data,
f_duration=int(self.framerate * self.change_speed_time))
        # process the video
bar_setting = ['change speed: ', Percentage(), Bar("#"), Timer(), ' ', ETA()]
speed_clip = moviepy.editor.VideoFileClip(self.imageVideo) # initial clip
audio_clip = speed_clip.audio
bar = ProgressBar(widgets=bar_setting, maxval=len(np_speed)).start()
bar_update_tie = 1
for i in range(len(np_speed)):
bar.update(bar_update_tie)
bar_update_tie += 1
speed = np_speed[i]
t = np_time[i]
            speed_clip = clip_speed_change(speed_clip, speed, t, t + self.change_speed_time)  # piecewise speed change
np_time = np.append(np_time, t)
speed_clip.audio = audio_clip
print(self.speed_video_file)
video_without_audio = python_box.FileSys().get_outfile(self.speed_video_file, "no_audio")
speed_clip.write_videofile(video_without_audio, audio=False)
speed_clip = moviepy.editor.VideoFileClip(video_without_audio) # solve cant write audio
duration = speed_clip.duration
audio = moviepy.editor.AudioFileClip(self.audio_file)
        audio = audio.set_duration(duration)  # set_duration returns a new clip, so keep the result
speed_clip.audio = audio
speed_clip.write_videofile(self.speed_video_file)
# destroy
del audio
del speed_clip
try:
os.remove(video_without_audio)
os.remove(self.audio_file)
os.remove(self.imageVideo)
except Exception as e:
print(e)
bar.finish()
def crop_clip(self, clip: moviepy.editor.ImageClip, width=1080 * 4 / 3, height=1080):
        w, h = clip.size  # video width and height
        w_h = w / h
        if w_h <= width / height:  # too narrow for the target aspect ratio
clip = clip.resize(width=width)
w, h = clip.size
clip = clip.crop(x_center=w / 2, y_center=h / 2, width=width, height=height)
if w_h > width / height:
clip = clip.resize(height=height)
w, h = clip.size
clip = clip.crop(x_center=w / 2, y_center=h / 2, width=width, height=height)
return clip
def image2speed_video(self, width=1080 * 4 / 3, height=1080):
"""
        Generate a speed-varying video directly from the images,
        skipping the separate image-to-video step.
:param width:
:param height:
:return:
"""
        # build the audio track
if len(self.audio_lst) == 0:
raise Exception("not exists any music")
audio_clips = []
for m in self.audio_lst:
clip = moviepy.editor.AudioFileClip(m)
audio_clips.append(clip)
audio_clip = moviepy.editor.concatenate_audioclips(audio_clips)
audio_clip.write_audiofile(self.audio_file)
audioTime, wave_data = self.audio2data(self.audio_file)
np_time, np_speed = self.frame2speed(audioTime, wave_data)
time_line = compute_time_line(np_time, np_speed, self.image_list, audio_clip.duration)
self.image_list.sort()
image_clips = []
for i in range(len(self.image_list)):
image_clip = moviepy.editor.ImageClip(self.image_list[i])
image_clip.start = sum(time_line[0:i])
image_clip.duration = time_line[i]
image_clip.fps = 1
image_clip = self.crop_clip(image_clip, width, height)
image_clips.append(image_clip)
video_clip = moviepy.editor.concatenate_videoclips(image_clips)
video_clip.audio = audio_clip
video_clip.write_videofile(self.speed_video_file, fps=5)
os.remove(self.audio_file)
def image2clip(self, width=1080 * 4 / 3, height=1080, duration=0.25):
fps = 1.0 / duration
width_height = width / height
if len(self.audio_lst) == 0:
raise Exception("exists any music")
audioClips = []
for m in self.audio_lst:
audioClip = moviepy.editor.AudioFileClip(m)
audioClips.append(audioClip)
audioClip = moviepy.editor.concatenate_audioclips(audioClips)
self.image_list.sort()
bar_setting = ['image2clip: ', Percentage(), Bar('#'), ' ', ETA()]
bar = ProgressBar(widgets=bar_setting, maxval=len(self.image_list)).start()
videoStartTime = 0
videoClips = []
fail_pic = []
bar_i = 0
for imageFileName in self.image_list:
bar_i += 1
try:
imageClip = moviepy.editor.ImageClip(imageFileName)
videoClip = imageClip.set_duration(duration)
videoClip = videoClip.set_start(videoStartTime)
videoClip = self.crop_clip(videoClip, width, height)
videoStartTime += duration
if 'video_clip' not in locals().keys():
video_clip = videoClip
else:
video_clip = moviepy.editor.concatenate_videoclips([video_clip, videoClip])
                # when memory runs low, write out an intermediate chunk
if psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 > 800:
i = 1
temp_video = python_box.FileSys().get_outfile(self.imageVideo, str(i))
while 1:
if os.path.exists(temp_video):
i += 1
temp_video = python_box.FileSys().get_outfile(self.imageVideo, str(i))
else:
self.temp_videos.append(temp_video)
break
video_clip.write_videofile(temp_video, fps=fps)
del video_clip
except Exception as e:
fail_pic.append(imageFileName)
print(e)
bar.update(bar_i)
if len(self.temp_videos) > 0:
videos = []
for temp_video in self.temp_videos:
video_clip = moviepy.editor.VideoFileClip(temp_video)
videos.append(video_clip)
video_clip = moviepy.editor.concatenate_videoclips(videos)
bar.finish()
        # match the audio track length to the video
video_duration = video_clip.duration
audio_duration = audioClip.duration
if self.audio_leader:
video_clip = video_clip.subfx(lambda c: speedx(c, video_duration / audio_duration))
else:
while audioClip.duration < video_duration:
audioClip = moviepy.editor.concatenate_audioclips([audioClip, audioClip])
audioClip = audioClip.set_duration(video_duration)
video_clip.audio = audioClip
video_clip.write_videofile(self.imageVideo, fps=fps)
del video_clip
for temp in self.temp_videos:
try:
os.remove(temp)
except Exception as e:
print(e)
return self.imageVideo
def run(self):
"""
        Combine the batch of images into a clip,
        using the BGM to detect the playback rhythm and build the new clip.
:return:
"""
self.image2speed_video()
if __name__ == "__main__":
"""
pic to video clip
"""
movie = MovieLib()
for i in range(6):
        directory = gui.select_dir("Select image directories one at a time; cancel to finish")
if directory:
movie.add_pic(directory)
else:
break
for i in range(6):
        file = gui.select_file("Select music files one at a time; cancel to finish")
if file:
movie.add_bgm(file)
else:
break
movie.run()
| 2.921875
| 3
|
src/dlt/randomizer_utils.py
|
thepolicylab/DLT-RESEA
| 0
|
12775408
|
import decimal
import math
import warnings
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from decimal import Decimal, localcontext
from itertools import repeat
from pathlib import Path
from time import time
from typing import List, Optional, Union
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
from .config import get_global_config
from .types import FilenameType
def python_hash(SSN: int) -> int:
"""
A pythonic implementation of COBOL code using floating-point arithmetic. Note that
this will differ ever-so-slightly from the cobol_hash due to the differing rounding
conventions.
"""
# Constants determined by DoIT
L_SD = SSN
C_Q = 127773 # 3^2 * 14197
C_A = 16807 # 7^5
C_R = 2836 # 2^2 * 709
C_M = 2147483647 # prime (In fact, 2^{2^5 - 1} - 1, double Mersenne)
# Translated
W_HI = L_SD // C_Q
W_LO = L_SD % C_Q
# Recombine the quotient and remainder mod a medium-sized almost-prime with two
# coprime factors. (N.B. Not sure exactly why C_A is a power of 7 whereas C_R is
# almost prime. Would be curious to read the history of this algorithm.)
L_SD = C_A * W_LO - C_R * W_HI
# Note that C_M is _almost_ 2^31, but not quite. Also, note that
# C_A * W_LO - C_R * W_HI is maximized when SSN = C_Q - 1
# and it is minimized when SSN is the largest social security number which is
# exactly divisible by C_Q, i.e., (999_99_9999 // C_Q) * C_Q = 999_95_1498.
#
# In either case, C_A * W_LO - C_R * W_HI \in (-C_M, C_M) and so the following
# block guarantees that L_SD will be in [0, C_M).
#
# We also note that the _smallest negative_ value that C_A * W_LO - C_R * W_HI can
# achieve in theory is -1 (since C_A and C_R are coprime) but I haven't done the
# computation to determine whether it's actually possible in this range of numbers
if L_SD <= 0:
warnings.warn("L_SD is negative")
L_SD += C_M
# And so by the above comment, L_RAND is in [0, 1) and this rounding gives us the
# top 10 digits of the mantissa
L_RAND = math.floor(L_SD / C_M * 1e10) / 1e10
return L_RAND
def cobol_hash(SSN: int) -> float:
"""
A python implementation of COBOL's fixed-point arithmetic
"""
with localcontext() as ctx:
# Constants determined by DoIT
ctx.prec = 10
ctx.rounding = decimal.ROUND_DOWN
L_SD = Decimal(SSN)
C_A = Decimal("0000016807")
C_M = Decimal("2147483647")
C_Q = Decimal("0000127773")
C_R = Decimal("0000002836")
# Translated
W_HI = (L_SD / C_Q).quantize(Decimal("1E0")) # L_SD // C_Q
W_LO = L_SD - C_Q * W_HI # L_SD % C_Q
L_SD = C_A * W_LO - C_R * W_HI
if L_SD <= 0:
L_SD += C_M
L_RAND = (L_SD / C_M).quantize(Decimal("1E-10"))
if L_RAND == 0:
warnings.warn("L_RAND is zero")
L_SD += C_M
return L_RAND
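# Quick comparison sketch (the SSN below is a made-up example, not real data):
#   python_hash(123456789)  -> float in [0, 1) truncated to a 10-digit mantissa
#   cobol_hash(123456789)   -> Decimal in [0, 1) with 10 fractional digits
# The two generally agree, apart from occasional last-digit differences caused by
# the differing rounding conventions noted in python_hash's docstring.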
def generate_outcomes(
input_list: Optional[List[int]] = None,
process_type: str = "cobol",
low: Optional[int] = None,
high: Optional[int] = None,
size: Optional[int] = None,
all_values: Optional[bool] = False,
generate_rand_whole: Optional[bool] = False,
) -> pd.DataFrame:
"""
    Helper function that generates L_RAND outcomes with the option for pythonic or cobol implementations.
"""
# Generate a random sample of SSNs to test, and sort to verify monotonicity of relationship
if input_list is not None:
ssn_pool = input_list
elif not all_values:
# Setting seed to ensure replicability
np.random.seed(0)
ssn_pool = np.random.randint(low=low, high=high, size=size)
ssn_pool.sort()
elif all_values:
ssn_pool = np.arange(low, high)
# apply random number generator to SSN pool
if process_type == "python":
with ThreadPoolExecutor() as executor:
ssn_outcomes = list(
tqdm(executor.map(python_hash, ssn_pool), total=len(ssn_pool))
)
if process_type == "cobol":
with ThreadPoolExecutor() as executor:
ssn_outcomes = list(
tqdm(
executor.map(cobol_hash, ssn_pool.astype(str)), total=len(ssn_pool)
)
)
df = pd.DataFrame(ssn_outcomes, columns=["L_RAND"])
final_df = pd.concat([pd.Series(ssn_pool, name="SSN"), df], axis=1)
if generate_rand_whole:
final_df["L_RAND_WHOLE"] = final_df["L_RAND"] * 10_000_000_000
return final_df
def chunk_using_generators(lst, n):
for i in range(0, len(lst), n):
yield lst[i : i + n]
def generate_all_L_RAND(
filepath: Optional[FilenameType] = None,
filename: FilenameType = "ssn_output.csv.gz",
ssn_min: int = 1_01_0001,
ssn_max: int = 899_99_9999,
chunksize: int = 10_0000,
):
"""
A function that calculates L_RAND values for all possible SSN from 001_01_0001 to 899_99_9999.
This exercise was necessary to ensure that the maximum value attainable from all reasonable SSNs
would result in an L_RAND value less than 9_999_999_999.
"""
if filepath is None:
# default to the DATA_DIR / reference
filepath = Path(get_global_config().DATA_DIR) / "reference"
# Total list of valid SSNs
list_of_ssn = np.arange(ssn_min, ssn_max)
# Divide the total list into manageable chunks
list_of_list_of_ssn = list(chunk_using_generators(list_of_ssn, chunksize))
# Process each list using COBOL
with ProcessPoolExecutor() as executor:
ssn_outcomes = list(
tqdm(
executor.map(generate_outcomes, list_of_list_of_ssn, repeat("cobol")),
total=len(list_of_list_of_ssn),
)
)
# Output data into a gzip dataframe.
pd.DataFrame(pd.concat(ssn_outcomes)).sort_values(
by="L_RAND", ascending=False
).reset_index(drop=True).to_csv(
filepath / filename, compression="gzip", index=False
)
def add_ms_to_seed(ssn: int, ms: int = None):
"""
A good-enough solution to resolve local-randomization issues with the current DoIT algorithm.
"""
if ms is None:
ms = int(round(time(), 6) * 1e6) % 1_000_000
return int(str(ssn + ms)[::-1])
| 2.125
| 2
|
neural_compressor/ux/utils/workload/tuning.py
|
intel/neural-compressor
| 172
|
12775409
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021-2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration tuning module."""
from typing import Any, Dict, List, Optional, Union
from neural_compressor.ux.utils.exceptions import ClientErrorException
from neural_compressor.ux.utils.json_serializer import JsonSerializer
from neural_compressor.ux.utils.utils import (
parse_bool_value,
parse_to_float_list,
parse_to_string_list,
)
class Strategy(JsonSerializer):
"""Configuration Strategy class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize configuration Strategy class."""
super().__init__()
# [Required] One of neural_compressor.strategy.STRATEGIES
self.name: str = data.get("name", "basic")
self.sigopt_api_token: Optional[str] = data.get("sigopt_api_token", None)
self.accuracy_weight: Optional[float] = data.get("accuracy_weight", None)
self.latency_weight: Optional[float] = data.get("latency_weight", None)
class MultiObjectives(JsonSerializer):
"""Configuration MultiObjectives class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize configuration MultiObjectives class."""
super().__init__()
self._objective: List[str] = data.get("objective", [])
self._weight: List[float] = data.get("weight", [])
@property
def objective(self) -> List[str]:
"""Get objectives."""
return self._objective
@objective.setter
def objective(self, value: Union[None, str, List[str]]) -> None:
"""Set inputs value."""
self._objective = parse_to_string_list(value)
@property
def weight(self) -> List[float]:
"""Get weights."""
return self._weight
@weight.setter
def weight(self, value: Union[None, float, List[float]]) -> None:
"""Set weights value."""
self._weight = parse_to_float_list(value)
class AccCriterion(JsonSerializer):
"""Configuration AccCriterion class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize configuration AccCriterion class."""
super().__init__()
self.relative: Optional[float] = data.get(
"relative",
None,
) # [Optional] (INT8-FP32)/FP32
self.absolute: Optional[float] = data.get(
"absolute",
None,
) # [Optional] INT8-FP32
# Set default accuracy criterion to relative
if self.relative is None and self.absolute is None:
self.relative = 0.1
class ExitPolicy(JsonSerializer):
"""Configuration ExitPolicy class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize Configuration ExitPolicy class."""
super().__init__()
self.timeout: Optional[int] = data.get("timeout", None)
self.max_trials: Optional[int] = data.get("max_trials", None)
self.performance_only: Optional[bool] = data.get("performance_only", None)
class Workspace(JsonSerializer):
"""Configuration Workspace class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize Configuration Workspace class."""
super().__init__()
self.path: Optional[str] = data.get("path", None) # [Optional]
self.resume: Optional[str] = data.get("resume", None) # [Optional]
class Tuning(JsonSerializer):
"""Configuration Tuning class."""
def __init__(self, data: Dict[str, Any] = {}) -> None:
"""Initialize Configuration Tuning class."""
super().__init__()
self.strategy: Strategy = Strategy()
if data.get("strategy"):
self.strategy = Strategy(data.get("strategy", {}))
self.accuracy_criterion: AccCriterion = AccCriterion(
data.get("accuracy_criterion", {}),
)
self.multi_objectives: Optional[MultiObjectives] = None
if data.get("multi_objectives"):
self.multi_objectives = MultiObjectives(data.get("multi_objectives", {}))
self.exit_policy: Optional[ExitPolicy] = None
if data.get("exit_policy"):
self.exit_policy = ExitPolicy(data.get("exit_policy", {}))
self.random_seed: Optional[int] = data.get("random_seed", None)
self.tensorboard: Optional[bool] = data.get("tensorboard", None)
self.workspace: Optional[Workspace] = None
if data.get("workspace", {}):
self.workspace = Workspace(data.get("workspace", {}))
def set_timeout(self, timeout: int) -> None:
"""Update tuning timeout in config."""
try:
timeout = int(timeout)
if timeout < 0:
raise ValueError
except ValueError:
raise ClientErrorException(
"The timeout value is not valid. " "Timeout should be non negative integer.",
)
if self.exit_policy:
self.exit_policy.timeout = timeout
else:
self.exit_policy = ExitPolicy({"timeout": timeout})
def set_max_trials(self, max_trials: int) -> None:
"""Update max tuning trials in config."""
try:
max_trials = int(max_trials)
if max_trials < 0:
raise ValueError
except ValueError:
raise ClientErrorException(
"The max trials value is not valid. " "Max trials should be non negative integer.",
)
if self.exit_policy:
self.exit_policy.max_trials = max_trials
else:
self.exit_policy = ExitPolicy({"max_trials": max_trials})
def set_performance_only(self, performance_only: Any) -> None:
"""Update performance only flag in config."""
try:
performance_only = parse_bool_value(performance_only)
except ValueError:
raise ClientErrorException(
"The performance_only flag value is not valid. "
"Performance_ony should be a boolean.",
)
if self.exit_policy:
self.exit_policy.performance_only = performance_only
else:
self.exit_policy = ExitPolicy({"performance_only": performance_only})
def set_random_seed(self, random_seed: int) -> None:
"""Update random seed value in config."""
try:
random_seed = int(random_seed)
except ValueError:
raise ClientErrorException(
"The random seed value is not valid. " "Random seed should be an integer.",
)
self.random_seed = random_seed
def set_workspace(self, path: str) -> None:
"""Update tuning workspace path in config."""
if self.workspace is None:
self.workspace = Workspace()
self.workspace.path = path
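# Illustrative sketch of the input Tuning accepts; the keys mirror the classes
# above and the values are placeholder assumptions:
#
#   tuning = Tuning({
#       "strategy": {"name": "basic"},
#       "accuracy_criterion": {"relative": 0.01},
#       "exit_policy": {"timeout": 0, "max_trials": 100},
#       "random_seed": 9527,
#       "workspace": {"path": "/tmp/nc_workspace"},
#   })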
| 1.984375
| 2
|
src/piglowui.py
|
deanydean/py-piglow-sys
| 1
|
12775410
|
#!/usr/bin/python
#
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import piglow
from multiprocessing import Process, Queue
from Queue import Full, Empty
from time import sleep
#
# Piglow UI utils for piglow-sys library.
#
def start(clear=True):
""" Start PiGlow UI updates """
if _enabled:
return
if clear:
_change_task("_clear_all")
_change_task("_enable")
_start_updater()
def stop(clear=True):
""" Stop any PiGlow UI updates """
if clear:
_change_task("_clear_all")
_change_task("_disable")
def clear_all():
""" Clear all LEDs """
_change_task("_clear_all")
def pulse_color(color, speed=10, low=64, high=255):
""" Pulse each LED of the defined color at the defined speed. """
_change_task("_pulse_color", [color, speed, low, high], True)
def set_color(color, value):
""" Set the value of the defined color """
_change_task("_set_color", [color, value])
def cycle(leds, speed=10, low=0, high=255):
""" Cycle each LED from low to high in order """
_change_task("_cycle", [leds, speed, low, high], True)
def dim(led, speed=2, high=255, low=0):
""" Dims the LED from high to low at the given speed """
_change_task("_dim", [led, speed, high, low], True)
def set(leds, value):
""" Sets the value of each led """
_change_task("_set", [leds, value])
def pulse(led, speed=2, low=0, high=255):
""" Pulse the LED from low to high at the given speed """
_change_task("_pulse", [led, speed, low, high], True)
#
# Private functions to drive the UI (ie, PiGlow updates)
#
_enabled = False
_task_queue = Queue()
_updater_process = None
_NOTASK_SLEEP_INTERVAL = 1
def _enable():
""" Enable the PiGlow UI updates """
global _enabled
_enabled = True
def _disable():
""" Disable the PiGlow UI updates """
global _enabled
_enabled = False
def _change_task(task, args=[], repeat=False, interval=0):
""" Change the current task """
try:
_task_queue.put([task, args, repeat, interval])
except Full:
print "Task ", task, " failed. Task queue full"
return
def _handle_tasks(tasks):
""" Perform the UI update for the current task """
global _enabled
task = None
_enabled = True
while _enabled:
try:
task = tasks.get(False)
except Empty:
# Do nothing, this is a valid state
pass
# If we have no task, just sleep for an interval and read again
if task is None:
sleep(_NOTASK_SLEEP_INTERVAL)
continue
# Get and exec the task method
task_method = globals().get(task[0])
if task_method is None:
sleep(task[3])
continue
else:
task_method(*task[1])
if not task[2]:
task = None
def _start_updater():
""" Start an updater process if there isn't already one """
global _updater_process
# If already enabled, just return
if _enabled:
return
_updater_process = Process(target=_handle_tasks, args=(_task_queue,))
_updater_process.start()
#
# API drawing task functions
#
def _clear_all():
""" Clear all LEDs """
for l in range(0, 18):
piglow.set(l, 0)
piglow.show()
def _set_color(color, value):
""" Set the value of the defined color """
color_setter = getattr(piglow, color)
color_setter(value)
piglow.show()
def _pulse_color(color, speed, low, high):
""" Pulse each LED of the defined color at the given speed """
color_setter = getattr(piglow, color)
pulse_range = range(low, high)
wait_for = 1.0/speed
for c in pulse_range:
color_setter(c)
piglow.show()
sleep(wait_for)
for c in reversed(pulse_range):
color_setter(c)
piglow.show()
sleep(wait_for)
def _pulse(led, speed, low, high):
""" Pulse the LED from low to high """
pulse_range = range(low, high)
wait_for = 1.0/speed
for c in pulse_range:
piglow.set(led, c)
piglow.show()
sleep(wait_for)
for c in reversed(pulse_range):
piglow.set(led, c)
piglow.show()
sleep(wait_for)
def _set(leds, value):
""" Sets the value of each led """
for led in leds:
piglow.set(led, value)
piglow.show()
def _dim(led, speed, high, low):
""" Dims the led from high to low at the given speed """
dim_range = range(low, high)
wait_for = 1.0/speed
for c in reversed(dim_range):
piglow.set(led, c)
piglow.show()
sleep(wait_for)
def _cycle(leds, speed, low, high):
""" Cycle each LED from low to high in order """
pulse_range = range(low, high)
wait_for = 1.0/speed
# Set each LED to the LOW state
_set(leds, low)
for i in range(0, len(leds)):
for c in pulse_range:
# Increase the LED to HIGH
piglow.set(leds[i], c)
piglow.show()
sleep(wait_for)
# Decrease the previous LED back to LOW at same rate
if i > 0:
piglow.set(leds[i-1], high-(c-low))
piglow.show()
sleep(wait_for)
# Decrease the final LED back to LOW state
_dim(leds[-1], speed, high, low)
# Set each LED to the LOW state
_set(leds, low)
| 2.75
| 3
|
cogs/essentials/cog.py
|
jstan425/Cookie-Bot
| 2
|
12775411
|
<reponame>jstan425/Cookie-Bot
import disnake
import logging
import os
from disnake.ext import commands
from disnake.ext.commands import Param
class Essential(commands.Cog):
def __init__(self, bot:commands.Bot):
self.bot = bot
@commands.slash_command(description="Ping the bot!")
async def ping(self,
inter: disnake.ApplicationCommandInteraction,):
await inter.response.send_message(f"Cookie Crumps detected in {round(self.bot.latency * 1000)}ms")
@commands.slash_command(description="Reloads all cogs")
async def reload(self, inter: disnake.ApplicationCommandInteraction):
for folder in os.listdir("cogs"):
if os.path.exists(os.path.join("cogs", folder, "cog.py")):
self.bot.unload_extension(f"cogs.{folder}.cog")
self.bot.load_extension(f"cogs.{folder}.cog")
await inter.response.send_message('Cogs fully reloaded.')
def setup(bot: commands.Bot):
bot.add_cog(Essential(bot))
print('Essentials cog is now loaded' + "\n")
logger = logging.getLogger("disnake")
logger.info("Added Essential Cog")
def teardown(bot: commands.Bot):
bot.remove_cog("Essential")
print('Essentials cog is now unloaded' + "\n")
logger = logging.getLogger("disnake")
logger.info("Removed Essential Cog")
| 2.265625
| 2
|
setup.py
|
adiralashiva8/jmeter-metrics
| 12
|
12775412
|
<gh_stars>10-100
from setuptools import setup, find_packages
filename = 'jmeter_metrics/version.py'
exec(compile(open(filename, 'rb').read(), filename, 'exec'))
setup(name='jmeter-metrics',
version=__version__,
description='Custom dashboard report for Jmeter',
long_description='Dashboard view of jmeter results created by parsing .jtl or .csv file',
classifiers=[
'Programming Language :: Python',
'Topic :: Software Development :: Testing',
],
keywords='Jmeter report',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/adiralashiva8/jmeter-metrics',
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=True,
install_requires=[
'pandas',
'beautifulsoup4',
],
entry_points={
'console_scripts': [
'jmetermetrics=jmeter_metrics.runner:main',
]
},
)
| 1.367188
| 1
|
[5] MATH/1169 - Trigo no Tabuleiro.py
|
tiago040/URI-SOLUTIONS
| 1
|
12775413
|
'''
A queen requested the services of a monk and told him she would pay any price. The monk, who needed food, asked the queen whether the payment could be made in grains of wheat arranged on a checkers board, so that the first square held a single grain and each subsequent square held double the amount of the previous one. The queen considered the payment cheap and ordered the service to be carried out, but one of the knights present, who knew some mathematics, warned her that it would be impossible to pay, because the amount of grain would be far too large. Curious, the queen then asked this knight, who was good at calculation, to write a program that reads the number of squares of the checkers board to be used and prints the corresponding amount of wheat in kg, knowing that every 12 grains of the cereal correspond to one gram. Finally, the computed amount must fit in an unsigned 64-bit integer.
Input
The first line of the input contains a single integer N (1 ≤ N ≤ 100), indicating the number of test cases. Each test case contains a single integer X (1 ≤ X ≤ 64), indicating the number of board squares to be used.
Output
For each test case, print the amount of kg of wheat the monk expected to receive.
'''
N = int(input())
for i in range(N):
graos = []
c = 1
X = int(input())
for i in range(X):
graos.append(c)
c *= 2
qt_cereal = sum(graos)/12
qt_kg = qt_cereal/1000
qt_kg = int(qt_kg)
print(str(qt_kg)+' kg')
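# Closed-form note (illustrative, not part of the original submission): the grains on
# the first X squares form a geometric series, so sum(graos) == 2**X - 1 and the body
# of the outer loop could be reduced, with integer arithmetic, to:
#
#     X = int(input())
#     print('%d kg' % ((2**X - 1) // 12 // 1000))
#
# (The original uses float division, which can differ by rounding for X close to 64.)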
| 3.3125
| 3
|
threathunter_common_python/threathunter_common/bankcard_info/test.py
|
threathunterX/python_lib
| 2
|
12775414
|
<filename>threathunter_common_python/threathunter_common/bankcard_info/test.py
from bankcard_bin import get_issue_bank, get_card_type
if __name__ == "__main__":
fp = open( "data/bank_bin_info.csv", "r" )
lines = fp.readlines()
fp.close()
all_count = 0
matched_count = 0
not_matched_count = 0
print "\n"
for line in lines:
if len( line.split( "," ) ) == 3:
all_count += 1
info = line.split( "," )
if info[ 0 ].strip() == get_issue_bank( info[ 2 ].strip() ):
if info[ 1 ].strip() == get_card_type( info[ 2 ].strip() ):
# print line.strip() + "\t\t\tMatched"
matched_count += 1
else:
print line.strip() + "\t\t\tNot Matched"
not_matched_count += 1
else:
print line.strip() + "\t\t\tNot Matched"
not_matched_count += 1
print "\nAll records: \t\t" + str( all_count )
print "Matched records: \t\t" + str( matched_count )
print "Not Matched records: \t\t" + str( not_matched_count )
print "\n"
| 3.09375
| 3
|
horseback/chatobjects/chatobject.py
|
nasfarley88/horseback
| 0
|
12775415
|
class ChatObject:
def __init__(self, service, json):
"""Base class for objects emmitted from chat services."""
self.json = json
self.service = service
| 2.796875
| 3
|
mkmdtl/md.py
|
CounterPillow/mb2md
| 0
|
12775416
|
<reponame>CounterPillow/mb2md
from wcwidth import wcswidth
def get_max_title_len(tracklist):
"""Returns the visual length of the visually longest track in a tracklist.
"""
return max([wcswidth(x['title']) for x in tracklist])
def build_table(tracklist):
"""Takes a list of tracks in the form of {number, title, length} dicts,
formats them into a GitHub-flavoured markdown table, and returns the lines
of the formatted table as a list of strings, one string for each line.
"""
lines = []
max_len = get_max_title_len(tracklist)
# "this style is fucking gay" -- Hamuko, 2018, not letting me align things
header_fmt = '| No. | Title{} | Length|'
sep_fmt = '| ---:|:-----{} | -----:|'
track_fmt = '| {:2} | {}{} | {:02}:{:02} |'
lines.append(header_fmt.format(' ' * (max_len - len('Title'))))
lines.append(sep_fmt.format('-' * (max_len - len('Title'))))
for track in tracklist:
minutes = track['length'].seconds // 60
seconds = track['length'].seconds % 60
padding = max_len - wcswidth(track['title'])
lines.append(track_fmt.format(track['number'], track['title'],
' ' * padding, minutes, seconds))
return lines
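# Hypothetical demo (not part of the original module): build_table() expects each
# track's 'length' to expose a .seconds attribute, e.g. a datetime.timedelta.
if __name__ == '__main__':
    from datetime import timedelta
    demo_tracks = [
        {'number': 1, 'title': 'Intro', 'length': timedelta(minutes=1, seconds=5)},
        {'number': 2, 'title': '曖昧な夢', 'length': timedelta(minutes=4, seconds=32)},
    ]
    print('\n'.join(build_table(demo_tracks)))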
| 3
| 3
|
shiSock-0.3.0/testing.py
|
AnanyaRamanA/shiSock
| 0
|
12775417
|
import socket
import base64
from random import sample,shuffle
import pickle
import time
def name_generator(_len_ = 16, onlyText = False):
lower_case = list("abcdefghijklmnopqrstuvwxyz")
upper_case = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
special = list("!@#$%&*?")
number = list("0123456789")
if onlyText:
_all_ = lower_case + upper_case
else:
_all_ = lower_case + upper_case + special + number
shuffle(_all_)
return "".join(sample(_all_,_len_))
count = 0
print("Test Started...")
while True:
s = socket.socket()
s.connect(("192.168.43.206",9600))
name = name_generator(_len_ = 8, onlyText = True)
ini = base64.b64encode(pickle.dumps(name))
s.send(bytes(str(len(ini)).center(32,"-"),"utf-8"))
s.send(ini)
prepare_send_data = {
"channel" : "test",
"sender_name" : name,
"target_name" : "SERVER",
"data" : "Hello World"
}
prepare_for_send = base64.b64encode(pickle.dumps(prepare_send_data))
s.send(bytes(str(len(prepare_for_send)).center(32,"-"),"utf-8"))
s.send(prepare_for_send)
count += 1
print(count)
# time.sleep(1)
# C@C/piBsKTAP9?C
| 2.5
| 2
|
backend/app/api/v1/dependencies/employee.py
|
avinash010/qxf2-survey
| 1
|
12775418
|
<reponame>avinash010/qxf2-survey<filename>backend/app/api/v1/dependencies/employee.py
"""
This module contains the methods related to the nodes with employee label in the database
"""
import os
import sys
from pandas import DataFrame
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(\
os.path.abspath(__file__))))))
from db import session
from db.queries import cypher
GRAPH = session.auth()
def get_user_name(email):
"returns the Full name of a given employee email"
users = DataFrame(GRAPH.run(cypher.GET_USER_NAME), columns=['fullName', 'email'])
user_name = users.loc[users['email'] == email, 'fullName'].iloc[0]
return user_name
def get_user_id(email=None, all_users=False):
"returns the id of a given employee email"
users = DataFrame(GRAPH.run(cypher.GET_USER_ID), columns = ['ID', 'email'])
if all_users:
return users.ID.values.tolist()
if email is None:
user_id = users['ID'].iloc[-1]
else:
user_id = users.loc[users['email'] == email, 'ID'].iloc[0]
return user_id
def get_active_user_id(email=None, all_users=False):
"returns the id of a given active user employee email"
active_users=list(GRAPH.run(cypher.GET_ACTIVE_USER_ID))
users = DataFrame(active_users, columns = ['ID'])
if all_users:
return users.ID.values.tolist()
if email is None:
user_id = users['ID'].iloc[-1]
else:
user_id = users.loc[users['email'] == email, 'ID'].iloc[0]
return user_id
def set_employee_relation_properties(help_type, name, helped, date):
"sets the properties of employee relationship based on the given help_type"
if help_type=="given":
GRAPH.run(cypher.SET_GIVEN_PROP,\
parameters = {"user_name":name, "helped_name":helped, "date":date})
else:
GRAPH.run(cypher.SET_TAKEN_PROP,\
parameters = {"user_name":name, "helped_name":helped, "date":date})
def create_help_relation(help_type, name, helped, date):
"creates the relationship between the employee nodes"
if help_type == "taken":
GRAPH.run(cypher.CREATE_TAKEN_REL,\
parameters={"user_name":name, "helped_name":helped})
else:
GRAPH.run(cypher.CREATE_GIVEN_REL,\
parameters={"user_name":name, "helped_name":helped})
set_employee_relation_properties(help_type, name, helped, date)
def get_not_responded_user_emails(responded_users):
"returns a list of employee emails who are yet to respond to the survey"
responded_user_ids = [item for sublist in responded_users for item in sublist]
user_ids = get_active_user_id(all_users=True)
non_responded_user_ids = list(set(user_ids)-set(responded_user_ids))
non_responded_users = []
for user_id in non_responded_user_ids:
user = list(GRAPH.run(cypher.GET_USERS_BY_ID, parameters={"id": user_id}))
non_responded_users.append(user[0])
employee_list = [employee[0] for employee in non_responded_users]
return employee_list
| 2.796875
| 3
|
src/cania/utils/image.py
|
Cancer-Image-Analysis/cania-core
| 0
|
12775419
|
<reponame>Cancer-Image-Analysis/cania-core<filename>src/cania/utils/image.py
import tifffile
import cv2
import numpy as np
from cania.utils.vector import Vector
""" read images """
def read_rgb(filename):
return cv2.imread(filename, cv2.IMREAD_COLOR)
def read_gray(filename):
return cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
def read_mirax(filename):
pass
def read_lsm(filename):
return tifffile.imread(filename)
def read_tiff(filename):
return tifffile.imread(filename)
""" write images """
def write_rgb(filename, rgb_image):
cv2.imwrite(filename, rgb_image)
def write_bgr(filename, rgb_image):
cv2.imwrite(filename, rgb2bgr(rgb_image))
def write_tiff(filename, tiff_image):
tifffile.imwrite(filename, tiff_image, imagej=True)
def write_gray():
pass
""" new image """
def new_image(shape):
return np.zeros(shape=shape, dtype=np.uint8)
""" color conversion """
def bgr2hsv(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
def rgb2bgr(img):
return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
def gray2rgb(img):
return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
""" channels """
def split_channels(image):
return list(cv2.split(image))
""" draw on images """
def overlay(img, mask, color=[255, 255, 0], alpha=0.4, border_color='same'):
# Ref: http://www.pyimagesearch.com/2016/03/07/transparent-overlays-with-opencv/
out = img.copy()
img_layer = img.copy()
img_layer[np.where(mask)] = color
overlayed = cv2.addWeighted(img_layer, alpha, out, 1 - alpha, 0, out)
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if border_color == 'same':
cv2.drawContours(overlayed, contours, -1, color, 2)
elif border_color is not None:
cv2.drawContours(overlayed, contours, -1, border_color, 2)
return overlayed
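# Usage sketch for overlay() (illustrative only; the file name is hypothetical):
#
#     img = read_rgb("cells.png")
#     mask = np.zeros(img.shape[:2], dtype=np.uint8)
#     mask[100:200, 100:200] = 1                        # binary mask of a region
#     out = overlay(img, mask, color=[0, 255, 0], alpha=0.3)
#     write_rgb("cells_overlay.png", out)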
def fill_ellipses(mask, ellipses):
for ellipse in ellipses:
cv2.ellipse(mask, ellipse, 1, thickness=-1)
return mask
""" operations """
def resize(img, scale):
return cv2.resize(img, scale)
def count_in_mask(image, mask, threshold=0):
_, image_th = cv2.threshold(image, threshold, 1, cv2.THRESH_BINARY)
return np.count_nonzero(cv2.bitwise_and(image_th, image_th, mask=mask))
def mean_in_mask(image, mask):
return np.mean(cv2.bitwise_and(image, image, mask=mask))
def split_mask_with_line(mask, line):
line_mask = new_image(mask.shape)
line_mask = cv2.line(line_mask, line[0], line[1], 1, 2)
splitted_mask = cv2.bitwise_and(mask, cv2.bitwise_not(line_mask))
contours, _ = cv2.findContours(splitted_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
submasks = []
centroids = []
for i, c in enumerate(contours):
submask = new_image(mask.shape)
cv2.drawContours(submask, contours, i, 1, 2)
M = cv2.moments(c)
x_centroid = round(M['m10'] / M['m00'])
y_centroid = round(M['m01'] / M['m00'])
submasks.append(imfill(submask))
centroids.append(Vector(x_centroid, y_centroid))
return submasks, centroids
def intersection_with_line(mask, line):
line_mask = new_image(mask.shape)
line_mask = cv2.line(line_mask, line[0], line[1], 1, 2)
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
mask_cnt = new_image(mask.shape)
cv2.drawContours(mask_cnt, contours, -1, 1, 2)
intersection = cv2.bitwise_and(line_mask, mask_cnt)
centroid = np.mean(np.argwhere(intersection), axis=0)
return centroid
def imfill(img):
# https://www.learnopencv.com/filling-holes-in-an-image-using-opencv-python-c/
im_floodfill = img.copy()
# Mask used to flood filling.
# Notice the size needs to be 2 pixels larger than the image.
h, w = img.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0,0), 255)
# Invert floodfilled image
im_floodfill_inv = cv2.bitwise_not(im_floodfill)
# Combine the two images to get the foreground.
im_out = img | im_floodfill_inv
return im_out
| 2.78125
| 3
|
project-euler/548/euler_548_v1.py
|
zoffixznet/project-euler
| 0
|
12775420
|
#!/usr/bin/env python
# The Expat License
#
# Copyright (c) 2017, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import sys
from subprocess import Popen, PIPE
if sys.version_info > (3,):
long = int
xrange = range
cache = {'': long(1)}
def factor_sig(n):
pipe = Popen(['factor', str(n)], shell=False, stdout=PIPE).stdout
factors_s = re.sub('^[^:]*:', '', pipe.readline().decode())
pipe.close()
sig = {}
for x in re.findall('[0-9]+', factors_s):
if x not in sig:
sig[x] = 0
sig[x] += 1
return sorted(sig.values())
def check_factor_sig(n, good):
if factor_sig(n) != good:
print("%d is not good" % (n))
raise BaseException
return
check_factor_sig(24, [1, 3])
check_factor_sig(100, [2, 2])
check_factor_sig(1000, [3, 3])
# sig must be sorted.
def real_calc_num_chains(sig):
def helper(so_far, x, all_zeros):
if x == len(sig):
return 0 if all_zeros else calc_num_chains(sorted(so_far))
ret = 0
n = sig[x]
for c in xrange(n+1):
ret += helper(so_far + [n - c] if c < n else so_far,
x+1,
(all_zeros and (c == 0)))
return ret
return helper([], 0, True)
# sig must be sorted.
def calc_num_chains(sig):
sig_s = ','.join(str(x) for x in sig)
if sig_s not in cache:
cache[sig_s] = real_calc_num_chains(sig)
return cache[sig_s]
def calc_g(n):
return calc_num_chains(factor_sig(n))
def check_num_chains(n, good):
if calc_g(n) != good:
print("calc_num_chains %d is not good" % (n))
raise BaseException
return
check_num_chains(12, 8)
check_num_chains(48, 48)
check_num_chains(120, 132)
LIM = long('1' + ('0' * 16))
found = set()
found.add(long(1))
def iter_over_sigs(length):
if calc_num_chains([1] * length) > LIM:
return False
def helper(so_far):
if len(so_far) == length:
ret = calc_num_chains(list(reversed(so_far)))
if ret > LIM:
return False
if (ret == calc_g(ret)):
found.add(ret)
return True
for x in xrange(1, so_far[-1]+1):
if not helper(so_far + [x]):
if x == 1:
return False
return True
first = 1
while True:
if not helper([first]):
break
first += 1
return True
length = 1
while (iter_over_sigs(length)):
print("Finished len = %d" % (length))
length += 1
print("Result = %d" % (sum(found)))
| 2.25
| 2
|
benchmark_ofa_stereo.py
|
blackjack2015/once-for-all
| 0
|
12775421
|
# Once for All: Train One Network and Specialize it for Efficient Deployment
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# International Conference on Learning Representations (ICLR), 2020.
import os
import torch
import argparse
from ofa.stereo_matching.data_providers.stereo import StereoDataProvider
from ofa.stereo_matching.run_manager import StereoRunConfig, RunManager
from ofa.stereo_matching.elastic_nn.networks.ofa_aanet import OFAAANet
from ofa.stereo_matching.elastic_nn.training.progressive_shrinking import load_models
import numpy as np
from ofa.utils.pytorch_utils import get_net_info
parser = argparse.ArgumentParser()
parser.add_argument(
'-g',
'--gpu',
help='The gpu(s) to use',
type=str,
default='0')
parser.add_argument(
'-n',
'--net',
metavar='OFAAANet',
default='ofa_aanet',
choices=['ofa_aanet_d234_e346_k357_w1.0',
'ofa_aanet'],
help='OFA AANet networks')
args = parser.parse_args()
if args.gpu == 'all':
device_list = range(torch.cuda.device_count())
args.gpu = ','.join(str(_) for _ in device_list)
else:
device_list = [int(_) for _ in args.gpu.split(',')]
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
ofa_network = OFAAANet(ks_list=[3,5,7], expand_ratio_list=[2,4,6,8], depth_list=[2,3,4], scale_list=[2,3,4])
model_file = 'ofa_stereo_checkpoints/ofa_stereo_D234_E2468_K357_S4'
init = torch.load(model_file, map_location='cpu')
model_dict = init['state_dict']
ofa_network.load_state_dict(model_dict)
""" Randomly sample a sub-network,
you can also manually set the sub-network using:
ofa_network.set_active_subnet(ks=7, e=6, d=4)
"""
#ofa_network.sample_active_subnet()
#ofa_network.set_max_net()
d = 4
e = 8
ks = 7
s = 4
ofa_network.set_active_subnet(ks=ks, d=d, e=e, s=s)
subnet = ofa_network.get_active_subnet(preserve_weight=True)
#subnet = ofa_network
save_path = "ofa_stereo_checkpoints/aanet_D%d_E%d_K%d_S%d" % (d, e, ks, s)
torch.save(subnet.state_dict(), save_path)
net = subnet
net.eval()
net = net.cuda()
#net = net.get_tensorrt_model()
#torch.save(net.state_dict(), 'models/mobilefadnet_trt.pth')
get_net_info(net, input_shape=(3, 540, 960))
# fake input data
dummy_left = torch.randn(1, 3, 576, 960, dtype=torch.float).cuda()
dummy_right = torch.randn(1, 3, 576, 960, dtype=torch.float).cuda()
# INIT LOGGERS
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
repetitions = 30
timings=np.zeros((repetitions,1))
#GPU-WARM-UP
for _ in range(10):
_ = net(dummy_left, dummy_right)
# MEASURE PERFORMANCE
with torch.no_grad():
for rep in range(-3, repetitions):
starter.record()
_ = net(dummy_left, dummy_right)
ender.record()
# WAIT FOR GPU SYNC
torch.cuda.synchronize()
if rep >= 0:
curr_time = starter.elapsed_time(ender)
timings[rep] = curr_time
print(rep, curr_time)
mean_syn = np.sum(timings) / repetitions
std_syn = np.std(timings)
print(mean_syn)
| 2.0625
| 2
|
tests/domain/book/test_book.py
|
tamanobi/dddpy
| 170
|
12775422
|
import pytest
from dddpy.domain.book import Book, Isbn
class TestBook:
def test_constructor_should_create_instance(self):
book = Book(
id="book_01",
isbn=Isbn("978-0321125217"),
title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
page=560,
)
assert book.id == "book_01"
assert book.isbn == Isbn("978-0321125217")
assert (
book.title
== "Domain-Driven Design: Tackling Complexity in the Heart of Softwares"
)
assert book.page == 560
assert book.read_page == 0
def test_book_entity_should_be_identified_by_id(self):
book_1 = Book(
id="book_01",
isbn=Isbn("978-0321125217"),
title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
page=560,
read_page=50,
)
book_2 = Book(
id="book_01",
isbn=Isbn("978-0321125217"),
title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
page=560,
read_page=120,
)
book_3 = Book(
id="book_02",
isbn=Isbn("978-0321125217"),
title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
page=560,
read_page=50,
)
assert book_1 == book_2
assert book_1 != book_3
@pytest.mark.parametrize(
"read_page",
[
(0),
(1),
(320),
],
)
def test_read_page_setter_should_update_value(self, read_page):
book = Book(
id="book_01",
isbn=Isbn("978-0321125217"),
title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
page=560,
)
book.read_page = read_page
assert book.read_page == read_page
@pytest.mark.parametrize(
"read_page, expected",
[
(0, False),
(559, False),
(560, True),
],
)
def test_is_already_read_should_true_when_read_page_has_reached_last_page(
self, read_page, expected
):
book = Book(
id="book_01",
isbn=Isbn("978-0321125217"),
title="Domain-Driven Design: Tackling Complexity in the Heart of Softwares",
page=560,
)
book.read_page = read_page
assert book.is_already_read() == expected
| 2.90625
| 3
|
tests/test_print_commands.py
|
PackeTsar/meraki-cli
| 45
|
12775423
|
<gh_stars>10-100
import unittest
from unittest.mock import patch
from io import StringIO
from .ParsedArgs import ParsedArgs
from .Function import Function
from meraki_cli.__main__ import Args, _print_commands
EXPECT = """
meraki organization getOrganizationNetworks --pos1 'positional1' --pos2 \
'positional2' --kwargs '{"key1": "value1"}' \
"""
class TestPrintCommands(unittest.TestCase):
def setUp(self):
self.parsed_args = ParsedArgs()
self.arg_obj = Args(Function)
# Turn this and the target method into a ready to use command
self.arg_tup = [(['positional1', 'positional2'], {'key1': 'value1'})]
@patch('sys.argv', ['meraki'])
def testPrintCommands(self):
with patch('sys.stdout', new=StringIO()) as fake_out:
_print_commands(self.parsed_args, self.arg_tup,
self.arg_obj)
self.assertEqual(fake_out.getvalue(), EXPECT)
| 2.859375
| 3
|
src/__init__.py
|
pipspec/pipspec
| 0
|
12775424
|
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.0.1'
| 1.023438
| 1
|
Steganography_1.py
|
zhaoyangding/Compression-with-Constraints-Steganography
| 0
|
12775425
|
import os
import sys
from PIL import Image
import numpy as np
import random
import matplotlib.pyplot as plt
size_image = (256, 256)
class LSB:
# convert an (r, g, b) pixel of integers to 8-bit binary strings
def int2bin(self, image):
r, g, b = image
return (f'{r:08b}', f'{g:08b}', f'{b:08b}')
# convert 8-bit binary strings back to an (r, g, b) pixel of integers
def bin2int(self, image):
r, g, b = image
return (int(r, 2), int(g, 2), int(b, 2))
# define the encryption function
def encryption(self, original, secret):
pixel_1 = original.load()
pixel_2 = secret.load()
outcome = Image.new(original.mode, original.size)
pixel_new = outcome.load()
for i in range(size_image[0]):
for j in range(size_image[1]):
r1, g1, b1 = self.int2bin(pixel_1[i, j])
r2, g2, b2 = self.int2bin(pixel_2[i, j])
pixel_new[i, j] = self.bin2int((r1[:4] + r2[:4], g1[:4] + g2[:4], b1[:4] + b2[:4]))
return outcome
# define the decryption function
def decryption(self, image):
pixel_merge = image.load()
secret = Image.new(image.mode, image.size)
pixel_secret = secret.load()
for i in range(size_image[0]):
for j in range(size_image[1]):
r, g, b = self.int2bin(pixel_merge[i, j])
pixel_secret[i, j] = self.bin2int((r[4:] + '0000', g[4:] + '0000', b[4:] + '0000'))
return secret
if __name__ == '__main__':
test_images = []
for imgnames in os.listdir("./images_test/"):
test_images.append(Image.open("./images_test/" + imgnames).resize(size_image, Image.ANTIALIAS))
np.random.shuffle(test_images)
lsb_implementation = LSB()
test_original = test_images[0:12]
test_secret = test_images[12:24]
test_merge = []
test_reveal = []
for i in range(12):
test_merge.append(lsb_implementation.encryption(test_original[i], test_secret[i]))
test_reveal.append(lsb_implementation.decryption(test_merge[-1]))
# Number of secret and cover pairs to show.
n = 12
def show_image(img, n_rows, n_col, idx, gray=False, first_row=False, title=None):
ax = plt.subplot(n_rows, n_col, idx)
plt.imshow(img)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if first_row:
plt.title(title)
plt.figure(figsize=(4, 12))
for i in range(12):
n_col = 4
show_image(test_original[i], n, n_col, i * n_col + 1, first_row=i == 0, title='Cover')
show_image(test_secret[i], n, n_col, i * n_col + 2, first_row=i == 0, title='Secret')
show_image(test_merge[i], n, n_col, i * n_col + 3, first_row=i == 0, title='Merge')
show_image(test_reveal[i], n, n_col, i * n_col + 4, first_row=i == 0, title='Reveal')
plt.savefig('./result_1.jpg')
plt.show()
| 3.265625
| 3
|
util/__init__.py
|
Str4thus/BraiNN
| 0
|
12775426
|
<filename>util/__init__.py
from .managers import HtmlManager
| 1.226563
| 1
|
bot.py
|
menlen/jumaa
| 0
|
12775427
|
import os, sys
from PIL import Image, ImageDraw, ImageFont
import random, time
import telebot
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton
from telebot import types
TELEGRAM_TOKEN = '<KEY>'
bot = telebot.TeleBot(TELEGRAM_TOKEN)
channelId = -1001390673326
user_dict = {}
msgDict = [
'اللَّهُمَّ صَلِّ عَلَى مُحَمَّدٍ وَعَلَى آلِ مُحَمَّدٍ.\nАллоҳумма солли ъалаа муҳаммадив-ва ъалаа аали муҳаммад.',
'صَلَّى اللهُ عَلَى مُحَمَّدٍ.\nСоллаллоҳу ъалаа муҳаммад.',
'صَلَّى اللهُ عَلَيْهِ وَسَلَّمَ.\nСоллаллоҳу ъалайҳи ва саллам.',
'أَللَّهُمَّ صَلِّ وَسَلِّمْ وَبَارِكْ عَلَيْهِ.\nАллоҳумма солли ва саллим ва баарик ъалайҳ.',
'اللَّهُمَّ صَلِّ عَلَى مُحَمَّدٍ وَعَلَى آلِهِ وَسَلِّمْ.\nАллоҳумма солли ъалаа муҳаммадив-ва ъалаа аалиҳий ва саллим.',
'صَلَّى اللهُ وَسَلَّمَ عَلَى نَبِيِّنَا مُحَمَّدٍ وَعَلَى آلِهِ وَأَصْحَابِهِ أَجْمَعِينَ.\nСоллаллоҳу ва саллама ъалаа набиййинаа муҳаммад, ва ъалаа аалиҳий ва асҳаабиҳий ажмаъийн.'
]
msgOne = random.choice(msgDict)
def UImgTextWriter(ext):
IMAGES = [
'juma01.jpg',
'juma02.jpg',
'juma03.jpg',
'juma04.jpg',
'juma05.jpg',
'juma06.jpg',
'juma07.jpg',
'juma08.jpg',
'juma09.jpg',
'juma010.jpg',
'juma011.jpg',
]
try:
img = random.choice(IMAGES)
except:
time.sleep(2)
img = random.choice(IMAGES)
# get an image
base = Image.open(img).convert("RGBA")
ext = ext.upper()
text = ext
# make a blank image for the text, initialized to transparent text color
txt = Image.new("RGBA", base.size, (255,255,255,0))
# get a font
fnt = ImageFont.truetype("OpenSans-Italic.ttf", 40)
# get a drawing context
d = ImageDraw.Draw(txt)
# draw text, half opacity
d.text(((800)/4, 330), text, font=fnt, fill=(231,195,113,255), anchor='mb')
out = Image.alpha_composite(base, txt)
filename = random.randint(30,45)
g = out.save(f'{filename}.png')
return filename
def ImgTextWriter(ext):
IMAGES = [
'juma1.jpg',
'juma2.jpg',
'juma3.jpg',
'juma4.jpg',
'juma5.jpg',
'juma6.jpg',
'juma7.jpg',
'juma8.jpg',
'juma9.jpg',
'juma10.jpg',
'juma11.jpg',
]
try:
img = random.choice(IMAGES)
except:
time.sleep(2)
img = random.choice(IMAGES)
# get an image
base = Image.open(img).convert("RGBA")
ext = ext.upper()
text = ext
# make a blank image for the text, initialized to transparent text color
txt = Image.new("RGBA", base.size, (255,255,255,0))
# get a font
fnt = ImageFont.truetype("OpenSans-Italic.ttf", 40)
# get a drawing context
d = ImageDraw.Draw(txt)
# draw text, half opacity
d.text(((800)/4, 330), text, font=fnt, fill=(231,195,113,255), anchor='mb')
out = Image.alpha_composite(base, txt)
filename = random.randint(1,15)
g = out.save(f'{filename}.png')
return filename
def gen_markup():
markup = InlineKeyboardMarkup()
markup.row_width = 1
markup.add(InlineKeyboardButton("Azo bo'ling", callback_data="cb_yes", url='t.me/onideal'),
InlineKeyboardButton("Tasdiqlash", callback_data="cb_no"))
return markup
def getUserFromChannel(userId):
u = bot.get_chat_member(channelId, userId)
return u.status
@bot.callback_query_handler(func=lambda call: True)
def callback_query(call):
if call.data == "cb_yes":
bot.answer_callback_query(call.id, "Answer is Yes")
elif call.data == "cb_no":
u = getUserFromChannel(call.from_user.id)
if u == 'member':
msg = bot.send_message(call.from_user.id, """\
Juda soz!!!, Do'stingizni ismini yozing
""")
bot.register_next_step_handler(msg, process_name_step)
else:
bot.send_message(call.from_user.id, f"Salom {call.from_user.first_name}, Kanalimizga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tanlang", reply_markup=gen_markup())
def process_name_step(message):
try:
name = message.text
name = name.upper()
myfile = ImgTextWriter(name)
photoSend = open(f'{myfile}.png', 'rb')
caption = f"{name} : <NAME> muborak aziz dindoshim🕌🌙\
\nSizni Sayyid-ul Ayyom bilan qutlayman🌙\n{msgOne}\
\nO'zingiz yaxshi ko'rgan, jannatda xam birga bo'lishni istagan insonlaringizni O'z ismimlari bilan tabriklang. \n@JumaTabriklarbot"
bot.send_photo(message.chat.id, photoSend, caption=caption)
except Exception as e:
bot.reply_to(message, 'oooops')
def process_uname_step(message):
try:
name = message.text
name = name.upper()
myfile = UImgTextWriter(name)
photoSend = open(f'{myfile}.png', 'rb')
caption = f"Juma Ayyom muborak aziz dindoshlarim🕌🌙\
\nSizni Sayyid-ul Ayyom bilan qutlayman🌙,\n{msgOne}\
\nO'zingiz yaxshi ko'rgan, jannatda xam birga bo'lishni istagan insonlaringizga yuboring \n@JumaTabriklarbot"
bot.send_photo(message.chat.id, photoSend, caption=caption)
except Exception as e:
bot.reply_to(message, 'oooops')
@bot.message_handler(commands=['start','help'])
def start(message):
us = getUserFromChannel(message.chat.id)
if us == 'member':
markup = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)
btn1 = types.KeyboardButton("Do'stimga")
btn2 = types.KeyboardButton("O'zimga")
markup.add(btn1, btn2)
bot.send_message(message.chat.id, "Ass<NAME> Do'stim", reply_markup=markup)
else:
bot.send_message(message.chat.id, f"Salom {message.from_user.first_name}, Kanalimizga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tasdiqlang", reply_markup=gen_markup())
@bot.message_handler(func=lambda message: True)
def message_handler(message):
us = getUserFromChannel(message.chat.id)
if us == 'member':
msg = bot.send_message(message.chat.id, """\
Juda soz!!!, Do'stingizni ismini yozing. \nYoki /start /help ni bosing
""")
if message.text == "Do'stimga":
bot.register_next_step_handler(msg, process_name_step)
elif message.text == "O'zimga":
bot.register_next_step_handler(msg, process_uname_step)
else:
bot.send_message(message.chat.id, f"Salom {message.from_user.first_name}, kanallarga a'zo bo'ling va A'zolikni tekshirish buyrug'ini tanlang", reply_markup=gen_markup())
bot.polling(none_stop=True)
| 2.1875
| 2
|
python/gilded_rose/processors/base.py
|
guilhermesimas/GildedRose-Refactoring-Kata
| 0
|
12775428
|
<gh_stars>0
from abc import ABC, abstractmethod
from gilded_rose.entities import Item
class BaseProcessor(ABC):
@abstractmethod
def process_item(self, item: Item, **kwargs):
pass
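# Illustrative subclass (a sketch, not part of the kata solution): a concrete
# processor only has to implement process_item(). The Item fields used here
# (sell_in, quality) follow the usual Gilded Rose entity and are assumptions.
class AgedItemProcessor(BaseProcessor):
    def process_item(self, item: Item, **kwargs):
        # Aged items gain quality as they get older, capped at 50.
        item.sell_in -= 1
        item.quality = min(50, item.quality + 1)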
| 2.359375
| 2
|
cookie_demo/__init__.py
|
HarperHao/flask_study
| 0
|
12775429
|
<filename>cookie_demo/__init__.py
"""
Author : HarperHao
TIME : 2020/10/
FUNCTION:
"""
| 0.882813
| 1
|
pwn/decoutils.py
|
Haabb/pwnfork
| 1
|
12775430
|
def kwargs_remover(f, kwargs, check_list = None, clone = True):
'''Removes all the keys from a kwargs dict that the given function does not understand.
The keys removed can optionally be restricted, so only keys from check_list are removed.'''
import inspect
if check_list == None: check_list = kwargs.keys()
if clone: kwargs = kwargs.copy()
if not f.func_code.co_flags & 8:
args, varargs, keywords, defaults = getargspec(f)
for c in set(check_list).intersection(kwargs.keys()):
if c not in args:
del kwargs[c]
return kwargs
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None."""
import dis
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8
CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
step = 0
# The following acrobatics are for anonymous (tuple) arguments.
for i in range(nargs):
if args[i][:1] in ('', '.'):
stack, remain, count = [], [], []
while step < len(co.co_code):
op = ord(co.co_code[step])
step = step + 1
if op >= dis.HAVE_ARGUMENT:
opname = dis.opname[op]
value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256
step = step + 2
if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
remain.append(value)
count.append(value)
elif opname == 'STORE_FAST':
stack.append(names[value])
# Special case for sublists of length 1: def foo((bar))
# doesn't generate the UNPACK_TUPLE bytecode, so if
# `remain` is empty here, we have such a sublist.
if not remain:
stack[0] = [stack[0]]
break
else:
remain[-1] = remain[-1] - 1
while remain[-1] == 0:
remain.pop()
size = count.pop()
stack[-size:] = [stack[-size:]]
if not remain: break
remain[-1] = remain[-1] - 1
if not remain: break
args[i] = stack[0]
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return [args, varargs, varkw]
def getargspec(func):
return getargs(func.func_code) + [func.func_defaults if func.func_defaults else []]
def method_signature(f):
'''Returns the method signature for a function.'''
spec = getargspec(f)
args = []
def simple_arg(a):
if isinstance(a, list):
return '(' + ', '.join(map(simple_arg, a)) + ')'
return str(a)
if spec[2] != None:
args.append('**' + spec[2])
if spec[1] != None:
args.append('*' + spec[1])
for n in range(len(spec[0])):
cur = spec[0][len(spec[0])-n-1]
if n < len(spec[3]):
args.append(str(cur) + ' = ' + repr(spec[3][len(spec[3])-n-1]))
else:
args.append(simple_arg(cur))
return f.func_name + '(' + ', '.join(reversed(args)) + ')'
def ewraps(wrapped):
'''Extended version of functools.wraps.
This version also adds the original method signature to the docstring.'''
def deco(wrapper):
import functools
semi_fixed = functools.wraps(wrapped)(wrapper)
if not wrapped.__dict__.get('signature_added', False):
semi_fixed.__doc__ = method_signature(wrapped) + '\n\n' + (semi_fixed.__doc__ or '')
semi_fixed.__dict__['signature_added'] = True
return semi_fixed
return deco
# Copied from <NAME>'s blog:
# http://eli.thegreenplace.net/2009/08/29/co-routines-as-an-alternative-to-state-machines/
def coroutine(func):
def start(*args,**kwargs):
cr = func(*args,**kwargs)
cr.next()
return cr
return start
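# Usage sketch for the coroutine decorator (illustrative, Python 2 syntax to match
# this module): the wrapper advances the generator to its first yield, so callers
# can send() into it immediately.
#
#     @coroutine
#     def printer():
#         while True:
#             line = (yield)
#             print line
#
#     p = printer()
#     p.send('hello')    # prints 'hello'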
def memleaker(func):
'''Create an information leak object.'''
import leak
return leak.MemLeak(func)
| 3.109375
| 3
|
preferences/tests/test_views.py
|
rjw57/lecture-capture-preferences-webapp
| 1
|
12775431
|
import datetime
import itertools
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import dateparse, timezone
from furl import furl
from rest_framework.authtoken.models import Token
from rest_framework.test import APIRequestFactory, force_authenticate
from .. import views
from .. import models
class ViewTestCase(TestCase):
fixtures = ['preferences/tests/fixtures/common.yaml']
def setUp(self):
self.factory = APIRequestFactory()
self.get_request = self.factory.get('/')
self.user = get_user_model().objects.get(pk=1)
class ProfileViewTestCase(ViewTestCase):
def setUp(self):
super().setUp()
self.view = views.ProfileView().as_view()
self.expected_display_name = self.user.get_full_name()
def test_anonymous(self):
"""An anonymous user should have is_anonymous set to True."""
response = self.view(self.get_request)
self.assertTrue(response.data['is_anonymous'])
def test_authenticated(self):
"""A non-anonymous user should have is_anonymous set to False and username set."""
force_authenticate(self.get_request, user=self.user)
response = self.view(self.get_request)
self.assertFalse(response.data['is_anonymous'])
self.assertEqual(response.data['username'], self.user.username)
self.assertEqual(response.data['display_name'], self.expected_display_name)
def test_token_authenticated(self):
"""A token-authenticated user should get expected media back."""
token = Token.objects.create(user=self.user)
token_get_request = self.factory.get('/', HTTP_AUTHORIZATION=f'Token {token.key}')
response = self.view(token_get_request)
self.assertFalse(response.data['is_anonymous'])
self.assertEqual(response.data['username'], self.user.username)
self.assertEqual(response.data['display_name'], self.expected_display_name)
def test_last_name_only(self):
"""A last name only user should have that as the display name."""
self.user.first_name = ''
self.user.last_name = 'GHJ'
force_authenticate(self.get_request, user=self.user)
response = self.view(self.get_request)
self.assertEqual(response.data['display_name'], 'GHJ')
def test_first_name_only(self):
"""A first name only user should have that as the display name."""
self.user.first_name = 'GHJ'
self.user.last_name = ''
force_authenticate(self.get_request, user=self.user)
response = self.view(self.get_request)
self.assertEqual(response.data['display_name'], 'GHJ')
def test_no_name(self):
"""A user with no name should fall back to the username as a display name."""
self.user.first_name = ''
self.user.last_name = ''
force_authenticate(self.get_request, user=self.user)
response = self.view(self.get_request)
self.assertEqual(response.data['display_name'], self.user.username)
class PreferenceListViewTestCase(ViewTestCase):
def setUp(self):
super().setUp()
self.view = views.PreferenceListView().as_view()
# Some common querysets
self.all_qs = models.Preference.objects.all()
self.most_recent_qs = models.Preference.objects.all().filter_most_recent_expressed_at()
def test_basic_functionality(self):
"""A basic GET returns all the user preferences."""
results = self._request_all()['results']
self.assertEqual(len(results), self.most_recent_qs.count())
def test_pagination(self):
"""A page size of one takes as many pages as there are results."""
response = self._request_all(data={'page_size': 1})
self.assertGreater(response['page_count'], 1)
self.assertEqual(len(response['results']), response['page_count'])
def test_user_filtering(self):
"""Filtering by user returns correct result."""
user_pref = self.most_recent_qs.filter(user=self.user).first()
self.assertIsNotNone(user_pref)
results = self._request_all({'user': self.user.username})['results']
self.assertEqual(len(results), 1)
self._assert_preference_dict_matches(results[0], user_pref)
def test_ordering_by_expressed_at_descending(self):
"""Ordering by descending expressed_at gives correct result."""
expected_prefs = self.most_recent_qs.order_by('-expressed_at')
results = self._request_all({'ordering': '-expressed_at'})['results']
self.assertEqual(len(results), expected_prefs.count())
for pref_dict, pref in zip(results, expected_prefs):
self._assert_preference_dict_matches(pref_dict, pref)
def test_ordering_by_expressed_at_ascending(self):
"""Ordering by ascending expressed_at gives correct result."""
expected_prefs = self.most_recent_qs.order_by('expressed_at')
results = self._request_all({'ordering': 'expressed_at'})['results']
self.assertEqual(len(results), expected_prefs.count())
for pref_dict, pref in zip(results, expected_prefs):
self._assert_preference_dict_matches(pref_dict, pref)
def test_expressed_at_query(self):
"""Can query list by expressed_at range."""
minimum = self.most_recent_qs.order_by('expressed_at')[0]
maximum = self.most_recent_qs.order_by('-expressed_at')[0]
self.assertGreater(maximum.expressed_at, minimum.expressed_at)
self.assertGreater(self.most_recent_qs.count(), 2)
# Get the expected preferences between lower and upper quartile dates
expected_prefs = self.most_recent_qs.filter(
expressed_at__gt=minimum.expressed_at,
expressed_at__lt=maximum.expressed_at).order_by('-expressed_at')
self.assertTrue(expected_prefs.exists())
# Get list returned by query
results = self._request_all({
'ordering': '-expressed_at',
'expressed_at_after':
(minimum.expressed_at + datetime.timedelta(seconds=0.1)).isoformat(),
'expressed_at_before':
(maximum.expressed_at - datetime.timedelta(seconds=0.1)).isoformat(),
})['results']
self.assertEqual(len(results), expected_prefs.count())
for pref_dict, pref in zip(results, expected_prefs):
self._assert_preference_dict_matches(pref_dict, pref)
def test_creation(self):
"""POST-ing preferences updates preferences for user"""
for allow_capture, request_hold in itertools.product([True, False], [True, False]):
# Update user preference
request = self.factory.post('/', {
'allow_capture': allow_capture, 'request_hold': request_hold
})
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertEqual(response.status_code, 201) # created
# Most recent preference is updated
pref = self.most_recent_qs.filter(user=self.user).first()
self.assertIsNotNone(pref)
self.assertEqual(pref.allow_capture, allow_capture)
self.assertEqual(pref.request_hold, request_hold)
def test_anonymous_creation(self):
"""POST-ing preferences updates preferences for anonymous user fails"""
request = self.factory.post('/', {
'allow_capture': True, 'request_hold': False
})
response = self.view(request)
self.assertEqual(response.status_code, 403) # Forbidden
def test_creation_ignores_expressed_at(self):
"""POST-ing preferences updates preferences for user and ignores any expressed_at"""
# Update user preference
expressed_at_request = timezone.now() - datetime.timedelta(days=34)
request = self.factory.post('/', {
'allow_capture': True, 'request_hold': False,
'expressed_at': expressed_at_request.isoformat()
})
force_authenticate(request, user=self.user)
prev_pref = self.most_recent_qs.filter(user=self.user).first()
response = self.view(request)
self.assertEqual(response.status_code, 201) # created
pref = self.most_recent_qs.filter(user=self.user).first()
self.assertNotEqual(prev_pref.id, pref.id)
self.assertNotEqual(pref.expressed_at, expressed_at_request)
def _assert_preference_dict_matches(self, pref_dict, pref):
"""
Assert that a preference returned from the API matches a database object.
"""
self.assertEqual(pref_dict['user']['username'], pref.user.username)
self.assertEqual(pref_dict['allow_capture'], pref.allow_capture)
self.assertEqual(pref_dict['request_hold'], pref.request_hold)
self.assertEqual(dateparse.parse_datetime(pref_dict['expressed_at']), pref.expressed_at)
def _request_all(self, data=None, page_count_max=20):
"""
Fetch all preference objects from the API. Returns an object of the form
{'results': [...], 'page_count': number }
"""
results = []
page_count = 0
# Use the furl library so that it's easy to merge query arguments in.
url = furl('/')
if data is not None:
url.args.update(data)
while True:
request = self.factory.get(url.url)
response = self.view(request)
self.assertEqual(response.status_code, 200)
page_count += 1
results.extend(response.data['results'])
# We're done if we've run out of pages
if response.data.get('next') is None:
break
# Update the URL from the "next" field in the response
url = furl(response.data.get('next'))
if page_count > page_count_max:
assert False, f'Exceeded maximum page count of {page_count_max}'
return {'results': results, 'page_count': page_count}
| 2.359375
| 2
|
invenio_records_presentation/views.py
|
CESNET/invenio-records-presentation
| 0
|
12775432
|
<gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CESNET.
#
# Invenio Records Presentation is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Blueprint definitions."""
from __future__ import absolute_import, print_function
import time
import traceback
from functools import wraps
import logging
from uuid import UUID
from celery._state import app_or_default
from celery.result import AsyncResult, result_from_tuple
from flask import Blueprint, jsonify, abort, request, Response, current_app
from flask_login import current_user
from invenio_pidstore.models import PersistentIdentifier
from invenio_userprofiles import UserProfile
from invenio_workflows import WorkflowEngine
from workflow.errors import WorkflowDefinitionError
from .api import Presentation, PresentationWorkflowObject
from .errors import PresentationNotFound, WorkflowsPermissionError
from .proxies import current_records_presentation
logger = logging.getLogger(__name__)
blueprint = Blueprint(
'invenio_records_presentation',
__name__,
url_prefix='/presentation/1.0'
)
"""Blueprint used for loading templates and static assets
The sole purpose of this blueprint is to ensure that Invenio can find the
templates and static files located in the folders of the same names next to
this file.
"""
def pass_result(f):
"""Decorate to provide an AsyncResult instance of the job."""
@wraps(f)
def decorate(*args, **kwargs):
job_uuid = kwargs.pop('job_uuid')
Result = app_or_default(None).AsyncResult
result = Result(job_uuid, parent=None)
# result: AsyncResult = result_from_tuple([[job_uuid, None], None])
# if result is None:
# abort(400, 'Invalid job UUID')
return f(result=result, *args, **kwargs)
return decorate
def pass_presentation(f):
"""Decorate to provide a presentation instance."""
@wraps(f)
def decorate(*args, **kwargs):
presid = kwargs.pop('presentation_id')
try:
presentation = current_records_presentation.get_presentation(presid)
return f(presentation=presentation, *args, **kwargs)
except PresentationNotFound:
abort(400, 'Invalid presentation type')
return decorate
def with_presentations(f):
""" Init all presentation objects """
@wraps(f)
def decorate(*args, **kwargs):
current_records_presentation.init_presentations()
return f(*args, **kwargs)
return decorate
@blueprint.route("/")
@with_presentations
def index():
return 'presentation loaded successfully'
@blueprint.route('/prepare/<string:pid_type>/<string:pid>/<string:presentation_id>/', methods=('POST',))
@with_presentations
def pid_prepare(pid_type: str, pid: str, presentation_id: str):
pid_record = PersistentIdentifier.query.filter_by(pid_type=pid_type, pid_value=pid).one_or_none()
if pid_record:
return prepare(str(pid_record.object_uuid), presentation_id=presentation_id)
else:
abort(404, 'Record with PID {}:{} not found'.format(pid_type, pid))
@blueprint.route('/prepare/<string:record_uuid>/<string:presentation_id>/', methods=('POST',))
@with_presentations
@pass_presentation
def prepare(record_uuid: str, presentation: Presentation):
if current_user.is_anonymous:
user_meta = {
'id': None,
'email': None,
'login_ip': None,
'current_ip': str(request.remote_addr),
'roles': [],
'full_name': 'Anonymous',
'username': None
}
else:
profile_meta = {}
profile: UserProfile = UserProfile.get_by_userid(current_user.id)
if profile:
profile_meta = {
'full_name': profile.full_name,
'username': profile.username,
}
user_meta = {
'id': current_user.id,
'email': current_user.email,
'current_ip': str(request.remote_addr),
'login_ip': str(current_user.current_login_ip),
'roles': [{'id': role.id, 'name': role.name} for role in current_user.roles]
}
user_meta.update(profile_meta)
headers = {k: v for k, v in request.headers}
try:
result = presentation.prepare(record_uuid, user_meta, headers, delayed=True)
if isinstance(result, AsyncResult):
return jsonify({'job_id': result.task_id})
else:
return jsonify({'job_id': result})
except WorkflowsPermissionError as e:
logger.exception('Exception detected in prepare')
abort(403, e)
except WorkflowDefinitionError:
logger.exception('Exception detected in prepare')
abort(400, 'There was an error in the {} workflow definition'.format(presentation.name))
@blueprint.route('/status/<string:job_uuid>/')
@pass_result
def status(result: AsyncResult):
if result.state == 'FAILURE':
print(result.traceback)
try:
eng_uuid = str(UUID(result.info, version=4))
engine = WorkflowEngine.from_uuid(eng_uuid)
object = engine.objects[-1]
info = {'current_data': object.data,
'created': object.created,
'modified': object.modified}
except Exception:
logger.exception('Exception detected in status')
info = str(result.info)
return jsonify({'status': result.state, 'info': info})
import unicodedata
def strip_accents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
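# Illustrative example (not from the original code): NFD-decomposing and dropping
# combining marks turns u'Příliš žluťoučký' into u'Prilis zlutoucky', which is why
# it is applied to the Content-Disposition filename below.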
@blueprint.route('/download/<string:job_uuid>/')
@pass_result
def download(result: AsyncResult):
for i in range(10):
try:
time.sleep(1)
eng_uuid = result.get() # Will wait until task has completed
break
except:
traceback.print_exc()
if i == 9:
raise
time.sleep(5)
engine = WorkflowEngine.from_uuid(eng_uuid)
object = PresentationWorkflowObject(engine.objects[-1])
data_path = object.scratch.full_path(object.data['path'])
def serve():
with open(data_path, 'rb') as f:
while True:
buf = f.read(128000)
if not buf:
break
yield buf
return Response(serve(), mimetype=object.data['mimetype'], headers={
'Content-disposition': 'inline; filename=\"{}\"'.format(strip_accents(object.data['filename'])),
'Content-Security-Policy': "object-src 'self';"
})
| 2
| 2
|
src/caracara/rtr/_scripts.py
|
LaudateCorpus1/caracara
| 1
|
12775433
|
<reponame>LaudateCorpus1/caracara
"""Script interactions."""
from .._tool import Tool
class Scripts(Tool):
"""Class to represent Script interactions."""
def upload(self: object, script: str, script_name: str):
"""Upload a script."""
self.display(f" Uploading script {script_name}")
upload = self.api.rtr_admin.create_scripts(
data={
"name": script_name,
"content": script,
"platform": "linux", # need params for these
"permission_type": "private", # need params for these
"description": f"RTR script {script_name}"
}, files=[(script_name, (script_name, 'application/script'))]
)
self.display(f" Script {script_name} uploaded")
return bool(upload["status_code"] in [200, 409])
def remove(self: object, script_name: str):
"""Delete a script."""
self.display(f" Removing script {script_name}")
delete = self.api.rtr_admin.delete_scripts(ids=self.api.rtr_admin.list_scripts(
filter=f"name:'{script_name}'"
)["body"]["resources"][0]
)
self.display(f" Script {script_name} removed")
return bool(delete["status_code"] == 200)
| 2.578125
| 3
|
test_package/module2.py
|
BigMountainTiger/p-virtualenv-excercise
| 0
|
12775434
|
<reponame>BigMountainTiger/p-virtualenv-excercise<filename>test_package/module2.py<gh_stars>0
print('module2.py is initiated')
module2 = { 'name': 'module2' }
| 1.632813
| 2
|
pcr_cycle_sweep/count_all_amplicons.py
|
jackwadden/UltraRapidSeq
| 3
|
12775435
|
#!/usr/bin/python
import sys
import os
fn = "pileup.txt"
coverage_thresh = 5
if not os.path.isfile(fn):
print("File not found...")
sys.exit()
with open(fn) as fp:
hotspot_count = 0
hotspot_read_count = 0
in_hotspot = False
max_coverage = 0
hotspot_chr = ""
hotspot_start = 0
hotspot_end = 0
for line in fp:
# parse each line (chr, loc, base, coverage, codes, quality)
fields = line.split()
coverage = int(fields[3])
if not in_hotspot :
if coverage > coverage_thresh :
in_hotspot = True
hotspot_chr = str(fields[0])
hotspot_start = str(fields[1])
hotspot_count = hotspot_count + 1
max_coverage = coverage
#print(line)
else :
#print(line)
if coverage > max_coverage:
max_coverage = coverage
if coverage < coverage_thresh and in_hotspot:
#print(max_coverage)
hotspot_read_count = hotspot_read_count + max_coverage
hotspot_end = str(fields[1])
print(hotspot_chr + "\t" + hotspot_start + "\t" + hotspot_end)
max_coverage = 0
in_hotspot = False
#print("Num hotspots: ", hotspot_count)
#print("Hotspot reads: ", hotspot_read_count)
| 3.09375
| 3
|
models/basicnet.py
|
jaejun-yoo/TDDIP
| 11
|
12775436
|
import numpy as np
import torch
import torch.nn as nn
def conv(in_channels, out_channels, kernel_size, bias=True):
return nn.Conv2d(
in_channels, out_channels, kernel_size,
padding=(kernel_size//2), bias=bias)
class MappingNet(nn.Module):
def __init__(self, opt):
super().__init__()
latent_dim = opt.latent_dim
style_dim = opt.style_size**2
hidden_dim = opt.hidden_dim
depth = opt.depth
layers = []
layers += [nn.Linear(latent_dim, hidden_dim)]
layers += [nn.ReLU()]
for _ in range(depth):
layers += [nn.Linear(hidden_dim, hidden_dim)]
layers += [nn.ReLU()]
layers += [nn.Linear(hidden_dim, style_dim)]
self.net = nn.Sequential(*layers)
def forward(self, z):
out = self.net(z)
return out
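# Net: a convolutional decoder that refines ndf feature maps Nr times, upsamples
# by a factor of 2 num_ups times (each upsample followed by conv + BN + ReLU),
# refines Nr more times, and finally projects to out_ch output channels.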
class Net(nn.Module):
def __init__(self, opt):
super().__init__()
inp_ch=opt.input_nch
ndf=opt.ndf
out_ch=opt.output_nch
Nr=opt.Nr
num_ups=int(np.log2(opt.up_factor))
need_bias=opt.need_bias
upsample_mode=opt.upsample_mode
layers = [conv(inp_ch, ndf, 3, bias=need_bias),
nn.BatchNorm2d(ndf),
nn.ReLU(True)]
for _ in range(Nr):
layers += [conv(ndf, ndf, 3, bias=need_bias),
nn.BatchNorm2d(ndf),
nn.ReLU(True)]
for _ in range(num_ups):
layers += [nn.Upsample(scale_factor=2, mode=upsample_mode),
conv(ndf, ndf, 3, bias=need_bias),
nn.BatchNorm2d(ndf),
nn.ReLU(True)]
for _ in range(Nr):
layers += [conv(ndf, ndf, 3, bias=need_bias),
nn.BatchNorm2d(ndf),
nn.ReLU(True)]
layers += [conv(ndf, out_ch, 3, bias=need_bias)]
self.net = nn.Sequential(*layers)
def forward(self, z, s=None):
out = self.net(z)
return out
| 2.328125
| 2
|
pymachine/__init__.py
|
landrew31/pymachine
| 1
|
12775437
|
<filename>pymachine/__init__.py
from .condition import Condition
from .exceptions import (
DuplicateCondition,
StateMachineAlreadyFinished,
StateMachineTransitionWithoutNextState,
UnknownInput,
UnknownState,
)
from .state_machine import StateMachine
from .transition_table import TransitionTable
__all__ = (
'Condition',
'DuplicateCondition',
'StateMachineAlreadyFinished',
'StateMachineTransitionWithoutNextState',
'UnknownInput',
'UnknownState',
'StateMachine',
'TransitionTable',
)
| 1.71875
| 2
|
practice_app/migrations/0010_alter_museumapicsv_accessionnumber.py
|
VinayArora404219/crud-ops-practice-codeops
| 0
|
12775438
|
# Generated by Django 4.0 on 2021-12-22 04:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('practice_app', '0009_alter_museumapicsv_additionalimages_and_more'),
]
operations = [
migrations.AlterField(
model_name='museumapicsv',
name='accessionNumber',
field=models.CharField(max_length=50),
),
]
| 1.359375
| 1
|
bcs_server/bcs_server.py
|
nikhilgarg459/bcs
| 0
|
12775439
|
#!usr/bin/env python
# -*-coding:utf8-*-
from bank import Bank
from bank import Account
import socket
import time
from server import Server
from server_logger import log
__doc__ = """
* This module provides the BcsServer class that runs the BCS server.
* BcsServer extends the Server class.
"""
class BcsServer(Server):
def __init__(self):
super(BcsServer, self).__init__()
Bank()
def start(self, conn, addr):
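        # Per-client session loop: acknowledge the connection, then keep reading
        # requests and answering them until the client logs out, authentication
        # fails, or an error closes the socket.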
self.respond(conn, "connecting...", str("type=valid"))
log.info('Session started with %s' % addr)
login_params = None
debug = None
try:
while True:
request_message, request_params = self.receive(conn, addr)
# Get response message and parameters
response_params = None
response_msg = None
debug = False
log.info('Request from %s - %s' % (addr, request_message))
if request_message == "authenticate":
login_params = request_params
response_msg, account_type = Bank().login(
request_params['email'],
request_params['password'])
response_params = str("type=" + account_type)
elif request_message == "logout":
response_msg = "Logout Successful"
del Bank().logged_ins[login_params['email']]
else:
response_msg, response_params, debug = self.bank_operation(
request_message,
request_params)
# Respond to client
self.respond(conn, response_msg, response_params)
if debug:
log.debug('Response to %s - %s' % (addr, response_msg))
log.info('Passbook sent to %s' % (addr))
else:
log.info('Response to %s - %s' % (addr, response_msg))
# Close connection if authentication failed or logout
if ("Login Unsuccessful" in response_msg or
response_msg == "Logout Successful"):
conn.close()
break
except Exception as e:
            if login_params and login_params['email'] in Bank().logged_ins:
del Bank().logged_ins[login_params['email']]
log.error(e)
log.error('Error after menu ' + str(addr))
finally:
self.count -= 1
conn.close()
def bank_operation(self, request_message, request_params):
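        # Dispatch the request to the matching Bank() operation; getPassbook also
        # sets the debug flag so the (long) passbook response is logged at debug level.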
response_msg = None
response_params = None
debug = False
if request_message == "addAccount":
response_msg = Bank().addAccount(Account(request_params['name'],
request_params['email'],
request_params['password'],
request_params['type']))
elif request_message == "deleteAccount":
response_msg = Bank().deleteAccount(request_params['email'])
elif request_message == "changePassword":
response_msg = Bank().changePassword(request_params['email'],
request_params['password'])
elif request_message == "withdraw":
log.debug('withDraw: %s' % str(request_params))
response_msg = Bank().withDraw(request_params['email'],
request_params['amount'])
elif request_message == "deposit":
response_msg = Bank().deposit(request_params['email'],
request_params['amount'])
elif request_message == "getPassbook":
response_msg = Bank().getPassbook(request_params['email'])
debug = True
return response_msg, response_params, debug
if __name__ == '__main__':
server_app = BcsServer()
try:
server_app.listen()
except KeyboardInterrupt:
        log.info('Keyboard Interrupt, Shutting down the server')
| 2.859375
| 3
|
2018/2018_22a.py
|
davidxiao93/Advent-of-Code
| 0
|
12775440
|
<reponame>davidxiao93/Advent-of-Code
from collections import namedtuple
Point = namedtuple("Point", ["x", "y"])
def add_point(p: Point, q: Point):
return Point(p.x + q.x, p.y + q.y)
geologic_index_dict = {}
erosion_level_dict = {}
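# Geologic index / erosion level rules (AoC 2018 day 22), memoised in the dicts
# above: the mouth and the target are 0, the first row scales x by 16807, the
# first column scales y by 48271, and every other cell multiplies the erosion
# levels of its left and upper neighbours; erosion level = (geologic index + depth) % 20183.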
def get_geologic_index(p: Point, target: Point) -> int:
if p in geologic_index_dict:
return geologic_index_dict[p]
if p.x == 0 and p.y == 0:
geologic_index_dict[p] = 0
elif p.x == target.x and p.y == target.y:
geologic_index_dict[p] = 0
elif p.y == 0:
geologic_index_dict[p] = p.x * 16807
elif p.x == 0:
geologic_index_dict[p] = p.y * 48271
else:
geologic_index_dict[p] = get_erosion_level(
add_point(p, Point(-1, 0)), target
) * get_erosion_level(
add_point(p, Point(0, -1)), target
)
return geologic_index_dict[p]
def get_erosion_level(p: Point, target: Point) -> int:
if p in erosion_level_dict:
return erosion_level_dict[p]
geologic_index = get_geologic_index(p, target)
erosion_level = (geologic_index + depth) % 20183
erosion_level_dict[p] = erosion_level
return erosion_level_dict[p]
def get_type(p: Point, target: Point) -> int:
return get_erosion_level(p, target) % 3
def get_printable_type(p: Point, target: Point) -> str:
type = get_type(p, target)
if type == 0:
# rocky
return "."
elif type == 1:
# wet
return "="
else:
# narrow
return "|"
start = Point(0, 0)
target = Point(10, 715)
depth = 3339
# for y in range(16):
# row = []
# for x in range(16):
# if Point(x, y) == start:
# row.append("M")
# elif Point(x, y) == target:
# row.append("T")
# else:
# row.append(get_printable_type(Point(x, y), target, depth, geologic_index_dict, erosion_level_dict))
# print("".join(row))
def get_risk_level(start: Point, target: Point) -> int:
risk = 0
for y in range(start.y, target.y + 1):
for x in range(start.x, target.x + 1):
p = Point(x, y)
risk += get_type(p, target)
return risk
print(get_risk_level(start, target))
| 3.75
| 4
|
src/yael_arenarewards_dialogs.py
|
KnowsCount/money-and-honour
| 1
|
12775441
|
<gh_stars>1-10
# -*- coding: us-ascii -*-
#### HEADER
from header_common import *
from header_dialogs import *
from header_operations import *
from header_parties import *
from header_item_modifiers import *
from header_skills import *
from header_triggers import *
from ID_troops import *
from ID_party_templates import *
from module_constants import *
#### MOD
from yael_util import *
print ' ' + __name__
def modmerge(var_set):
from traceback import print_exc
try:
dialogs = var_set['dialogs']
#### For each arena-result dialog identify the tier, and patch the experience amount.
result_dialogs = [
diag for diag in dialogs
if diag[1] == 'arena_master_fight_result' ]
y_dump_tree(result_dialogs)
for diag in result_dialogs:
diag[5][0:0] = [
(store_mul, ":yael_arenarewards_exp", "$g_arena_training_kills", yael_arena_exp_per_enemy),
(add_xp_to_troop,":yael_arenarewards_exp", "trp_player"),
]
except:
print_exc()
| 1.742188
| 2
|
trunk/Documentacion/Memoria/trozos-codigo/codigo-9-tcp-test-tcp-id.py
|
MGautier/security-sensor
| 2
|
12775442
|
def test_tcp_id(self):
"""
        Check that the port (an inherited object) matches the one associated with the protocol.
Returns:
"""
port = Ports.objects.get(Tag="ssh")
tcp = Tcp.objects.get(id=port)
self.assertEqual(tcp.get_id(), port)
| 2.75
| 3
|
upload_ytmusic.py
|
ashpipe/youtube-music-autouploader
| 3
|
12775443
|
""" This script uploads created music files in directories to youtube music library """
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from ytmusicapi import YTMusic
import music_tag
from datetime import date
import os
directories = [r"D:\Kwan\Desktop", r"D:\Kwan\Music"]  # raw strings: Windows paths contain backslashes
# The torrent download folder is not watched because downloads there are slow (> 60 s)
fill_empty_tag = True # Fill empty music tags or not
ytmusic = YTMusic("ytmusic_auth.json") # Authentication file
filetypes = [".mp3", "flac", ".wma", ".m4a", ".ogg"]  # compared against the last four characters of the filename
def set_tag(fn):
""" This function sets music tags if empty """
f = music_tag.load_file(fn)
title = os.path.splitext(os.path.basename(fn))[0]
title = title.split("-",1) # Assumes 'artist - song name' format
if f["year"].value == 0:
f["year"] = int(date.today().strftime("%Y"))
if f["title"].value == "":
f["title"] = title[-1]
if f["artist"].value == "":
f["artist"] = title[0]
f.save()
def on_created(event):
""" This function gets executed when a file is created in directories being monitored """
fn = event.src_path
print(f"fn is {fn} and extension is {fn[-4:]}")
if fn[-4:] in filetypes:
time.sleep(30) # Wait until download is done
try:
if fill_empty_tag:
set_tag(fn)
ytmusic.upload_song(fn)
        except Exception:
            print("Upload failed or file does not exist")
if __name__ == "__main__":
patterns = "*"
ignore_patterns = ""
ignore_directories = False
case_sensitive = True
my_event_handler = PatternMatchingEventHandler(
patterns, ignore_patterns, ignore_directories, case_sensitive
)
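    # Watch every configured directory recursively and hand new-file events to
    # on_created; the main thread just sleeps until Ctrl-C stops the observer.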
my_event_handler.on_created = on_created
my_observer = Observer()
for path in directories:
my_observer.schedule(my_event_handler, path, recursive=True)
my_observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
my_observer.stop()
my_observer.join()
| 3.109375
| 3
|
spiketoolkit/validation/quality_metric_classes/noise_overlap.py
|
teristam/spiketoolk
| 55
|
12775444
|
import numpy as np
from copy import copy
from .utils.thresholdcurator import ThresholdCurator
from .quality_metric import QualityMetric
import spiketoolkit as st
import spikemetrics.metrics as metrics
from spikemetrics.utils import printProgressBar
from spikemetrics.metrics import find_neighboring_channels
from collections import OrderedDict
from sklearn.neighbors import NearestNeighbors
from .parameter_dictionaries import update_all_param_dicts_with_kwargs
class NoiseOverlap(QualityMetric):
installed = True # check at class level if installed or not
    installation_mesg = ""  # error message shown when required dependencies are missing
params = OrderedDict([('num_channels_to_compare', 13),
('max_spikes_per_unit_for_noise_overlap', 1000),
('num_features', 10),
('num_knn', 6)])
curator_name = "ThresholdNoiseOverlaps"
def __init__(self, metric_data):
QualityMetric.__init__(self, metric_data, metric_name="noise_overlap")
if not metric_data.has_recording():
raise ValueError("MetricData object must have a recording")
def compute_metric(self, num_channels_to_compare, max_spikes_per_unit_for_noise_overlap,
num_features, num_knn, **kwargs):
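        # Noise overlap: compare each unit's spike waveforms with noise snippets
        # taken at random times, project both into a PCA feature space and run a
        # kNN search; the metric is 1 minus the fraction of neighbours sharing the
        # same (spike vs. noise) label, so well-isolated units score close to 0.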
# Make sure max_spikes_per_unit_for_noise_overlap is not None
assert max_spikes_per_unit_for_noise_overlap is not None, "'max_spikes_per_unit_for_noise_overlap' must be an integer."
# update keyword arg in case it's already specified to something
kwargs['max_spikes_per_unit'] = max_spikes_per_unit_for_noise_overlap
params_dict = update_all_param_dicts_with_kwargs(kwargs)
save_property_or_features = params_dict['save_property_or_features']
seed = params_dict['seed']
# set random seed
if seed is not None:
np.random.seed(seed)
# first, get waveform snippets of every unit (at most n spikes)
# waveforms = List (units,) of np.array (n_spikes, n_channels, n_timepoints)
waveforms = st.postprocessing.get_unit_waveforms(
self._metric_data._recording,
self._metric_data._sorting,
unit_ids=self._metric_data._unit_ids,
**kwargs)
n_waveforms_per_unit = np.array([len(wf) for wf in waveforms])
n_spikes_per_unit = np.array([len(self._metric_data._sorting.get_unit_spike_train(u)) for u in self._metric_data._unit_ids])
if np.all(n_waveforms_per_unit < max_spikes_per_unit_for_noise_overlap):
# in this case it means that waveforms have been computed on
# less spikes than max_spikes_per_unit_for_noise_overlap --> recompute
kwargs['recompute_info'] = True
waveforms = st.postprocessing.get_unit_waveforms(
self._metric_data._recording,
self._metric_data._sorting,
unit_ids = self._metric_data._unit_ids,
# max_spikes_per_unit = max_spikes_per_unit_for_noise_overlap,
**kwargs)
elif np.all(n_waveforms_per_unit >= max_spikes_per_unit_for_noise_overlap):
# waveforms computed on more spikes than needed --> sample
for i_w, wfs in enumerate(waveforms):
if len(wfs) > max_spikes_per_unit_for_noise_overlap:
                    selected_idxs = np.random.permutation(len(wfs))[:max_spikes_per_unit_for_noise_overlap]
                    waveforms[i_w] = wfs[selected_idxs]
# get channel idx and locations
channel_idx = np.arange(self._metric_data._recording.get_num_channels())
channel_locations = self._metric_data._channel_locations
if num_channels_to_compare > len(channel_idx):
num_channels_to_compare = len(channel_idx)
# get noise snippets
min_time = min([self._metric_data._sorting.get_unit_spike_train(unit_id=unit)[0]
for unit in self._metric_data._sorting.get_unit_ids()])
max_time = max([self._metric_data._sorting.get_unit_spike_train(unit_id=unit)[-1]
for unit in self._metric_data._sorting.get_unit_ids()])
max_spikes = np.max([len(self._metric_data._sorting.get_unit_spike_train(u)) for u in self._metric_data._unit_ids])
if max_spikes < max_spikes_per_unit_for_noise_overlap:
max_spikes_per_unit_for_noise_overlap = max_spikes
times_control = np.random.choice(np.arange(min_time, max_time),
size=max_spikes_per_unit_for_noise_overlap, replace=False)
clip_size = waveforms[0].shape[-1]
# np.array, (n_spikes, n_channels, n_timepoints)
clips_control_max = np.stack(self._metric_data._recording.get_snippets(snippet_len=clip_size,
reference_frames=times_control))
noise_overlaps = []
for i_u, unit in enumerate(self._metric_data._unit_ids):
# show progress bar
if self._metric_data.verbose:
printProgressBar(i_u + 1, len(self._metric_data._unit_ids))
# get spike and noise snippets
# np.array, (n_spikes, n_channels, n_timepoints)
clips = waveforms[i_u]
clips_control = clips_control_max
# make noise snippets size equal to number of spikes
if len(clips) < max_spikes_per_unit_for_noise_overlap:
selected_idxs = np.random.choice(np.arange(max_spikes_per_unit_for_noise_overlap),
size=len(clips), replace=False)
clips_control = clips_control[selected_idxs]
else:
selected_idxs = np.random.choice(np.arange(len(clips)),
size=max_spikes_per_unit_for_noise_overlap,
replace=False)
clips = clips[selected_idxs]
num_clips = len(clips)
# compute weight for correcting noise snippets
template = np.median(clips, axis=0)
chmax, tmax = np.unravel_index(np.argmax(np.abs(template)), template.shape)
max_val = template[chmax, tmax]
weighted_clips_control = np.zeros(clips_control.shape)
weights = np.zeros(num_clips)
for j in range(num_clips):
clip0 = clips_control[j, :, :]
val0 = clip0[chmax, tmax]
weight0 = val0 * max_val
weights[j] = weight0
weighted_clips_control[j, :, :] = clip0 * weight0
noise_template = np.sum(weighted_clips_control, axis=0)
noise_template = noise_template / np.sum(np.abs(noise_template)) * np.sum(np.abs(template))
# subtract it out
for j in range(num_clips):
clips[j, :, :] = _subtract_clip_component(clips[j, :, :], noise_template)
clips_control[j, :, :] = _subtract_clip_component(clips_control[j, :, :], noise_template)
# use only subsets of channels that are closest to peak channel
channels_to_use = find_neighboring_channels(chmax, channel_idx,
num_channels_to_compare, channel_locations)
channels_to_use = np.sort(channels_to_use)
clips = clips[:,channels_to_use,:]
clips_control = clips_control[:,channels_to_use,:]
all_clips = np.concatenate([clips, clips_control], axis=0)
num_channels_wfs = all_clips.shape[1]
num_samples_wfs = all_clips.shape[2]
all_features = _compute_pca_features(all_clips.reshape((num_clips * 2,
num_channels_wfs * num_samples_wfs)), num_features)
num_all_clips=len(all_clips)
distances, indices = NearestNeighbors(n_neighbors=min(num_knn + 1, num_all_clips - 1), algorithm='auto').fit(
all_features.T).kneighbors()
group_id = np.zeros((num_clips * 2))
group_id[0:num_clips] = 1
group_id[num_clips:] = 2
num_match = 0
total = 0
for j in range(num_clips * 2):
for k in range(1, min(num_knn + 1, num_all_clips - 1)):
ind = indices[j][k]
if group_id[j] == group_id[ind]:
num_match = num_match + 1
total = total + 1
pct_match = num_match / total
noise_overlap = 1 - pct_match
noise_overlaps.append(noise_overlap)
noise_overlaps = np.asarray(noise_overlaps)
if save_property_or_features:
self.save_property_or_features(self._metric_data._sorting, noise_overlaps, self._metric_name)
return noise_overlaps
def threshold_metric(self, threshold, threshold_sign, num_channels_to_compare,
max_spikes_per_unit_for_noise_overlap,
num_features, num_knn, **kwargs):
noise_overlaps = self.compute_metric(num_channels_to_compare,
max_spikes_per_unit_for_noise_overlap,
num_features, num_knn, **kwargs)
threshold_curator = ThresholdCurator(sorting=self._metric_data._sorting, metric=noise_overlaps)
threshold_curator.threshold_sorting(threshold=threshold, threshold_sign=threshold_sign)
return threshold_curator
def _compute_pca_features(X, num_components):
u, s, vt = np.linalg.svd(X)
return u[:, :num_components].T
def _subtract_clip_component(clip1, component):
V1 = clip1.flatten()
V2 = component.flatten()
V1 = V1 - np.mean(V1)
V2 = V2 - np.mean(V2)
V1 = V1 - V2 * np.dot(V1, V2) / np.dot(V2, V2)
return V1.reshape(clip1.shape)
| 2.375
| 2
|
bot/modules/impacta/timetable.py
|
bruno-zaccariello/wdm-bot
| 0
|
12775445
|
from bs4 import BeautifulSoup as bs
from requests import request as req
from requests import Session
from requests import codes as requestCodes
import re
from .session import getSession
base_url = "https://account.impacta.edu.br/"
login_url = base_url + "account/enter.php"
url_timetable_aula = base_url + "aluno/horario-aula.php"
url_timetable = base_url + "aluno/quadro-horario.php?turmaid={}&produto={}"
def treatRoom(room):
split = room.split(';')
return f'{split[0]} ({split[1]}) '
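# Template for one timetable entry: day title, discipline, teacher and room(s).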
class_template = """
=> {0}
=> {1}
=> {2}
=> {3}
========================\n
"""
def getDisciplinesByDay(disciplinesElements, title):
    # Parse the string (the HTML has no IDs or tag structure that makes sense).
    # A '#DIV##' marker is inserted to split the text into class slots,
    # and a ';' to split each slot into its fields (Aula, Disciplina, Prof, Sala).
parsedWeekday = disciplinesElements \
.replace(';', '') \
.replace('\n', '') \
.replace(' :', ':') \
.replace(': ', ':') \
.replace('Aula', '#DIV##;Aula') \
.replace('Disciplina', ';Disciplina') \
.replace('Prof', ';Prof') \
.replace('Sala', ';Sala') \
.split('#DIV##')
dayData = [x[1:].split(';') for x in parsedWeekday[1:]]
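    # Group the parsed rows by discipline, keeping the teacher and the list of
    # rooms (with their class slots) in which that discipline is taught that day.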
disciplines = dict()
for _classRow in dayData:
_class = _classRow[0].split('[')[0].lstrip()
discipline = _classRow[1].split(':')[1]
teacher = _classRow[2].split(':')[1]
room = _classRow[3].split(':')[1]
if discipline not in disciplines.keys():
disciplines[discipline] = {
'teacher': teacher,
'rooms': [f'{room};{_class}']
}
else:
if room not in [room.split(';')[0] for room in disciplines[discipline]['rooms']]:
disciplines[discipline]['rooms'].append(f'{room};{_class}')
response = []
for discipline, content in disciplines.items():
if len(content['rooms']) == 1:
roomString = content['rooms'][0].split(';')[0]
else:
roomString = ''.join(treatRoom(room) for room in content['rooms'])
response.append(class_template.format(
title, discipline, content['teacher'], roomString
))
return response
def filterDisciplinesArray(disciplinesArray):
for data in disciplinesArray:
return None
def getFullTimetable(update, context):
    # Get the user's credentials from the message
user = context.args[0]
passw = context.args[1]
    # Start a session on the Impacta portal
    s, success = getSession(user, passw)
    # Look up the class (turma) and product ids
if not success:
context.bot.send_message(
chat_id=update.message.chat.id,
text="Algo deu errado ! Sinto muito, poderia tentar novamente?"
)
return None
classes_timetable_page = bs(s.get(url_timetable_aula).text, 'html.parser')
ids_el = classes_timetable_page.find(attrs={'data-turmaid':True})
class_id = ids_el.get('data-turmaid')
product_id = ids_el.get('data-produto')
    # Fetch the page with the student's timetable
class_timetable_page = bs(s.get(url_timetable.format(class_id, product_id)).text, 'html.parser')
timetable_wrapper = class_timetable_page.find('div', attrs={"class": "accordion"})
    # Find the divs for each weekday ("dia-semana")
days_of_week = timetable_wrapper.find_all('div', attrs={"class":"dia-semana"})
fullResponse = []
    # Extract each day's information and reply to the user
for weekday in days_of_week:
title = weekday.find('h2').text
weekday.h2.extract()
dayData = getDisciplinesByDay(weekday.text, title)
fullResponse.append(dayData)
context.bot.send_message(
chat_id=update.message.chat.id,
text=''.join(''.join(day) for day in fullResponse)
)
return None
| 3.265625
| 3
|
mwptoolkit/model/Graph2Tree/multiencdec.py
|
ShubhamAnandJain/MWP-CS229
| 71
|
12775446
|
<filename>mwptoolkit/model/Graph2Tree/multiencdec.py
# -*- encoding: utf-8 -*-
# @Author: <NAME>
# @Time: 2021/08/21 04:33:54
# @File: multiencdec.py
import copy
import random
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
from mwptoolkit.module.Encoder.graph_based_encoder import GraphBasedMultiEncoder, NumEncoder
from mwptoolkit.module.Decoder.tree_decoder import TreeDecoder
#from mwptoolkit.module.Decoder.rnn_decoder import AttentionalRNNDecoder
from mwptoolkit.module.Layer.layers import TreeAttnDecoderRNN
from mwptoolkit.module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding
from mwptoolkit.module.Layer.tree_layers import Prediction, GenerateNode, Merge
from mwptoolkit.module.Embedder.basic_embedder import BaiscEmbedder
from mwptoolkit.module.Strategy.beam_search import TreeBeam, Beam
from mwptoolkit.loss.masked_cross_entropy_loss import MaskedCrossEntropyLoss, masked_cross_entropy
from mwptoolkit.utils.enum_type import SpecialTokens, NumMask
from mwptoolkit.utils.utils import copy_list
class MultiEncDec(nn.Module):
"""
Reference:
Shen et al. "Solving Math Word Problems with Multi-Encoders and Multi-Decoders" in COLING 2020.
"""
def __init__(self, config, dataset):
super(MultiEncDec, self).__init__()
self.device = config['device']
self.USE_CUDA = True if self.device == torch.device('cuda') else False
self.rnn_cell_type = config['rnn_cell_type']
self.embedding_size = config['embedding_size']
self.hidden_size = config['hidden_size']
self.n_layers = config['num_layers']
self.hop_size = config['hop_size']
self.teacher_force_ratio = config['teacher_force_ratio']
self.beam_size = config['beam_size']
self.max_out_len = config['max_output_len']
self.dropout_ratio = config['dropout_ratio']
self.operator_nums = dataset.operator_nums
self.generate_nums = len(dataset.generate_list)
self.num_start1 = dataset.num_start1
self.num_start2 = dataset.num_start2
self.input1_size = len(dataset.in_idx2word_1)
self.input2_size = len(dataset.in_idx2word_2)
self.output2_size = len(dataset.out_idx2symbol_2)
self.unk1 = dataset.out_symbol2idx_1[SpecialTokens.UNK_TOKEN]
self.unk2 = dataset.out_symbol2idx_2[SpecialTokens.UNK_TOKEN]
self.sos2 = dataset.out_symbol2idx_2[SpecialTokens.SOS_TOKEN]
self.eos2 = dataset.out_symbol2idx_2[SpecialTokens.EOS_TOKEN]
self.out_symbol2idx1 = dataset.out_symbol2idx_1
self.out_idx2symbol1 = dataset.out_idx2symbol_1
self.out_symbol2idx2 = dataset.out_symbol2idx_2
self.out_idx2symbol2 = dataset.out_idx2symbol_2
generate_list = dataset.generate_list
self.generate_list = [self.out_symbol2idx1[symbol] for symbol in generate_list]
self.mask_list = NumMask.number
try:
self.out_sos_token1 = self.out_symbol2idx1[SpecialTokens.SOS_TOKEN]
except:
self.out_sos_token1 = None
try:
self.out_eos_token1 = self.out_symbol2idx1[SpecialTokens.EOS_TOKEN]
except:
self.out_eos_token1 = None
try:
self.out_pad_token1 = self.out_symbol2idx1[SpecialTokens.PAD_TOKEN]
except:
self.out_pad_token1 = None
try:
self.out_sos_token2 = self.out_symbol2idx2[SpecialTokens.SOS_TOKEN]
except:
self.out_sos_token2 = None
try:
self.out_eos_token2 = self.out_symbol2idx2[SpecialTokens.EOS_TOKEN]
except:
self.out_eos_token2 = None
try:
self.out_pad_token2 = self.out_symbol2idx2[SpecialTokens.PAD_TOKEN]
except:
self.out_pad_token2 = None
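        # Two encoders (graph-based sequence encoder + number encoder) feed two
        # decoders: a tree decoder (predict/generate/merge) and an attentional RNN
        # decoder; training sums both losses, and at inference the higher-scoring
        # of the two hypotheses is returned.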
# Initialize models
embedder = nn.Embedding(self.input1_size, self.embedding_size)
in_embedder = self._init_embedding_params(dataset.trainset, dataset.in_idx2word_1, config['embedding_size'], embedder)
self.encoder = GraphBasedMultiEncoder(input1_size=self.input1_size,
input2_size=self.input2_size,
embed_model=in_embedder,
embedding1_size=self.embedding_size,
embedding2_size=self.embedding_size // 4,
hidden_size=self.hidden_size,
n_layers=self.n_layers,
hop_size=self.hop_size)
self.numencoder = NumEncoder(node_dim=self.hidden_size, hop_size=self.hop_size)
self.predict = Prediction(hidden_size=self.hidden_size, op_nums=self.operator_nums, input_size=self.generate_nums)
self.generate = GenerateNode(hidden_size=self.hidden_size, op_nums=self.operator_nums, embedding_size=self.embedding_size)
self.merge = Merge(hidden_size=self.hidden_size, embedding_size=self.embedding_size)
self.decoder = TreeAttnDecoderRNN(self.hidden_size, self.embedding_size, self.output2_size, self.output2_size, self.n_layers, self.dropout_ratio)
self.loss = MaskedCrossEntropyLoss()
def _init_embedding_params(self, train_data, vocab, embedding_size, embedder):
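        # Pre-train word2vec vectors on the training questions (unknown words are
        # mapped to the UNK token) and copy them into the embedding weight matrix,
        # leaving the PAD row as zeros.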
sentences = []
for data in train_data:
sentence = []
for word in data['question']:
if word in vocab:
sentence.append(word)
else:
sentence.append(SpecialTokens.UNK_TOKEN)
sentences.append(sentence)
from gensim.models import word2vec
model = word2vec.Word2Vec(sentences, vector_size=embedding_size, min_count=1)
emb_vectors = []
pad_idx = vocab.index(SpecialTokens.PAD_TOKEN)
for idx in range(len(vocab)):
if idx != pad_idx:
emb_vectors.append(np.array(model.wv[vocab[idx]]))
else:
emb_vectors.append(np.zeros((embedding_size)))
emb_vectors = np.array(emb_vectors)
embedder.weight.data.copy_(torch.from_numpy(emb_vectors))
return embedder
def calculate_loss(self, batch_data):
"""Finish forward-propagating, calculating loss and back-propagation.
Args:
batch_data (dict): one batch data.
Returns:
float: loss value.
"""
input1_var = batch_data['input1']
input2_var = batch_data['input2']
input_length = batch_data['input1 len']
target1 = batch_data['output1']
target1_length = batch_data['output1 len']
target2 = batch_data['output2']
target2_length = batch_data['output2 len']
num_stack_batch = batch_data['num stack']
num_size_batch = batch_data['num size']
generate_list = self.generate_list
num_pos_batch = batch_data['num pos']
num_order_batch = batch_data['num order']
parse_graph = batch_data['parse graph']
equ_mask1 = batch_data['equ mask1']
equ_mask2 = batch_data['equ mask2']
unk1 = self.unk1
unk2 = self.unk2
num_start1 = self.num_start1
num_start2 = self.num_start2
sos2 = self.sos2
loss = self.train_double(input1_var,
input2_var,
input_length,
target1,
target1_length,
target2,
target2_length,
num_stack_batch,
num_size_batch,
generate_list,
unk1,
unk2,
num_start1,
num_start2,
sos2,
num_pos_batch,
num_order_batch,
parse_graph,
beam_size=5,
use_teacher_forcing=0.83,
english=False)
if self.USE_CUDA:
torch.cuda.empty_cache()
return loss
def model_test(self, batch_data):
"""Model test.
Args:
batch_data (dict): one batch data.
Returns:
tuple(str,list,list): predicted type, predicted equation, target equation.
"""
input1_var = batch_data['input1']
input2_var = batch_data['input2']
input_length = batch_data['input1 len']
target1 = batch_data['output1']
target1_length = batch_data['output1 len']
target2 = batch_data['output2']
target2_length = batch_data['output2 len']
num_stack_batch = batch_data['num stack']
num_size_batch = batch_data['num size']
generate_list = self.generate_list
num_pos_batch = batch_data['num pos']
num_order_batch = batch_data['num order']
parse_graph = batch_data['parse graph']
equ_mask1 = batch_data['equ mask1']
equ_mask2 = batch_data['equ mask2']
num_list = batch_data['num list']
unk1 = self.unk1
unk2 = self.unk2
num_start1 = self.num_start1
num_start2 = self.num_start2
sos2 = self.sos2
eos2 = self.eos2
result_type, test_res, score = self.evaluate_double(input1_var,
input2_var,
input_length,
generate_list,
num_start1,
sos2,
eos2,
num_pos_batch,
num_order_batch,
parse_graph,
beam_size=5,)
if result_type == "tree":
output1 = self.convert_idx2symbol1(test_res, num_list[0], copy_list(num_stack_batch[0]))
targets1 = self.convert_idx2symbol1(target1[0], num_list[0], copy_list(num_stack_batch[0]))
if self.USE_CUDA:
torch.cuda.empty_cache()
return result_type, output1, targets1
else:
output2 = self.convert_idx2symbol2(torch.tensor(test_res).view(1, -1), num_list, copy_list(num_stack_batch))
targets2 = self.convert_idx2symbol2(target2, num_list, copy_list(num_stack_batch))
if self.USE_CUDA:
torch.cuda.empty_cache()
return result_type, output2, targets2
def train_double(self,
input1_batch,
input2_batch,
input_length,
target1_batch,
target1_length,
target2_batch,
target2_length,
num_stack_batch,
num_size_batch,
generate_num1_ids,
unk1,
unk2,
num_start1,
num_start2,
sos2,
num_pos_batch,
num_order_batch,
parse_graph_batch,
beam_size=5,
use_teacher_forcing=0.83,
english=False):
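        # Build the attention mask over padded input positions and the mask over
        # number slots, pad number positions/orders to a common length, then run
        # both encoders and both decoders and sum their losses.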
# sequence mask for attention
seq_mask = []
max_len = max(input_length)
for i in input_length:
seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
seq_mask = torch.ByteTensor(seq_mask)
num_mask = []
max_num_size = max(num_size_batch) + len(generate_num1_ids)
for i in num_size_batch:
d = i + len(generate_num1_ids)
num_mask.append([0] * d + [1] * (max_num_size - d))
num_mask = torch.ByteTensor(num_mask)
num_pos_pad = []
max_num_pos_size = max(num_size_batch)
for i in range(len(num_pos_batch)):
temp = num_pos_batch[i] + [-1] * (max_num_pos_size - len(num_pos_batch[i]))
num_pos_pad.append(temp)
num_pos_pad = torch.LongTensor(num_pos_pad)
num_order_pad = []
max_num_order_size = max(num_size_batch)
for i in range(len(num_order_batch)):
temp = num_order_batch[i] + [0] * (max_num_order_size - len(num_order_batch[i]))
num_order_pad.append(temp)
num_order_pad = torch.LongTensor(num_order_pad)
num_stack1_batch = copy.deepcopy(num_stack_batch)
num_stack2_batch = copy.deepcopy(num_stack_batch)
# Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
# input1_var = torch.LongTensor(input1_batch).transpose(0, 1)
# input2_var = torch.LongTensor(input2_batch).transpose(0, 1)
# target1 = torch.LongTensor(target1_batch).transpose(0, 1)
# target2 = torch.LongTensor(target2_batch).transpose(0, 1)
# parse_graph_pad = torch.LongTensor(parse_graph_batch)
input1_var = input1_batch.transpose(0, 1)
input2_var = input2_batch.transpose(0, 1)
target1 = target1_batch.transpose(0, 1)
target2 = target2_batch.transpose(0, 1)
parse_graph_pad = parse_graph_batch
padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0)
batch_size = len(input_length)
if self.USE_CUDA:
input1_var = input1_var.cuda()
input2_var = input2_var.cuda()
seq_mask = seq_mask.cuda()
padding_hidden = padding_hidden.cuda()
num_mask = num_mask.cuda()
num_pos_pad = num_pos_pad.cuda()
num_order_pad = num_order_pad.cuda()
parse_graph_pad = parse_graph_pad.cuda()
# Run words through encoder
encoder_outputs, encoder_hidden = self.encoder(input1_var, input2_var, input_length, parse_graph_pad)
copy_num_len = [len(_) for _ in num_pos_batch]
num_size = max(copy_num_len)
num_encoder_outputs, masked_index = self.get_all_number_encoder_outputs(encoder_outputs, num_pos_batch, batch_size, num_size, self.hidden_size)
encoder_outputs, num_outputs, problem_output = self.numencoder(encoder_outputs, num_encoder_outputs, num_pos_pad, num_order_pad)
num_outputs = num_outputs.masked_fill_(masked_index.bool(), 0.0)
decoder_hidden = encoder_hidden[:self.n_layers] # Use last (forward) hidden state from encoder
loss_0 = self.train_tree_double(encoder_outputs, problem_output, num_outputs, target1, target1_length, num_start1, batch_size, padding_hidden, seq_mask, num_mask, num_pos_batch, num_order_pad,
num_stack1_batch, unk1)
loss_1 = self.train_attn_double(encoder_outputs, decoder_hidden, target2, target2_length, sos2, batch_size, seq_mask, num_start2, num_stack2_batch, unk2, beam_size, use_teacher_forcing)
loss = loss_0 + loss_1
loss.backward()
return loss.item() # , loss_0.item(), loss_1.item()
def train_tree_double(self, encoder_outputs, problem_output, all_nums_encoder_outputs, target, target_length, num_start, batch_size, padding_hidden, seq_mask, num_mask, num_pos, num_order_pad,
nums_stack_batch, unk):
# Prepare input and output variables
node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]
max_target_length = max(target_length)
all_node_outputs = []
# all_leafs = []
embeddings_stacks = [[] for _ in range(batch_size)]
left_childs = [None for _ in range(batch_size)]
for t in range(max_target_length):
num_score, op, current_embeddings, current_context, current_nums_embeddings = self.predict(node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask,
num_mask)
# all_leafs.append(p_leaf)
outputs = torch.cat((op, num_score), 1)
all_node_outputs.append(outputs)
target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs, nums_stack_batch, num_start, unk)
target[t] = target_t
if self.USE_CUDA:
generate_input = generate_input.cuda()
left_child, right_child, node_label = self.generate(current_embeddings, generate_input, current_context)
left_childs = []
for idx, l, r, node_stack, i, o in zip(range(batch_size), left_child.split(1), right_child.split(1), node_stacks, target[t].tolist(), embeddings_stacks):
if len(node_stack) != 0:
node = node_stack.pop()
else:
left_childs.append(None)
continue
if i < num_start:
node_stack.append(TreeNode(r))
node_stack.append(TreeNode(l, left_flag=True))
o.append(TreeEmbedding(node_label[idx].unsqueeze(0), False))
else:
current_num = current_nums_embeddings[idx, i - num_start].unsqueeze(0)
while len(o) > 0 and o[-1].terminal:
sub_stree = o.pop()
op = o.pop()
current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
o.append(TreeEmbedding(current_num, True))
if len(o) > 0 and o[-1].terminal:
left_childs.append(o[-1].embedding)
else:
left_childs.append(None)
# all_leafs = torch.stack(all_leafs, dim=1) # B x S x 2
all_node_outputs = torch.stack(all_node_outputs, dim=1) # B x S x N
target = target.transpose(0, 1).contiguous()
if self.USE_CUDA:
# all_leafs = all_leafs.cuda()
all_node_outputs = all_node_outputs.cuda()
target = target.cuda()
target_length = torch.LongTensor(target_length).cuda()
else:
target_length = torch.LongTensor(target_length)
# op_target = target < num_start
# loss_0 = masked_cross_entropy_without_logit(all_leafs, op_target.long(), target_length)
loss = masked_cross_entropy(all_node_outputs, target, target_length)
# loss = loss_0 + loss_1
return loss # , loss_0.item(), loss_1.item()
def train_attn_double(self, encoder_outputs, decoder_hidden, target, target_length, sos, batch_size, seq_mask, num_start, nums_stack_batch, unk, beam_size, use_teacher_forcing):
# Prepare input and output variables
decoder_input = torch.LongTensor([sos] * batch_size)
max_target_length = max(target_length)
all_decoder_outputs = torch.zeros(max_target_length, batch_size, self.decoder.output_size)
# Move new Variables to CUDA
if self.USE_CUDA:
all_decoder_outputs = all_decoder_outputs.cuda()
if random.random() < use_teacher_forcing:
# Run through decoder one time step at a time
for t in range(max_target_length):
if self.USE_CUDA:
decoder_input = decoder_input.cuda()
decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, seq_mask)
all_decoder_outputs[t] = decoder_output
decoder_input = self.generate_decoder_input(target[t].cpu().tolist(), decoder_output, nums_stack_batch, num_start, unk)
target[t] = decoder_input
else:
beam_list = list()
score = torch.zeros(batch_size)
if self.USE_CUDA:
score = score.cuda()
beam_list.append(Beam(score, decoder_input, decoder_hidden, all_decoder_outputs))
# Run through decoder one time step at a time
for t in range(max_target_length):
beam_len = len(beam_list)
beam_scores = torch.zeros(batch_size, self.decoder.output_size * beam_len)
all_hidden = torch.zeros(decoder_hidden.size(0), batch_size * beam_len, decoder_hidden.size(2))
all_outputs = torch.zeros(max_target_length, batch_size * beam_len, self.decoder.output_size)
if self.USE_CUDA:
beam_scores = beam_scores.cuda()
all_hidden = all_hidden.cuda()
all_outputs = all_outputs.cuda()
for b_idx in range(len(beam_list)):
decoder_input = beam_list[b_idx].input_var
decoder_hidden = beam_list[b_idx].hidden
# rule_mask = generate_rule_mask(decoder_input, num_batch, output_lang.word2index, batch_size,
# num_start, copy_nums, generate_nums, english)
if self.USE_CUDA:
# rule_mask = rule_mask.cuda()
decoder_input = decoder_input.cuda()
decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, seq_mask)
# score = f.log_softmax(decoder_output, dim=1) + rule_mask
score = F.log_softmax(decoder_output, dim=1)
beam_score = beam_list[b_idx].score
beam_score = beam_score.unsqueeze(1)
repeat_dims = [1] * beam_score.dim()
repeat_dims[1] = score.size(1)
beam_score = beam_score.repeat(*repeat_dims)
score += beam_score
beam_scores[:, b_idx * self.decoder.output_size:(b_idx + 1) * self.decoder.output_size] = score
all_hidden[:, b_idx * batch_size:(b_idx + 1) * batch_size, :] = decoder_hidden
beam_list[b_idx].all_output[t] = decoder_output
all_outputs[:, batch_size * b_idx: batch_size * (b_idx + 1), :] = \
beam_list[b_idx].all_output
topv, topi = beam_scores.topk(beam_size, dim=1)
beam_list = list()
for k in range(beam_size):
temp_topk = topi[:, k]
temp_input = temp_topk % self.decoder.output_size
temp_input = temp_input.data
if self.USE_CUDA:
temp_input = temp_input.cpu()
                    temp_beam_pos = temp_topk // self.decoder.output_size
indices = torch.LongTensor(range(batch_size))
if self.USE_CUDA:
indices = indices.cuda()
indices += temp_beam_pos * batch_size
temp_hidden = all_hidden.index_select(1, indices)
temp_output = all_outputs.index_select(1, indices)
beam_list.append(Beam(topv[:, k], temp_input, temp_hidden, temp_output))
all_decoder_outputs = beam_list[0].all_output
for t in range(max_target_length):
target[t] = self.generate_decoder_input(target[t].cpu().tolist(), all_decoder_outputs[t], nums_stack_batch, num_start, unk)
# Loss calculation and backpropagation
if self.USE_CUDA:
target = target.cuda()
target_length = torch.LongTensor(target_length).cuda()
else:
target_length = torch.LongTensor(target_length)
loss = masked_cross_entropy(
all_decoder_outputs.transpose(0, 1).contiguous(), # -> batch x seq
target.transpose(0, 1).contiguous(), # -> batch x seq
target_length)
return loss
def evaluate_double(self,
input1_batch,
input2_batch,
input_length,
generate_num1_ids,
num_start1,
sos2,
eos2,
num_pos_batch,
num_order_batch,
parse_graph_batch,
beam_size=5,
english=False,
max_length=30):
seq_mask = torch.ByteTensor(1, input_length).fill_(0)
# num_pos_pad = torch.LongTensor([num_pos_batch])
# num_order_pad = torch.LongTensor([num_order_batch])
# parse_graph_pad = torch.LongTensor(parse_graph_batch)
# Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
# input1_var = torch.LongTensor(input1_batch).transpose()
# input2_var = torch.LongTensor(input2_batch).unsqueeze(1)
num_pos_pad = torch.LongTensor(num_pos_batch)
num_order_pad = torch.LongTensor(num_order_batch)
parse_graph_pad = parse_graph_batch
input1_var = input1_batch.transpose(0, 1)
input2_var = input2_batch.transpose(0, 1)
num_mask = torch.ByteTensor(1, len(num_pos_batch[0]) + len(generate_num1_ids)).fill_(0)
padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0)
batch_size = 1
if self.USE_CUDA:
input1_var = input1_var.cuda()
input2_var = input2_var.cuda()
seq_mask = seq_mask.cuda()
padding_hidden = padding_hidden.cuda()
num_mask = num_mask.cuda()
num_pos_pad = num_pos_pad.cuda()
num_order_pad = num_order_pad.cuda()
parse_graph_pad = parse_graph_pad.cuda()
# Run words through encoder
encoder_outputs, encoder_hidden = self.encoder(input1_var, input2_var, input_length, parse_graph_pad)
copy_num_len = [len(_) for _ in num_pos_batch]
num_size = max(copy_num_len)
#num_size = len(num_pos_batch)
num_encoder_outputs, masked_index = self.get_all_number_encoder_outputs(encoder_outputs, num_pos_batch, batch_size, num_size, self.hidden_size)
encoder_outputs, num_outputs, problem_output = self.numencoder(encoder_outputs, num_encoder_outputs, num_pos_pad, num_order_pad)
decoder_hidden = encoder_hidden[:self.n_layers] # Use last (forward) hidden state from encoder
tree_beam = self.evaluate_tree_double(encoder_outputs, problem_output, num_outputs, num_start1, batch_size, padding_hidden, seq_mask, num_mask, max_length, num_pos_batch, num_order_pad,
beam_size)
attn_beam = self.evaluate_attn_double(encoder_outputs, decoder_hidden, sos2, eos2, batch_size, seq_mask, max_length, beam_size)
if tree_beam.score >= attn_beam.score:
return "tree", tree_beam.out, tree_beam.score
else:
return "attn", attn_beam.all_output, attn_beam.score
def evaluate_tree_double(self, encoder_outputs, problem_output, all_nums_encoder_outputs, num_start, batch_size, padding_hidden, seq_mask, num_mask, max_length, num_pos, num_order_pad, beam_size):
# Prepare input and output variables
node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]
# B x P x N
embeddings_stacks = [[] for _ in range(batch_size)]
left_childs = [None for _ in range(batch_size)]
beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])]
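        # Beam search over tree nodes: at every step each partial tree proposes the
        # top `beam_size` operator/number symbols, and only the best `beam_size`
        # partial trees (by accumulated log-probability) are kept.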
for t in range(max_length):
current_beams = []
while len(beams) > 0:
b = beams.pop()
if len(b.node_stack[0]) == 0:
current_beams.append(b)
continue
# left_childs = torch.stack(b.left_childs)
left_childs = b.left_childs
num_score, op, current_embeddings, current_context, current_nums_embeddings = self.predict(b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden,
seq_mask, num_mask)
out_score = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1)
topv, topi = out_score.topk(beam_size)
for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)):
current_node_stack = copy_list(b.node_stack)
current_left_childs = []
current_embeddings_stacks = copy_list(b.embedding_stack)
current_out = copy.deepcopy(b.out)
out_token = int(ti)
current_out.append(out_token)
node = current_node_stack[0].pop()
if out_token < num_start:
generate_input = torch.LongTensor([out_token])
if self.USE_CUDA:
generate_input = generate_input.cuda()
left_child, right_child, node_label = self.generate(current_embeddings, generate_input, current_context)
current_node_stack[0].append(TreeNode(right_child))
current_node_stack[0].append(TreeNode(left_child, left_flag=True))
current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False))
else:
current_num = current_nums_embeddings[0, out_token - num_start].unsqueeze(0)
while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
sub_stree = current_embeddings_stacks[0].pop()
op = current_embeddings_stacks[0].pop()
current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
current_embeddings_stacks[0].append(TreeEmbedding(current_num, True))
if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
current_left_childs.append(current_embeddings_stacks[0][-1].embedding)
else:
current_left_childs.append(None)
current_beams.append(TreeBeam(b.score + float(tv), current_node_stack, current_embeddings_stacks, current_left_childs, current_out))
beams = sorted(current_beams, key=lambda x: x.score, reverse=True)
beams = beams[:beam_size]
flag = True
for b in beams:
if len(b.node_stack[0]) != 0:
flag = False
if flag:
break
return beams[0]
def evaluate_attn_double(self, encoder_outputs, decoder_hidden, sos, eos, batch_size, seq_mask, max_length, beam_size):
# Create starting vectors for decoder
decoder_input = torch.LongTensor([sos]) # SOS
beam_list = list()
score = 0
beam_list.append(Beam(score, decoder_input, decoder_hidden, []))
# Run through decoder
for di in range(max_length):
temp_list = list()
beam_len = len(beam_list)
for xb in beam_list:
if int(xb.input_var[0]) == eos:
temp_list.append(xb)
beam_len -= 1
if beam_len == 0:
return beam_list[0]
beam_scores = torch.zeros(self.decoder.output_size * beam_len)
hidden_size_0 = decoder_hidden.size(0)
hidden_size_2 = decoder_hidden.size(2)
all_hidden = torch.zeros(beam_len, hidden_size_0, 1, hidden_size_2)
if self.USE_CUDA:
beam_scores = beam_scores.cuda()
all_hidden = all_hidden.cuda()
all_outputs = []
current_idx = -1
for b_idx in range(len(beam_list)):
decoder_input = beam_list[b_idx].input_var
if int(decoder_input[0]) == eos:
continue
current_idx += 1
decoder_hidden = beam_list[b_idx].hidden
# rule_mask = generate_rule_mask(decoder_input, [num_list], output_lang.word2index,
# 1, num_start, copy_nums, generate_nums, english)
if self.USE_CUDA:
# rule_mask = rule_mask.cuda()
decoder_input = decoder_input.cuda()
decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, seq_mask)
# score = f.log_softmax(decoder_output, dim=1) + rule_mask.squeeze()
score = F.log_softmax(decoder_output, dim=1)
score += beam_list[b_idx].score
beam_scores[current_idx * self.decoder.output_size:(current_idx + 1) * self.decoder.output_size] = score
all_hidden[current_idx] = decoder_hidden
all_outputs.append(beam_list[b_idx].all_output)
topv, topi = beam_scores.topk(beam_size)
for k in range(beam_size):
word_n = int(topi[k])
word_input = word_n % self.decoder.output_size
temp_input = torch.LongTensor([word_input])
indices = int(word_n / self.decoder.output_size)
temp_hidden = all_hidden[indices]
temp_output = all_outputs[indices] + [word_input]
temp_list.append(Beam(float(topv[k]), temp_input, temp_hidden, temp_output))
temp_list = sorted(temp_list, key=lambda x: x.score, reverse=True)
if len(temp_list) < beam_size:
beam_list = temp_list
else:
beam_list = temp_list[:beam_size]
return beam_list[0]
def generate_tree_input(self, target, decoder_output, nums_stack_batch, num_start, unk):
        # when the target is UNK and the copied number appears at more than one position, choose the candidate with the highest decoder score
target_input = copy.deepcopy(target)
for i in range(len(target)):
if target[i] == unk:
num_stack = nums_stack_batch[i].pop()
max_score = -float("1e12")
for num in num_stack:
if decoder_output[i, num_start + num] > max_score:
target[i] = num + num_start
max_score = decoder_output[i, num_start + num]
if target_input[i] >= num_start:
target_input[i] = 0
return torch.LongTensor(target), torch.LongTensor(target_input)
def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, batch_size, num_size, hidden_size):
indices = list()
sen_len = encoder_outputs.size(0)
masked_index = []
temp_1 = [1 for _ in range(hidden_size)]
temp_0 = [0 for _ in range(hidden_size)]
for b in range(batch_size):
for i in num_pos[b]:
indices.append(i + b * sen_len)
masked_index.append(temp_0)
indices += [0 for _ in range(len(num_pos[b]), num_size)]
masked_index += [temp_1 for _ in range(len(num_pos[b]), num_size)]
indices = torch.LongTensor(indices)
masked_index = torch.ByteTensor(masked_index)
masked_index = masked_index.view(batch_size, num_size, hidden_size)
if self.USE_CUDA:
indices = indices.cuda()
masked_index = masked_index.cuda()
all_outputs = encoder_outputs.transpose(0, 1).contiguous()
all_embedding = all_outputs.view(-1, encoder_outputs.size(2)) # S x B x H -> (B x S) x H
all_num = all_embedding.index_select(0, indices)
all_num = all_num.view(batch_size, num_size, hidden_size)
return all_num.masked_fill_(masked_index.bool(), 0.0), masked_index
def generate_decoder_input(self, target, decoder_output, nums_stack_batch, num_start, unk):
        # when the target is UNK and the copied number appears at more than one position, choose the candidate with the highest decoder score
if self.USE_CUDA:
decoder_output = decoder_output.cpu()
target = torch.LongTensor(target)
for i in range(target.size(0)):
if target[i] == unk:
num_stack = nums_stack_batch[i].pop()
max_score = -float("1e12")
for num in num_stack:
if decoder_output[i, num_start + num] > max_score:
target[i] = num + num_start
max_score = decoder_output[i, num_start + num]
return target
def convert_idx2symbol1(self, output, num_list, num_stack):
#batch_size=output.size(0)
'''batch_size=1'''
seq_len = len(output)
num_len = len(num_list)
output_list = []
res = []
for s_i in range(seq_len):
idx = output[s_i]
if idx in [self.out_sos_token1, self.out_eos_token1, self.out_pad_token1]:
break
symbol = self.out_idx2symbol1[idx]
if "NUM" in symbol:
num_idx = self.mask_list.index(symbol)
if num_idx >= num_len:
res = []
break
res.append(num_list[num_idx])
elif symbol == SpecialTokens.UNK_TOKEN:
try:
pos_list = num_stack.pop()
c = num_list[pos_list[0]]
res.append(c)
except:
return None
else:
res.append(symbol)
output_list.append(res)
return output_list
def convert_idx2symbol2(self, output, num_list, num_stack):
batch_size = output.size(0)
seq_len = output.size(1)
output_list = []
for b_i in range(batch_size):
res = []
num_len = len(num_list[b_i])
for s_i in range(seq_len):
idx = output[b_i][s_i]
if idx in [self.out_sos_token2, self.out_eos_token2, self.out_pad_token2]:
break
symbol = self.out_idx2symbol2[idx]
if "NUM" in symbol:
num_idx = self.mask_list.index(symbol)
if num_idx >= num_len:
res.append(symbol)
else:
res.append(num_list[b_i][num_idx])
elif symbol == SpecialTokens.UNK_TOKEN:
try:
pos_list = num_stack[b_i].pop()
c = num_list[b_i][pos_list[0]]
res.append(c)
except:
res.append(symbol)
else:
res.append(symbol)
output_list.append(res)
return output_list
class _MultiEncDec_(nn.Module):
def __init__(self, config, dataset):
super(_MultiEncDec_, self).__init__()
self.device = config['device']
self.rnn_cell_type = config['rnn_cell_type']
self.embedding_size = config['embedding_size']
self.hidden_size = config['hidden_size']
self.n_layers = config['num_layers']
self.hop_size = config['hop_size']
self.teacher_force_ratio = config['teacher_force_ratio']
self.beam_size = config['beam_size']
self.max_out_len = config['max_output_len']
self.dropout_ratio = config['dropout_ratio']
self.operator_nums = dataset.operator_nums
self.generate_nums = len(dataset.generate_list)
self.num_start1 = dataset.num_start1
self.num_start2 = dataset.num_start2
self.input1_size = len(dataset.in_idx2word_1)
self.input2_size = len(dataset.in_idx2word_2)
self.output2_size = len(dataset.out_idx2symbol_2)
self.unk1 = dataset.out_symbol2idx_1[SpecialTokens.UNK_TOKEN]
self.unk2 = dataset.out_symbol2idx_2[SpecialTokens.UNK_TOKEN]
self.sos2 = dataset.out_symbol2idx_2[SpecialTokens.SOS_TOKEN]
self.eos2 = dataset.out_symbol2idx_2[SpecialTokens.EOS_TOKEN]
self.out_symbol2idx1 = dataset.out_symbol2idx_1
self.out_idx2symbol1 = dataset.out_idx2symbol_1
self.out_symbol2idx2 = dataset.out_symbol2idx_2
self.out_idx2symbol2 = dataset.out_idx2symbol_2
generate_list = dataset.generate_list
self.generate_list = [self.out_symbol2idx1[symbol] for symbol in generate_list]
self.mask_list = NumMask.number
try:
self.out_sos_token1 = self.out_symbol2idx1[SpecialTokens.SOS_TOKEN]
except:
self.out_sos_token1 = None
try:
self.out_eos_token1 = self.out_symbol2idx1[SpecialTokens.EOS_TOKEN]
except:
self.out_eos_token1 = None
try:
self.out_pad_token1 = self.out_symbol2idx1[SpecialTokens.PAD_TOKEN]
except:
self.out_pad_token1 = None
try:
self.out_sos_token2 = self.out_symbol2idx2[SpecialTokens.SOS_TOKEN]
except:
self.out_sos_token2 = None
try:
self.out_eos_token2 = self.out_symbol2idx2[SpecialTokens.EOS_TOKEN]
except:
self.out_eos_token2 = None
try:
self.out_pad_token2 = self.out_symbol2idx2[SpecialTokens.PAD_TOKEN]
except:
self.out_pad_token2 = None
# Initialize models
embedder = BaiscEmbedder(self.input1_size, self.embedding_size, self.dropout_ratio)
in_embedder = self._init_embedding_params(dataset.trainset, dataset.in_idx2word_1, config['embedding_size'], embedder)
#self.out_embedder = BaiscEmbedder(self.output2_size,self.embedding_size,self.dropout_ratio)
self.encoder = GraphBasedMultiEncoder(input1_size=self.input1_size,
input2_size=self.input2_size,
embed_model=in_embedder,
embedding1_size=self.embedding_size,
embedding2_size=self.embedding_size // 4,
hidden_size=self.hidden_size,
n_layers=self.n_layers,
hop_size=self.hop_size)
self.numencoder = NumEncoder(node_dim=self.hidden_size, hop_size=self.hop_size)
self.predict = TreeDecoder(hidden_size=self.hidden_size, op_nums=self.operator_nums, generate_size=self.generate_nums)
self.generate = NodeGenerater(hidden_size=self.hidden_size, op_nums=self.operator_nums, embedding_size=self.embedding_size)
self.merge = SubTreeMerger(hidden_size=self.hidden_size, embedding_size=self.embedding_size)
self.decoder = TreeAttnDecoderRNN(self.hidden_size, self.embedding_size, self.output2_size, self.output2_size, self.n_layers, self.dropout_ratio)
# self.decoder = AttentionalRNNDecoder(embedding_size=self.embedding_size,
# hidden_size=self.hidden_size,
# context_size=self.hidden_size,
# num_dec_layers=self.n_layers,
# rnn_cell_type=self.rnn_cell_type,
# dropout_ratio=self.dropout_ratio)
#self.out = nn.Linear(self.hidden_size, self.output2_size)
self.loss = MaskedCrossEntropyLoss()
def _init_embedding_params(self, train_data, vocab, embedding_size, embedder):
sentences = []
for data in train_data:
sentence = []
for word in data['question']:
if word in vocab:
sentence.append(word)
else:
sentence.append(SpecialTokens.UNK_TOKEN)
sentences.append(sentence)
from gensim.models import word2vec
        model = word2vec.Word2Vec(sentences, vector_size=embedding_size, min_count=1)
emb_vectors = []
pad_idx = vocab.index(SpecialTokens.PAD_TOKEN)
for idx in range(len(vocab)):
if idx != pad_idx:
emb_vectors.append(np.array(model.wv[vocab[idx]]))
else:
emb_vectors.append(np.zeros((embedding_size)))
emb_vectors = np.array(emb_vectors)
embedder.embedder.weight.data.copy_(torch.from_numpy(emb_vectors))
return embedder
    def forward(self, input1_var, input2_var, input_length, target1, target1_length, target2, target2_length,
                num_stack_batch, num_size_batch, generate_list, num_pos_batch, num_order_batch, parse_graph):
# sequence mask for attention
seq_mask = []
max_len = max(input_length)
for i in input_length:
seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
seq_mask = torch.ByteTensor(seq_mask)
num_mask = []
max_num_size = max(num_size_batch) + len(generate_list)
for i in num_size_batch:
d = i + len(generate_list)
num_mask.append([0] * d + [1] * (max_num_size - d))
num_mask = torch.ByteTensor(num_mask)
num_pos_pad = []
max_num_pos_size = max(num_size_batch)
for i in range(len(num_pos_batch)):
temp = num_pos_batch[i] + [-1] * (max_num_pos_size - len(num_pos_batch[i]))
num_pos_pad.append(temp)
num_pos_pad = torch.LongTensor(num_pos_pad)
num_order_pad = []
max_num_order_size = max(num_size_batch)
for i in range(len(num_order_batch)):
temp = num_order_batch[i] + [0] * (max_num_order_size - len(num_order_batch[i]))
num_order_pad.append(temp)
num_order_pad = torch.LongTensor(num_order_pad)
num_stack1_batch = copy.deepcopy(num_stack_batch)
num_stack2_batch = copy.deepcopy(num_stack_batch)
#num_start2 = output2_lang.n_words - copy_nums - 2
#unk1 = output1_lang.word2index["UNK"]
#unk2 = output2_lang.word2index["UNK"]
# Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
# input1_var = torch.LongTensor(input1_batch).transpose(0, 1)
# input2_var = torch.LongTensor(input2_batch).transpose(0, 1)
# target1 = torch.LongTensor(target1_batch).transpose(0, 1)
# target2 = torch.LongTensor(target2_batch).transpose(0, 1)
input1_var = input1_var.transpose(0, 1)
input2_var = input2_var.transpose(0, 1)
target1 = target1.transpose(0, 1)
target2 = target2.transpose(0, 1)
parse_graph_pad = torch.LongTensor(parse_graph)
padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0)
batch_size = len(input_length)
encoder_outputs, encoder_hidden = self.encoder(input1_var, input2_var, input_length, parse_graph_pad)
copy_num_len = [len(_) for _ in num_pos_batch]
num_size = max(copy_num_len)
num_encoder_outputs, masked_index = self.get_all_number_encoder_outputs(encoder_outputs, num_pos_batch, num_size, self.hidden_size)
encoder_outputs, num_outputs, problem_output = self.numencoder(encoder_outputs, num_encoder_outputs, num_pos_pad, num_order_pad)
num_outputs = num_outputs.masked_fill_(masked_index.bool(), 0.0)
        decoder_hidden = encoder_hidden[:self.n_layers]  # Use the first n_layers encoder hidden states, consistent with calculate_loss/model_test
        if target1 is not None:
all_output1 = self.train_tree_double(encoder_outputs, problem_output, num_outputs, target1, target1_length, batch_size, padding_hidden, seq_mask, num_mask, num_pos_batch, num_order_pad,
num_stack1_batch)
all_output2 = self.train_attn_double(encoder_outputs, decoder_hidden, target2, target2_length, batch_size, seq_mask, num_stack2_batch)
return "train", all_output1, all_output2
else:
all_output1 = self.evaluate_tree_double(encoder_outputs, problem_output, num_outputs, batch_size, padding_hidden, seq_mask, num_mask)
all_output2 = self.evaluate_attn_double(encoder_outputs, decoder_hidden, batch_size, seq_mask)
if all_output1.score >= all_output2.score:
return "tree", all_output1.out, all_output1.score
else:
return "attn", all_output2.all_output, all_output2.score
    def calculate_loss(self, batch_data):
        """Encode one batch, decode it with both the tree decoder and the attention-based decoder, accumulate their masked cross-entropy losses, backpropagate, and return the loss value."""
input1_var = batch_data['input1']
input2_var = batch_data['input2']
input_length = batch_data['input1 len']
target1 = batch_data['output1']
target1_length = batch_data['output1 len']
target2 = batch_data['output2']
target2_length = batch_data['output2 len']
num_stack_batch = batch_data['num stack']
num_size_batch = batch_data['num size']
generate_list = self.generate_list
num_pos_batch = batch_data['num pos']
num_order_batch = batch_data['num order']
parse_graph = batch_data['parse graph']
equ_mask1 = batch_data['equ mask1']
equ_mask2 = batch_data['equ mask2']
# sequence mask for attention
seq_mask = []
max_len = max(input_length)
for i in input_length:
seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
seq_mask = torch.BoolTensor(seq_mask).to(self.device)
num_mask = []
max_num_size = max(num_size_batch) + len(generate_list)
for i in num_size_batch:
d = i + len(generate_list)
num_mask.append([0] * d + [1] * (max_num_size - d))
num_mask = torch.BoolTensor(num_mask).to(self.device)
num_pos_pad = []
max_num_pos_size = max(num_size_batch)
for i in range(len(num_pos_batch)):
temp = num_pos_batch[i] + [-1] * (max_num_pos_size - len(num_pos_batch[i]))
num_pos_pad.append(temp)
num_pos_pad = torch.LongTensor(num_pos_pad).to(self.device)
num_order_pad = []
max_num_order_size = max(num_size_batch)
for i in range(len(num_order_batch)):
temp = num_order_batch[i] + [0] * (max_num_order_size - len(num_order_batch[i]))
num_order_pad.append(temp)
num_order_pad = torch.LongTensor(num_order_pad).to(self.device)
num_stack1_batch = copy.deepcopy(num_stack_batch)
num_stack2_batch = copy.deepcopy(num_stack_batch)
#num_start2 = output2_lang.n_words - copy_nums - 2
#unk1 = output1_lang.word2index["UNK"]
#unk2 = output2_lang.word2index["UNK"]
# Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
# input1_var = torch.LongTensor(input1_batch).transpose(0, 1)
# input2_var = torch.LongTensor(input2_batch).transpose(0, 1)
# target1 = torch.LongTensor(target1_batch).transpose(0, 1)
# target2 = torch.LongTensor(target2_batch).transpose(0, 1)
# input1_var = input1_var.transpose(0, 1)
# input2_var = input2_var.transpose(0, 1)
# target1 = target1.transpose(0, 1)
# target2 = target2.transpose(0, 1)
parse_graph_pad = parse_graph.long()
padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0).to(self.device)
batch_size = len(input_length)
encoder_outputs, encoder_hidden = self.encoder(input1_var, input2_var, input_length, parse_graph_pad)
copy_num_len = [len(_) for _ in num_pos_batch]
num_size = max(copy_num_len)
num_encoder_outputs, masked_index = self.get_all_number_encoder_outputs(encoder_outputs, num_pos_batch, num_size, self.hidden_size)
encoder_outputs, num_outputs, problem_output = self.numencoder(encoder_outputs, num_encoder_outputs, num_pos_pad, num_order_pad)
num_outputs = num_outputs.masked_fill_(masked_index.bool(), 0.0)
decoder_hidden = encoder_hidden[:self.n_layers] # Use last (forward) hidden state from encoder
all_output1, target1 = self.train_tree_double(encoder_outputs, problem_output, num_outputs, target1, target1_length, batch_size, padding_hidden, seq_mask, num_mask, num_pos_batch,
num_order_pad, num_stack1_batch)
all_output2, target2_ = self.train_attn_double(encoder_outputs, decoder_hidden, target2, target2_length, batch_size, seq_mask, num_stack2_batch)
self.loss.reset()
self.loss.eval_batch(all_output1, target1, equ_mask1)
self.loss.eval_batch(all_output2, target2_, equ_mask2)
self.loss.backward()
return self.loss.get_loss()
    def model_test(self, batch_data):
        """Encode a single problem, decode it with both the tree decoder and the attention-based decoder, and return the higher-scoring prediction together with the corresponding target, both converted back to output symbols."""
input1_var = batch_data['input1']
input2_var = batch_data['input2']
input_length = batch_data['input1 len']
target1 = batch_data['output1']
target1_length = batch_data['output1 len']
target2 = batch_data['output2']
target2_length = batch_data['output2 len']
num_stack_batch = batch_data['num stack']
num_size_batch = batch_data['num size']
generate_list = self.generate_list
num_pos_batch = batch_data['num pos']
num_order_batch = batch_data['num order']
parse_graph = batch_data['parse graph']
num_list = batch_data['num list']
# sequence mask for attention
seq_mask = []
max_len = max(input_length)
for i in input_length:
seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
seq_mask = torch.BoolTensor(seq_mask).to(self.device)
num_mask = []
max_num_size = max(num_size_batch) + len(generate_list)
for i in num_size_batch:
d = i + len(generate_list)
num_mask.append([0] * d + [1] * (max_num_size - d))
num_mask = torch.BoolTensor(num_mask).to(self.device)
num_pos_pad = []
max_num_pos_size = max(num_size_batch)
for i in range(len(num_pos_batch)):
temp = num_pos_batch[i] + [-1] * (max_num_pos_size - len(num_pos_batch[i]))
num_pos_pad.append(temp)
num_pos_pad = torch.LongTensor(num_pos_pad).to(self.device)
num_order_pad = []
max_num_order_size = max(num_size_batch)
for i in range(len(num_order_batch)):
temp = num_order_batch[i] + [0] * (max_num_order_size - len(num_order_batch[i]))
num_order_pad.append(temp)
num_order_pad = torch.LongTensor(num_order_pad).to(self.device)
num_stack1_batch = copy.deepcopy(num_stack_batch)
num_stack2_batch = copy.deepcopy(num_stack_batch)
#num_start2 = output2_lang.n_words - copy_nums - 2
#unk1 = output1_lang.word2index["UNK"]
#unk2 = output2_lang.word2index["UNK"]
# Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
# input1_var = torch.LongTensor(input1_batch).transpose(0, 1)
# input2_var = torch.LongTensor(input2_batch).transpose(0, 1)
# target1 = torch.LongTensor(target1_batch).transpose(0, 1)
# target2 = torch.LongTensor(target2_batch).transpose(0, 1)
# input1_var = input1_var.transpose(0, 1)
# input2_var = input2_var.transpose(0, 1)
# target1 = target1.transpose(0, 1)
# target2 = target2.transpose(0, 1)
parse_graph_pad = parse_graph.long()
padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0).to(self.device)
batch_size = len(input_length)
encoder_outputs, encoder_hidden = self.encoder(input1_var, input2_var, input_length, parse_graph_pad)
copy_num_len = [len(_) for _ in num_pos_batch]
num_size = max(copy_num_len)
num_encoder_outputs, masked_index = self.get_all_number_encoder_outputs(encoder_outputs, num_pos_batch, num_size, self.hidden_size)
encoder_outputs, num_outputs, problem_output = self.numencoder(encoder_outputs, num_encoder_outputs, num_pos_pad, num_order_pad)
num_outputs = num_outputs.masked_fill_(masked_index.bool(), 0.0)
decoder_hidden = encoder_hidden[:self.n_layers] # Use last (forward) hidden state from encoder
all_output1 = self.evaluate_tree_double(encoder_outputs, problem_output, num_outputs, batch_size, padding_hidden, seq_mask, num_mask)
all_output2 = self.evaluate_attn_double(encoder_outputs, decoder_hidden, batch_size, seq_mask)
if all_output1.score >= all_output2.score:
output1 = self.convert_idx2symbol1(all_output1.out, num_list[0], copy_list(num_stack1_batch[0]))
targets1 = self.convert_idx2symbol1(target1[0], num_list[0], copy_list(num_stack1_batch[0]))
return "tree", output1, targets1
else:
output2 = self.convert_idx2symbol2(torch.tensor(all_output2.all_output).view(1, -1), num_list, copy_list(num_stack2_batch))
targets2 = self.convert_idx2symbol2(target2, num_list, copy_list(num_stack2_batch))
return "attn", output2, targets2
    def train_tree_double(self, encoder_outputs, problem_output, all_nums_encoder_outputs, target, target_length, batch_size, padding_hidden, seq_mask, num_mask, num_pos, num_order_pad,
                          nums_stack_batch):
        """Teacher-forced decoding with the tree decoder; returns the stacked node outputs (B x S x N) and the target with UNK entries resolved against the number stacks."""
# Prepare input and output variables
node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]
max_target_length = max(target_length)
all_node_outputs = []
embeddings_stacks = [[] for _ in range(batch_size)]
left_childs = [None for _ in range(batch_size)]
for t in range(max_target_length):
num_score, op, current_embeddings, current_context, current_nums_embeddings = self.predict(node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask,
num_mask)
# all_leafs.append(p_leaf)
outputs = torch.cat((op, num_score), 1)
all_node_outputs.append(outputs)
target_t, generate_input = self.generate_tree_input(target[:, t].tolist(), outputs, nums_stack_batch)
target[:, t] = target_t
# if USE_CUDA:
# generate_input = generate_input.cuda()
generate_input = generate_input.to(self.device)
left_child, right_child, node_label = self.generate(current_embeddings, generate_input, current_context)
left_childs = []
for idx, l, r, node_stack, i, o in zip(range(batch_size), left_child.split(1), right_child.split(1), node_stacks, target[:, t].tolist(), embeddings_stacks):
if len(node_stack) != 0:
node = node_stack.pop()
else:
left_childs.append(None)
continue
if i < self.num_start1:
node_stack.append(TreeNode(r))
node_stack.append(TreeNode(l, left_flag=True))
o.append(TreeEmbedding(node_label[idx].unsqueeze(0), False))
else:
current_num = current_nums_embeddings[idx, i - self.num_start1].unsqueeze(0)
while len(o) > 0 and o[-1].terminal:
sub_stree = o.pop()
op = o.pop()
current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
o.append(TreeEmbedding(current_num, True))
if len(o) > 0 and o[-1].terminal:
left_childs.append(o[-1].embedding)
else:
left_childs.append(None)
# all_leafs = torch.stack(all_leafs, dim=1) # B x S x 2
all_node_outputs = torch.stack(all_node_outputs, dim=1) # B x S x N
return all_node_outputs, target
    def train_attn_double(self, encoder_outputs, decoder_hidden, target, target_length, batch_size, seq_mask, nums_stack_batch):
        """Decode with the attention-based RNN decoder: teacher forcing with probability teacher_force_ratio, otherwise beam search; returns the decoder outputs and the target with UNK entries resolved against the number stacks."""
max_target_length = max(target_length)
decoder_input = torch.LongTensor([self.sos2] * batch_size).to(self.device)
all_decoder_outputs = torch.zeros(batch_size, max_target_length, self.output2_size).to(self.device)
#all_decoder_outputs = []
seq_mask = torch.unsqueeze(seq_mask, dim=1)
# Move new Variables to CUDA
# if USE_CUDA:
# all_decoder_outputs = all_decoder_outputs.cuda()
if random.random() < self.teacher_force_ratio:
# if random.random() < 0:
# Run through decoder one time step at a time
#decoder_inputs = torch.cat([decoder_input.view(batch_size,1),target],dim=1)[:,:-1]
all_decoder_outputs = []
for t in range(max_target_length):
#decoder_inputs[:,t]=decoder_input
#decoder_input = decoder_inputs[:,t]
decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, seq_mask.squeeze(1))
#all_decoder_outputs[:,t,:] = decoder_output
all_decoder_outputs.append(decoder_output)
decoder_input = self.generate_decoder_input(target[:, t].tolist(), decoder_output, nums_stack_batch)
target[:, t] = decoder_input
all_decoder_outputs = torch.stack(all_decoder_outputs, dim=1)
else:
decoder_input = torch.LongTensor([self.sos2] * batch_size).to(self.device)
beam_list = list()
score = torch.zeros(batch_size).to(self.device)
# if USE_CUDA:
# score = score.cuda()
beam_list.append(Beam(score, decoder_input, decoder_hidden, all_decoder_outputs))
# Run through decoder one time step at a time
for t in range(max_target_length):
beam_len = len(beam_list)
beam_scores = torch.zeros(batch_size, self.output2_size * beam_len).to(self.device)
all_hidden = torch.zeros(decoder_hidden.size(0), batch_size * beam_len, decoder_hidden.size(2)).to(self.device)
all_outputs = torch.zeros(batch_size * beam_len, max_target_length, self.output2_size).to(self.device)
# if USE_CUDA:
# beam_scores = beam_scores.cuda()
# all_hidden = all_hidden.cuda()
# all_outputs = all_outputs.cuda()
for b_idx in range(len(beam_list)):
decoder_input = beam_list[b_idx].input_var
decoder_hidden = beam_list[b_idx].hidden
decoder_input = decoder_input.to(self.device)
decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, seq_mask.squeeze(1))
score = F.log_softmax(decoder_output, dim=1)
beam_score = beam_list[b_idx].score
beam_score = beam_score.unsqueeze(1)
repeat_dims = [1] * beam_score.dim()
repeat_dims[1] = score.size(1)
beam_score = beam_score.repeat(*repeat_dims)
score = score + beam_score
beam_scores[:, b_idx * self.output2_size:(b_idx + 1) * self.output2_size] = score
all_hidden[:, b_idx * batch_size:(b_idx + 1) * batch_size, :] = decoder_hidden
beam_list[b_idx].all_output[:, t, :] = decoder_output
all_outputs[batch_size * b_idx: batch_size * (b_idx + 1),:, :] = \
beam_list[b_idx].all_output
topv, topi = beam_scores.topk(self.beam_size, dim=1)
beam_list = list()
for k in range(self.beam_size):
temp_topk = topi[:, k]
temp_input = temp_topk % self.output2_size
temp_input = temp_input.data
temp_beam_pos = temp_topk // self.output2_size
indices = torch.LongTensor(range(batch_size)).to(self.device)
indices += temp_beam_pos * batch_size
temp_hidden = all_hidden.index_select(dim=1, index=indices)
temp_output = all_outputs.index_select(dim=0, index=indices)
beam_list.append(Beam(topv[:, k], temp_input, temp_hidden, temp_output))
all_decoder_outputs = beam_list[0].all_output
for t in range(max_target_length):
target[:, t] = self.generate_decoder_input(target[:, t].tolist(), all_decoder_outputs[:, t], nums_stack_batch)
return all_decoder_outputs, target
    def evaluate_tree_double(self, encoder_outputs, problem_output, all_nums_encoder_outputs, batch_size, padding_hidden, seq_mask, num_mask):
        """Beam-search decoding with the tree decoder (written for single-problem evaluation); returns the highest-scoring TreeBeam."""
# Prepare input and output variables
node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]
#num_start = output_lang.num_start
# B x P x N
embeddings_stacks = [[] for _ in range(batch_size)]
left_childs = [None for _ in range(batch_size)]
beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])]
for t in range(self.max_out_len):
current_beams = []
while len(beams) > 0:
b = beams.pop()
if len(b.node_stack[0]) == 0:
current_beams.append(b)
continue
# left_childs = torch.stack(b.left_childs)
left_childs = b.left_childs
num_score, op, current_embeddings, current_context, current_nums_embeddings = self.predict(b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden,
seq_mask, num_mask)
out_score = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1)
topv, topi = out_score.topk(self.beam_size)
for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)):
current_node_stack = copy_list(b.node_stack)
current_left_childs = []
current_embeddings_stacks = copy_list(b.embedding_stack)
current_out = copy.deepcopy(b.out)
out_token = int(ti)
current_out.append(out_token)
node = current_node_stack[0].pop()
if out_token < self.num_start1:
generate_input = torch.LongTensor([out_token]).to(self.device)
# if USE_CUDA:
# generate_input = generate_input.cuda()
left_child, right_child, node_label = self.generate(current_embeddings, generate_input, current_context)
current_node_stack[0].append(TreeNode(right_child))
current_node_stack[0].append(TreeNode(left_child, left_flag=True))
current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False))
else:
current_num = current_nums_embeddings[0, out_token - self.num_start1].unsqueeze(0)
while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
sub_stree = current_embeddings_stacks[0].pop()
op = current_embeddings_stacks[0].pop()
current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
current_embeddings_stacks[0].append(TreeEmbedding(current_num, True))
if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
current_left_childs.append(current_embeddings_stacks[0][-1].embedding)
else:
current_left_childs.append(None)
current_beams.append(TreeBeam(b.score + float(tv), current_node_stack, current_embeddings_stacks, current_left_childs, current_out))
beams = sorted(current_beams, key=lambda x: x.score, reverse=True)
beams = beams[:self.beam_size]
flag = True
for b in beams:
if len(b.node_stack[0]) != 0:
flag = False
if flag:
break
return beams[0]
    def evaluate_attn_double(self, encoder_outputs, decoder_hidden, batch_size, seq_mask):
        """Beam-search decoding with the attention-based RNN decoder for a single problem; returns the highest-scoring beam."""
# Create starting vectors for decoder
decoder_input = torch.LongTensor([self.sos2]).to(self.device) # SOS
beam_list = list()
score = 0
beam_list.append(Beam(score, decoder_input, decoder_hidden, []))
# Run through decoder
for di in range(self.max_out_len):
temp_list = list()
beam_len = len(beam_list)
for xb in beam_list:
if int(xb.input_var[0]) == self.eos2:
temp_list.append(xb)
beam_len -= 1
if beam_len == 0:
return beam_list[0]
beam_scores = torch.zeros(self.output2_size * beam_len).to(self.device)
hidden_size_0 = decoder_hidden.size(0)
hidden_size_2 = decoder_hidden.size(2)
all_hidden = torch.zeros(beam_len, hidden_size_0, 1, hidden_size_2).to(self.device)
all_outputs = []
current_idx = -1
for b_idx in range(len(beam_list)):
decoder_input = beam_list[b_idx].input_var
if int(decoder_input[0]) == self.eos2:
continue
current_idx += 1
decoder_hidden = beam_list[b_idx].hidden
decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs, seq_mask)
#decoder_output = self.out(decoder_output).squeeze(dim=1)
score = F.log_softmax(decoder_output, dim=1)
score += beam_list[b_idx].score
beam_scores[current_idx * self.output2_size:(current_idx + 1) * self.output2_size] = score
all_hidden[current_idx] = decoder_hidden
all_outputs.append(beam_list[b_idx].all_output)
topv, topi = beam_scores.topk(self.beam_size)
for k in range(self.beam_size):
word_n = int(topi[k])
word_input = word_n % self.output2_size
temp_input = torch.LongTensor([word_input]).to(self.device)
indices = int(word_n / self.output2_size)
temp_hidden = all_hidden[indices]
temp_output = all_outputs[indices] + [word_input]
temp_list.append(Beam(float(topv[k]), temp_input, temp_hidden, temp_output))
temp_list = sorted(temp_list, key=lambda x: x.score, reverse=True)
if len(temp_list) < self.beam_size:
beam_list = temp_list
else:
beam_list = temp_list[:self.beam_size]
return beam_list[0]
    def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, num_size, hidden_size):
        """Gather the encoder outputs at every number position of each problem, pad missing slots with zeros, and return the gathered embeddings (B x num_size x H) together with the padding mask."""
indices = list()
sen_len = encoder_outputs.size(1)
batch_size = encoder_outputs.size(0)
masked_index = []
temp_1 = [1 for _ in range(hidden_size)]
temp_0 = [0 for _ in range(hidden_size)]
for b in range(batch_size):
for i in num_pos[b]:
if i == -1:
indices.append(0)
masked_index.append(temp_1)
continue
indices.append(i + b * sen_len)
masked_index.append(temp_0)
indices = indices + [0 for _ in range(len(num_pos[b]), num_size)]
masked_index = masked_index + [temp_1 for _ in range(len(num_pos[b]), num_size)]
# indices = torch.LongTensor(indices)
# masked_index = torch.ByteTensor(masked_index)
indices = torch.LongTensor(indices).to(self.device)
masked_index = torch.BoolTensor(masked_index).to(self.device)
masked_index = masked_index.view(batch_size, num_size, hidden_size)
all_outputs = encoder_outputs.transpose(0, 1).contiguous()
all_embedding = all_outputs.view(-1, encoder_outputs.size(2)) # S x B x H -> (B x S) x H
all_num = all_embedding.index_select(0, indices)
all_num = all_num.view(batch_size, num_size, hidden_size)
return all_num.masked_fill_(masked_index.bool(), 0.0), masked_index
def generate_tree_input(self, target, decoder_output, nums_stack_batch):
        # when the decoder input is a copied number that appears at more than one position, choose the position with the highest score
target_input = copy.deepcopy(target)
for i in range(len(target)):
if target[i] == self.unk1:
num_stack = nums_stack_batch[i].pop()
max_score = -float("1e12")
for num in num_stack:
if decoder_output[i, self.num_start1 + num] > max_score:
target[i] = num + self.num_start1
max_score = decoder_output[i, self.num_start1 + num]
if target_input[i] >= self.num_start1:
target_input[i] = 0
return torch.LongTensor(target), torch.LongTensor(target_input)
def generate_decoder_input(self, target, decoder_output, nums_stack_batch):
        # when the decoder input is a copied number that appears at more than one position, choose the position with the highest score
# if USE_CUDA:
# decoder_output = decoder_output.cpu()
target = torch.LongTensor(target).to(self.device)
for i in range(target.size(0)):
if target[i] == self.unk2:
num_stack = nums_stack_batch[i].pop()
max_score = -float("1e12")
for num in num_stack:
if decoder_output[i, self.num_start2 + num] > max_score:
target[i] = num + self.num_start2
max_score = decoder_output[i, self.num_start2 + num]
return target
def convert_idx2symbol1(self, output, num_list, num_stack):
#batch_size=output.size(0)
        '''Convert a single (batch_size = 1) index sequence from the tree decoder back to output symbols, resolving NUM tokens from num_list and UNK tokens from num_stack.'''
seq_len = len(output)
num_len = len(num_list)
output_list = []
res = []
for s_i in range(seq_len):
idx = output[s_i]
if idx in [self.out_sos_token1, self.out_eos_token1, self.out_pad_token1]:
break
symbol = self.out_idx2symbol1[idx]
if "NUM" in symbol:
num_idx = self.mask_list.index(symbol)
if num_idx >= num_len:
res = []
break
res.append(num_list[num_idx])
elif symbol == SpecialTokens.UNK_TOKEN:
try:
pos_list = num_stack.pop()
c = num_list[pos_list[0]]
res.append(c)
except:
return None
else:
res.append(symbol)
output_list.append(res)
return output_list
    def convert_idx2symbol2(self, output, num_list, num_stack):
        """Convert batched index sequences from the attention decoder back to output symbols, resolving NUM tokens from num_list and UNK tokens from num_stack."""
batch_size = output.size(0)
seq_len = output.size(1)
output_list = []
for b_i in range(batch_size):
res = []
num_len = len(num_list[b_i])
for s_i in range(seq_len):
idx = output[b_i][s_i]
if idx in [self.out_sos_token2, self.out_eos_token2, self.out_pad_token2]:
break
symbol = self.out_idx2symbol2[idx]
if "NUM" in symbol:
num_idx = self.mask_list.index(symbol)
if num_idx >= num_len:
res.append(symbol)
else:
res.append(num_list[b_i][num_idx])
elif symbol == SpecialTokens.UNK_TOKEN:
try:
pos_list = num_stack[b_i].pop()
c = num_list[b_i][pos_list[0]]
res.append(c)
except:
res.append(symbol)
else:
res.append(symbol)
output_list.append(res)
return output_list
# def get_all_number_encoder_outputs(self,encoder_outputs, num_pos, num_size, hidden_size):
# indices = list()
# sen_len = encoder_outputs.size(1)
# batch_size=encoder_outputs.size(0)
# masked_index = []
# temp_1 = [1 for _ in range(hidden_size)]
# temp_0 = [0 for _ in range(hidden_size)]
# for b in range(batch_size):
# for i in num_pos[b]:
# indices.append(i + b * sen_len)
# masked_index.append(temp_0)
# indices += [0 for _ in range(len(num_pos[b]), num_size)]
# masked_index += [temp_1 for _ in range(len(num_pos[b]), num_size)]
# indices = torch.LongTensor(indices).to(self.device)
# masked_index = torch.BoolTensor(masked_index).to(self.device)
# masked_index = masked_index.view(batch_size, num_size, hidden_size)
# all_outputs = encoder_outputs.contiguous()
# all_embedding = all_outputs.view(-1, encoder_outputs.size(2)) # S x B x H -> (B x S) x H
# all_num = all_embedding.index_select(0, indices)
# all_num = all_num.view(batch_size, num_size, hidden_size)
# return all_num.masked_fill_(masked_index, 0.0)
# def replace_masked_values(tensor, mask, replace_with):
# return tensor.masked_fill((1 - mask).bool(), replace_with)
| 1.976563
| 2
|
calculation/gmhazard_calc/gmhazard_calc/exceptions.py
|
ucgmsim/gmhazard
| 0
|
12775447
|
<gh_stars>0
class GMHazardError(BaseException):
"""Base GMHazard error"""
def __init__(self, message: str):
self.message = message
class ExceedanceOutOfRangeError(GMHazardError):
"""Raised when the specified exceedance value is out of range when
going from exceedance to IM on the hazard curve"""
def __init__(self, im: str, exceedance: float, message: str):
super().__init__(message)
self.exceedance = exceedance
self.im = im
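# Illustrative usage (not part of the original module): hazard-curve code that maps an
# exceedance value back to an IM level might raise this error when the value falls
# outside the curve's range. The variable names below are assumptions for the example.
#
#     if not (exceedance_min <= exceedance <= exceedance_max):
#         raise ExceedanceOutOfRangeError(
#             im, exceedance, f"exceedance {exceedance} is out of range for {im}"
#         )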
| 2.828125
| 3
|
pydzcvr/networkDiscovery/openstack/libvirt/discovery.py
|
cboling/pydzcvr
| 0
|
12775448
|
<gh_stars>0
'''
@author: <NAME>
@copyright: 2015 Boling Consulting Solutions. All rights reserved.
@license: Artistic License 2.0, http://opensource.org/licenses/Artistic-2.0
@contact: <EMAIL>
@deffield updated: Updated
Libvirt allows you to access hypervisors running on remote machines through authenticated
and encrypted connections. On the remote machine, libvirtd should be running in general.
See the section on configuring libvirtd for more information at https://libvirt.org/remote.html#Remote_libvirtd_configuration
For test purposes, use ssh to connect libvirtd such as: ::
sudo virsh --readonly --connect qemu+ssh://stack@kiwi-os-compute-01/system list
'''
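# A rough Python equivalent of the virsh test above (illustrative only; it assumes the
# same qemu+ssh URI is reachable and libvirtd is running on the remote host):
#
#     conn = libvirt.openReadOnly('qemu+ssh://stack@kiwi-os-compute-01/system')
#     print [dom.name() for dom in conn.listAllDomains()]
#     conn.close()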
import libvirt
import sys
import pprint
def discoverAllNodes():
'''
Discover VM instances (via libvirt)
TODO: This will change radically, for now, just do some hardcoded calls
to various interfaces and see what is available. Evenutally this
will be consolidated once patterns emerge.
TODO: Make each of the discovery calls below optional based on whether or
not they have the required APIs installed
'''
# use https://libvirt.org/uri.html for remote access
'''
Remote URIs have the general form ("[...]" meaning an optional part):
driver[+transport]://[username@][hostname][:port]/[path][?extraparameters]
'''
uri = 'qemu+ssh://stack@kiwi-os-compute-01/system'
conn = libvirt.openReadOnly(uri)
    if conn is None:
print 'Failed to open connection to the hypervisor'
return
try:
# Get a list of all VM instances
domains = conn.listAllDomains()
# Walk each instance and dump its XML
for instance in domains:
name = instance.name()
xmlDescription = instance.XMLDesc()
# Look for <devices.interfaces> will have a type (probably 'bridge')
# and under it, the target connection will be 'target/<something>
# such as:
#
# <domain type='qemu' id='2'>
# <name>instance-0000002b</name>
# ...
# <devices>
# ...
# <interface type='bridge'>
# <mac address='fa:16:3e:38:f7:16'/>
# <source bridge='br-int'/>
# <virtualport type='openvswitch'>
# <parameters interfaceid='e32ca2e5-24e7-473d-a9dd-c3ab7ff38097'/>
# </virtualport>
# <target dev='tape32ca2e5-24'/>
# <model type='virtio'/>
# <driver name='qemu'/>
# <alias name='net0'/>
# <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
# </interface>
# ...
#
# for dev in $(virsh dumpxml $instance |
# xmllint --xpath '//interface/target/@dev' -); do
# dev=${dev#dev=\"}
# dev=${dev%\"}
# echo "$instance $dev" >> edges
# done
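            # Illustrative sketch (not part of the original module): one way to pull the
            # interface target device names out of the domain XML shown in the comment
            # above, using the standard-library ElementTree instead of xmllint. The
            # element paths follow the sample XML and may need adjusting for other
            # hypervisor layouts.
            import xml.etree.ElementTree as ET  # local import keeps the sketch self-contained
            domain_root = ET.fromstring(xmlDescription)
            for iface in domain_root.findall('./devices/interface'):
                target = iface.find('target')
                if target is not None:
                    # e.g. "instance-0000002b tape32ca2e5-24"
                    print '%s %s' % (name, target.get('dev'))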
pp = pprint.PrettyPrinter(indent=2)
            pp.pprint(xmlDescription)
    except libvirt.libvirtError as e:
        print 'Failed to enumerate the domains: %s' % e
def discoverInterfaces():
# TODO: Actually, may need to try a variety of uri combinations
uri = 'qemu+ssh://stack@kiwi-os-compute-01/system'
conn = libvirt.openReadOnly(uri)
    if conn is None:
print 'Failed to open connection to the hypervisor'
return
try:
networks = conn.listAllNetworks()
interfaces = conn.listAllInterfaces()
domains = conn.listAllDomains()
dom0 = conn.lookupByName("Domain-0")
except:
print 'Failed to find the main domain'
sys.exit(1)
print "Domain 0: id %d running %s" % (dom0.ID(), dom0.OSType())
print dom0.info()
pass
| 2.453125
| 2
|
vininfo/common.py
|
ghilesmeddour/vininfo
| 60
|
12775449
|
<filename>vininfo/common.py<gh_stars>10-100
from typing import Dict, Any, Type
if False: # pragma: nocover
from .details._base import VinDetails # noqa
class Annotatable:
annotate_titles = {}
def annotate(self) -> Dict[str, Any]:
annotations = {}
no_attr = set()
for attr_name, label in self.annotate_titles.items():
value = getattr(self, attr_name, no_attr)
if value is no_attr:
continue
if isinstance(value, list):
value = ', '.join(f'{val}' for val in value)
annotations[label] = f'{value}'
return dict((title, value) for title, value in sorted(annotations.items(), key=lambda item: item[0]))
class Brand:
__slots__ = ['manufacturer']
extractor: Type['VinDetails'] = None
def __init__(self, manufacturer: str = None):
self.manufacturer = manufacturer or self.title
@property
def title(self) -> str:
return self.__class__.__name__
def __str__(self):
return f'{self.title} ({self.manufacturer})'
class UnsupportedBrand(Brand):
"""Unsupported brand."""
| 2.53125
| 3
|
apps/frontend/hotel/views.py
|
12roshan12/Hotel-website
| 0
|
12775450
|
<reponame>12roshan12/Hotel-website
from django.contrib.messages.views import SuccessMessageMixin
from django.db.models.fields import CommaSeparatedIntegerField
from django.http.response import HttpResponse
from django.shortcuts import redirect,render
from django.contrib.auth.views import LoginView, LogoutView
from django.views.generic.base import View
from django.views.generic.edit import CreateView
from apps.backend.users.forms import BlogForm, EventsForm, UserRegistrationForm, EnquiryForm
from apps.backend.users.models import Blog, Enquiry,Events
from django.contrib import messages
from django.urls import reverse_lazy
from apps.backend.users.models import User
from django.views.generic.edit import FormView
# Create your views here.
# def userRegister(request):
# print('hello',request.method)
# if request.method == "POST":
# fname = request.POST['fname']
# lname = request.POST['lname']
# email = request.POST['email']
# password = request.POST['password']
# cpassword = request.POST['cpassword']
# mobile = request.POST['mobile']
# user = userReg.objects.filter(Email=email)
# if user:
# message="User already exist"
# print(message)
# return render(request,"frontend/signup.html", {'msg':message})
# else:
# if password==<PASSWORD>:
# newuser = userReg.objects.create(Firstname=fname,Lastname=lname,Password=password,Email=email,Mobile=mobile)
# message="registration sucessfull"
# newuser.save()
# print(message)
# return redirect(reverse_lazy("hotel:userReg"))
# # return render(request,"frontend/index.html",{'msg':message})
# else:
# message="password doesn't match"
# print(message)
# return render(request,"frontend/signup.html", {'msg':message})
# else:
# return HttpResponse("none")
class staffField(SuccessMessageMixin, CreateView):
template_name='frontend/signup.html'
form_class = UserRegistrationForm
success_message = 'User Successfully Created'
success_url = reverse_lazy('hotel:home')
def form_valid(self, form):
user = form.save(commit=False)
user.is_buyer = True
user.save()
messages.success(self.request, self.success_message)
return redirect(self.success_url)
class StaffLogoutField(LogoutView):
template_name = 'frontend/home.html'
class StaffLoginField(LoginView):
template_name = 'frontend/login.html'
def get_success_url(self):
        return reverse_lazy('hotel:home')  # get_success_url must return a URL, not an HttpResponse
# return super().get_success_url()
class ContactUsField(SuccessMessageMixin, CreateView):
template_name = 'frontend/contact-us.html'
class EnquiryView(FormView):
template_name = 'frontend/contact-us.html'
form_class = EnquiryForm
# print("1")
def form_valid(self, form):
contact = form.save(commit=False)
contact.save()
print("2")
return redirect(reverse_lazy('hotel:home'))
class Eventsview(FormView):
template_name = 'frontend/addevent.html'
form_class = EventsForm
def form_valid(self,form):
event = form.save(commit=False)
event.save()
return redirect(reverse_lazy('hotel:events'))
def Eventdisplay(request):
data = Events.objects.all()
return render(request,'frontend/events.html',{'data':data})
class Blogview(FormView):
template_name = 'frontend/addblog.html'
form_class = BlogForm
def form_valid(self,form):
        blog = form.save(commit=False)
blog.image = form.cleaned_data['image']
blog.save()
return redirect(reverse_lazy('hotel:blog'))
def Blogdisplay(request):
data = Blog.objects.all()
return render(request,'frontend/blog.html',{'data':data})
| 2.125
| 2
|