content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import requests
from json import dumps
from .NotifyBase import NotifyBase
from ..utils import GET_EMAIL_RE
from ..common import NotifyType
from ..utils import parse_list
from ..AppriseLocale import gettext_lazy as _
# Sentinel recipient meaning "broadcast to every device on the account".
PUSHBULLET_SEND_TO_ALL = 'ALL_DEVICES'

# Map of HTTP status codes PushBullet is known to return to readable text.
PUSHBULLET_HTTP_ERROR_MAP = {
    401: 'Unauthorized - Invalid Token.',
}
class NotifyPushBullet(NotifyBase):
    """
    A wrapper for PushBullet Notifications
    """

    # The default descriptive name associated with the Notification
    service_name = 'Pushbullet'

    # The services URL
    service_url = 'https://www.pushbullet.com/'

    # The default secure protocol
    secure_protocol = 'pbul'

    # A URL that takes you to the setup/help of the specific protocol
    setup_url = 'https://github.com/caronc/apprise/wiki/Notify_pushbullet'

    # PushBullet uses the http protocol with JSON requests
    notify_url = 'https://api.pushbullet.com/v2/pushes'

    # Define object templates
    templates = (
        '{schema}://{accesstoken}',
        '{schema}://{accesstoken}/{targets}',
    )

    # Define our template tokens
    template_tokens = dict(NotifyBase.template_tokens, **{
        'accesstoken': {
            'name': _('Access Token'),
            'type': 'string',
            'private': True,
            'required': True,
        },
        'target_device': {
            'name': _('Target Device'),
            'type': 'string',
            'map_to': 'targets',
        },
        'target_channel': {
            'name': _('Target Channel'),
            'type': 'string',
            'prefix': '#',
            'map_to': 'targets',
        },
        'target_email': {
            'name': _('Target Email'),
            'type': 'string',
            'map_to': 'targets',
        },
        'targets': {
            'name': _('Targets'),
            'type': 'list:string',
        },
    })

    # Define our template arguments
    template_args = dict(NotifyBase.template_args, **{
        'to': {
            'alias_of': 'targets',
        },
    })

    def __init__(self, accesstoken, targets=None, **kwargs):
        """
        Initialize PushBullet Object

        :param accesstoken: the PushBullet API access token
        :param targets: optional list of devices, #channels and/or email
            addresses to notify; when empty, all devices are notified
        """
        super(NotifyPushBullet, self).__init__(**kwargs)

        self.accesstoken = accesstoken

        self.targets = parse_list(targets)
        if not self.targets:
            # No explicit targets; default to broadcasting to all devices
            self.targets = (PUSHBULLET_SEND_TO_ALL, )

    def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
        """
        Perform PushBullet Notification

        Returns True only if every target was notified successfully.
        """
        headers = {
            'User-Agent': self.app_id,
            'Content-Type': 'application/json'
        }

        # PushBullet authenticates with the token as the basic-auth username
        auth = (self.accesstoken, '')

        # error tracking (used for function return)
        has_error = False

        # Create a copy of the targets list so we can consume it
        targets = list(self.targets)
        while len(targets):
            recipient = targets.pop(0)

            # prepare JSON Object
            payload = {
                'type': 'note',
                'title': title,
                'body': body,
            }

            # Fix: compare by value, not identity ('is').  A user-supplied
            # 'ALL_DEVICES' target parsed from a URL is a different string
            # object and was previously mis-classified as a device name.
            if recipient == PUSHBULLET_SEND_TO_ALL:
                # Send to all
                pass

            elif GET_EMAIL_RE.match(recipient):
                payload['email'] = recipient
                self.logger.debug(
                    "Recipient '%s' is an email address" % recipient)

            elif recipient[0] == '#':
                payload['channel_tag'] = recipient[1:]
                self.logger.debug("Recipient '%s' is a channel" % recipient)

            else:
                payload['device_iden'] = recipient
                self.logger.debug(
                    "Recipient '%s' is a device" % recipient)

            self.logger.debug('PushBullet POST URL: %s (cert_verify=%r)' % (
                self.notify_url, self.verify_certificate,
            ))
            self.logger.debug('PushBullet Payload: %s' % str(payload))

            # Always call throttle before any remote server i/o is made
            self.throttle()

            try:
                r = requests.post(
                    self.notify_url,
                    data=dumps(payload),
                    headers=headers,
                    auth=auth,
                    verify=self.verify_certificate,
                )

                if r.status_code != requests.codes.ok:
                    # We had a problem
                    status_str = \
                        NotifyPushBullet.http_response_code_lookup(
                            r.status_code, PUSHBULLET_HTTP_ERROR_MAP)

                    self.logger.warning(
                        'Failed to send PushBullet notification to {}:'
                        '{}{}error={}.'.format(
                            recipient,
                            status_str,
                            ', ' if status_str else '',
                            r.status_code))

                    self.logger.debug(
                        'Response Details:\r\n{}'.format(r.content))

                    # Mark our failure
                    has_error = True
                    continue

                else:
                    self.logger.info(
                        'Sent PushBullet notification to "%s".' % (recipient))

            except requests.RequestException as e:
                # Fix: corrected 'occured' -> 'occurred' in the log message
                self.logger.warning(
                    'A Connection error occurred sending PushBullet '
                    'notification to "%s".' % (recipient),
                )
                self.logger.debug('Socket Exception: %s' % str(e))

                # Mark our failure
                has_error = True
                continue

        return not has_error

    def url(self):
        """
        Returns the URL built dynamically based on specified arguments.
        """
        # Define any arguments set
        args = {
            'format': self.notify_format,
            'overflow': self.overflow_mode,
            'verify': 'yes' if self.verify_certificate else 'no',
        }

        targets = '/'.join([NotifyPushBullet.quote(x) for x in self.targets])
        if targets == PUSHBULLET_SEND_TO_ALL:
            # keyword is reserved for internal usage only; it's safe to remove
            # it from the recipients list
            targets = ''

        return '{schema}://{accesstoken}/{targets}/?{args}'.format(
            schema=self.secure_protocol,
            accesstoken=NotifyPushBullet.quote(self.accesstoken, safe=''),
            targets=targets,
            args=NotifyPushBullet.urlencode(args))

    @staticmethod
    def parse_url(url):
        """
        Parses the URL and returns enough arguments that can allow
        us to substantiate this object.
        """
        results = NotifyBase.parse_url(url)

        if not results:
            # We're done early as we couldn't load the results
            return results

        # Fetch our targets
        results['targets'] = \
            NotifyPushBullet.split_path(results['fullpath'])

        # The 'to' makes it easier to use yaml configuration
        if 'to' in results['qsd'] and len(results['qsd']['to']):
            results['targets'] += \
                NotifyPushBullet.parse_list(results['qsd']['to'])

        # Setup the token; we store it in Access Token for global
        # plugin consistency with naming conventions
        results['accesstoken'] = NotifyPushBullet.unquote(results['host'])

        return results
|
# Get Launcher as well as OpenGL imports
from projects.launcher import *
# Window dimensions in pixels.
w, h = 500, 500
# Shared square position, advanced a little by square() on every draw.
square_posx, square_posy = 0.0, 0.0
# Shared triangle scale factor, advanced by triangle() and wrapped at 1.
triangle_scale = 1.0
def square(size=(100, 100)):
    """Draw a drifting quad of the given ``size``.

    Each call advances the shared (square_posx, square_posy) position,
    wrapping at the window bounds, then draws the quad there.
    """
    global square_posx, square_posy

    # Nudge the drift position and wrap it inside the window.
    square_posx = (square_posx + 0.05) % w
    square_posy = (square_posy + 0.05) % h
    glTranslated(square_posx, square_posy, 0)

    glColor3f(0.25, 0.5, 0.75)
    glBegin(GL_QUADS)
    quad_w, quad_h = size
    glVertex2f(quad_w, 0)
    glVertex2f(quad_w, quad_h)
    glVertex2f(0, quad_h)
    glVertex2f(0, 0)
    glEnd()

    # Reset the modelview matrix so the translation does not leak out.
    glLoadIdentity()
def scale_2d_object_in_position(position=(50, 50), object_size=(1, 1),
                                scale=(1.0, 1.0)):
    """Set up the modelview matrix so an object of ``object_size`` drawn at
    the origin appears centred on ``position`` after scaling by ``scale``.
    """
    # Offset so the *centre* of the scaled object lands on `position`.
    pos_x = position[0] - (object_size[0] / 2 * scale[0])
    pos_y = position[1] - (object_size[1] / 2 * scale[1])
    # Multiple operations on matrix must be called in inverse order
    glTranslate(pos_x, pos_y, 0)
    glScale(scale[0], scale[1], 1.0)
def triangle(size=(100, 100)):
    """Draw a slowly growing triangle centred in the window.

    The shared ``triangle_scale`` factor is advanced on each call and
    wraps back to 0 once it reaches 1.
    """
    global triangle_scale
    triangle_scale = (triangle_scale + 0.0005) % 1
    # Keep the triangle centred in the window while it scales.
    scale_2d_object_in_position((w//2, h//2), size, [triangle_scale]*2)
    glColor3f(0.8, 0, 0.25)
    glBegin(GL_TRIANGLES)
    glVertex2f(0, 0)
    glVertex2f(size[0], 0)
    glVertex2f(size[0] // 2, size[1])
    glEnd()
    # Reset the modelview matrix for the next draw call.
    glLoadIdentity()
def iterate():
    """Reset the viewport and install a 2D orthographic projection."""
    glViewport(0, 0, w, h)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # Map model coordinates 1:1 onto window pixels.
    glOrtho(0.0, w, 0.0, h, 0.0, 1.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
def display_func():
    """Per-frame callback: reset matrices, then draw both shapes."""
    glLoadIdentity()
    iterate()
    square((50, 50))
    triangle((w // 3, h // 3))
if __name__ == "__main__":
    # Hand the draw callback and window size to the launcher's main loop.
    Launcher(display_func, (w, h)).loop()
|
#!/dls_sw/tools/bin/python2.4
from pkg_resources import require
require('dls_serial_sim')
from dls_serial_sim import serial_device
import re, os, time
class elcomat3000(serial_device):
Terminator = "\n"
def __init__(self):
'''Constructor. Remember to call the base class constructor.'''
serial_device.__init__(self,
protocolBranches = [])
print "Initialising Elcomat3000 simulator, V1.0"
print "Power is %s" % self.power
self.transmitType1 = False
self.transmitType3 = False
self.transmitType5 = False
self.transmitType6 = False
self.measuredX = 4.5
self.measuredY = 2.3
self.day = 12
self.month = 1
self.year = 2001
self.focalLength = 300
return
def output(self, text):
print 'Tx: %s' % repr(text)
self.outq.put(text)
def outputType1(self):
text = "1 103 %f %f\r" % (self.measuredX, self.measuredY)
self.output(text)
def outputType2(self):
text = "2 103 %f %f\r" % (self.measuredX, self.measuredY)
self.output(text)
def outputType3(self):
text = "3 003 %f %f\r" % (self.measuredX, self.measuredY)
self.output(text)
def outputType4(self):
text = "4 003 %f %f\r" % (self.measuredX, self.measuredY)
self.output(text)
def outputType8(self):
text = "8 423 %d %d %d %d\r" % (self.day, self.month, self.year, self.focalLength)
self.output(text)
def outputTables(self):
# TODO: Implement table output
pass
def reply(self, command):
'''This function must be defined. It is called by the serial_sim system
whenever an asyn command is send down the line. Must return a string
with a response to the command or None.'''
print "Rx: %s" % repr(command)
result = None
if self.isPowerOn():
if command == 's':
self.transmitType1 = False
self.transmitType3 = False
self.transmitType5 = False
self.transmitType6 = False
elif command == 'r':
self.transmitType1 = False
self.transmitType3 = False
self.outputType2()
elif command == 'R':
self.transmitType1 = True
elif command == 'a':
self.transmitType1 = False
self.transmitType3 = False
self.outputType4()
elif command == 'A':
self.transmitType3 = True
elif command == 't':
self.outputTables()
elif command == 'd':
self.outputType8()
else:
print "Unknown command %s" % repr(command)
return result
def initialise(self):
'''Called by the framework when the power is switched on.'''
pass
# The functions below are the backdoor RPC API
def setInfo(self, day, month, year, focalLength):
self.day = day
self.month = month
self.year = year
self.focalLength = focalLenght
def setMeasurement(self, measuredX, measuredY):
self.measuredX = measuredX
self.measuredY = measuredY
if __name__ == "__main__":
    # Manual test: start the simulator with its IP and RPC back doors open.
    dev = elcomat3000()
    dev.start_ip(9015)
    dev.start_rpc(9016)
    # cheesy wait to stop the program exiting immediately
    while True:
        time.sleep(1)
|
from mongoengine import *
class ConfigurationReference(EmbeddedDocument):
    """Embedded reference to a configuration document.

    Stored inline on a parent document rather than as its own collection.
    """
    # ObjectId of the referenced configuration document.
    configId = ObjectIdField()
    # Human-readable configuration name.
    name = StringField()
    # Associated file name (semantics defined by the parent document's usage).
    filename = StringField()
#!/usr/bin/env python3
"""
stores mysql DB secrets, should be kept secret for production
"""
from cryptography.fernet import Fernet
import os
import sys
# NOTE(review): hard-coded fallback API token checked into source -- for
# production this should come only from the environment or a secret store.
MY_API_TOKEN = (
    "bCCHjJ4CddOvuz&Yoce4^hpQqSr5393LtxF8#Dlv@a*O0MXlp8sdwQI%LwYlPAB*cNbVV9EUG"
    "lDsAkwPpcBeQ$F0A1Tdtnf%9eEe"
)

# Fernet key; the default is a development key and should be overridden via
# the TODO_KEY environment variable for production.
KEY = os.getenv('TODO_KEY', '1OWU3m77xWS5r_CfcQ63mRlyQDvR3VEfud4Img8psVE=')
API_BEARER_TOKEN = os.getenv('API_TOKEN', MY_API_TOKEN)
# Byte form of the key.
TODO_KEY = KEY.encode('utf-8')
CIPHER = Fernet(KEY)

# MySQL connection settings, all overridable from the environment.
TODO_USER = os.getenv('TODO_USER', 'root')
TODO_PWD = os.getenv('TODO_DB_PWD', '')
TODO_DB_HOST = os.getenv('TODO_DB_HOST', None)
if TODO_DB_HOST is None:
    # No sensible default for the DB host; print usage help and bail out.
    print("Usage:", file=sys.stderr)
    print("$ TODO_DB_HOST=[YOUR IP ADDRESS] python3 -m web_app.app",
          file=sys.stderr)
    print("Please specify the env variable TODO_DB_HOST", file=sys.stderr)
    sys.exit(1)
TODO_MYSQL_DB = os.getenv('TODO_MYSQL_DB', 'todo_dev_db')
|
class UserResponse(object):
    """Swagger model describing the payload of a user lookup response."""

    def __init__(self):
        # Attribute name -> swagger type, consumed by the client's
        # (de)serialisation machinery.
        self.swaggerTypes = dict(
            ResponseStatus='ResponseStatus',
            Response='User',
            Notifications='list<Str>',
        )
|
import os, sys, subprocess, shutil
sys.path.append(os.path.dirname(__file__) + "/../lib")
from test_helper import create_virtenv, run_test
# Virtualenv name is derived from the interpreter so runs against different
# Pythons do not collide.
ENV_NAME = "unidecode_test_env_" + os.path.basename(sys.executable)
SRC_DIR = os.path.abspath(os.path.join(ENV_NAME, "src"))
PYTHON_EXE = os.path.abspath(os.path.join(ENV_NAME, "bin", "python"))
def install_and_test_unidecode():
    """Download, build and install Unidecode 0.04.16 into the virtualenv,
    then run its test suite and compare against a recorded log hash."""
    # Start from a clean source tree on every run.
    shutil.rmtree(SRC_DIR, ignore_errors=True)
    os.makedirs(SRC_DIR)

    url = "https://pypi.python.org/packages/source/U/Unidecode/Unidecode-0.04.16.tar.gz"
    subprocess.check_call(["wget", url], cwd=SRC_DIR)
    subprocess.check_call(["tar", "-zxf", "Unidecode-0.04.16.tar.gz"], cwd=SRC_DIR)

    UNIDECODE_DIR = os.path.abspath(os.path.join(SRC_DIR, "Unidecode-0.04.16"))
    subprocess.check_call([PYTHON_EXE, "setup.py", "build"], cwd=UNIDECODE_DIR)
    subprocess.check_call([PYTHON_EXE, "setup.py", "install"], cwd=UNIDECODE_DIR)

    # 8 tests are expected to run; the log hash pins the exact test output.
    expected = [{'ran': 8}]
    expected_log_hash = '''
gECAAAAAAAAAAABAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAA
AAAAGAAAAAAAAAAAAAAAAAQAAggAAAAAAAAAAAAAABAAAAIAAAAAAAAAAAAAAAggAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAA=
'''
    run_test([PYTHON_EXE, "setup.py", "test"], cwd=UNIDECODE_DIR, expected=expected, expected_log_hash=expected_log_hash)
# Build the virtualenv, then download/build/install Unidecode and run its
# test suite under the managed interpreter.
create_virtenv(ENV_NAME, None, force_create = True)
install_and_test_unidecode()
|
load("@com_vistarmedia_rules_js//js:def.bzl", "npm_install")
load(
"@com_vistarmedia_rules_js//js:def.bzl",
"js_binary",
"js_library",
"js_test",
)
load(
"//coffee/private:rules.bzl",
"cjsx_src",
"cjsx_srcs",
)
def coffee_repositories():
    """Declare the npm packages required by the CoffeeScript/CJSX rules."""
    npm_install(
        name = "coffee-script",
        version = "1.12.2",
        sha256 = "c77cc751c5a9f13d75eb337fbb0adec99e7bfdd383f12e2789ddaabb64d14880",
    )

    npm_install(
        name = "coffee-react-transform",
        version = "4.0.0",
        sha256 = "6519abf3c62ae16e7745d6f197ec062533277559f042ca1dc615bfe08ef4fe1d",
    )
def cjsx_library(name, **kwargs):
    """Compile CJSX sources and wrap the results in a js_library."""
    src_name = name + ".js_src"
    cjsx_srcs(name = src_name, srcs = kwargs.pop("srcs"))
    js_library(
        name = name,
        srcs = [src_name],
        compile_type = [".js", ".d.ts"],
        **kwargs
    )

# Plain CoffeeScript targets reuse the CJSX implementation.
coffee_library = cjsx_library
def cjsx_binary(name, **kwargs):
    """Compile a single CJSX entry point and wrap it in a js_binary."""
    src_name = name + ".js_src"
    cjsx_src(name = src_name, src = kwargs.pop("src"))
    js_binary(
        name = name,
        src = src_name,
        **kwargs
    )

# Plain CoffeeScript targets reuse the CJSX implementation.
coffee_binary = cjsx_binary
def cjsx_test(name, **kwargs):
    """Compile CJSX test sources into a js_test."""
    src_name = name + ".js_src"
    requires = kwargs.pop("requires", [])
    # Tests do not need TypeScript declaration files.
    cjsx_srcs(name = src_name, srcs = kwargs.pop("srcs"), generate_dts = False)
    js_test(
        name = name,
        srcs = [src_name],
        requires = requires,
        **kwargs
    )

# Plain CoffeeScript targets reuse the CJSX implementation.
coffee_test = cjsx_test
|
# puzzle3a.py
def main(input_path="input.txt"):
    """Count trees hit while descending a slope of 3 right / 1 down.

    The map repeats horizontally, so the horizontal position wraps at
    the line width.  The starting cell (top-left) is never counted.

    :param input_path: path of the puzzle input file (one map row per line);
        defaults to "input.txt" for backward compatibility
    :return: the number of tree characters encountered
    """
    horiz_step = 3
    tree_char = "#"

    horiz_pos = 0
    trees = 0

    # 'with' guarantees the file is closed (the original leaked the handle).
    with open(input_path, "r") as input_file:
        lines = input_file.readlines()

    line_len = len(lines[0]) - 1  # Subtract 1 to account for '\n'

    # Skip the first row: the toboggan starts there without moving.
    for line in lines[1:]:
        horiz_pos = (horiz_pos + horiz_step) % line_len
        if line[horiz_pos] == tree_char:
            trees += 1

    print("Trees encountered: " + str(trees))
    return trees
if __name__ == "__main__":
    # Solve the puzzle against input.txt in the working directory.
    main()
from collections import OrderedDict
from tartiflette.types.field import GraphQLField
from tartiflette.types.interface import GraphQLInterfaceType
def test_graphql_interface_init(mocked_resolver_factory):
    """A GraphQLInterfaceType keeps its name, fields and description."""
    interface = GraphQLInterfaceType(
        name="Name",
        fields=OrderedDict(
            [
                ("test", GraphQLField(name="arg", gql_type="Int")),
                ("another", GraphQLField(name="arg", gql_type="String")),
            ]
        ),
        description="description",
    )
    assert interface.name == "Name"
    # Fields are looked up by their mapping key, not the field's own name.
    assert interface.find_field("test") == GraphQLField(
        name="arg", gql_type="Int"
    )
    assert interface.find_field("another") == GraphQLField(
        name="arg", gql_type="String"
    )
    assert interface.description == "description"
def test_graphql_interface_eq(mocked_resolver_factory):
    """Interface equality depends on the name and the *ordered* fields,
    but not on the description."""
    interface = GraphQLInterfaceType(
        name="Name",
        fields=OrderedDict(
            [
                ("test", GraphQLField(name="arg", gql_type="Int")),
                ("another", GraphQLField(name="arg", gql_type="String")),
            ]
        ),
        description="description",
    )

    ## Same
    assert interface == interface
    assert interface == GraphQLInterfaceType(
        name="Name",
        fields=OrderedDict(
            [
                ("test", GraphQLField(name="arg", gql_type="Int")),
                ("another", GraphQLField(name="arg", gql_type="String")),
            ]
        ),
        description="description",
    )
    # Currently we ignore the description in comparing
    assert interface == GraphQLInterfaceType(
        name="Name",
        fields=OrderedDict(
            [
                ("test", GraphQLField(name="arg", gql_type="Int")),
                ("another", GraphQLField(name="arg", gql_type="String")),
            ]
        ),
    )

    ## Different
    assert interface != GraphQLInterfaceType(
        name="Name",
        fields=OrderedDict(
            [
                ("another", GraphQLField(name="arg", gql_type="String")),
                ("test", GraphQLField(name="arg", gql_type="Int")),
                # We reversed the order of arguments
            ]
        ),
    )
    assert interface != GraphQLInterfaceType(name="Name", fields=OrderedDict())
    assert interface != GraphQLInterfaceType(
        name="OtherName",
        fields=OrderedDict(
            [
                ("another", GraphQLField(name="arg", gql_type="String")),
                ("test", GraphQLField(name="arg", gql_type="Int")),
                # We reversed the order of arguments
            ]
        ),
    )
|
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Samragni Banerjee <samragnibanerjee4@gmail.com>
# Alexander Sokolov <alexander.y.sokolov@gmail.com>
#
import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import adc
# O-H bond length used to build the test molecule below.
r = 0.969286393
mol = gto.Mole()
mol.atom = [
    ['O', ( 0., 0.    , -r/2   )],
    ['H', ( 0., 0.    ,  r/2)],]
mol.basis = {'O':'aug-cc-pvdz',
             'H':'aug-cc-pvdz'}
mol.verbose = 0
mol.symmetry = False
# One unpaired electron: OH is an open-shell system.
mol.spin  = 1
mol.build()

# Tightly converged UHF reference so the ADC values below are reproducible.
mf = scf.UHF(mol)
mf.conv_tol = 1e-12
mf.kernel()

# Shared ADC driver mutated by the individual tests (method is changed).
myadc = adc.ADC(mf)
def tearDownModule():
    """Release the module-level molecule and SCF objects after the run."""
    global mol, mf
    del mol, mf
class KnownValues(unittest.TestCase):
    """Regression tests pinning IP-ADC energies (e) and p values for the
    open-shell molecule to 6 decimal places.

    NOTE(review): the tests share and mutate the module-level ``myadc``
    object (its ``method`` attribute), so they rely on unittest's default
    alphabetical execution order.
    """

    def test_ip_adc2(self):
        """Default method (no myadc.method override)."""
        e, t_amp1, t_amp2 = myadc.kernel()
        self.assertAlmostEqual(e, -0.16402828164387906, 6)

        e,v,p = myadc.ip_adc(nroots=3)
        self.assertAlmostEqual(e[0], 0.4342864327917968, 6)
        self.assertAlmostEqual(e[1], 0.47343844767816784, 6)
        self.assertAlmostEqual(e[2], 0.5805631452815511, 6)

        self.assertAlmostEqual(p[0], 0.9066975034860368, 6)
        self.assertAlmostEqual(p[1], 0.8987660491377468, 6)
        self.assertAlmostEqual(p[2], 0.9119655964285802, 6)

    def test_ip_adc2x(self):
        """Extended ADC(2)-x method."""
        myadc.method = "adc(2)-x"
        e, t_amp1, t_amp2 = myadc.kernel()
        self.assertAlmostEqual(e, -0.16402828164387906, 6)

        e,v,p = myadc.ip_adc(nroots=3)
        self.assertAlmostEqual(e[0], 0.4389083582117278, 6)
        self.assertAlmostEqual(e[1], 0.45720829251439343, 6)
        self.assertAlmostEqual(e[2], 0.5588942056812034, 6)

        self.assertAlmostEqual(p[0], 0.9169548953028459, 6)
        self.assertAlmostEqual(p[1], 0.6997121885268642, 6)
        self.assertAlmostEqual(p[2], 0.212879313736106, 6)

    def test_ip_adc3(self):
        """ADC(3) method, three roots."""
        myadc.method = "adc(3)"
        e, t_amp1, t_amp2 = myadc.kernel()
        self.assertAlmostEqual(e, -0.17616203329072194, 6)

        e,v,p = myadc.ip_adc(nroots=3)
        self.assertAlmostEqual(e[0], 0.4794423247368058, 6)
        self.assertAlmostEqual(e[1], 0.4872370596653387, 6)
        self.assertAlmostEqual(e[2], 0.5726961805214643, 6)

        self.assertAlmostEqual(p[0], 0.9282869467221032, 6)
        self.assertAlmostEqual(p[1], 0.5188529241094367, 6)
        self.assertAlmostEqual(p[2], 0.40655844616580944, 6)

    def test_ip_adc3_oneroot(self):
        """ADC(3) with a single root: e comes back as a scalar."""
        myadc.method = "adc(3)"
        e, t_amp1, t_amp2 = myadc.kernel()
        self.assertAlmostEqual(e, -0.17616203329072194, 6)

        e,v,p = myadc.ip_adc(nroots=1)
        self.assertAlmostEqual(e, 0.4794423247368058, 6)
        self.assertAlmostEqual(p[0], 0.9282869467221032, 6)
if __name__ == "__main__":
    # Run the regression suite directly.
    print("IP calculations for different ADC methods for open-shell molecule")
    unittest.main()
|
import plac
from rdflib import Graph
def main(page_ids_ttl, page_links_ttl, out_csv):
    """Load the page-id Turtle file into an rdflib graph.

    NOTE(review): ``page_links_ttl`` and ``out_csv`` are accepted but not
    used yet -- the link-parsing / CSV-export steps appear unimplemented.
    """
    graph = Graph()
    graph.parse(page_ids_ttl, format="ttl")
if __name__ == "__main__":
    # plac maps the command line arguments onto main()'s parameters.
    plac.call(main)
|
import sys
import os
import urllib
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import pickle
from Network import nmt_model,SplitIUPAC
import unicodedata
import re
import selfies
import subprocess
import urllib.request
os.environ["CUDA_VISIBLE_DEVICES"]="2"
def main():
    """Command line dispatcher for STOUT.

    Supports forward translation (SMILES -> IUPAC: --smiles, --STI,
    --STI_check) and reverse translation (IUPAC -> SMILES: --iupac, --ITS),
    each optionally selecting the 30 or 60 Mio trained model.
    """
    # Declaration of Global arguements used through out different functions.
    global max_length_targ,max_length_inp,inp_lang,targ_lang,embedding_dim,units,encoder,decoder
    global model_size
    model_size='60'

    # Arguement to invoke help.
    if len(sys.argv) < 3 or sys.argv[1] == "--help" or sys.argv[1] == "--h":
        print("\n Usage for 1 SMILES string:\n python STOUT_V_2.1.py --smiles input_SMILES\n\n",
              "For multiple Smiles:\n python STOUT_V_2.1.py --STI input_file out_putfile\n\n",
              "To check the translation accuracy you can re-translate the IUPAC names back to SMILES string using OPSIN.\n",
              "Use this command for retranslation:\n python STOUT_V_2.1.py --STI_check input_file output_file\n\n",
              "-------------------------------------------------------------------------------------------------------------\n"
              "\n For reverse translation from IUPAC to SMILES use:\n python STOUT_V_2.1.py --iupac input_IUPAC_name\n\n",
              "For multiple IUPAC names:\n python STOUT_V_2.1.py --ITS input_file output_file\n\n",
              "The system set to default to choose the model trainined on 30 Mio data, to choose the other model available,\n",
              "at the end of each command add 30 or 60 afer a space:\n",
              "e.g.: python STOUT_V_2.1.py --smiles input_SMILES 60\n\n")
        sys.exit()

    #Argument to run STOUT for a given SMILES string.
    elif (len(sys.argv) == 3 or len(sys.argv) == 4) and sys.argv[1] == '--smiles':
        smiles_string = sys.argv[2]
        if len(sys.argv) == 4 and (sys.argv[3] == '30' or sys.argv[3] == '60'):
            model_size = sys.argv[3]
        max_length_targ,max_length_inp,inp_lang,targ_lang,embedding_dim,units,encoder,decoder =check_model(model_size)
        # Canonicalise with the CDK before encoding to SELFIES tokens.
        canonical_smiles = subprocess.check_output(['java', '-cp', 'Java_dependencies/cdk-2.3.jar:.' ,'SMILEStoCanonicalSMILES',smiles_string])
        iupac_name = translate(selfies.encoder(canonical_smiles.decode('utf-8').strip()).replace("][","] ["))
        print('\nPredicted translation: {}'.format(iupac_name.replace(" ","").replace("<end>","")),flush=True)

    # Arguement to run STOUT on multiple SMILES string on a given inputfile and creats a output file that the user desired.
    elif (len(sys.argv) == 4 or len(sys.argv) == 5) and sys.argv[1] == '--STI':
        if len(sys.argv) == 5 and (sys.argv[4] == '30' or sys.argv[4] == '60'):
            model_size = sys.argv[4]
        input_file = sys.argv[2]
        output_file = sys.argv[3]
        max_length_targ,max_length_inp,inp_lang,targ_lang,embedding_dim,units,encoder,decoder =check_model(model_size)
        out = batch_mode(input_file,output_file)
        print("\nBatch mode completed, result saved in: ",out)

    # Arguement to invoke OPSIN to check the translations done by STOUT.
    elif (len(sys.argv) == 4 or len(sys.argv) == 5) and (sys.argv[1] == '--STI_check'):
        if len(sys.argv) == 5 and (sys.argv[4] == '30' or sys.argv[4] == '60'):
            model_size = sys.argv[4]
        input_file = sys.argv[2]
        output_file = sys.argv[3]
        max_length_targ,max_length_inp,inp_lang,targ_lang,embedding_dim,units,encoder,decoder =check_model(model_size)
        check_translation(input_file,output_file)

    # Arguements for reverse translation.
    # Argurment to invoke STOUT to translate a given IUPAC name to a SMILES string.
    elif (len(sys.argv) == 3 or len(sys.argv) == 4) and sys.argv[1] == '--iupac':
        iupac_string_input = sys.argv[2]
        if len(sys.argv) == 4 and (sys.argv[3] == '30' or sys.argv[3] == '60'):
            model_size = sys.argv[3]
        max_length_targ,max_length_inp,inp_lang,targ_lang,embedding_dim,units,encoder,decoder =check_model_reverse(model_size)
        SELFIES = translate(SplitIUPAC.get_modified_iupac(iupac_string_input))
        # Fix: 'constraints' was previously passed to str.format (where it is
        # silently ignored) instead of selfies.decoder, so the single-name
        # path decoded without the hypervalent constraints used by
        # batch_mode_reverse.
        print('\nPredicted translation: {}'.format(
            selfies.decoder(SELFIES.replace(" ","").replace("<end>",""),
                            constraints='hypervalent')),flush=True)

    # Arguement to invoke STOUT on multiple IUPAC names on a given inputfile and creats a output file with translated SMILES.
    elif (len(sys.argv) == 4 or len(sys.argv) == 5) and sys.argv[1] == '--ITS':
        if len(sys.argv) == 5 and (sys.argv[4] == '30' or sys.argv[4] == '60'):
            model_size = sys.argv[4]
        input_file = sys.argv[2]
        output_file = sys.argv[3]
        max_length_targ,max_length_inp,inp_lang,targ_lang,embedding_dim,units,encoder,decoder =check_model_reverse(model_size)
        out = batch_mode_reverse(input_file,output_file)
        print("\nBatch mode completed, result saved in: ",out)

    # Call help, if the user arguments did not satisfy the rules.
    else:
        #print(len(sys.argv))
        print("\nSee help using python3 STOUT_V_2.1.py --help")
# Converts a unicode string to its plain-ASCII-friendly form.
def unicode_to_ascii(s):
    """Strip combining accent marks from *s* via NFD decomposition."""
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return ''.join(kept)
# Preprocess a sentence into the token form expected by the model.
def preprocess_sentence(w):
    """Normalise a raw string and wrap it in <start>/<end> markers.

    Accents are stripped, the listed punctuation marks are padded with
    spaces so each becomes its own token, and repeated separators are
    collapsed to a single space.
    """
    cleaned = unicode_to_ascii(w.strip())
    # Pad punctuation so each mark is tokenised on its own.
    cleaned = re.sub(r"([?.!,¿])", r" \1 ", cleaned)
    # Collapse runs of separators introduced above.
    cleaned = re.sub(r'[" "]+', " ", cleaned)
    return '<start> ' + cleaned.strip() + ' <end>'
# Main command to translate the SELFIES to IUPAC name and the IUPAC name to SELFIES.
def evaluate(sentence):
    """Greedy-decode *sentence* through the loaded encoder/decoder.

    Relies on the module-level globals set up by check_model /
    check_model_reverse (tokenizers, lengths, units, encoder, decoder).
    Returns (result, preprocessed_sentence).
    """
    sentence = preprocess_sentence(sentence)

    # Tokenise and pad the input to the model's fixed input length.
    inputs = [inp_lang.word_index[i] for i in sentence.split(' ')]
    inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
                                                           maxlen=max_length_inp,
                                                           padding='post')
    inputs = tf.convert_to_tensor(inputs)

    result = ''

    hidden = [tf.zeros((1, units))]
    enc_out, enc_hidden = encoder(inputs, hidden)

    dec_hidden = enc_hidden
    dec_input = tf.expand_dims([targ_lang.word_index['<start>']], 0)

    # Decode one token at a time, greedily taking the argmax, until the
    # <end> marker or the maximum target length is reached.
    for t in range(max_length_targ):
        predictions, dec_hidden, attention_weights = decoder(dec_input,
                                                             dec_hidden,
                                                             enc_out)
        predicted_id = tf.argmax(predictions[0]).numpy()
        result += targ_lang.index_word[predicted_id] + ' '
        if targ_lang.index_word[predicted_id] == '<end>':
            return result, sentence
        # the predicted ID is fed back into the model
        dec_input = tf.expand_dims([predicted_id], 0)

    return result, sentence
# Thin convenience wrapper around evaluate().
def translate(sentence):
    """Run the model on *sentence* and return only the translated text."""
    translated, _ = evaluate(sentence)
    return translated
# Downloads the model and unzips the file downloaded, if the model is not present on the working directory.
def download_trained_weights(model_url,model_path,model_size, verbose=1):
    """Download the trained-model archive and unzip it in the working dir.

    NOTE(review): ``model_size`` is accepted but unused here, and
    ``model_path`` only appears in the progress message.
    """
    #Download trained models
    if verbose > 0:
        print("Downloading trained model to " + model_path + " ...")
    urllib.request.urlretrieve(model_url, "STOUT_trained_models_v2.1.zip")
    if verbose > 0:
        print("... done downloading trained model!")
    # Unpack next to the script so the checkpoint paths resolve.
    subprocess.run(["unzip", "STOUT_trained_models_v2.1.zip"])
# Loads the appropriate model and resets the path for 30/60 Mio dataset related tokenizers(SMILES to IUPAC).
def check_model(model_size):
    """Load lengths, tokenizers and checkpointed encoder/decoder for the
    forward (SMILES -> IUPAC) model of the requested size ('30' or '60').

    Downloads and unpacks the trained weights on first use.
    """
    #load lengths
    max_length_targ = pickle.load(open("important_assets/"+model_size+"_mil/forward/max_length_targ.pkl","rb"))
    max_length_inp = pickle.load(open("important_assets/"+model_size+"_mil/forward/max_length_inp.pkl","rb"))

    # restoring the latest checkpoint in checkpoint_dir
    checkpoint_path = 'Trained_models/'+model_size+'/forward'
    model_url = 'https://storage.googleapis.com/iupac_models_trained/Trained_model/STOUT_trained_models_v2.1.zip'
    if not os.path.exists(checkpoint_path):
        download_trained_weights(model_url,checkpoint_path,model_size)

    #load model
    inp_lang = pickle.load(open("important_assets/"+model_size+"_mil/forward/tokenizer_input.pkl","rb"))
    targ_lang = pickle.load(open("important_assets/"+model_size+"_mil/forward/tokenizer_target.pkl","rb"))
    # +1 presumably reserves index 0 (not present in word_index) for
    # padding -- TODO confirm against the training code.
    vocab_inp_size = len(inp_lang.word_index)+1
    vocab_tar_size = len(targ_lang.word_index)+1
    #return max_length_targ,max_length_inp,vocab_inp_size,vocab_tar_size

    embedding_dim = 256
    units = 1024
    encoder = nmt_model.Encoder(vocab_inp_size, embedding_dim, units)
    decoder = nmt_model.Decoder(vocab_tar_size, embedding_dim, units)
    optimizer = tf.keras.optimizers.Adam()
    ckpt = tf.train.Checkpoint(optimizer=optimizer,encoder=encoder,decoder=decoder)
    # expect_partial(): only the inference weights are needed here.
    ckpt.restore(tf.train.latest_checkpoint(checkpoint_path)).expect_partial()
    return max_length_targ,max_length_inp,inp_lang,targ_lang,embedding_dim,units,encoder,decoder
# To run translation on multiple SMILES to generate IUPAC names.
def batch_mode(input_file,output_file):
    """Translate every SMILES string in *input_file* (one per line) to an
    IUPAC name and write the results, one per line, to *output_file*.

    :return: the path of the output file written
    """
    # Context managers guarantee both files are closed even if the external
    # java canonicalisation step or a translation raises part-way through
    # (the original leaked the output handle on error).
    with open(output_file,"w") as outfile, open(input_file,"r") as f:
        for line in f:
            smiles_string = line.strip()
            # Canonicalise with the CDK before encoding to SELFIES tokens.
            canonical_smiles = subprocess.check_output(['java', '-cp', 'Java_dependencies/cdk-2.3.jar:.' ,'SMILEStoCanonicalSMILES',smiles_string])
            iupac_name = translate(selfies.encoder(canonical_smiles.decode('utf-8').strip()).replace("][","] ["))
            outfile.write(iupac_name.replace(" ","").replace("<end>","")+"\n")
    return output_file
# Invoke OPSIN to check the translation by reverse translating them.
def check_translation(input_file,output_file):
    """Translate SMILES to IUPAC with STOUT, then feed the names to OPSIN
    so the round-tripped SMILES can be compared against the input."""
    out_file = batch_mode(input_file,output_file)
    print("\nRetranslated SMILES are saved in Retranslated_smiles file")
    # OPSIN writes its SMILES output to the 'Re-translated_smiles' file.
    subprocess.run(['java', '-jar', 'Java_dependencies/opsin-2.5.0-jar-with-dependencies.jar' ,'-osmi',out_file,'Re-translated_smiles'])
# Functions for reverse translation
# Loads the appropriate model and resets the path for 30/60 Mio dataset related tokenizers(IUPAC to SMILES).
def check_model_reverse(model_size):
    """Load lengths, tokenizers and checkpointed encoder/decoder for the
    reverse (IUPAC -> SMILES) model of the requested size ('30' or '60').

    Mirrors check_model() but reads from the .../reverse asset paths.
    """
    #load lengths
    max_length_targ = pickle.load(open("important_assets/"+model_size+"_mil/reverse/max_length_targ.pkl","rb"))
    max_length_inp = pickle.load(open("important_assets/"+model_size+"_mil/reverse/max_length_inp.pkl","rb"))

    # restoring the latest checkpoint in checkpoint_dir
    checkpoint_path = 'Trained_models/'+model_size+'/reverse'
    model_url = 'https://storage.googleapis.com/iupac_models_trained/Trained_model/STOUT_trained_models_v2.1.zip'
    if not os.path.exists(checkpoint_path):
        download_trained_weights(model_url,checkpoint_path,model_size)

    #load model
    inp_lang = pickle.load(open("important_assets/"+model_size+"_mil/reverse/tokenizer_input.pkl","rb"))
    targ_lang = pickle.load(open("important_assets/"+model_size+"_mil/reverse/tokenizer_target.pkl","rb"))
    # +1 presumably reserves index 0 (not present in word_index) for
    # padding -- TODO confirm against the training code.
    vocab_inp_size = len(inp_lang.word_index)+1
    vocab_tar_size = len(targ_lang.word_index)+1
    #return max_length_targ,max_length_inp,vocab_inp_size,vocab_tar_size

    embedding_dim = 256
    units = 1024
    encoder = nmt_model.Encoder(vocab_inp_size, embedding_dim, units)
    decoder = nmt_model.Decoder(vocab_tar_size, embedding_dim, units)
    optimizer = tf.keras.optimizers.Adam()
    ckpt = tf.train.Checkpoint(optimizer=optimizer,encoder=encoder,decoder=decoder)
    # expect_partial(): only the inference weights are needed here.
    ckpt.restore(tf.train.latest_checkpoint(checkpoint_path)).expect_partial()
    return max_length_targ,max_length_inp,inp_lang,targ_lang,embedding_dim,units,encoder,decoder
# To run translation on multiple IUPAC names to generate SMILES.
def batch_mode_reverse(input_file,output_file):
    """Translate every IUPAC name in *input_file* (one per line) to a
    SMILES string and write the results, one per line, to *output_file*.

    :return: the path of the output file written
    """
    # Context managers guarantee both files are closed even if a translation
    # raises part-way through (the original leaked the output handle then).
    with open(output_file,"w") as outfile, open(input_file,"r") as f:
        for line in f:
            input_iupac_name = line.strip()
            selfies_translated = translate(SplitIUPAC.get_modified_iupac(input_iupac_name))
            outfile.write(selfies.decoder(selfies_translated.replace(" ","").replace("<end>",""),constraints='hypervalent')+"\n")
    return output_file
# Modify a IUPAC name before feeding it into the model.
'''
def get_modified_iupac(iupac_string):
modified_iupac = (iupac_string.replace(","," , ").replace("."," . ")
.replace("("," ( ").replace(")"," ) ")
.replace("["," [ ").replace("]"," ] ")
.replace("{"," { ").replace("}"," } ")
.replace("-"," - ").replace(" "," ")
.replace(" "," ").replace("acid"," acid"))
return modified_iupac
'''
if __name__ == '__main__':
    # Run the CLI dispatcher only when executed as a script.
    main()
|
from PIL import Image, ImageOps
def add_border(input_image, output_image, border):
    """Add a border around *input_image* and save it to *output_image*.

    :param input_image: path of the source image
    :param output_image: path the bordered image is written to
    :param border: border size in pixels -- either a single int applied to
        all four sides, or a tuple as understood by ``ImageOps.expand``
    :raises RuntimeError: if *border* is neither an int nor a tuple
    """
    # Validate first so no file is opened for a bad argument; a single
    # isinstance call with a tuple of types is the idiomatic form.
    if not isinstance(border, (int, tuple)):
        raise RuntimeError('Border is not an integer or tuple!')
    img = Image.open(input_image)
    bimg = ImageOps.expand(img, border=border)
    bimg.save(output_image)
if __name__ == '__main__':
    # Demo: add an asymmetric 10/50 pixel border to the sample image.
    in_img = 'butterfly_grey.jpg'
    add_border(in_img, output_image='out-butterfly_border2.jpg',
               border=(10, 50))
|
import random
OPTIONS = ['rock', 'paper', 'scissors']
def print_options():
    """Display the numbered menu of moves the player may pick from."""
    for number, move in enumerate(('Rock', 'Paper', 'Scissors'), start=1):
        print(f'({number}) {move}')
def get_human_choice():
    """Prompt for a 1-based menu number and return the chosen move name."""
    number = int(input('Enter the number of your choice: '))
    choice = OPTIONS[number - 1]
    print(f'Your choice is {choice}')
    return choice
def get_computer_choice():
    """Pick the computer's move uniformly at random and announce it."""
    choice = random.choice(OPTIONS)
    print(f'Computer choice is {choice}')
    return choice
def print_win_lose_message(human_choice, computer_choice, human_beats, human_loses_to):
    """Announce a non-draw round's outcome from the human player's view.

    Prints a win line when *computer_choice* equals *human_beats*, a loss
    line when it equals *human_loses_to*, and nothing otherwise.
    """
    if computer_choice == human_beats:
        print(f'Yes, {human_choice} beats {computer_choice}')
        return
    if computer_choice == human_loses_to:
        print(f'Ops, {human_choice} loses to {computer_choice}')
def print_result(human_choice, computer_choice):
    """Print the outcome of a round: a draw notice or the win/lose line."""
    if human_choice == computer_choice:
        print('Drawn, Please try again!')
        return
    # Map each move to (what it beats, what it loses to).
    rules = {
        'rock': ('scissors', 'paper'),
        'scissors': ('paper', 'rock'),
        'paper': ('rock', 'scissors'),
    }
    if human_choice in rules:
        beats, loses_to = rules[human_choice]
        print_win_lose_message(human_choice=human_choice,
                               computer_choice=computer_choice,
                               human_beats=beats,
                               human_loses_to=loses_to)
# Script entry point: play one round of human vs. computer rock-paper-scissors.
if __name__ == '__main__':
    print_options()
    human_choice = get_human_choice()
    computer_choice = get_computer_choice()
    print_result(human_choice=human_choice, computer_choice=computer_choice)
|
'''
Plot accuracy
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import argparse
acc_vs_param = {
"TRBA": { "Accuracy": 84.3, "Parameters": 49.6},
"STAR-Net": { "Accuracy": 81.8, "Parameters": 48.9},
"RARE": { "Accuracy": 81.9, "Parameters": 10.9},
"Rosetta": { "Accuracy": 78.2, "Parameters": 44.3},
"GCRNN": { "Accuracy": 78.3, "Parameters": 4.8},
"R2AM": { "Accuracy": 78.4, "Parameters": 2.9},
"CRNN": { "Accuracy": 76.7, "Parameters": 8.5},
"ViTSTR-Tiny\n(Ours)": { "Accuracy": 80.3, "Parameters": 5.4},
"ViTSTR-Small\n(Ours)": { "Accuracy": 82.6, "Parameters": 21.5},
"ViTSTR-\nTiny+Aug\n(Ours)": { "Accuracy": 82.1, "Parameters": 5.4},
"ViTSTR-Small\n+Aug(Ours)": { "Accuracy": 84.2, "Parameters": 21.5},
"ViTSTR-Base\n(Ours)": { "Accuracy": 83.7, "Parameters": 85.8},
"ViTSTR-Base+Aug(Ours)": { "Accuracy": 85.2, "Parameters": 85.8},
}
acc_vs_param_env = [[ (2.9, 5.4, 10.9, 21.5, 49.6, 85.8), (78.4, 80.3, 81.9, 82.6, 84.3, 83.7)],
[ (2.9, 5.4, 21.5, 85.8), (78.4, 82.1, 84.2, 85.2)] ]
acc_vs_time = {
"TRBA": { "Accuracy": 84.3, "Speed (msec/image)": 22.8},
"STAR-Net": { "Accuracy": 81.8, "Speed (msec/image)": 8.8},
"RARE": { "Accuracy": 81.9, "Speed (msec/image)": 18.8},
"Rosetta": { "Accuracy": 78.2, "Speed (msec/image)": 5.3},
"GCRNN": { "Accuracy": 78.3, "Speed (msec/image)": 11.2},
"R2AM": { "Accuracy": 78.4, "Speed (msec/image)": 22.9},
"CRNN": { "Accuracy": 76.7, "Speed (msec/image)": 3.7},
"ViTSTR-Tiny(Ours)": { "Accuracy": 80.3, "Speed (msec/image)": 9.3},
"ViTSTR-Small(Ours)": { "Accuracy": 82.6, "Speed (msec/image)": 9.5},
"ViTSTR-Tiny+Aug": { "Accuracy": 82.1, "Speed (msec/image)": 9.3},
"ViTSTR-Small\n+Aug(Ours)": { "Accuracy": 84.2, "Speed (msec/image)": 9.5},
"ViTSTR-Base(Ours)": { "Accuracy": 83.7, "Speed (msec/image)": 9.8},
"ViTSTR-Base+Aug(Ours)": { "Accuracy": 85.2, "Speed (msec/image)": 9.8},
}
acc_vs_time_env = [
[ (3.7, 9.8, 22.8), (76.7, 83.7, 84.3)],
#[ (3.7, 5.3, 8.8, 9.8, 22.8), (76.7, 78.2, 81.8, 83.7, 84.3)],
[ (3.7, 9.8, 22.8), (76.7, 85.2, 84.3)],
#[ (3.7, 5.3, 8.8, 9.5, 9.8, 22.8), (76.7, 78.2, 81.8, 84.2, 85.2, 84.3)],
]
acc_vs_flops = {
"TRBA": { "Accuracy": 84.3, "GFLOPS": 10.9},
"STAR-Net": { "Accuracy": 81.8, "GFLOPS": 10.7},
"RARE": { "Accuracy": 81.9, "GFLOPS": 2.0},
"Rosetta": { "Accuracy": 78.2, "GFLOPS": 10.0},
"GCRNN": { "Accuracy": 78.3, "GFLOPS": 1.8},
"R2AM": { "Accuracy": 78.4, "GFLOPS": 2.0},
"CRNN": { "Accuracy": 76.7, "GFLOPS": 1.4},
"ViTSTR-Tiny(Ours)": { "Accuracy": 80.3, "GFLOPS": 1.3},
"ViTSTR-Small(Ours)": { "Accuracy": 82.6, "GFLOPS": 4.6},
"ViTSTR\n-Tiny\n+Aug\n(Ours)": { "Accuracy": 82.1, "GFLOPS": 1.3},
"ViTSTR-Small\n+Aug(Ours)": { "Accuracy": 84.2, "GFLOPS": 4.6},
"ViTSTR-Base\n(Ours)": { "Accuracy": 83.7, "GFLOPS": 17.6},
"ViTSTR-Base+Aug(Ours)": { "Accuracy": 85.2, "GFLOPS": 17.6},
}
acc_vs_flops_env = [
[(1.3, 2.0, 4.6, 10.9, 17.6), (80.3, 81.9, 82.6, 84.3, 83.7)],
[(1.3, 4.6, 17.6), (82.1, 84.2, 85.2)]
]
def plot_(data, envelope, title, ylabel="Accuracy", xlabel="Parameters"):
    """Scatter model accuracy against a cost metric and save the figure.

    :param data: dict mapping model label -> {"Accuracy": float, <metric>: float}.
    :param envelope: two (xs, ys) polylines overlaid on the scatter.
    :param title: figure title; spaces become underscores and '%' is
        stripped to form the '<title>.png' output file name.
    :param ylabel: y-axis label.
    :param xlabel: x-axis label; also selects which table of hand-tuned
        annotation offsets below is used.
    """
    # NOTE: plt.rc mutates global matplotlib state for the whole process,
    # not just this figure.
    plt.rc('font', size=14)
    plt.rc('axes', titlesize=16)
    plt.rc('xtick', labelsize=14)
    fig, ax = plt.subplots()
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    colors = sns.color_palette() + sns.color_palette("tab10") # [0:11]
    markers = ['^', 's', 'o', 'D', '*', 'P', 'x', 'd', 'v', '>', 'H', '1', '2']
    #if "FLOPS"
    #x = np.arange(0,60,10)
    #ax.set_xticks(x)
    # Which offset table applies, decided from the x-axis label text.
    isparam = True if "Parameters" in xlabel else False
    isspeed = True if "Speed" in xlabel else False
    isflops = True if "FLOPS" in xlabel else False
    i = 0
    labels = []
    has_label = False
    # One scatter point per model; marker/color index i assumes
    # len(data) <= len(markers) (13) -- IndexError past that.
    for key, val in data.items():
        label = key
        acc = val["Accuracy"]
        if "Parameters" in xlabel:
            par = val["Parameters"]
        else:
            par = val[xlabel]
        color = colors[i]
        ax.scatter(par, acc, marker=markers[i], s=100, label=label, color=color)
        # Hand-tuned per-label text offsets (in points) so annotations do
        # not overlap markers or each other; default is right-and-below.
        xytext = (8, -5)
        if isparam:
            if "GCRNN" in label:
                xytext = (5, -15)
            elif "R2AM" in label:
                xytext = (5, 5)
            elif "Rosetta" in label:
                xytext = (5, 5)
            elif "RARE" in label:
                xytext = (5, -15)
            elif "STAR" in label:
                xytext = (10, -10)
            elif "TRBA" in label:
                xytext = (-25, -25)
            elif "Aug" in label and "Small" in label:
                xytext = (-30, 10)
            elif "Aug" in label and "Tiny" in label:
                xytext = (-25, 15)
            elif "Aug" in label and "Base" in label:
                xytext = (-180, 0)
            elif "Small" in label:
                xytext = (10, -25)
            elif "Tiny" in label:
                xytext = (10, -20)
            elif "Base" in label:
                xytext = (-75, -30)
        elif isspeed:
            if "STAR" in label:
                xytext = (-10, -20)
            elif "R2AM" in label:
                xytext = (-25, 10)
            elif "TRBA" in label:
                xytext = (-35, -25)
            elif "Tiny" in label and "Aug" in label:
                xytext = (5, -5)
            elif "Small" in label and "Aug" in label:
                xytext = (-100, -10)
            elif "Base" in label and "Aug" in label:
                xytext = (5, 0)
            elif "Base" in label:
                xytext = (5, -12)
            elif "Small" in label:
                xytext = (5, 0)
        elif isflops:
            if "RARE" in label:
                xytext = (5, -15)
            elif "Rosetta" in label:
                xytext = (-35, -25)
            elif "R2AM" in label:
                xytext = (5, 5)
            elif "STAR" in label:
                xytext = (5, -15)
            elif "TRBA" in label:
                xytext = (-15, -20)
            elif "GCRNN" in label:
                xytext = (0, -20)
            elif "Tiny" in label and "Aug" in label:
                xytext = (-15, 10)
            elif "Small" in label and "Aug" in label:
                xytext = (-35, 10)
            elif "Small" in label:
                xytext = (5, -10)
            elif "Base" in label and "Aug" in label:
                xytext = (-180, 0)
            elif "Base" in label:
                xytext = (-80, -30)
        ax.annotate(key, (par, acc), xycoords='data',
                    xytext=xytext, textcoords='offset points')
        i = i + 1
    # Draw the two envelope polylines (baseline family, then "+Aug" family).
    xval = envelope[0][0]
    yval = envelope[0][1]
    plt.plot(xval, yval, linewidth=2, color='orange')
    xval = envelope[1][0]
    yval = envelope[1][1]
    plt.plot(xval, yval, linewidth=2, color='teal')
    # Turn the title into a safe file name, save, then display.
    title = title.replace(" ", "_")
    title = title.replace("%", "")
    plt.savefig(title + ".png")
    plt.show()
# Script entry point: choose which accuracy-vs-cost plot to render from
# the --data flag (time | flops | default: parameters).
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ViTSTR')
    parser.add_argument('--data',
                        default=None,
                        help='Data to plot')
    parser.add_argument('--vspace',
                        default=0.2,
                        type=float,
                        help='Vertical space in bar graph label')
    args = parser.parse_args()
    ylabel = "Accuracy (%)"
    ncolors = 9
    if args.data == "time":
        xlabel = "Speed (msec/image)"
        title = "Accuracy vs Msec per Image"
        data = acc_vs_time
        envelope = acc_vs_time_env
    elif args.data == "flops":
        xlabel = "GFLOPS"
        title = "Accuracy vs GFLOPS"
        data = acc_vs_flops
        envelope = acc_vs_flops_env
    else:
        # Default: accuracy vs. parameter count.
        xlabel = "Parameters (M)"
        title = "Accuracy vs Number of Parameters"
        data = acc_vs_param
        envelope = acc_vs_param_env
    plot_(data=data, envelope=envelope, title=title, ylabel=ylabel, xlabel=xlabel)
|
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class SupportContact(object):
    """
    Contact information to use to get support.

    Auto-generated OCI SDK model: a plain data holder. The
    ``init_model_state_from_kwargs`` decorator consumes the ``**kwargs``
    accepted by ``__init__`` and assigns them via the property setters.
    """
    def __init__(self, **kwargs):
        """
        Initializes a new SupportContact object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param name:
            The value to assign to the name property of this SupportContact.
        :type name: str
        :param phone:
            The value to assign to the phone property of this SupportContact.
        :type phone: str
        :param email:
            The value to assign to the email property of this SupportContact.
        :type email: str
        :param subject:
            The value to assign to the subject property of this SupportContact.
        :type subject: str
        """
        # Attribute name -> declared API type (used by the serializer).
        self.swagger_types = {
            'name': 'str',
            'phone': 'str',
            'email': 'str',
            'subject': 'str'
        }
        # Attribute name -> JSON field name in the REST payload.
        self.attribute_map = {
            'name': 'name',
            'phone': 'phone',
            'email': 'email',
            'subject': 'subject'
        }
        self._name = None
        self._phone = None
        self._email = None
        self._subject = None
    @property
    def name(self):
        """
        Gets the name of this SupportContact.
        The name of the contact.
        :return: The name of this SupportContact.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this SupportContact.
        The name of the contact.
        :param name: The name of this SupportContact.
        :type: str
        """
        self._name = name
    @property
    def phone(self):
        """
        Gets the phone of this SupportContact.
        The phone number of the contact.
        :return: The phone of this SupportContact.
        :rtype: str
        """
        return self._phone
    @phone.setter
    def phone(self, phone):
        """
        Sets the phone of this SupportContact.
        The phone number of the contact.
        :param phone: The phone of this SupportContact.
        :type: str
        """
        self._phone = phone
    @property
    def email(self):
        """
        Gets the email of this SupportContact.
        The email of the contact.
        :return: The email of this SupportContact.
        :rtype: str
        """
        return self._email
    @email.setter
    def email(self, email):
        """
        Sets the email of this SupportContact.
        The email of the contact.
        :param email: The email of this SupportContact.
        :type: str
        """
        self._email = email
    @property
    def subject(self):
        """
        Gets the subject of this SupportContact.
        The email subject line to use when contacting support.
        :return: The subject of this SupportContact.
        :rtype: str
        """
        return self._subject
    @subject.setter
    def subject(self, subject):
        """
        Sets the subject of this SupportContact.
        The email subject line to use when contacting support.
        :param subject: The subject of this SupportContact.
        :type: str
        """
        self._subject = subject
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Generated equality: full attribute-dict comparison. Matches the
        # SDK-wide convention (no isinstance check, None -> not equal).
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
|
import requests
import json

# Script: create, upload and publish records against an Invenio-style API.
# Create a token via https://127.0.0.1:5000/account/settings/applications/tokens/new/
api = "https://127.0.0.1:5000"
token = "...."

# Define a list of records you want to upload:
# ('<record metadata json>.json', ['<datafile1>', '<datafile2>'])
records = [
    ('record.json', ['1911.00295.pdf',])
]


def _expect(response, code, action):
    """Fail loudly when *response* does not carry the expected status.

    ``assert`` was used before, but asserts are stripped under ``python -O``;
    an explicit raise keeps the check in optimized runs too.
    """
    if response.status_code != code:
        raise RuntimeError(
            f"Failed to {action} (code: {response.status_code})")


#
# HTTP Headers used during requests
#
h = {
    "Accept": "application/json",
    "Content-Type": "application/json",
    "Authorization": f"Bearer {token}"
}
fh = {
    "Accept": "application/json",
    "Content-Type": "application/octet-stream",
    "Authorization": f"Bearer {token}"
}

#
# Upload and publish all records.
#
for datafile, files in records:
    # Load the record metadata JSON file.
    with open(datafile) as fp:
        data = json.load(fp)
    # Create the record.
    # note: "verify=False" is so that we can connect to 127.0.0.1 with a
    # self-signed certificate. You should not do this in production.
    r = requests.post(
        f"{api}/api/records", data=json.dumps(data), headers=h, verify=False)
    _expect(r, 201, "create record")
    links = r.json()['links']
    # Upload files
    for f in files:
        # Initiate the file upload.
        r = requests.post(
            links["files"], data=json.dumps([{"key": f}]), headers=h,
            verify=False)
        _expect(r, 201, f"create file {f}")
        file_links = r.json()["entries"][0]["links"]
        # Upload file content by streaming the data.
        with open(f, 'rb') as fp:
            r = requests.put(
                file_links["content"], data=fp, headers=fh, verify=False)
        # (also fixes the "contet" typo in the original failure message)
        _expect(r, 200, f"upload file content {f}")
        # Commit the file.
        r = requests.post(file_links["commit"], headers=h, verify=False)
        _expect(r, 200, f"commit file {f}")
    # Publish the record.
    r = requests.post(links["publish"], headers=h, verify=False)
    _expect(r, 202, "publish record")
|
# Gladiator expense calculator: read the number of lost fights and the
# price of each equipment piece, then total the replacement cost.
n = int(input()) #lost fights
helmet_price = float(input())
sword_price = float(input())
shield_price = float(input())
armor_price = float(input())
lost_fights_count = 0
helmet_brakes = 0
sword_brakes = 0
shield_brakes = 0
armor_brakes = 0
total_shield_brakes = 0
for x in range(n):
    lost_fights_count += 1
    # Helmet breaks on every 2nd lost fight.
    if lost_fights_count % 2 == 0:
        helmet_brakes += 1
    # Sword breaks on every 3rd lost fight.
    if lost_fights_count % 3 == 0:
        sword_brakes += 1
    # NOTE(review): here the shield breaks on every 2nd fight; the classic
    # statement of this task breaks the shield only when helmet AND sword
    # break together (every 6th fight) -- confirm against the problem spec.
    if lost_fights_count % 2 == 0:
        shield_brakes += 1
        total_shield_brakes += 1
        # Armor breaks on every 2nd shield break; the counter then resets.
        if shield_brakes % 2 == 0 and shield_brakes != 0:
            armor_brakes += 1
            shield_brakes = 0
expenses = (helmet_brakes * helmet_price) + (sword_brakes * sword_price) \
    + (total_shield_brakes * shield_price) + (armor_brakes * armor_price)
print(f"Gladiator expenses: {expenses:.2f} aureus")
|
from flask import Blueprint
# Blueprint for static "pages" views.
bp = Blueprint('pages', __name__)
@bp.record_once
def register(state):
    """Register the /chatroom URL rule when the blueprint is first attached.

    The views module is imported inside the function, deferring it until
    registration time (avoids import-time cycles with the app package).
    """
    from sopy.pages import views
    state.app.add_url_rule('/chatroom', None, views.page, defaults={'name': 'chatroom'})
|
from ead.models import EAD
def run():
    """Delete every EAD record from the database.

    Destructive, irreversible bulk wipe -- NOTE(review): presumably invoked
    as a management/run script; confirm before running against real data.
    """
    EAD.objects.all().delete()
|
from typing import BinaryIO, Union
from pymarc import MARCReader
from pymarc.constants import END_OF_RECORD
from pymarc import exceptions
from bookops_marc import Bib
class SierraBibReader(MARCReader):
    """
    An iterator class for reading a file of local Sierra MARC records.

    Yields ``bookops_marc.Bib`` instances (instead of plain pymarc records),
    each tagged with the owning ``library`` code.
    """
    def __init__(
        self,
        marc_target: Union[BinaryIO, bytes],
        library: str = "",
        to_unicode: bool = True,
        force_utf8: bool = False,
        hide_utf8_warnings: bool = False,
        utf8_handling: str = "strict",
        file_encoding: str = "iso8859-1",
        permissive: bool = False,
    ) -> None:
        # Positional pass-through: order must match MARCReader.__init__.
        super().__init__(
            marc_target,
            to_unicode,
            force_utf8,
            hide_utf8_warnings,
            utf8_handling,
            file_encoding,
            permissive,
        )
        # Library code attached to every Bib produced by __next__.
        self.library = library
    def __next__(self):
        """Read and parse the next record."""
        # A fatal error from the previous iteration ends iteration;
        # recoverable errors are cleared so reading can continue.
        # ("FatalReaderEror" is pymarc's own spelling -- do not "fix" it.)
        if self._current_exception:
            if isinstance(self._current_exception, exceptions.FatalReaderEror):
                raise StopIteration
        self._current_chunk = None
        self._current_exception = None
        # The first 5 bytes of the MARC leader encode the record length.
        self._current_chunk = first5 = self.file_handle.read(5)
        if not first5:
            raise StopIteration
        # On malformed input, return None and stash the exception so the
        # caller can inspect self.current_exception (permissive pattern).
        if len(first5) < 5:
            self._current_exception = exceptions.TruncatedRecord()
            return
        try:
            length = int(first5)
        except ValueError:
            self._current_exception = exceptions.RecordLengthInvalid()
            return
        # Read the remainder of the record and re-attach the leader prefix.
        chunk = self.file_handle.read(length - 5)
        chunk = first5 + chunk
        self._current_chunk = chunk
        if len(self._current_chunk) < length:
            self._current_exception = exceptions.TruncatedRecord()
            return
        # Records must end with the MARC end-of-record terminator.
        if self._current_chunk[-1] != ord(END_OF_RECORD):
            self._current_exception = exceptions.EndOfRecordNotFound()
            return
        try:
            return Bib(
                chunk,
                library=self.library,
                to_unicode=self.to_unicode,
                force_utf8=self.force_utf8,
                hide_utf8_warnings=self.hide_utf8_warnings,
                utf8_handling=self.utf8_handling,
                file_encoding=self.file_encoding,
            )
        except Exception as ex:
            # Parsing errors are surfaced the same way as structural ones.
            self._current_exception = ex
|
# Copyright (c) 2017-2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from dazl.testing import connect_with_new_party
import pytest
from .dars import PostOffice
@pytest.mark.asyncio
async def test_protocol_ledger_api(sandbox):
    """End-to-end Ledger API flow: the postman invites a participant, and
    that party accepts both the author and receiver role contracts."""
    # first, administrative stuff--upload the DAR and allocate two parties that we'll use later
    async with connect_with_new_party(url=sandbox, dar=PostOffice, party_count=2) as (postman, p1):
        # Bootstrap: postman creates its role contract, then invites p1.
        event = await postman.connection.create("Main:PostmanRole", {"postman": postman.party})
        result = await postman.connection.exercise(
            event.contract_id, "InviteParticipant", {"party": p1.party, "address": "Somewhere!"}
        )
        logging.info("Result of inviting a participant: %s", result)
        # Stream results for Main:InviteAuthorRole, and then Main:InviteReceiverRole. Then break the
        # stream once we find the first contract.
        #
        # We do NOT use query() here, because in a distributed ledger setting, the result of the
        # postman inviting participants may not yet have been observed by the clients. Instead, use
        # stream() since it remains open until explicitly closed. We break the never-ending iterator
        # as soon as we see one of each contract.
        async with p1.connection.stream("Main:InviteAuthorRole") as query:
            async for event in query:
                result = await p1.connection.exercise(event.contract_id, "AcceptInviteAuthorRole")
                logging.info("The result of AcceptInviteAuthorRole: %s", result)
                break
        async with p1.connection.stream("Main:InviteReceiverRole") as query:
            async for event in query:
                result = await p1.connection.exercise(event.contract_id, "AcceptInviteReceiverRole")
                logging.info("The result of AcceptInviteReceiverRole: %s", result)
                break
        logging.info("Done!")
|
'''
Manage App Service Environments
'''
from ... pyaz_utils import _call_az
# NOTE: the function name `list` shadows the builtin, but it mirrors the
# `az appservice ase list` subcommand and is part of the public API.
def list(resource_group=None):
    '''
    List app service environments.
    Optional Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # _call_az builds the CLI arguments from locals(); this body must not
    # introduce any temporary variables.
    return _call_az("az appservice ase list", locals())
def list_addresses(name, resource_group=None):
    '''
    List VIPs associated with an app service environment v2.
    Required Parameters:
    - name -- Name of the app service environment
    Optional Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # _call_az builds the CLI arguments from locals(); no temporaries here.
    return _call_az("az appservice ase list-addresses", locals())
def list_plans(name, resource_group=None):
    '''
    List app service plans associated with an app service environment.
    Required Parameters:
    - name -- Name of the app service environment
    Optional Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # _call_az builds the CLI arguments from locals(); no temporaries here.
    return _call_az("az appservice ase list-plans", locals())
def show(name, resource_group=None):
    '''
    Show details of an app service environment.
    Required Parameters:
    - name -- Name of the app service environment
    Optional Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # _call_az builds the CLI arguments from locals(); no temporaries here.
    return _call_az("az appservice ase show", locals())
def create(name, resource_group, subnet, force_network_security_group=None, force_route_table=None, front_end_scale_factor=None, front_end_sku=None, ignore_network_security_group=None, ignore_route_table=None, ignore_subnet_size_validation=None, kind=None, location=None, no_wait=None, os_preference=None, virtual_ip_type=None, vnet_name=None, zone_redundant=None):
    '''
    Create app service environment.
    Required Parameters:
    - name -- Name of the app service environment
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - subnet -- Name or ID of existing subnet. To create vnet and/or subnet use `az network vnet [subnet] create`
    Optional Parameters:
    - force_network_security_group -- Override network security group for subnet. Applies to ASEv2 only.
    - force_route_table -- Override route table for subnet. Applies to ASEv2 only.
    - front_end_scale_factor -- Scale of front ends to app service plan instance ratio. Applies to ASEv2 only.
    - front_end_sku -- Size of front end servers. Applies to ASEv2 only.
    - ignore_network_security_group -- Configure network security group manually. Applies to ASEv2 only.
    - ignore_route_table -- Configure route table manually. Applies to ASEv2 only.
    - ignore_subnet_size_validation -- Do not check if subnet is sized according to recommendations.
    - kind -- Specify App Service Environment version
    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
    - no_wait -- Do not wait for the long-running operation to finish.
    - os_preference -- Determine if app service environment should start with Linux workers. Applies to ASEv2 only.
    - virtual_ip_type -- Specify if app service environment should be accessible from internet
    - vnet_name -- Name of the vNet. Mandatory if only subnet name is specified.
    - zone_redundant -- Configure App Service Environment as Zone Redundant. Applies to ASEv3 only.
    '''
    # _call_az builds the CLI arguments from locals(); no temporaries here.
    return _call_az("az appservice ase create", locals())
def update(name, allow_new_private_endpoint_connections=None, front_end_scale_factor=None, front_end_sku=None, no_wait=None, resource_group=None):
    '''
    Update app service environment.
    Required Parameters:
    - name -- Name of the app service environment
    Optional Parameters:
    - allow_new_private_endpoint_connections -- (ASEv3 only) Configure Apps in App Service Environment to allow new private endpoint connections.
    - front_end_scale_factor -- (ASEv2 only) Scale of front ends to app service plan instance ratio between 5 and 15.
    - front_end_sku -- (ASEv2 only) Size of front end servers.
    - no_wait -- Do not wait for the long-running operation to finish.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # _call_az builds the CLI arguments from locals(); no temporaries here.
    return _call_az("az appservice ase update", locals())
def delete(name, no_wait=None, resource_group=None, yes=None):
    '''
    Delete app service environment.
    Required Parameters:
    - name -- Name of the app service environment
    Optional Parameters:
    - no_wait -- Do not wait for the long-running operation to finish.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - yes -- Do not prompt for confirmation.
    '''
    # _call_az builds the CLI arguments from locals(); no temporaries here.
    return _call_az("az appservice ase delete", locals())
def create_inbound_services(name, resource_group, subnet, skip_dns=None, vnet_name=None):
    '''
    Private DNS Zone for Internal ASEv2.
    Required Parameters:
    - name -- Name of the app service environment
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - subnet -- Name or ID of existing subnet for inbound traffic to ASEv3. To create vnet and/or subnet use `az network vnet [subnet] create`
    Optional Parameters:
    - skip_dns -- Do not create Private DNS Zone and DNS records.
    - vnet_name -- Name of the vNet. Mandatory if only subnet name is specified.
    '''
    # _call_az builds the CLI arguments from locals(); no temporaries here.
    return _call_az("az appservice ase create-inbound-services", locals())
|
# -*- coding: utf-8 -*-
""" SOXMOS spectrometer file parser, plotter"""
from .SOXMOSFile import SOXMOSFile
__version__ = "0.1.0"
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps
helps['acr'] = """
type: group
short-summary: Manage Azure Container Registries.
"""
helps['acr credential'] = """
type: group
short-summary: Manage login credentials for Azure Container Registries.
"""
helps['acr repository'] = """
type: group
short-summary: Manage repositories for Azure Container Registries.
"""
helps['acr webhook'] = """
type: group
short-summary: Manage webhooks for Azure Container Registries.
"""
helps['acr replication'] = """
type: group
short-summary: Manage replications for Azure Container Registries.
"""
helps['acr check-name'] = """
type: command
short-summary: Checks if a container registry name is available for use.
examples:
- name: Check if a registry name already exists.
text: >
az acr check-name -n doesthisnameexist
"""
helps['acr list'] = """
type: command
short-summary: Lists all the container registries under the current subscription.
examples:
- name: List container registries and show the results in a table.
text: >
az acr list -o table
- name: List container registries in a resource group and show the results in a table.
text: >
az acr list -g MyResourceGroup -o table
"""
helps['acr create'] = """
type: command
short-summary: Creates a container registry.
examples:
- name: Create a managed container registry with the Standard SKU.
text: >
az acr create -n MyRegistry -g MyResourceGroup --sku Standard
- name: Create a container registry with a new storage account with the Classic SKU.
text: >
az acr create -n MyRegistry -g MyResourceGroup --sku Classic
"""
helps['acr delete'] = """
type: command
short-summary: Deletes a container registry.
examples:
- name: Delete a container registry.
text: >
az acr delete -n MyRegistry
"""
helps['acr show'] = """
type: command
short-summary: Get the details of a container registry.
examples:
- name: Get the login server for a container registry.
text: >
az acr show -n MyRegistry --query loginServer
"""
helps['acr update'] = """
type: command
short-summary: Update a container registry.
examples:
- name: Update tags for a container registry.
text: >
az acr update -n MyRegistry --tags key1=value1 key2=value2
- name: Update the storage account for a container registry.
text: >
az acr update -n MyRegistry --storage-account-name MyStorageAccount
- name: Enable the administrator user account for a container registry.
text: >
az acr update -n MyRegistry --admin-enabled true
"""
helps['acr login'] = """
type: command
short-summary: Log in to a container registry through Docker.
examples:
- name: Log in to a container registry
text: >
az acr login -n MyRegistry
"""
helps['acr show-usage'] = """
type: command
short-summary: Get the quota usages for a container registry.
examples:
- name: Get the quota usages for a container registry.
text: >
az acr show-usage -n MyRegistry
"""
helps['acr credential show'] = """
type: command
short-summary: Get the login credentials for a container registry.
examples:
- name: Get the login credentials for a container registry.
text: >
az acr credential show -n MyRegistry
- name: Get the username used to log into a container registry.
text: >
az acr credential show -n MyRegistry --query username
- name: Get a password used to log into a container registry.
text: >
az acr credential show -n MyRegistry --query passwords[0].value
"""
helps['acr credential renew'] = """
type: command
short-summary: Regenerate login credentials for a container registry.
examples:
- name: Renew the second password for a container registry.
text: >
az acr credential renew -n MyRegistry --password-name password2
"""
helps['acr repository list'] = """
type: command
short-summary: List repositories in a container registry.
examples:
- name: List repositories in a given container registry.
text:
az acr repository list -n MyRegistry
"""
helps['acr repository show-tags'] = """
type: command
short-summary: Show tags for a repository in a container registry.
examples:
- name: Show tags of a repository in a container registry.
text:
az acr repository show-tags -n MyRegistry --repository MyRepository
"""
helps['acr repository show-manifests'] = """
type: command
short-summary: Show manifests of a repository in a container registry.
examples:
- name: Show manifests of a repository in a container registry.
text:
az acr repository show-manifests -n MyRegistry --repository MyRepository
"""
# Help text for `az acr repository delete`. Fixes the user-facing
# "manfiest" -> "manifest" typo in the last example name.
helps['acr repository delete'] = """
    type: command
    short-summary: Delete a repository, manifest, or tag in a container registry.
    examples:
        - name: Delete a repository from a container registry.
          text:
            az acr repository delete -n MyRegistry --repository MyRepository
        - name: Delete a tag from a repository. This does not delete the manifest referenced by the tag or any associated layer data.
          text:
            az acr repository delete -n MyRegistry --repository MyRepository --tag MyTag
        - name: Delete the manifest referenced by a tag. This also deletes any associated layer data and all other tags referencing the manifest.
          text:
            az acr repository delete -n MyRegistry --repository MyRepository --tag MyTag --manifest
        - name: Delete a manifest from a repository. This also deletes any associated layer data and all tags referencing the manifest.
          text:
            az acr repository delete -n MyRegistry --repository MyRepository --manifest MyManifest
    """
helps['acr webhook list'] = """
type: command
short-summary: List all of the webhooks for a container registry.
examples:
- name: List webhooks and show the results in a table.
text: >
az acr webhook list -r MyRegistry -o table
"""
helps['acr webhook create'] = """
type: command
short-summary: Create a webhook for a container registry.
examples:
- name: Create a webhook for a container registry that will deliver Docker push and delete events to a service URI.
text: >
az acr webhook create -n MyWebhook -r MyRegistry --uri http://myservice.com --actions push delete
- name: Create a webhook for a container registry that will deliver Docker push events to a service URI with a basic authentication header.
text: >
az acr webhook create -n MyWebhook -r MyRegistry --uri http://myservice.com --actions push --headers "Authorization=Basic 000000"
"""
helps['acr webhook delete'] = """
type: command
short-summary: Delete a webhook from a container registry.
examples:
- name: Delete a webhook from a container registry.
text: >
az acr webhook delete -n MyWebhook -r MyRegistry
"""
helps['acr webhook show'] = """
type: command
short-summary: Get the details of a webhook.
examples:
- name: Get the details of a webhook.
text: >
az acr webhook show -n MyWebhook -r MyRegistry
"""
helps['acr webhook update'] = """
type: command
short-summary: Update a webhook.
examples:
- name: Update headers for a webhook.
text: >
az acr webhook update -n MyWebhook -r MyRegistry --headers "Authorization=Basic 000000"
- name: Update the service URI and actions for a webhook.
text: >
az acr webhook update -n MyWebhook -r MyRegistry --uri http://myservice.com --actions push delete
- name: Disable a webhook.
text: >
az acr webhook update -n MyWebhook -r MyRegistry --status disabled
"""
helps['acr webhook get-config'] = """
type: command
short-summary: Get the service URI and custom headers for the webhook.
examples:
- name: Get the configuration information for a webhook.
text: >
az acr webhook get-config -n MyWebhook -r MyRegistry
"""
helps['acr webhook ping'] = """
type: command
short-summary: Trigger a ping event for a webhook.
examples:
- name: Trigger a ping event for a webhook.
text: >
az acr webhook ping -n MyWebhook -r MyRegistry
"""
helps['acr webhook list-events'] = """
type: command
short-summary: List recent events for a webhook.
examples:
- name: List recent events for a webhook.
text: >
az acr webhook list-events -n MyWebhook -r MyRegistry
"""
helps['acr replication list'] = """
type: command
short-summary: List all of the replications for a container registry.
examples:
- name: List replications and show the results in a table.
text: >
az acr replication list -r MyRegistry -o table
"""
helps['acr replication create'] = """
type: command
short-summary: Create a replication for a container registry.
examples:
- name: Create a replication for a container registry.
text: >
az acr replication create -r MyRegistry -l westus
"""
helps['acr replication delete'] = """
type: command
short-summary: Delete a replication from a container registry.
examples:
- name: Delete a replication from a container registry.
text: >
az acr replication delete -n MyReplication -r MyRegistry
"""
helps['acr replication show'] = """
type: command
short-summary: Get the details of a replication.
examples:
- name: Get the details of a replication.
text: >
az acr replication show -n MyReplication -r MyRegistry
"""
helps['acr replication update'] = """
type: command
short-summary: Updates a replication.
examples:
- name: Update tags for a replication
text: >
az acr replication update -n MyReplication -r MyRegistry --tags key1=value1 key2=value2
"""
|
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import os
import ntpath
from PIL import Image
import subprocess
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.GenomeFileUtilClient import GenomeFileUtil
from installed_clients.DataFileUtilClient import DataFileUtil as DFUClient
#END_HEADER
class CGView:
    '''
    Module Name:
    CGView

    Module Description:
    A KBase module: CGView
    '''

    ######## WARNING FOR GEVENT USERS ####### noqa
    # Since asynchronous IO can lead to methods - even the same method -
    # interrupting each other, you must be *very* careful when using global
    # state. A method could easily clobber the state set by another while
    # the latter method is running.
    ######################################### noqa
    VERSION = "0.0.1"
    GIT_URL = "https://github.com/kellyhuang21/CircularGenome.git"
    GIT_COMMIT_HASH = "24002a39f02d947880d40e20d14889b44293820c"

    #BEGIN_CLASS_HEADER
    #END_CLASS_HEADER

    # config contains contents of config file in a hash or None if it couldn't
    # be found
    def __init__(self, config):
        #BEGIN_CONSTRUCTOR
        # Callback URL and scratch directory are supplied by the KBase SDK
        # runtime environment.
        self.callback_url = os.environ['SDK_CALLBACK_URL']
        self.shared_folder = config['scratch']
        #END_CONSTRUCTOR
        pass

    def run_CGView(self, ctx, params):
        """
        Render a circular genome map for a genome object with the CGView
        Comparison Tool (CCT) and return it in a KBase report.
        :param params: instance of mapping from String to unspecified object
        :returns: instance of type "ReportResults" -> structure: parameter
           "report_name" of String, parameter "report_ref" of String
        """
        # ctx is the context object
        # return variables are: output
        #BEGIN run_CGView
        print('Starting run_kellyhuangCGView function. Params=')
        print(params)

        # Validating workspace_name and input_file is present
        print('Validating parameters.')
        if 'workspace_name' not in params:
            raise ValueError('Parameter workspace_name is not set in input arguments')
        workspace_name = params['workspace_name']
        if 'input_file' not in params:
            raise ValueError('Parameter input_file is not set in input arguments')
        input_file = params['input_file']

        # Set up the CCT project folder (downloads COG data and scaffolds the
        # directory layout cgview_comparison_tool.pl expects).
        subprocess.call(
            "cd /opt/cgview_comparison_tool && ./update_cogs.sh && cgview_comparison_tool.pl -p project",
            shell=True)

        # Convert the genome object to a GenBank file and stage it in the
        # project's reference_genome directory.
        gfu = GenomeFileUtil(self.callback_url)
        gbk = gfu.genome_to_genbank({'genome_ref': input_file})
        gbk_file = gbk["genbank_file"]["file_path"]
        subprocess.call(["cp", gbk_file, "/opt/cgview_comparison_tool/project/reference_genome"])

        # CCT expects a .gbk extension; GenomeFileUtil produces .gbff, so rename.
        base = ntpath.basename(gbk_file).rsplit(".", 1)[0]
        name_gbff = base + ".gbff"
        name_gbk = base + ".gbk"
        from_path = "/opt/cgview_comparison_tool/project/reference_genome/" + name_gbff
        print("===== from", from_path)
        to_path = "/opt/cgview_comparison_tool/project/reference_genome/" + name_gbk
        print("===== to", to_path)
        subprocess.call(["mv", from_path, to_path])

        # Generate the map from the staged GenBank file.
        os.chdir("/opt/cgview_comparison_tool")
        proc = subprocess.Popen(
            ["cgview_comparison_tool.pl", "-p", "/opt/cgview_comparison_tool/project"],
            stdout=subprocess.PIPE)
        proc.wait()
        # BUG FIX: a second call,
        #   subprocess.call(["cgview_comparison_tool.pl", "-p", " project"], shell=True)
        # used to follow. With shell=True and a *list* argument only the first
        # element is executed, so "-p project" was silently dropped and the call
        # was a broken no-op duplicate of the Popen above; it has been removed.

        # Retrieve map PNG/HTML from the project's maps directory.
        subprocess.call(["cp", "/opt/cgview_comparison_tool/project/maps/medium.png", self.shared_folder])
        subprocess.call(["cp", "/opt/cgview_comparison_tool/project/maps/medium.html", self.shared_folder])

        # Resize the PNG to a fixed width, preserving aspect ratio.
        basewidth = 900
        img = Image.open('/opt/cgview_comparison_tool/project/maps/medium.png')
        wpercent = (basewidth / float(img.size[0]))
        hsize = int((float(img.size[1]) * float(wpercent)))
        # Image.LANCZOS is the same filter as the removed Image.ANTIALIAS
        # alias (dropped in Pillow 10).
        img = img.resize((basewidth, hsize), Image.LANCZOS)
        img.save('/opt/cgview_comparison_tool/project/maps/medium1.png', quality=95)
        subprocess.call(["cp", "/opt/cgview_comparison_tool/project/maps/medium1.png", self.shared_folder])

        png_dir = os.path.join(self.shared_folder, 'medium1.png')
        png_dir_higher = os.path.join(self.shared_folder, 'medium.png')
        # NOTE(review): html_dir (medium.html) is computed but unused and the
        # HTML report link below points at the resized PNG — confirm intended.
        html_dir = os.path.join(self.shared_folder, 'medium.html')
        png_dict = {'path': png_dir_higher, 'name': 'Circular_Genome_Map_PNG'}
        html_dict = {'path': png_dir, 'name': 'Circular Genome Map'}
        report_client = KBaseReport(self.callback_url)
        report = report_client.create_extended_report({
            'direct_html_link_index': 0,
            'html_links': [html_dict],
            'file_links': [png_dict],
            'workspace_name': params['workspace_name'],
            'summary_window_height': 900,
            'html_window_height': 900
        })
        output = {
            'report_name': report['name'],
            'report_ref': report['ref'],
        }
        #END run_CGView

        # At some point might do deeper type checking...
        if not isinstance(output, dict):
            raise ValueError('Method run_CGView return value ' +
                             'output is not type dict as required.')
        # return the results
        return [output]

    def status(self, ctx):
        """Return module status/version information for the SDK status check."""
        #BEGIN_STATUS
        returnVal = {'state': "OK",
                     'message': "",
                     'version': self.VERSION,
                     'git_url': self.GIT_URL,
                     'git_commit_hash': self.GIT_COMMIT_HASH}
        #END_STATUS
        return [returnVal]
|
import pytest
import pandas as pd
from data import clean, read_data
from unittest import mock
def test_clean():
    """clean() collapses newline/carriage-return noise and strips the
    quote/citation artifacts from a raw plot string."""
    raw_plot = "\n\nHello\r\nWorld\'[1]"
    cleaned = clean(raw_plot)
    assert cleaned == " Hello World"
@mock.patch('data.os.path.isfile', return_value=False)
def test_read_data_bad_file(_mock_isfile):
    """read_data() must reject a path that does not exist on disk."""
    with pytest.raises(ValueError):
        read_data('test.csv')
@mock.patch('data.os.path.isfile', return_value=True)
@mock.patch('data.pd.read_csv', return_value=pd.DataFrame({
    'Genre': ['Horror'] * 150 + ['Romance'] * 50
}))
def test_read_data_filtering(mock_read_csv, mock_path_isfile):
    """Low-count genres (Romance, 50 rows) are filtered out while popular
    genres (Horror, 150 rows) survive intact."""
    filtered = read_data('test.csv')
    assert set(filtered['Genre']) == {'Horror'}
    assert len(filtered) == 150
@mock.patch('data.os.path.isfile', return_value=True)
def test_read_data_bad_extension(_mock_isfile):
    """Even an existing file is rejected when its extension is not .csv."""
    with pytest.raises(ValueError):
        read_data('test.cv')
|
"""
:mod:`response`
---------------
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
from six import text_type
from . import status_strings
from .baseheaders import BaseHeaders as Headers
class CloseWrapper(object):
    """Wrap a response body to satisfy WSGI's post-response hook protocol.

    WSGI servers call ``close()`` on the iterable they are handed once the
    response has been fully sent; this wrapper forwards iteration to the
    underlying body and supplies that hook.
    """

    def __init__(self, request, body):
        self.request = request
        self.body = body

    def __iter__(self):
        # Delegate straight to the wrapped body.
        return iter(self.body)

    def close(self):
        # Deliberately a no-op: the Socket.IO support that needed this
        # callback was removed.
        pass
class Response(Exception):
    """Represent an HTTP Response message.

    Subclasses Exception so a response can be ``raise``d from deep inside
    request-handling code and caught by the framework's dispatcher.
    """

    # The request being answered; set by the framework, not here.
    request = None
    # (filename, linenum) of the raise site; filled by set_whence_raised().
    whence_raised = (None, None)

    def __init__(self, code=200, body='', headers=None):
        """Takes an int, a string, a dict.

        - code      an HTTP response code, e.g., 404
        - body      the message body as a string
        - headers   a dict, list, or bytestring of HTTP headers

        Code is first because when you're raising your own Responses, they're
        usually error conditions. Body is second because one more often wants
        to specify a body without headers, than a header without a body.
        """
        if not isinstance(code, int):
            raise TypeError("'code' must be an integer")
        elif not isinstance(body, (bytes, text_type)) and not hasattr(body, '__iter__'):
            raise TypeError("'body' must be a string or iterable of strings")
        elif headers is not None and not isinstance(headers, (dict, list)):
            raise TypeError("'headers' must be a dictionary or a list of " +
                            "2-tuples")
        Exception.__init__(self)
        self.code = code
        self.body = body
        self.headers = Headers(headers)

    def to_wsgi(self, environ, start_response, charset):
        """Render this response through the WSGI calling convention.

        Calls *start_response* with the status line and ASCII-validated
        headers, then returns the encoded body wrapped in CloseWrapper.
        Raises ValueError for any non-US-ASCII header key or value.
        """
        wsgi_status = str(self._status_text())
        # Cookies accumulated on the headers become Set-Cookie headers.
        for morsel in self.headers.cookie.values():
            self.headers.add(b'Set-Cookie', morsel.OutputString().encode('ascii'))
        # To comply with PEP 3333 headers should be `str` (bytes in py2 and unicode in py3)
        wsgi_headers = []
        for k, vals in self.headers.items():
            try:  # XXX This is a hack. It's red hot, baby.
                k = k.encode('US-ASCII') if not isinstance(k, bytes) else k
            except UnicodeEncodeError:
                raise ValueError("Header key %s isn't US-ASCII." % k)
            for v in vals:
                try:  # XXX This also is a hack. It is also red hot, baby.
                    v = v.encode('US-ASCII') if not isinstance(v, bytes) else v
                except UnicodeEncodeError:
                    raise ValueError("Header value %s isn't US-ASCII." % k)
                if str is bytes:  # python2 shortcut, no need to decode
                    wsgi_headers.append((k, v))
                    continue
                try:
                    wsgi_headers.append((k.decode('ascii'), v.decode('ascii')))
                except UnicodeDecodeError:
                    k, v = k.decode('ascii', 'backslashreplace'), v.decode('ascii', 'backslashreplace')
                    raise ValueError("Header `%s: %s` isn't US-ASCII." % (k, v))
        start_response(wsgi_status, wsgi_headers)
        body = self.body
        if not isinstance(body, (list, tuple)):
            body = [body]
        # Encode text chunks with the negotiated charset; bytes pass through.
        body = (x.encode(charset) if not isinstance(x, bytes) else x for x in body)
        return CloseWrapper(self.request, body)

    def __repr__(self):
        return "<Response: %s>" % self._status_text()

    def __str__(self):
        # Short bodies are included in the string form; long ones elided.
        body = self.body
        if len(body) < 500:
            if not isinstance(body, str):
                if isinstance(body, bytes):
                    body = body.decode('ascii', 'backslashreplace')
                else:
                    body = str(body)
            return ': '.join((self._status_text(), body))
        return self._status_text()

    def _status_text(self):
        # e.g. "404 Not Found"
        return "%d %s" % (self.code, self._status())

    def _status(self):
        return status_strings.get(self.code, 'Unknown HTTP status')

    def _to_http(self, version):
        """Given a version string like 1.1, return an HTTP message (bytestring).
        """
        status_line = ("HTTP/%s" % version).encode('ascii')
        headers = self.headers.raw
        body = self.body
        if self.headers.get(b'Content-Type', b'').startswith(b'text/'):
            # Normalize bare LF to CRLF without doubling existing CRLFs.
            body = body.replace(b'\n', b'\r\n')
            body = body.replace(b'\r\r', b'\r')
        return b'\r\n'.join([status_line, headers, b'', body])

    def set_whence_raised(self):
        """Sets self.whence_raised

        It's a tuple, (filename, linenum) where we were raised from.
        This function needs to be called from inside the `except` block.
        """
        tb = filepath = linenum = None
        try:
            cls, response, tb = sys.exc_info()
            if response is self:
                # Walk to the innermost frame: that's where the raise happened.
                while tb.tb_next is not None:
                    tb = tb.tb_next
                frame = tb.tb_frame
                # filepath
                pathparts = tb.tb_frame.f_code.co_filename.split(os.sep)[-2:]
                # XXX It'd be nice to use www_root and project_root here, but
                # self.request is None at this point afaict, and it's enough to
                # show the last two parts just to differentiate index.html or
                # __init__.py.
                filepath = os.sep.join(pathparts)
                # linenum
                linenum = frame.f_lineno
        finally:
            del tb  # http://docs.python.org/2/library/sys.html#sys.exc_info
        self.whence_raised = (filepath, linenum)
|
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import time
import six
from oslo_log import log as logging
from oslo_utils import units
from delfin import exception
from delfin.common import constants
from delfin.drivers.dell_emc.vnx.vnx_block import consts
from delfin.i18n import _
LOG = logging.getLogger(__name__)
class AlertHandler(object):
    """Parse, collect and de-duplicate alerts from a Dell EMC VNX block array."""

    def __init__(self, navi_handler):
        # navi_handler wraps the naviseccli command-line client used to query
        # the array (domains, logs).
        self.navi_handler = navi_handler

    @staticmethod
    def parse_alert(alert):
        """Convert one SNMP trap payload into a delfin alert-model dict.

        :param alert: mapping of SNMP OID -> value for the received trap
        :raises exception.InvalidResults: when expected attributes are missing
        """
        try:
            alert_model = dict()
            alert_model['alert_id'] = AlertHandler.check_event_code(
                alert.get(consts.OID_MESSAGECODE))
            alert_model['alert_name'] = alert.get(consts.OID_DETAILS)
            # Unknown trap severities fall back to INFORMATIONAL.
            alert_model['severity'] = consts.TRAP_LEVEL_MAP.get(
                alert.get(consts.OID_SEVERITY),
                constants.Severity.INFORMATIONAL)
            alert_model['category'] = constants.Category.FAULT
            alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
            # The trap carries no timestamp, so use receipt time (ms).
            alert_model['occur_time'] = int(time.time() * units.k)
            alert_model['description'] = alert.get(consts.OID_DETAILS)
            alert_model['resource_type'] = constants.DEFAULT_RESOURCE_TYPE
            # Hash of the details text, used to match/de-duplicate alerts.
            alert_model['match_key'] = hashlib.md5(
                alert.get(consts.OID_DETAILS, '').encode()).hexdigest()
            return alert_model
        except Exception as e:
            LOG.error(e)
            msg = (_("Failed to build alert model as some attributes missing "
                     "in alert message."))
            raise exception.InvalidResults(msg)

    def handle_alerts(self, alerts):
        """Convert raw array log entries into alert-model dicts.

        :param alerts: iterable of dicts with at least event_code, message
            and log_time_stamp keys
        :returns: list of alert-model dicts
        """
        alert_list = []
        for alert in alerts:
            alert_model = {
                'alert_id': AlertHandler.check_event_code(
                    alert.get('event_code')),
                'alert_name': alert.get('message'),
                # The first two characters of the event code encode severity.
                'severity': consts.SEVERITY_MAP.get(
                    alert.get('event_code')[0:2]),
                'category': constants.Category.FAULT,
                'type': constants.EventType.EQUIPMENT_ALARM,
                'occur_time': alert.get('log_time_stamp'),
                'description': alert.get('message'),
                'resource_type': constants.DEFAULT_RESOURCE_TYPE,
                'match_key': hashlib.md5(
                    alert.get('message', '').encode()).hexdigest()
            }
            alert_list.append(alert_model)
        return alert_list

    def list_alerts(self, query_para):
        """Collect alerts from every storage processor and de-duplicate them.

        :param query_para: filter passed through to the log query
        :returns: de-duplicated list of alert-model dicts
        """
        alert_lists = []
        domains = self.navi_handler.get_domain()
        host_ip_list = []
        if domains:
            for domain in domains:
                host_ip = domain.get('ip_address')
                if host_ip:
                    host_ip_list.append(host_ip)
        else:
            # No domain info: fall back to the configured host.
            host_ip_list.append(self.navi_handler.navi_host)
        for host_ip in host_ip_list:
            alerts = self.navi_handler.get_log(host_ip, query_para)
            alert_list = self.handle_alerts(alerts)
            if alert_list:
                alert_lists.extend(alert_list)
        alert_lists = self.remove_duplication_alert(alert_lists)
        return alert_lists

    def get_sort_key(self, alert):
        # Sort key: id + description + occur_time, so duplicates are adjacent.
        return '%s%s%s' % (
            alert.get('alert_id'), alert.get('description'),
            str(alert.get('occur_time')))

    def remove_duplication_alert(self, alert_lists):
        """Drop alerts sharing (alert_id, description), keeping the newest.

        The list is sorted so equal-key alerts are adjacent, then scanned
        from the tail comparing neighbours.
        :raises exception.InvalidResults: on any unexpected failure
        """
        try:
            if alert_lists:
                alert_lists.sort(key=self.get_sort_key, reverse=True)
                alert = alert_lists[-1]
                for i in range(len(alert_lists) - 2, -1, -1):
                    main_alert_key = '%s%s' % (
                        alert.get('alert_id'), alert.get('description'))
                    other_alert_key = '%s%s' % (alert_lists[i].get('alert_id'),
                                                alert_lists[i].get(
                                                    'description'))
                    if main_alert_key == other_alert_key:
                        # Same logical alert: keep whichever occurred later.
                        if alert.get('occur_time') > alert_lists[i].get(
                                'occur_time'):
                            alert_lists.remove(alert_lists[i])
                        else:
                            alert_lists.remove(alert)
                            alert = alert_lists[i]
                    else:
                        alert = alert_lists[i]
            return alert_lists
        except Exception as e:
            err_msg = "remove duplication failed: %s" % (six.text_type(e))
            LOG.error(err_msg)
            raise exception.InvalidResults(err_msg)

    @staticmethod
    def check_event_code(event_code):
        # Normalize event codes to hex form with a 0x prefix.
        if '0x' not in event_code:
            event_code = '0x%s' % event_code
        return event_code
|
#!/usr/bin/env python3
from __future__ import annotations
from typing import List, Callable, Optional
import bagua_core as B
import torch
from bagua.torch_api.globals import _get_global_state
from bagua.torch_api.tensor import BaguaTensor
from bagua.torch_api.utils import check_contiguous
class BaguaBucket:
    def __init__(
        self, tensors: List[BaguaTensor], name: str, flatten: bool, alignment: int = 1
    ) -> None:
        """
        Create a Bagua bucket with a list of Bagua tensors.

        Args:
            tensors: A list of Bagua tensors to be put in the
                bucket.
            name: The unique name of the bucket.
            flatten: If True, flatten the input tensors so that they are
                contiguous in memory.
            alignment: If alignment > 1, Bagua will create a padding tensor to
                the bucket so that the total number of elements in the bucket divides
                the given alignment.
        """
        self.tensors = tensors
        """
        The tensors contained within the bucket.
        """
        self.name = name
        """
        The bucket's name.
        """
        self.padding_tensor = None

        if alignment > 1:
            # Pad the bucket up to the next multiple of `alignment` elements.
            padding = sum(tensor.numel() for tensor in self.tensors) % alignment
            if padding > 0:
                padding = alignment - padding
                # padding tensor must be of name bagua_padding_tensor, so that they are always marked as ready for communication in the backend
                self.padding_tensor = torch.zeros(
                    padding, dtype=self.tensors[0].dtype, device=self.tensors[0].device
                ).to_bagua_tensor("bagua_padding_tensor_bucket_" + name)

        # All tensors managed by this bucket, padding included.
        self._all_tensors = (
            self.tensors + [self.padding_tensor]
            if self.padding_tensor is not None
            else self.tensors
        )

        self.backend_tensor = None

        self.flatten = flatten
        if self.flatten:
            self._flatten_()

        self.backend_bucket = B.BaguaBucketPy(
            name, [tensor._bagua_backend_tensor for tensor in self._all_tensors]
        )

        # Back-reference so each tensor knows which bucket owns it.
        for tensor in self._all_tensors:
            tensor._bagua_bucket = self

    def _flatten_(self):
        """
        Flatten inner tensors in place.

        Copies every tensor into one contiguous backing storage and repoints
        each tensor at its slice of that storage. No-op when already flat or
        when the bucket is empty.
        """
        if self.check_flatten():
            return
        if len(self._all_tensors) == 0:
            return
        total_size = 0
        for tensor in self._all_tensors:
            total_size += tensor.numel()

        flatten_tensor = torch.zeros(total_size, dtype=self._all_tensors[0].dtype).to(
            self._all_tensors[0].device
        )
        flatten_storage = flatten_tensor.storage()

        offset = 0
        for tensor in self._all_tensors:
            # copy data
            flatten_tensor[offset : offset + tensor.numel()] = tensor.data.reshape(-1)
            tensor.bagua_set_storage(flatten_storage, offset)
            offset += tensor.numel()
        # check
        assert self.check_flatten()

    def check_flatten(self) -> bool:
        """
        Returns:
            True if the bucket's tensors are contiguous in memory.
        """
        return check_contiguous(self._all_tensors)

    def append_python_op(self, python_function: Callable[[str], None]) -> BaguaBucket:
        """
        Append a Python operation to a bucket. A Python operation is a Python function that
        takes the bucket's name and returns ``None``. It can do arbitrary things within the
        function body.

        The operations will be executed by the Bagua backend in the order they are appended
        when all the tensors within the bucket are marked ready.

        Args:
            python_function: The Python operation function.

        Returns:
            The bucket itself.
        """

        def wrapper_function_factory(pyop):
            # Run the user's op on Bagua's communication stream so it is
            # ordered with the backend's communication work.
            def wrapped_pyop(name):
                with torch.cuda.stream(_get_global_state().get_communication_stream()):
                    return pyop(name)

            return wrapped_pyop

        self.backend_bucket.append_python_op(wrapper_function_factory(python_function))
        return self

    def append_centralized_synchronous_op(
        self,
        hierarchical: bool = False,
        average: bool = True,
        scattergather: bool = False,
        compression: Optional[str] = None,
    ) -> BaguaBucket:
        """
        Append a centralized synchronous operation to a bucket. It will sum or average the tensors in the bucket
        for all workers.

        The operations will be executed by the Bagua backend in the order they are appended
        when all the tensors within the bucket are marked ready.

        Args:
            hierarchical (bool): Enable hierarchical communication. Which means the GPUs on the same machine
                will communicate with each other first. After that, machines do inter-node communication. This can
                boost performance when the inter-node communication cost is high.
            average (bool): If True, the gradients on each worker are averaged. Otherwise, they are summed.
            scattergather (bool): If true, the communication between workers are done with scatter gather instead
                of allreduce. This is required for using compression.
            compression: If not None, the tensors will be compressed for communication. Currently "MinMaxUInt8" is
                supported.

        Returns:
            The bucket itself.
        """
        if hierarchical:
            # Hierarchical mode needs both the inter-node and intra-node
            # communicators.
            self.backend_bucket.append_centralized_synchronous_op(
                _get_global_state().get_internode_communicator(),
                _get_global_state().get_intranode_communicator(),
                hierarchical=hierarchical,
                average=average,
                scattergather=scattergather,
                compression=compression,
            )
        else:
            self.backend_bucket.append_centralized_synchronous_op(
                _get_global_state().get_global_communicator(),
                None,
                hierarchical=hierarchical,
                average=average,
                scattergather=scattergather,
                compression=compression,
            )
        return self

    def append_decentralized_synchronous_op(
        self,
        hierarchical: bool = True,
        peer_selection_mode: str = "all",
        communication_interval: int = 1,
    ) -> BaguaBucket:
        """
        Append a decentralized synchronous operation to a bucket. It will do gossipy style model averaging among workers.

        The operations will be executed by the Bagua backend in the order they are appended
        when all the tensors within the bucket are marked ready.

        Args:
            hierarchical (bool): Enable hierarchical communication. Which means the GPUs on the same machine
                will communicate with each other first. After that, machines do inter-node communication. This can
                boost performance when the inter-node communication cost is high.
            peer_selection_mode (str): Can be "all" or "shift_one". "all" means all workers'
                weights are averaged in each communication step. "shift_one" means each worker
                selects a different peer to do weights average in each communication step.
            communication_interval (int): Number of iterations between two communication steps.

        Returns:
            The bucket itself.
        """
        self.backend_bucket.append_decentralized_synchronous_op(
            _get_global_state().get_internode_communicator(),
            _get_global_state().get_intranode_communicator(),
            hierarchical=hierarchical,
            compression=None,
            peer_selection_mode=peer_selection_mode,
            communication_interval=communication_interval,
        )
        return self

    def clear_ops(self) -> BaguaBucket:
        """
        Clear the previously appended operations.
        """
        self.backend_bucket.clear_ops()
        return self

    def bytes(self) -> int:
        """Returns the total number of bytes occupied by the bucket.

        Note: counts only the user tensors, not the padding tensor.

        Returns:
            int: number of bucket bytes
        """
        return sum(tensor.numel() * tensor.element_size() for tensor in self.tensors)
|
"""Tests for RC Config."""
import unittest
from grow.common import rc_config
def mock_time(value):
    """Return a zero-argument callable that always reports *value*.

    Used as a stand-in clock so tests do not depend on the real time.
    """
    return lambda: value
class RCConfigTestCase(unittest.TestCase):
    """Test the RC Config."""

    def _create_config(self, time_value=None):
        # Build an RCConfig whose internal clock is frozen at time_value so
        # tests do not depend on the real time.
        self.config = rc_config.RCConfig(config={}, internal_time=mock_time(time_value))

    def setUp(self):
        # Fresh config (with a None-valued frozen clock) for every test.
        self._create_config()

    def test_last_checked(self):
        """Test the last_checked."""
        # Defaults to 0 before any update check has been recorded.
        self.assertEqual(0, self.config.last_checked)
        self.config.set('update.last_checked', 12345)
        self.assertEqual(12345, self.config.last_checked)

    def test_set(self):
        """Test that set works on the config."""
        # set() with a dotted key should round-trip through get().
        self.config.set('update.last_checked', 12345)
        self.assertEqual(12345, self.config.get('update.last_checked'))
|
#encoding:utf-8
import datetime
import csv
import logging
from multiprocessing import Process
import time
import yaml
from croniter import croniter
from supplier import supply
logger = logging.getLogger(__name__)
def read_own_cron(own_cron_filename, config):
    """Scan a tab-separated cron table and launch the submodules that are due.

    Each row must provide a MASK column (a standard cron expression) and a
    submodule_name column. A row is due when its most recent scheduled time
    falls within the last minute; a ``supplier.supply`` process is spawned
    for each due row.

    :param own_cron_filename: path to the tab-separated cron file
    :param config: parsed configuration passed through to ``supply``
    """
    with open(own_cron_filename) as tsv_file:
        tsv_reader = csv.DictReader(tsv_file, delimiter='\t')
        for row in tsv_reader:
            now = datetime.datetime.now()
            cron = croniter(row['MASK'])
            # BUG FIX: the previous revision immediately overwrote prev_run
            # with cron.get_next(...), which made diff_seconds negative and
            # the trigger condition below unreachable. We want the most
            # recent *past* scheduled time.
            prev_run = cron.get_prev(datetime.datetime)
            diff_seconds = (now - prev_run).total_seconds()
            if 0.0 <= diff_seconds <= 59.9:
                supplying_process = Process(target=supply,
                                            args=(row['submodule_name'], config))
                supplying_process.start()
                # Stagger process launches slightly.
                time.sleep(2)
def main(config_filename):
    """Load the YAML configuration and run one scheduling pass.

    :param config_filename: path to a YAML file with at least a ``cron_file``
        key naming the tab-separated cron table.
    """
    with open(config_filename) as config_file:
        # safe_load constructs only plain Python objects. Bare yaml.load
        # without a Loader is deprecated since PyYAML 5.1 (arbitrary object
        # construction) and a TypeError in PyYAML 6.
        config = yaml.safe_load(config_file)
    read_own_cron(config['cron_file'], config)
if __name__ == '__main__':
    # Script entry point: pick the configuration file via --config
    # (defaults to the production config) and run one scheduling pass.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='configs/prod.yml')
    args = parser.parse_args()
    main(args.config)
|
# -*- encoding: utf-8 -*-
from cooka.common import util
from cooka.common.exceptions import EntityNotExistsException
from cooka.common.model import Model, ModelStatusType
from cooka.dao.entity import ExperimentEntity, DatasetEntity
from sqlalchemy.sql import func
class BaseDao:
    """Shared helpers for DAO classes: unwrap single-row query results."""

    def require_one(self, items, entity_name):
        """Return the first element of *items*; raise ValueError when empty."""
        first = self.checkout_one(items)
        if first is None:
            raise ValueError(f"Entity name = {entity_name} does not exists in db.")
        return first

    def checkout_one(self, list_result):
        """Return the first element of *list_result*, or None if None/empty."""
        if not list_result:
            return None
        return list_result[0]
class ExperimentDao(BaseDao):
    """Data-access helpers for ExperimentEntity rows."""

    def find_by_name(self, session, model_name) -> ExperimentEntity:
        """Return the experiment named *model_name*.

        Note: delegates to require_one, so a missing row raises ValueError
        rather than returning None.
        """
        model = session.query(ExperimentEntity).filter(ExperimentEntity.name == model_name).all()
        return self.require_one(model, model_name)

    def require_by_name(self, s, model_name):
        """Return the experiment named *model_name*.

        NOTE(review): find_by_name already raises ValueError when nothing
        matches, so the None check below is currently unreachable; it is kept
        for backward compatibility.
        """
        model = self.find_by_name(s, model_name)
        if model is None:
            raise EntityNotExistsException(Model, model_name)
        return model

    def find_by_dataset_name(self, session, dataset_name, page_num, page_size):
        """Page through a dataset's experiments, newest first.

        :returns: (list of model beans for the page, total row count)
        """
        offset = (page_num - 1) * page_size
        query = session\
            .query(ExperimentEntity)\
            .filter(ExperimentEntity.dataset_name == dataset_name)
        total = query.count()
        models = query.order_by(ExperimentEntity.create_datetime.desc()) \
            .limit(page_size).offset(offset).all()
        return [m.to_model_bean() for m in models], total

    def find_running_model(self, session):
        """Return beans for all experiments currently in Running status."""
        models = session \
            .query(ExperimentEntity) \
            .filter(ExperimentEntity.status == ModelStatusType.Running) \
            .order_by(ExperimentEntity.create_datetime.desc()) \
            .all()
        return [m.to_model_bean() for m in models]

    def update_model_by_name(self, session, model_name, properties):
        """Apply *properties* to one experiment; raise unless exactly one row changed."""
        n_affect = session \
            .query(ExperimentEntity) \
            .filter(ExperimentEntity.name == model_name) \
            .update(properties)
        if n_affect != 1:
            raise Exception(f"Update model = {model_name} status failed, affect rows = {n_affect}, properties = {properties}")

    def find_by_train_job_name(self, session, train_job_name):
        """Return the experiment owning *train_job_name*; raise ValueError if absent."""
        models = session.query(ExperimentEntity).filter(ExperimentEntity.train_job_name == train_job_name).all()
        one = self.checkout_one(models)
        if one is None:
            raise ValueError(f"No model of train job name = {train_job_name}")
        return one

    def get_max_experiment(self, session, dataset_name):
        """Return the highest experiment number for a dataset, or 0 when none exist."""
        no_experiment = session.query(func.max(ExperimentEntity.no_experiment)).filter(ExperimentEntity.dataset_name == dataset_name).one_or_none()[0]
        if no_experiment is None:
            return 0  # start from 1
        return no_experiment

    def query_n_experiment(self, session, dataset_name):
        """Count distinct experiment numbers for a dataset.

        SECURITY FIX: *dataset_name* used to be interpolated directly into
        the SQL string (an injection vector); it is now passed as a bound
        parameter. The table name still comes from the entity class, which
        is trusted.
        """
        sql = (f"select count(distinct(no_experiment)) from {ExperimentEntity.__tablename__} "
               "where dataset_name = :dataset_name")
        return session.execute(sql, {"dataset_name": dataset_name}).fetchone()[0]
class DatasetDao(BaseDao):
    """Data-access helpers for DatasetEntity rows."""

    def require_by_name(self, session, dataset_name) -> DatasetEntity:
        """Return the dataset named *dataset_name*; raise ValueError if absent."""
        d = self.find_by_name(session, dataset_name)
        if d is None:
            raise ValueError(f"Dataset name = {dataset_name} does not exists in db.")
        else:
            return d

    def find_by_name(self, session, dataset_name) -> DatasetEntity:
        """Return the dataset named *dataset_name*, or None when not found."""
        list_result = session.query(DatasetEntity).filter(DatasetEntity.name == dataset_name).all()
        return self.checkout_one(list_result)

    def pagination(self, session, page_num, page_size, query_key, order_by, order):
        """Page through permanent, analyzed datasets.

        :param query_key: optional substring filter on the dataset name
        :param order_by: column name on DatasetEntity to sort by
        :param order: 'asc' or 'desc' (method name invoked on the column)
        :returns: (datasets for the requested page, total matching count)
        """
        # !! is False can not use
        # NOTE: '== False' is required: SQLAlchemy overloads the comparison
        # to emit SQL, and 'is False' cannot be overloaded.
        query = session.query(DatasetEntity).filter(DatasetEntity.is_temporary == False).filter(DatasetEntity.status == DatasetEntity.Status.Analyzed)
        # page_num should > 1
        if query_key is not None and len(query_key) > 0:
            query = query.filter(DatasetEntity.name.like(f'%{query_key}%'))
        total = query.count()
        offset = (page_num - 1) * page_size
        # Dataset.create_datetime.desc()
        order_by_col = getattr(getattr(DatasetEntity, order_by), order)()
        datasets = query.order_by(order_by_col).limit(page_size).offset(offset).all()
        return datasets, total

    def delete(self, session, dataset_name):
        """Delete the dataset named *dataset_name*; raise ValueError if missing."""
        model = session.query(DatasetEntity).filter(DatasetEntity.name == dataset_name).all()
        self.require_one(model, dataset_name)
        session.query(DatasetEntity).filter(DatasetEntity.name == dataset_name).delete()

    def update_by_name(self, session, dataset_name, properties):
        """Apply *properties* to one dataset; raise unless exactly one row changed."""
        n_affect = session \
            .query(DatasetEntity) \
            .filter(DatasetEntity.name == dataset_name) \
            .update(properties)
        if n_affect != 1:
            raise Exception(f"Update dataset = {dataset_name} status failed, affect rows = {n_affect}, properties = {properties}")
"""
TODO: Exit out of all demon programs when quit
TODO: Check the characters position before it moves or performs certain actions.
TODO: Integrate the BUY function into the shops.
"""
import pathlib as pathlib
import textwrap as textwrap
from app.main import world, enemies, command_parser, config, npcs, combat, events
verbs = config.verbs
stances = config.stances
action_history = []
wrapper = textwrap.TextWrapper(width=config.TEXT_WRAPPER_WIDTH)
def do_action(action_input, character=None):
    """Record *action_input* in the command history and dispatch it.

    Returns the handler's action_result dict, or None when no character is
    loaded or the input is empty.
    """
    # Most-recent-first command history.
    action_history.insert(0, action_input)

    if not character:
        events.game_event(game_event_text="No character loaded. You will need to create a new character or load an existing character.")
        return
    if len(action_input) == 0:
        events.game_event("")
        return

    parsed = command_parser.parser(action_input)
    handler = DoActions.do_action(parsed['action_verb'], character, **parsed)
    return handler.action_result
class DoActions:
    """Dispatch hub for player commands.

    Verb handler classes register themselves via ``register_subclass`` and
    are instantiated through ``do_action``; each instance accumulates its
    outcome in ``action_result``.
    """

    # Registry mapping verb string -> handler subclass.
    do_actions = {}

    def __init__(self, character, **kwargs):
        self.character = character
        self.action_result = {
            "room_change": {
                "room_change_flag": False,
                "old_room": None,
                "new_room": None,
            },
            "character_output": None,
            "room_output": {},
            "area_output": None,
            "status_window": None,
        }

    @classmethod
    def register_subclass(cls, action):
        """Catalogues actions in a dictionary for reference purposes"""
        def decorator(subclass):
            cls.do_actions[action] = subclass
            return subclass
        return decorator

    @classmethod
    def do_action(cls, action, character, **kwargs):
        """Instantiate and run the handler registered for *action*."""
        try:
            handler = cls.do_actions[action]
        except KeyError:
            events.game_event("I am sorry, I did not understand.")
            return
        return handler(character, **kwargs)

    def update_room(self, old_room_number, new_room_number):
        """Flag a room transition from *old_room_number* to *new_room_number*."""
        room_change = self.action_result['room_change']
        room_change['room_change_flag'] = True
        room_change['old_room'] = old_room_number
        room_change['new_room'] = new_room_number
        return

    def update_character_output(self, character_output_text):
        """Queue text destined for the acting character."""
        self.action_result['character_output'] = character_output_text

    def update_room_output(self, room_output_text):
        """Queue text destined for others in the room."""
        self.action_result['room_output'] = room_output_text

    def update_area_output(self, area_output_text):
        """Queue text destined for the wider area."""
        self.action_result['area_output'] = area_output_text

    def update_status(self, status_text):
        """Queue a status-window refresh."""
        self.action_result['status_window'] = status_text
@DoActions.register_subclass('ask')
class Ask(DoActions):
    """\
    Certain npcs have information that is valuable for you. The ASK verb allows you to interact with these npcs
    and obtain that information.

    Usage:
    ASK <npc> about <subject>\
    """

    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)

        # Guard clauses: busy, dead, or under-specified commands stop here.
        if character.check_round_time():
            return
        if character.is_dead():
            return
        if not kwargs['direct_object']:
            events.game_event("Who are you trying to ask?")
            return
        if not kwargs['indirect_object']:
            events.game_event("What are you trying to ask about?")
            return

        # Ask the first npc in the room whose handle matches the command.
        for npc in character.room.npcs:
            if set(npc.handle) & set(kwargs['direct_object']):
                npc.ask_about(object=kwargs['indirect_object'])
                return
        events.game_event("That doesn't seem to do any good.")
@DoActions.register_subclass('attack')
class Attack(DoActions):
    """\
    ATTACK allows you to engage in combat with an enemy. Provided you are not in round time, ATTACK swings
    the weapon in your right hand (or your bare fist if there is no weapon) at the enemy. You will not be able
    to attack anyone other than enemies.

    Usage:
    ATTACK <enemy> : Engages an enemy and begins combat.\
    """

    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)

        if character.check_round_time():
            return
        if character.is_dead():
            return

        # An explicit target in the command replaces any saved target.
        if kwargs['direct_object']:
            character.target = kwargs['direct_object']

        if not character.target:
            events.game_event("Who are you going to attack? You do not have a target.")
            return

        # Attacking an npc is refused rather than started.
        for npc in character.room.npcs:
            if set(npc.handle) & set(character.target):
                events.game_event("{} will probably not appreciate that.".format(npc.name))
                return

        # Swing at the first matching enemy in the room.
        for enemy in character.room.enemies:
            if set(enemy.handle) & set(character.target):
                combat.melee_attack_enemy(character, enemy)
                return

        # No enemy matched the target.
        events.game_event("{} is not around here.".format(kwargs['direct_object']))
        return
@DoActions.register_subclass('attribute')
class Attributes(DoActions):
    """\
    ATTRIBUTES allows you to view various attributes\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        # Currently only reports the base attack strength.
        events.game_event('''
        Attribute: {}
        '''.format(character.attack_strength_base))
@DoActions.register_subclass('buy')
class Buy(DoActions):
    """\
    BUY enables you to purchase an item from a shop.
    Usage:
    BUY <#>: Finalize purchase of the selected item.\
    """
    def __init__(self, character, **kwargs):
        # Fix: every other verb class initializes the DoActions base; Buy was
        # the only one that skipped it, so base state (e.g. self.character)
        # was never set up for this action.
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        if not character.check_position_to_move():
            return
        # Buying requires: a shop room, a filled shop, and an active ORDER session.
        if character.room.is_shop == False:
            events.game_event("You can't seem to find a way to order anything here.")
            return
        if character.room.shop_filled == False:
            events.game_event("You will need to ORDER first.")
            return
        if character.room.shop.in_shop == False:
            events.game_event("You have exited the shop. You will need to ORDER again.")
            return
        # The purchased item is delivered to the dominant hand, which must be free.
        if character.get_dominant_hand_inv() is not None:
            events.game_event("You will need to empty your right hand first.")
            return
        character.set_dominant_hand_inv(character.room.shop.buy_item(number=kwargs['number_1']))
@DoActions.register_subclass('drop')
class Drop(DoActions):
    """\
    DROP sets an object within your environment. This verb works the same as PUT <item>.
    Usage:
    DROP <item> : Places an item within an environment.
    DROP <item> in <object/item> : Will put an item within an object or within another item if that object or item
    is a container and if that object or item has enough room within it.
    DROP <item> on <object/item> : Will put an item on top of an object or on top of another item if that object
    or item is stackable.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        elif not kwargs['direct_object']:
            events.game_event("I'm sorry, I could not understand what you wanted.")
            return
        elif character.get_dominant_hand_inv() is None:
            events.game_event("You do not have that item in your hand")
            return
        # The named item must be the one actually held in the dominant hand.
        elif not set(character.get_dominant_hand_inv().handle) & set(kwargs['direct_object']):
            events.game_event("You do not have that item in your right hand.")
            return
        else:
            # Move the held item into the room, then empty the hand.
            character.room.items.append(character.get_dominant_hand_inv())
            events.game_event("You drop " + character.get_dominant_hand_inv().name)
            character.set_dominant_hand_inv(item=None)
            character.print_status()
            return
@DoActions.register_subclass('east')
@DoActions.register_subclass('e')
class East(DoActions):
    """\
    Moves you east, if you can move in that direction.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        if not character.check_position_to_move():
            return
        # A tile must exist one step east (x + 1) in the current area.
        if world.tile_exists(x=self.character.location_x + 1, y=self.character.location_y, area=self.character.area):
            # Leaving the room closes any shop menu still open in it.
            if character.room.shop_filled == True:
                if character.room.shop.in_shop == True:
                    character.room.shop.exit_shop()
            old_room = self.character.room.room_number
            self.character.move_east()
            # Notify both the departed and the entered room, then refresh status.
            self.update_room(old_room_number=old_room, new_room_number=self.character.room.room_number)
            self.update_status(character.get_status())
            return
        else:
            events.game_event("You cannot find a way to move in that direction.")
            return
@DoActions.register_subclass('exit')
class Exit(DoActions):
    """\
    When ordering in a shop, EXIT leaves the order menu. In order to see the menu again, you will need to ORDER again.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        # NOTE(review): this verb reports via game_window.print_status while the
        # sibling verbs use events.game_event — confirm which channel is intended.
        if character.room.is_shop == False:
            game_window.print_status("You have nothing to exit.")
            return
        if character.room.shop_filled == False:
            game_window.print_status("You have nothing to exit.")
            return
        if character.room.shop.in_shop == False:
            game_window.print_status("You have nothing to exit.")
            return
        else:
            character.room.shop.exit_shop()
            character.print_status()
            return
@DoActions.register_subclass('experience')
@DoActions.register_subclass('exp')
class Experience(DoActions):
    """\
    Displays your experience information.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        events.game_event('''\
        Experience: {}
        '''.format(character.experience))
@DoActions.register_subclass('flee')
class Flee(DoActions):
    """\
    FLEE sends you in a random direction in your environment. FLEE can only be used when not in round time.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        if not character.check_position_to_move():
            return
        # Leaving the room closes any shop menu still open in it.
        if character.room.shop_filled == True:
            if character.room.shop.in_shop == True:
                character.room.shop.exit_shop()
        available_moves = character.room.adjacent_moves()
        # Fix: the original called random.randint(0, len(...) - 1), which
        # raises ValueError when the room has no exits; guard and use the
        # idiomatic random.choice instead.
        if not available_moves:
            events.game_event("There is nowhere to flee to.")
            return
        actions.do_action(action_input=random.choice(available_moves), character=character)
        character.print_status()
        return
@DoActions.register_subclass('get')
@DoActions.register_subclass('take')
class Get(DoActions):
    """\
    GET retrieves an item from your surroundings. Many objects cannot be moved from their current position.
    The item will be taken by your right hand, therefore you right hand will need to be empty. This
    verb functions the same as TAKE.
    Usage:
    GET <item>\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        if not kwargs['direct_object']:
            events.game_event("I'm sorry, I could not understand what you wanted.")
            return
        # Fixed scenery objects cannot be picked up.
        for room_object in character.room.objects:
            if set(room_object.handle) & set(kwargs['direct_object']):
                events.game_event("Perhaps picking up {} is not a good idea.".format(room_object.name))
                return
        if character.get_dominant_hand_inv() is None:
            # NOTE(review): item_found is never set to True — each match returns
            # immediately — so both `if not item_found` checks always pass when
            # reached. They act as plain fall-through steps.
            item_found = False
            # First try loose items lying in the room.
            for room_item in character.room.items:
                if set(room_item.handle) & set(kwargs['direct_object']):
                    character.set_dominant_hand_inv(room_item)
                    character.room.items.remove(room_item)
                    events.game_event("You pick up {}.".format(room_item.name))
                    character.print_status()
                    return
            if not item_found:
                # Then look inside carried containers.
                for inv_item in character.inventory:
                    if inv_item.container:
                        for sub_item in inv_item.items:
                            if set(sub_item.handle) & set(kwargs['direct_object']):
                                character.set_dominant_hand_inv(sub_item)
                                inv_item.items.remove(sub_item)
                                events.game_event("You take {} from {}.".format(sub_item.name, inv_item.name))
                                character.print_status()
                                return
            if not item_found:
                events.game_event("A " + kwargs['direct_object'][0] + " is nowhere to be found")
        else:
            events.game_event('You already have something in your right hand')
@DoActions.register_subclass('give')
class Give(DoActions):
    """\
    GIVE allows you to exchange items between you and various npcs. In order to give an item to an npc, you
    must have the item in your right hand.
    Usage:
    GIVE <item> to <npc> : Gives the item to the npc if the npc has the ability to accept the item.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        elif not kwargs['direct_object']:
            events.game_event("What are you trying to give?")
            return
        elif character.get_dominant_hand_inv() is None:
            events.game_event("You don't seem to be holding that item in your hand.")
            return
        # The named item must be the one actually held in the dominant hand.
        elif not set(character.get_dominant_hand_inv().handle) & set(kwargs['direct_object']):
            events.game_event("You don't seem to be holding that item in your hand.")
            return
        elif not kwargs['indirect_object']:
            events.game_event("To whom do you want to give?")
            return
        else:
            # The recipient is matched by lowercased first name only.
            for npc in character.room.npcs:
                if {npc.first_name.lower()} & set(kwargs['indirect_object']):
                    # give_item returns truthy when the npc accepts the item.
                    if npc.give_item(character.get_dominant_hand_inv()):
                        character.set_dominant_hand_inv(item=None)
                        character.print_status()
                        return
                    else:
                        # NPC matched but refused the item; give_item reports why.
                        return
            events.game_event("That didn't seem to work.")
            return
@DoActions.register_subclass('go')
class Go(DoActions):
    """\
    GO allows you to move toward a certain object. If the object can be passed through, you will pass through it.
    Usage:
    GO <object> : move toward or through an object.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        if not character.check_position_to_move():
            return
        if not kwargs['direct_object']:
            events.game_event("Go where?")
            return
        # Objects are checked first — only they can actually be entered/passed.
        for room_object in character.room.objects:
            if set(room_object.handle) & set(kwargs['direct_object']):
                events.game_event("You move toward {}.".format(room_object.name))
                room_object.go_object(character=character)
                return
        # Items and npcs can only be approached, not passed through.
        for room_item in character.room.items:
            if set(room_item.handle) & set(kwargs['direct_object']):
                events.game_event("You move toward {}.".format(room_item.name))
                return
        for room_npc in character.room.npcs:
            if set(room_npc.handle) & set(kwargs['direct_object']):
                events.game_event("You move toward {}.".format(room_npc.name))
                return
@DoActions.register_subclass('health')
class Health(DoActions):
    """\
    HEALTH shows your current health attributes.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        events.game_event('''
        Health: {} of {} hit points
        '''.format(character.health,
                   character.health_max))
@DoActions.register_subclass('help')
class Help(DoActions):
    """\
    Provides help on all parts of the game
    Usage:
    HELP <subject> : Output help on a specific subject.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        verb_list = ""
        if kwargs['subject_verb'] == None:
            # No subject given: render the module-level `verbs` list in three
            # aligned columns (trailing verbs beyond a multiple of 3 are dropped
            # by the zip).
            for a, b, c in zip(verbs[::3], verbs[1::3], verbs[2::3]):
                verb_list = verb_list + '{:30s}{:30s}{:30s}\n'.format(a,b,c)
            events.game_event("""
        Below are the list of actions for which you can ask for help.
        Type HELP <verb> for more information about that specific verb.
        {}\
        """.format(verb_list))
        elif kwargs['subject_verb'] in DoActions.do_actions:
            # Per-verb help is simply that verb class's docstring.
            events.game_event(DoActions.do_actions[kwargs['subject_verb']].__doc__)
        else:
            events.game_event("I'm sorry, what did you need help with?")
@DoActions.register_subclass('info')
@DoActions.register_subclass('information')
class Information(DoActions):
    """\
    Provides general information on your character including level, experience, and other attributes.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        events.game_event('''
        Name: {} {}
        Gender: {}
        Race: {}
        Profession: {}
        Level: {}
        '''.format(character.first_name, character.last_name,
                   character.gender,
                   character.race,
                   character.profession,
                   character.level))
@DoActions.register_subclass('inventory')
class Inventory(DoActions):
    """\
    INVENTORY allows you to view your inventory. It will list all items you have in your possession. INVENTORY
    will not list the items within any containers you have.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        # Describe what each hand holds.
        if character.get_dominant_hand_inv():
            right_hand = "You have {} in your {} hand.".format(character.get_dominant_hand_inv().name, character.dominance)
        else:
            right_hand = "Your right hand is empty."
        if character.get_non_dominant_hand_inv():
            left_hand = "You have {} in your {} hand.".format(character.get_non_dominant_hand_inv().name, character.non_dominance)
        else:
            left_hand = "Your left hand is empty."
        # Worn clothing, phrased as a natural-language list.
        inventory_clothing = [x.name for x in character.inventory if x.category == 'clothing']
        if len(inventory_clothing) > 1:
            inventory_clothing = "You are wearing {} and {}.".format(', '.join(inventory_clothing[:-1]), inventory_clothing[-1])
        elif len(inventory_clothing) == 1:
            inventory_clothing = "You are wearing {}.".format(inventory_clothing[0])
        else:
            inventory_clothing = "You are wearing nothing."
        # Worn armor. Fix: the original joined the armor *objects* themselves
        # (TypeError in ', '.join), passed three arguments to a two-placeholder
        # format string, and left a stray .format(...) on the no-armor string.
        # Collect the names first, then phrase the sentence like clothing above.
        armor_names = [character.armor[category].name for category in character.armor]
        if len(armor_names) > 1:
            inventory_armor = "You are also wearing {} and {}.".format(', '.join(armor_names[:-1]), armor_names[-1])
        elif len(armor_names) == 1:
            inventory_armor = "You are also wearing {}.".format(armor_names[0])
        else:
            inventory_armor = "You are also wearing no armor."
        wealth = "You have {} gulden.".format(character.money)
        events.game_event('''\
{}
{}
{}
{}
{}
\
'''.format(right_hand,
           left_hand,
           wrapper.fill(inventory_clothing),
           wrapper.fill(inventory_armor),
           wrapper.fill(wealth)))
@DoActions.register_subclass('kneel')
class Kneel(DoActions):
    """\
    Moves you to a kneeling position. While you may perform many actions from this position,
    movement is not possible.
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        # No-op if already kneeling; otherwise change position and report it.
        if character.position == 'kneeling':
            events.game_event('You seem to already be kneeling.')
            character.print_status()
            return
        else:
            character.position = 'kneeling'
            events.game_event('You move yourself to a kneeling position.')
            character.print_status()
            return
@DoActions.register_subclass('lie')
class Lie(DoActions):
    """\
    Moves you to a lying position on the ground. While many actions can be performed on the ground,
    movement is not possible.
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        # No-op if already lying; otherwise change position and report it.
        if character.position == 'lying':
            events.game_event('You seem to already be lying down.')
            character.print_status()
            return
        else:
            character.position = 'lying'
            events.game_event('You lower yourself to the ground and lie down.')
            character.print_status()
            return
@DoActions.register_subclass('look')
@DoActions.register_subclass('l')
class Look(DoActions):
    """\
    View the environment and objects or items within your environment.
    Usage:
    LOOK : shows the descriptions of the environment around you.
    LOOK <object/item> : shows the description of the object at which you want to look.
    LOOK <npc> : shows the description of the npc at which you want to look.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        # Bare LOOK: re-describe the room.
        if kwargs['preposition'] == None:
            character.room.intro_text()
            return
        if kwargs['preposition'][0] == 'in':
            if kwargs['indirect_object'] is None:
                events.game_event("I am not sure what you are referring to.")
                return
            for item in character.room.items + character.room.objects + character.room.npcs + character.inventory + [character.get_dominant_hand_inv()] + [character.get_non_dominant_hand_inv()]:
                # Fix: an empty hand contributes None to this list; skip it
                # instead of crashing on None.handle.
                if not item:
                    continue
                # Fix: the original refused on the first NPC in the list even
                # when the player named something else; only refuse when the
                # matched target actually is an NPC.
                if set(item.handle) & set(kwargs['indirect_object']):
                    if isinstance(item, npcs.NPC):
                        events.game_event("It wouldn't be advisable to look in " + item.name)
                        return
                    events.game_event(item.contents())
                    return
            events.game_event("A {} is nowhere to be found.".format(kwargs['indirect_object'][0]))
            return
        if kwargs['preposition'][0] == 'at':
            if kwargs['indirect_object'] is None:
                events.game_event("I am not sure what you are referring to.")
                return
            # One pass over everything visible: room contents, npcs, enemies,
            # carried inventory, and both hands (empty hands are None — skip).
            for item in character.room.items + character.room.objects + character.room.npcs + character.room.enemies + character.inventory + [character.get_dominant_hand_inv()] + [character.get_non_dominant_hand_inv()]:
                if not item:
                    continue
                elif set(item.handle) & set(kwargs['indirect_object']):
                    item.view_description()
                    return
            # Fix: the original's enemy loop tested `npc.handle` instead of
            # `enemy.handle`, so enemies could never be looked at (and the
            # loop raised NameError when no npc loop had run). The redundant
            # per-collection loops that followed the combined pass above were
            # removed — they could never match anything new.
            events.game_event("At what did you want to look?")
            return
        else:
            events.game_event("I'm sorry, I didn't understand you.")
            return
@DoActions.register_subclass('north')
@DoActions.register_subclass('n')
class North(DoActions):
    """\
    Moves you north, if you can move in that direction.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        if not character.check_position_to_move():
            return
        # A tile must exist one step north (y - 1) in the current area.
        if world.tile_exists(x=self.character.location_x, y=self.character.location_y - 1, area=self.character.area):
            # Leaving the room closes any shop menu still open in it.
            if character.room.shop_filled == True:
                if character.room.shop.in_shop == True:
                    character.room.shop.exit_shop()
            self.character.move_north()
            character.print_status()
            return
        else:
            events.game_event('You cannot find a way to move in that direction.')
            return
@DoActions.register_subclass('order')
class Order(DoActions):
    """\
    In certain rooms, you are able to order products through an ordering system. ORDER initiates the ordering system.\
    Usage:
    ORDER: Enters the shop and displays the shop menu in the status window
    ORDER <#>: Orders the relevant item. You cannot order a specific item until you have entered the shop using the ORDER command by itself.
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        if character.room.is_shop == False:
            events.game_event("You can't seem to find a way to order anything here.")
            return
        elif character.room.is_shop == True:
            # First ORDER in this room: lazily populate the shop, then enter it.
            if character.room.shop_filled == False:
                character.room.fill_shop()
                character.room.shop.enter_shop()
                return
            # Shop exists but player is outside the menu: re-enter it.
            if character.room.shop.in_shop == False:
                character.room.shop.enter_shop()
                return
            # Already in the menu: ORDER <#> selects an item.
            character.room.shop.order_item(kwargs['number_1'])
            return
@DoActions.register_subclass('position')
@DoActions.register_subclass('pos')
class Position(DoActions):
    """\
    Displays the position you are currently in.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        self.character = character
        self.position()
    def position(self):
        """Report the character's current position (standing/sitting/kneeling/lying)."""
        events.game_event('''You are currently in the {} position.'''.format(self.character.position))
@DoActions.register_subclass('put')
class Put(DoActions):
    """\
    PUT sets an object within your environment. This usage works the same as DROP <item>.
    Usage:
    PUT <item> : Places an item within an environment.
    PUT <item> in <object/item> : Will put an item within an object or within another item if that object or item
    is a container and if that object or item has enough room within it.
    PUT <item> on <object/item> : Will put an item on top of an object or on top of another item if that object
    or item is stackable.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        if not kwargs['direct_object']:
            events.game_event("What is it you're trying to put down?")
            return
        elif character.get_dominant_hand_inv() is None:
            events.game_event("You do not have that item in your hand.")
            return
        elif not set(character.get_dominant_hand_inv().handle) & set(kwargs['direct_object']):
            events.game_event("You do not have that item in your right hand.")
            return
        elif not kwargs['preposition']:
            # Fix: bare `PUT <item>` crashed on kwargs['preposition'][0] with
            # preposition None. The docstring promises it works like DROP, so
            # place the held item in the room.
            held = character.get_dominant_hand_inv()
            character.room.items.append(held)
            events.game_event("You drop " + held.name)
            character.set_dominant_hand_inv(item=None)
            character.print_status()
            return
        elif kwargs['preposition'][0] == "in":
            if not kwargs['indirect_object']:
                # Guard: without a destination the handle match below would
                # crash on set(None).
                events.game_event("That item is not around here, unfortunately.")
                return
            # Carried containers are checked before room containers.
            for inv_item in character.inventory:
                if set(inv_item.handle) & set(kwargs['indirect_object']):
                    if inv_item.container == False:
                        events.game_event("{} won't fit in there.".format(character.get_dominant_hand_inv().name))
                        return
                    if len(inv_item.items) == inv_item.capacity:
                        events.game_event("{} can't hold any more items".format(inv_item.name))
                        return
                    held = character.get_dominant_hand_inv()
                    inv_item.items.append(held)
                    events.game_event("You put {} {} {}".format(held.name, kwargs['preposition'][0], inv_item.name))
                    character.set_dominant_hand_inv(item=None)
                    character.print_status()
                    return
            for room_item in character.room.items:
                if set(room_item.handle) & set(kwargs['indirect_object']):
                    if room_item.container == False:
                        # Fix: message read character.right_hand_inv[0] directly
                        # instead of the dominant-hand accessor used elsewhere.
                        events.game_event("{} won't fit {} there.".format(character.get_dominant_hand_inv().name, kwargs['preposition'][0]))
                        return
                    # Fix: the original emptied the hand, then formatted the
                    # message from the (now None) hand — AttributeError — and
                    # cleared the hand a second time. Capture the item first.
                    held = character.get_dominant_hand_inv()
                    room_item.items.append(held)
                    events.game_event("You put {} {} {}".format(held.name, kwargs['preposition'][0], room_item.name))
                    character.set_dominant_hand_inv(item=None)
                    character.print_status()
                    return
            # Fix: the original fell off the end of this branch silently when
            # no container matched; report it like the final else does.
            events.game_event("That item is not around here, unfortunately.")
            return
        elif kwargs['preposition'][0] == "on":
            events.game_event("You cannot stack items yet.")
            return
        else:
            events.game_event("That item is not around here, unfortunately.")
            return
@DoActions.register_subclass('quit')
class Quit(DoActions):
    """\
    Exits the game.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        # QUIT is intentionally not implemented as a verb; the player must use
        # the client's own exit mechanism.
        events.game_event("You will need to find a way to exit the game.")
@DoActions.register_subclass('save')
class Save(DoActions):
    """\
    SAVE stores your character's current state so it can be restored later.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        # self.character is bound by DoActions.__init__ above.
        self.character.save()
@DoActions.register_subclass('search')
class Search(DoActions):
    """\
    SEARCH allows you to explore your environment if the object, enemy, or area can be explored.
    Usage:
    SEARCH : Searches the environment around you and uncovers hidden items or objects.
    SEARCH <enemy> : Searches an enemy, uncovers any potential items that the enemy could be hiding, and places
    them in your environment.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        if not kwargs['direct_object']:
            # Bare SEARCH: reveal hidden room items the character is skilled
            # enough to notice.
            items_found = 0
            # Fix: iterate a snapshot — the loop removes entries from
            # room.hidden, and mutating a list while iterating it silently
            # skips the element after each removal.
            for hidden_item in list(character.room.hidden):
                if 100 - character.level >= hidden_item.visibility:
                    character.room.add_item(hidden_item)
                    character.room.remove_hidden_item(hidden_item)
                    events.game_event('You found {}!'.format(hidden_item.name))
                    items_found += 1
            if items_found == 0:
                events.game_event("There doesn't seem to be anything around here.")
            return
        else:
            # SEARCH <target>: only room objects (e.g. corpses) are searchable.
            for object in character.room.objects:
                if set(object.handle) & set(kwargs['direct_object']):
                    object.search(character=character)
                    return
            for item in character.room.items:
                if set(item.handle) & set(kwargs['direct_object']):
                    events.game_event("Searching {} will not do you much good.".format(item.name))
                    return
            for char in character.room.enemies + character.room.npcs:
                if set(char.handle) & set(kwargs['direct_object']):
                    events.game_event("{} probably will not appreciate that.".format(char.first_name))
                    return
            else:
                events.game_event("That doesn't seem to be around here.")
                return
@DoActions.register_subclass('sell')
class Sell(DoActions):
    """\
    SELL allows you to exchange items for gulden. Certain merchants look for items you may find in the wilds.
    Different merchants look for different items. The item must be in your right hand.
    Usage:
    SELL <item> to <npc> : Exchanges items for gulden with an npc if an item can be exchanged.
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        elif not kwargs['direct_object']:
            events.game_event("What is it you are trying to sell?")
            return
        if not kwargs['indirect_object']:
            # Guard: without a buyer the handle intersection below would
            # operate on None.
            events.game_event("Who are you trying to sell to?")
            return
        for npc in character.room.npcs:
            # Fix: the original wrote {kwargs['indirect_object']}, a set
            # literal containing the word *list* itself — TypeError:
            # unhashable type 'list'. Intersect with the words instead.
            if set(npc.handle) & set(kwargs['indirect_object']):
                npc.sell_item(item=character.get_dominant_hand_inv())
                return
        else:
            events.game_event("Who are you trying to sell to?")
@DoActions.register_subclass('sit')
class Sit(DoActions):
    """\
    Moves you to a sitting position. While you can perform many actions while in a sitting position,
    movement is not possible.
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        # No-op if already sitting; otherwise change position and report it.
        if character.position == 'sitting':
            events.game_event('You seem to already be sitting.')
            character.print_status()
            return
        else:
            character.position = 'sitting'
            events.game_event('You move yourself to a sitting position.')
            character.print_status()
            return
@DoActions.register_subclass('skills')
class Skills(DoActions):
    """\
    SKILLS displays the skills available to you as well as the skill rating for your character. Different skills
    allow you to accomplish different tasks.
    Usage:
    SKILLS: Shows your available skills and their rating.
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        # Each skill is shown as "rating (bonus)".
        events.game_event('''
        Edged Weapons Base:  {}
        Edged Weapons:       {} ({})          Armor:               {} ({})
        Blunt Weapons:       {} ({})          Shield:              {} ({})
        Polearm Weapons:     {} ({})          Dodging:             {} ({})
        Thrown Weapons:      {} ({})          Physical Fitness:    {} ({})
        Ranged Weapons:      {} ({})          Perception:          {} ({})
        '''.format(character.skills_base['edged_weapons'],
                   character.skills['edged_weapons'], character.skills_bonus['edged_weapons'],
                   character.skills['armor'], character.skills_bonus['armor'],
                   character.skills['blunt_weapons'], character.skills_bonus['blunt_weapons'],
                   character.skills['shield'], character.skills_bonus['shield'],
                   character.skills['polearm_weapons'], character.skills_bonus['polearm_weapons'],
                   character.skills['dodging'], character.skills_bonus['dodging'],
                   character.skills['thrown_weapons'], character.skills_bonus['thrown_weapons'],
                   character.skills['physical_fitness'], character.skills_bonus['physical_fitness'],
                   character.skills['ranged_weapons'], character.skills_bonus['ranged_weapons'],
                   character.skills['perception'], character.skills_bonus['perception'])
        )
@DoActions.register_subclass('skin')
class Skin(DoActions):
    """\
    Many enemies are able to be skinned for various pelts, hides, etc. The SKIN verb allows you to skin enemies.
    if successful the resulting item will be places within the environment. Not all enemies are able to be skinned.
    Usage:
    SKIN <enemy> : Skins an enemy and, if successful, leaves a skin.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        elif not kwargs['direct_object']:
            events.game_event("What are you trying to skin?")
            return
        else:
            # Corpses are room objects; only they can be skinned.
            for object in character.room.objects:
                if set(object.handle) & set(kwargs['direct_object']):
                    object.skin_corpse()
                    return
            for item in character.room.items:
                if set(item.handle) & set(kwargs['direct_object']):
                    # Fix: user-facing string read "You can seem to find";
                    # corrected to "can't".
                    events.game_event("You can't seem to find any way to skin {}.".format(item.name))
                    return
            for npc in character.room.npcs:
                if set(npc.handle) & set(kwargs['direct_object']):
                    events.game_event("You approach {}, but think better of it.".format(npc.name))
                    return
@DoActions.register_subclass('south')
@DoActions.register_subclass('s')
class South(DoActions):
    """\
    Moves you south, if you can move in that direction.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        if not character.check_position_to_move():
            return
        # A tile must exist one step south (y + 1) in the current area.
        if world.tile_exists(x=self.character.location_x, y=self.character.location_y + 1, area=self.character.area):
            # Leaving the room closes any shop menu still open in it.
            if character.room.shop_filled == True:
                if character.room.shop.in_shop == True:
                    character.room.shop.exit_shop()
            self.character.move_south()
            character.print_status()
        else:
            events.game_event("You cannot find a way to move in that direction.")
@DoActions.register_subclass('stance')
class Stance(DoActions):
    """\
    STANCE controls the position in which you carry yourself in combat. Your stance will affect the amount of
    attack and defensive strength you have during combat.
    Usage:
    STANCE: Shows your current stance.
    STANCE <type>: Changes your stance to the desired stance.
    Types of Stances:
    offensive
    forward
    neutral
    guarded
    defense\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        self.character = character
        self.stance(character=character, desired_stance=kwargs['adjective_1'])
    def stance(self, character, desired_stance):
        """Report the current stance, or switch to *desired_stance* if it is a recognized stance word."""
        if not desired_stance:
            events.game_event('''You are currently in the {} stance.'''.format(self.character.stance))
            return
        # `stances` is a module-level list of the valid stance names.
        if set(desired_stance) & set(stances):
            self.character.stance = desired_stance[0]
            events.game_event('''You are now in {} stance.'''.format(desired_stance[0]))
            character.print_status()
            return
        else:
            events.game_event("You cannot form that stance.")
            return
@DoActions.register_subclass('stand')
class Stand(DoActions):
    """\
    Raises you to the standing position if you are not already in the standing position.
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        self.character = character
        self.stand(character=self.character)
    def stand(self, character):
        """Move the character to 'standing', reporting through the action-result channels."""
        if self.character.check_round_time():
            return
        if self.character.is_dead():
            return
        if self.character.position == 'standing':
            self.update_character_output(character_output_text="You seem to already be standing.")
            self.update_status(status_text=character.get_status())
            return
        else:
            self.character.position = 'standing'
            self.update_character_output(character_output_text="You raise yourself to a standing position.")
            # Broadcast a third-person message to everyone else in this room.
            self.update_room_output(room_output_text={character.room.room_number: "{} raises {}self to a standing position".format(character.first_name, character.possessive_pronoun)})
            self.update_status(status_text=character.get_status())
            return
@DoActions.register_subclass('stats')
class Stats(DoActions):
    """\
    Displays your general statistics.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        # Each stat is shown as "value (bonus)".
        events.game_event('''
        Name:           {} {}
        Level:          {}
        Strength:       {} ({})         Intellect:  {} ({})
        Constitution:   {} ({})         Wisdom:     {} ({})
        Dexterity:      {} ({})         Logic:      {} ({})
        Agility:        {} ({})         Spirit:     {} ({})
        '''.format(character.first_name,
                   character.last_name,
                   character.level,
                   character.stats['strength'], character.stats_bonus['strength'],
                   character.stats['intellect'], character.stats_bonus['intellect'],
                   character.stats['constitution'], character.stats_bonus['constitution'],
                   character.stats['wisdom'], character.stats_bonus['wisdom'],
                   character.stats['dexterity'], character.stats_bonus['dexterity'],
                   character.stats['logic'], character.stats_bonus['logic'],
                   character.stats['agility'], character.stats_bonus['agility'],
                   character.stats['spirit'], character.stats_bonus['spirit'])
        )
@DoActions.register_subclass('target')
class Target(DoActions):
    """\
    When in combat, you must TARGET an enemy before you can ATTACK them. Use the TARGET verb to set the enemy
    for which you want to ATTACK. TARGET only needs to be set once for the duration of the combat. The enemy
    does not have to be within sight in order for you to TARGET it.
    Usage:
    TARGET <enemy> : Targets an enemy.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if not kwargs['direct_object']:
            events.game_event("What do you want to target?")
            return
        else:
            character.target = kwargs['direct_object']
            # Fix: the original formatted self.target, an attribute this class
            # never sets (AttributeError); the target just assigned lives on
            # the character.
            events.game_event("You are now targeting {}".format(character.target[0]))
            return
@DoActions.register_subclass('west')
@DoActions.register_subclass('w')
class West(DoActions):
    """\
    Moves you west, if you can move in that direction.\
    """
    def __init__(self, character, **kwargs):
        DoActions.__init__(self, character, **kwargs)
        if character.check_round_time():
            return
        if character.is_dead():
            return
        if not character.check_position_to_move():
            return
        # A tile must exist one step west (x - 1) in the current area.
        if world.tile_exists(x=self.character.location_x - 1, y=self.character.location_y, area=self.character.area):
            # Leaving the room closes any shop menu still open in it.
            if character.room.shop_filled == True:
                if character.room.shop.in_shop == True:
                    character.room.shop.exit_shop()
            self.character.move_west()
            character.print_status()
        else:
            events.game_event("You cannot find a way to move in that direction.")
class Action:
    """Bind a callable to a display name and the verb word(s) that trigger it.

    Extra keyword arguments are captured verbatim in ``self.kwargs`` for the
    eventual invocation of ``method``.
    """
    def __init__(self, method, name, action, **kwargs):
        self.method = method
        self.name = name
        self.action = action
        self.kwargs = kwargs
    def __str__(self):
        # Render as "<verb words>: <display name>".
        return f"{self.action}: {self.name}"
class MoveNorthEnemy(Action):
    """Action wrapper that moves an enemy one room north."""
    def __init__(self, **kwargs):
        # NOTE(review): `kwargs=kwargs` passes the dict as a single keyword, so
        # Action stores {'kwargs': {...}} — confirm whether **kwargs was intended.
        super().__init__(method=enemies.Enemy.move_north,
                         name='Move North',
                         action=['north'],
                         kwargs=kwargs)
class MoveSouthEnemy(Action):
    """Action wrapper that moves an enemy one room south."""
    def __init__(self, **kwargs):
        # NOTE(review): `kwargs=kwargs` nests the dict under a 'kwargs' key —
        # confirm whether **kwargs was intended.
        super().__init__(method=enemies.Enemy.move_south,
                         name='Move South',
                         action=['south'],
                         kwargs=kwargs)
class MoveEastEnemy(Action):
    """Action wrapper that moves an enemy one room east."""
    def __init__(self, **kwargs):
        # NOTE(review): `kwargs=kwargs` nests the dict under a 'kwargs' key —
        # confirm whether **kwargs was intended.
        super().__init__(method=enemies.Enemy.move_east,
                         name='Move East',
                         action=['east'],
                         kwargs=kwargs)
class MoveWestEnemy(Action):
    """Enemy action: move one room west."""

    def __init__(self, **kwargs):
        super().__init__(
            name='Move West',
            method=enemies.Enemy.move_west,
            action=['west'],
            kwargs=kwargs,
        )
|
# This file was scaffold by idol_data, but it will not be overwritten, so feel free to edit.
# This file will be regenerated if you delete it.
from ..codegen.schema.literal import SchemaLiteralDataclass as LiteralCodegen
class Literal(LiteralCodegen):
    # User-editable extension point over the generated schema class.
    # Per the scaffold note above, this file is safe to edit by hand.
    pass
|
import streamlit.components.v1 as components
import pandas as pd
import matplotlib.pyplot as plt
import streamlit as st
import plotly.express as px
import plotly.io as pio
import os
import base64
import pickle
import collections
from ..constants import *
from ...graph_utils import get_k_common_followers, get_top_followers, from_pandas_to_pyviz_net
# Use Plotly's dark theme for every chart rendered by this page.
pio.templates.default = "plotly_dark"
# Define the list of streamers to visualize in a graph
def set_graph_analysis(df):
    """Render the graph-analysis page: network metrics, PyViz graphs, or Gephi images.

    df: streamer DataFrame with at least 'name', 'id', 'num_followers',
    'profile_image_url' columns.
    """
    st.title('Graph Analysis of the Hispanic streaming community in Twitch')
    menu_items = ["Network Metrics", "PyViz", "Gephi"]
    menu_variables = st.radio(
        "",
        menu_items,
    )
    # Compare the selected label directly instead of the indirect
    # menu_items.index(menu_variables) == N pattern.
    if menu_variables == "Network Metrics":
        st.subheader("Network Metrics")
        col1, col2 = st.columns((0.7, 0.3))
        col1.write(get_df_metrics(df), use_container_width=True, height=600)
        col2.plotly_chart(get_pie_cores_topusers(df), use_container_width=True, height=600)
        # Dropdown menu sorted by follower count (most-followed first)
        streamer_list = df.sort_values("num_followers", ascending=False).name.tolist()
        col1, _, col2 = st.columns((0.3, .1, 0.7))
        selected_streamer = col1.selectbox(
            'Select streamer to show metrics of', streamer_list)
        col1.image(df[df['name'] == selected_streamer]
                   ['profile_image_url'].values[0], width=100)
        col2.subheader('Position in the ranking of fundamental metrics (from 15460 streamers)')
        col2.write(get_metrics_streamer(selected_streamer, df), use_container_width=True, height=600)
    elif menu_variables == "PyViz":
        show_streamers_pyviz_graphs(df)
    else:
        show_gephi_graphs()
def pv_static(fig, name='graph'):
    """Embed a pyvis network figure inside Streamlit.

    Adapted from https://github.com/napoles-uach/stvis. The figure's
    height/width are strings like '600px'; strip the suffix to get ints.
    """
    height_px = int(fig.height.replace('px', ''))
    width_px = int(fig.width.replace('px', ''))
    fig.show(name + '.html')
    return components.html(fig.html, height=height_px + 30, width=width_px + 30)
def show_streamers_pyviz_graphs(df):
    """Interactive PyViz tab: pick a streamer and render two follower graphs."""
    st.subheader("How do streamers follow each other?")
    ranked_names = df.sort_values("num_followers", ascending=False).name.tolist()
    # Dropdown menu (single selection)
    selected_streamer = st.selectbox(
        'Select streamer to visualize', ranked_names)
    image_col, _, _, _ = st.columns(4)
    image_col.image(df[df['name'] == selected_streamer]
                    ['profile_image_url'].values[0], width=100)
    left, _, right = st.columns((0.45, .1, 0.45))
    left.subheader('Graph of Common Follows')
    left.markdown(explanations_of_graph_1)
    right.subheader('Graph of User Follows')
    right.markdown(explanations_of_graph_2)
    left, _, right = st.columns((0.45, .1, 0.45))
    with left:
        common_df = get_top_followers(
            df.copy(), common_followers_with=selected_streamer)
        common_net = from_pandas_to_pyviz_net(common_df, emphasize_node=selected_streamer)
        pv_static(common_net, name="reports/graph")
    with right:
        follows_df = get_k_common_followers(
            "data/streamers.feather", common_followers_with=selected_streamer)
        follows_net = from_pandas_to_pyviz_net(follows_df, emphasize_node=selected_streamer)
        pv_static(follows_net, name="reports/graph2")
def show_gephi_graphs():
    """Static gallery of Gephi-rendered network images, linked to full-resolution Drive files."""
    image_mapping = {"11. Carola Network Downsampled.png": "https://drive.google.com/file/d/15UYwfW-a4Jl66j8PKWqWbVxAMeQV32v_/view?usp=sharing",
                     "12. Nissaxter Network downsampeld.png": "https://drive.google.com/file/d/1hnw6cczJnbR6ZS8-uVWPFv1z7_4NHTX5/view?usp=sharing",
                     "05. Twitch 30000 followers downsampled.png": "https://drive.google.com/file/d/1itq2yLykr8n0l2nWAnYpz8L-XkU4RHRS/view?usp=sharing",
                     "03. Twitch 100000 followers by views downsample.png": "https://drive.google.com/file/d/1ivRn3VOoNoD8f-_LaViI4odpUZRGx_jK/view?usp=sharing",
                     "04. Twitch 100000 followers downsamled.png": "https://drive.google.com/file/d/1f-aCAXW4RhGf4WJqtA-V6b2vJUSKv_dL/view?usp=sharing",
                     "09. Twitch ASMR dowsampled.png": "https://drive.google.com/file/d/1PQvnT9uolchlgx5KFExtwQGGg_ZMUdFJ/view?usp=sharing",
                     "06. Twitch Just Chatting downsampled.png": "https://drive.google.com/file/d/1PyvyQMP704icQDy3S_lEF4vBD19Ml0JD/view?usp=sharing",
                     "07. Twitch League of Legends downsampled.png": "https://drive.google.com/file/d/1oR3PtAKy8Fwu85VAmU0Ks1NQk8mLJDeh/view?usp=sharing",
                     "08. Twitch Minecraft downsampled.png": "https://drive.google.com/file/d/1m5SNyNe4dsOL3ZBaX-A8qVeAZ5gQema3/view?usp=sharing",
                     "10. Twitch Music downsampled.png": "https://drive.google.com/file/d/160bllFOG2UhrZqCZ8ioPhWlSUa-JQG1f/view?usp=sharing",
                     "02. Twitch partners downsampled.png": "https://drive.google.com/file/d/1tNS8QpjHO_XKFMNnMRoNBUsoNL_U9Dcs/view?usp=sharing",
                     "01. Twitch top 100 streamers downsampled.png": "https://drive.google.com/file/d/1qOLXeuFEFQUGOhbyEVlZpmHSBFZEy-6v/view?usp=sharing"}
    st.subheader('Graphs Made with Gephi')
    # 2 columns showing the images of the graphs generated by Gephi
    images = sorted([img for img in os.listdir('app/main/images') if img.endswith('.png')])
    st.markdown(
        "Click on the images to see them on full resolution or click [here](https://drive.google.com/drive/folders/1sLFmG8H_ccWvvZcTS-vsuiaTParDkmf5)"\
        " to see more.", unsafe_allow_html=True)
    # NOTE(review): with an odd number of images the last one is skipped (len//2 pairs).
    for i in range(len(images)//2):
        col1, col2 = st.columns(2)
        # the two image filenames for this row
        image_path1, image_path2 = images[i*2:i*2+2]
        name1, name2 = [" ".join(path.replace(".png","").split(' ')[:-1])
                        for path in [image_path1, image_path2]]
        # BUG FIX: read the files inside context managers so the handles are
        # closed even if a read raises (the original leaked handles on error).
        with open(f"app/main/images/{image_path1}", "rb") as image_file1:
            data_url1 = base64.b64encode(image_file1.read()).decode("utf-8")
        with open(f"app/main/images/{image_path2}", "rb") as image_file2:
            data_url2 = base64.b64encode(image_file2.read()).decode("utf-8")
        with col1:
            st.markdown(f'''
            <a href="{image_mapping[images[i*2]]}" target="_blank" style="text-align: center; display: block; text-decoration:none" >
            <img src="data:image/gif;base64,{data_url1}" width="600" alt="{name1}">
            <p style="color:darkgrey" >{name1}</p>
            </a>
            ''',unsafe_allow_html=True
            )
        with col2:
            st.markdown(f'''
            <a href="{image_mapping[images[i*2+1]]}" target="_blank" style="text-align: center; display: block; text-decoration:none" >
            <img src="data:image/gif;base64,{data_url2}" width="600" alt="{name2}">
            <p style="color:darkgrey" >{name2}</p>
            </a>
            ''',unsafe_allow_html=True
            )
@st.cache(show_spinner=False)
def get_df_metrics(df):
    """Build a table whose columns are fundamental metrics and whose rows are
    the names of the top-10 streamers under each metric (pickled rankings on disk)."""
    rankings = pd.DataFrame()
    for metric_name in ["indegree", "outdegree", "closeness", "betweenness", "pagerank", "nx_cores"]:
        # Each pickle maps streamer id -> metric value, ordered best-first.
        with open(f"data/fundamental_metrics/{metric_name}.pkl", 'rb') as handle:
            metric = pickle.load(handle)
        top_ids = list(metric.keys())[:10]
        rankings[metric_name] = [df[df["id"] == node_id]["name"].iloc[0]
                                 for node_id in top_ids]
    return rankings
@st.cache(show_spinner=False)
def get_pie_cores_topusers(df):
    """Pie chart of the k-core membership of the most important users.

    Importance = how often a user appears in a metric's top-10 table
    (outdegree excluded)."""
    df_ranking_metrics = get_df_metrics(df)
    # load k-core descomposition
    with open("data/fundamental_metrics/nx_cores.pkl", 'rb') as f:
        nx_cores = pickle.load(f)
    # reformat NetworkX solution (core number -> set of node ids)
    # NOTE(review): nx_cores_format is built but never used below.
    nx_cores_format = {c:set() for c in set(nx_cores.values())}
    for node in nx_cores:
        nx_cores_format[nx_cores[node]].add(node)
    # order the imporant users (according to fundamental metrics) by core number
    important_users_names = collections.Counter()
    for m in df_ranking_metrics.drop("outdegree", axis=1):
        for user_name in df_ranking_metrics[m]:
            important_users_names.update([user_name,])
    # get keys with highest values counter (top 11 most-mentioned names)
    important_users_names = [i[0] for i in important_users_names.most_common()][:11]
    # extract ids from important users
    important_users_ids = list(df[df["name"].isin(important_users_names)]["id"])
    important_users_cores = { user_id: nx_cores[user_id] for user_id in important_users_ids}
    # reformat NetworkX solution (core number -> set of important user ids)
    important_users_cores_format = {c:set() for c in set(important_users_cores.values())}
    for node in important_users_cores:
        important_users_cores_format[important_users_cores[node]].add(node)
    # sort by core number, deepest core first
    important_users_cores_format = dict(sorted(important_users_cores_format.items(), key=lambda x: x[0],reverse=True))
    n_important_users = len(important_users_ids)
    cores = []
    percentage_cores = []
    # Fraction of important users that fall in each core.
    for core in important_users_cores_format:
        cores.append(core)
        users_in_core = [df[df["id"]==node_id]["name"].iloc[0] for node_id in important_users_cores_format[core]]
        percentage_cores.append(len(users_in_core)/n_important_users)
    fig = px.pie(values=percentage_cores, names=list(important_users_cores_format.keys()),
        title = "K-Core Decomposition of the 10 top users",height=350
        )
    fig.update_layout(
        paper_bgcolor="#222222",plot_bgcolor="#222222",
        margin=dict(t=32, b=0.7, l=0.7, r=0.7)
    )
    return fig
@st.cache(show_spinner=False)
def get_metrics_streamer(streamer_name, df):
    """One-row DataFrame with the streamer's 1-based rank under each fundamental metric."""
    streamer_id = str(int(df.loc[df["name"] == streamer_name]["id"]))
    positions = pd.DataFrame()
    for metric_name in ["indegree", "outdegree", "closeness", "betweenness", "pagerank", "nx_cores"]:
        # Each pickle maps streamer id -> metric value, ordered best-first.
        with open(f"data/fundamental_metrics/{metric_name}.pkl", 'rb') as handle:
            metric = pickle.load(handle)
        rank = list(metric.keys()).index(streamer_id) + 1
        positions[metric_name] = [rank]
    positions.index = [streamer_name]
    return positions
|
import time, json, logging
from typing import Dict
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
import treq
from light9 import networking
# Module-level logger (root logger; handlers are configured elsewhere).
log = logging.getLogger()
class MusicTime(object):
    """
    fetch times from ascoltami in a background thread; return times
    upon request, adjusted to be more precise with the system clock
    """

    def __init__(self,
                 period=.2,
                 onChange=lambda position: None,
                 pollCurvecalc='ignored'):
        """period is the seconds between
        http time requests.
        We call onChange with the time in seconds and the total time
        The choice of period doesn't need to be tied to framerate,
        it's more the size of the error you can tolerate (since we
        make up times between the samples, and we'll just run off the
        end of a song)
        """
        self.period = period
        self.hoverPeriod = .05
        self.onChange = onChange
        self.position: Dict[str, float] = {}
        # driven by our pollCurvecalcTime and also by Gui.incomingTime
        self.lastHoverTime = None  # None means "no recent value"
        self.pollMusicTime()

    def getLatest(self, frameTime=None) -> Dict:
        """
        dict with 't' and 'song', etc.
        frameTime is the timestamp from the camera, which will be used
        instead of now.
        Note that this may be called in a gst camera capture thread. Very often.
        """
        if not hasattr(self, 'position'):
            # Startup race: the first poll may not have landed yet.
            return {'t': 0, 'song': None}
        pos = self.position.copy()
        now = frameTime or time.time()
        if pos.get('playing'):
            # Extrapolate from the last fetched server position.
            pos['t'] = pos['t'] + (now - self.positionFetchTime)
        else:
            if self.lastHoverTime is not None:
                pos['hoverTime'] = self.lastHoverTime
        return pos

    def pollMusicTime(self):
        """Issue one HTTP time request and reschedule itself on success/failure."""

        @inlineCallbacks
        def cb(response):
            if response.code != 200:
                raise ValueError("%s %s", response.code,
                                 (yield response.content()))
            position = yield response.json()
            # this is meant to be the time when the server gave me its
            # report, and I don't know if that's closer to the
            # beginning of my request or the end of it (or some
            # fraction of the way through)
            self.positionFetchTime = time.time()
            self.position = position
            self.onChange(position)
            reactor.callLater(self.period, self.pollMusicTime)

        def eb(err):
            # `Logger.warn` is a deprecated alias for `warning`.
            log.warning("talking to ascoltami: %s", err.getErrorMessage())
            reactor.callLater(2, self.pollMusicTime)

        d = treq.get(networking.musicPlayer.path("time").toPython())
        d.addCallback(cb)
        d.addErrback(eb)  # note this includes errors in cb()

    def sendTime(self, t):
        """request that the player go to this time"""
        treq.post(
            networking.musicPlayer.path('time'),
            data=json.dumps({
                # BUG FIX: the original wrote `"t": time`, serializing the
                # `time` module (a json TypeError) instead of the argument.
                "t": t
            }).encode('utf8'),
            headers={b"content-type": [b"application/json"]},
        )
|
import boto3, os, json
from typing import Dict, List, Any, Union
from boto3 import Session
from botocore.exceptions import ClientError
class Entry:
    """A contact record: name, S3 key of the public key, optional fingerprint."""

    def __init__(self, name, publickey, fingerprint):
        self.name = name
        self.publickey = publickey
        self.fingerprint = fingerprint

    def _key(self):
        # Identity triple shared by __eq__ and __hash__.
        return (self.name, self.publickey, self.fingerprint)

    def __str__(self):
        return f'{str(self.__class__)}: {str(self.__dict__)}'

    def __eq__(self, other):
        if not isinstance(other, Entry):
            return NotImplemented
        return self._key() == other._key()

    def __hash__(self):
        # Make class instances usable as items in hashable collections
        return hash(self._key())
def parse_name(key: str) -> str:
    """Derive the contact name from an uploaded public-key S3 key.

    Assumes the uploaded public key is named after the contact,
    e.g. 'PublicKeys/alice.pub.txt' -> 'alice'.
    """
    for fragment in ('PublicKeys/', '.pub.txt'):
        key = key.replace(fragment, '')
    return key
# Not all public keys will have a corresponding fingerprint
def fetch_fingerprint(s3_client, bucket: str, name: str) -> Union[None, str]:
    """Fetch the contact's fingerprint file from S3.

    Returns the fingerprint text, or None (implicitly) when the object
    does not exist; any other S3 error is re-raised.
    """
    key = f'Fingerprints/{name}.fpr.txt'
    try:
        s3_obj = s3_client.get_object(Bucket=bucket, Key=key)
        # BUG FIX: str() on the bytes body produced "b'...'"-wrapped text;
        # decode the UTF-8 payload instead.
        return s3_obj['Body'].read().decode('utf-8')
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchKey':
            log = json.dumps({
                'app': 'secure-contact',
                'function': 'fetch_fingerprint',
                'message': f'NoSuchKey: {key}'
            })
            print(log)
        else:
            raise e
def generate_entry(s3_client, bucket: str, key: str) -> Entry:
    """Build an Entry for the contact whose public key lives at `key`."""
    name = parse_name(key)
    return Entry(name, key, fetch_fingerprint(s3_client, bucket, name))
def create_session(profile=None) -> Session:
    """Create a boto3 Session, optionally using a named credentials profile."""
    return Session(profile_name=profile)
def get_matching_s3_objects(s3_client, bucket: str, prefix: str) -> List[Dict[str, Any]]:
    """Yield metadata dicts for every object under `prefix`, following pagination.

    (Despite the List annotation, this is a generator.)
    """
    request = {'Bucket': bucket, 'Prefix': prefix, 'StartAfter': prefix}
    while True:
        response = s3_client.list_objects_v2(**request)
        # 'Contents' is absent when the listing page is empty.
        if 'Contents' not in response:
            return
        for obj in response['Contents']:
            if obj['Key'].startswith(prefix):
                yield obj
        # The S3 API is paginated (up to 1000 keys per page); keep passing the
        # continuation token until the final page omits it.
        if 'NextContinuationToken' not in response:
            break
        request['ContinuationToken'] = response['NextContinuationToken']
def get_matching_s3_keys(s3_client, bucket: str, prefix: str) -> List[str]:
    """Yield just the key names of every object under `prefix`."""
    yield from (obj['Key']
                for obj in get_matching_s3_objects(s3_client, bucket, prefix))
def should_be_public(entry: Entry) -> bool:
    """Return True when the entry's key looks like an uploaded public-key file."""
    if '.pub.txt' in entry.publickey:
        return True
    print(f'WARNING: should_be_public check did not pass for {entry.publickey}')
    return False
# should copy a list of s3 objects from one bucket to another, preserving the directory structure
def copy_keys_to_public_bucket(session: Session, source_bucket: str, dest_bucket: str, entries: List[Entry]) -> None:
    """Copy each entry's public-key object to the public bucket, same key path."""
    # could we set a lifecycle on the bucket to deal with old keys?
    s3 = session.client('s3')
    for entry in entries:
        if not should_be_public(entry):
            continue
        s3.copy({'Bucket': source_bucket, 'Key': entry.publickey},
                dest_bucket, entry.publickey)
def get_content_type(filename: str) -> str:
    """Map a filename extension to its MIME type; empty string when unknown."""
    known_types = (
        ('.html', 'text/html'),
        ('.css', 'text/css'),
        ('.ico', 'image/x-icon'),
    )
    for suffix, content_type in known_types:
        if filename.endswith(suffix):
            return content_type
    return ''
def upload_html(session: Session, bucket: str, key: str, body: str) -> None:
    """Upload an HTML document to S3 under `key` with the text/html content type."""
    s3 = session.client('s3')
    s3.put_object(Body=body, Bucket=bucket, Key=key, ContentType='text/html')
def upload_files(session: Session, bucket: str, path: str, prefix: str = '') -> None:
    """Walk `path` and upload every file to the bucket as `prefix` + filename.

    Note: subdirectory structure is flattened — only the filename is kept.
    """
    s3 = session.client('s3')
    for directory, _subdirs, filenames in os.walk(path):
        for filename in filenames:
            s3.upload_file(
                os.path.join(directory, filename),
                bucket,
                prefix + filename,
                ExtraArgs={'ContentType': get_content_type(filename)},
            )
# fetch all of the required data from S3 and return a List containing an Entry for each contact
def get_all_entries(session: Session, data_bucket: str) -> List[Entry]:
    """Build an Entry for every uploaded public key in the data bucket."""
    s3 = session.client('s3')
    keys = get_matching_s3_keys(s3, data_bucket, 'PublicKeys/')
    return [generate_entry(s3, data_bucket, key) for key in keys]
# Configuration: environment variables when STAGE is set (deployed),
# otherwise a local JSON config file at ~/.gu/secure-contact.json.
if __name__ == "__main__":
    if os.getenv('STAGE'):
        DATA_BUCKET_NAME = os.getenv('DATA_BUCKET_NAME')
        PUBLIC_BUCKET_NAME = os.getenv('PUBLIC_BUCKET_NAME')
        AWS_PROFILE = os.getenv('AWS_PROFILE')
    else:
        # NOTE(review): the file handle from open() is never closed — fine for a
        # short-lived script, but a `with` block would be tidier.
        config_path = os.path.expanduser('~/.gu/secure-contact.json')
        config = json.load(open(config_path))
        DATA_BUCKET_NAME = config['DATA_BUCKET_NAME']
        PUBLIC_BUCKET_NAME = config['PUBLIC_BUCKET_NAME']
        AWS_PROFILE = config['AWS_PROFILE']
|
import pandas as pd
import numpy as np
from scipy import stats
import os, sys
sys.path.append("../utils/")
from utils import *
# Collect per-game lengths (number of distinct ticks), grouped by noise level.
data_dir = '../../out/'
games = []
games += get_games(data_dir, 'experiment')
lengths = {}
count = 0
for game_id in games:
    # BUG FIX: the original referenced an undefined `in_dir` (NameError) and
    # clobbered `data_dir` inside the loop; use a separate per-game path.
    game_dir = data_dir + game_id + '/games/'
    for game in os.listdir(game_dir):
        if game[-4:] != '.csv':
            continue
        # filename convention: fields separated by '_', noise level is the third
        noise = game.split('_')[2]
        # pd.io.parsers.read_csv is a deprecated alias for pd.read_csv
        data = pd.read_csv(game_dir + game)
        time = len(set(data['tick']))
        lengths[noise] = lengths[noise] + [time] if noise in lengths else [time]
        count += 1
|
from common_data_structures.periodic_bar import Quote, PeriodicBar
from ctypes import *
from datetime import datetime
from event_processing.external_data_listener import ExternalDataListener
from event_processing.market_book import MarketBook
from utils.datetime_convertor import get_unix_timestamp_from_hhmm_tz
import os
import pytz
class C_TimeInternal(Structure):
    # Seconds/microseconds pair mirroring a C timeval-style timestamp.
    # NOTE(review): fields are declared (tv_usec, tv_sec); confirm this matches
    # the on-disk layout, since a standard `struct timeval` orders (tv_sec, tv_usec).
    _fields_ = [("tv_usec", c_int), ("tv_sec", c_int)]
class C_Time(Union):
    # Union view of a timestamp: either one unsigned long or the sec/usec pair.
    _fields_ = [("val", c_ulong), ("time", C_TimeInternal)]
class C_Quote(Structure):
    # One bid/ask quote as stored in the binary bar files.
    _fields_ = [('bid_price', c_double), ('bid_size', c_int), ('ask_price', c_double), ('ask_size', c_int)]
class C_PERIODIC_BAR(Structure):
    # Binary layout of one periodic bar record: open/close quotes, low/high
    # prices, traded volume, and the bar timestamp.
    _fields_ = [('open', C_Quote), ('close', C_Quote), ('low', c_double), ('high', c_double), ('volume', c_ulonglong),
                ('ts', C_Time)]
class PeriodicBarFileSource(ExternalDataListener):
    """Replays binary periodic-bar files for one shortcode, dispatching bars in
    timestamp order to the watch and the market book."""

    def __init__(self, shortcode, watch, start_date, end_date, periodic_bar_period=1):
        self.watch = watch
        self.shortcode = shortcode
        self.start_date = start_date  # The date from which this file source should load data
        self.end_date = end_date  # The last date till which this file source should consider data
        self.current_date = start_date  # The date for which csi file source has read the latest struct
        self.current_index = 0
        self.file_reader = None  # The file from which this filesource will read structs
        self.current_quote = None  # The latest read daily_quote (needed by dispatcher to see the next timestamp of a source)
        self.market_books = MarketBook.GetUniqueInstances(
            watch)  # A pointer to the market book to which the data packets are to be sent
        self.periodic_bars = []
        self.periodic_bar_period = periodic_bar_period
        self.load_data()
        # Set next_event_timestamp
        if self.current_index < len(self.periodic_bars):
            self.next_event_timestamp = self.periodic_bars[self.current_index].ts
        else:
            self.next_event_timestamp = 0  # Go to passive mode

    def _filesource(self, ticker):
        """Path of the binary bar file for `ticker`."""
        return os.path.expanduser('./datafiles/{}'.format(ticker))

    def load_data(self):
        """Read all bars for this shortcode and keep them sorted by timestamp."""
        self.process_etf_data([self._filesource(self.shortcode)])
        self.periodic_bars.sort(key=lambda x: x.ts)

    @staticmethod
    def get_minutebar(filesource):
        """Parse one binary file of C_PERIODIC_BAR records into PeriodicBar objects."""
        output = []
        with open(filesource, 'rb') as file_:
            record = C_PERIODIC_BAR()
            while file_.readinto(record) == sizeof(C_PERIODIC_BAR):
                open_ = Quote(record.open.bid_price, record.open.bid_size, record.open.ask_price, record.open.ask_size)
                close_ = Quote(record.close.bid_price, record.close.bid_size, record.close.ask_price, record.close.ask_size)
                # Rebuild the timestamp as an explicitly UTC-aware datetime.
                ts = datetime.utcfromtimestamp(record.ts.time.tv_sec + record.ts.time.tv_usec / 1000000)
                ts_with_tz = datetime(year=ts.year,
                                      month=ts.month,
                                      day=ts.day,
                                      hour=ts.hour,
                                      minute=ts.minute,
                                      second=ts.second,
                                      tzinfo=pytz.UTC)
                # NOTE(review): args are passed (high, low) while the C struct
                # declares (low, high) — confirm against PeriodicBar's signature.
                elem = PeriodicBar(open_, close_, record.high, record.low, record.volume, ts_with_tz)
                output.append(elem)
        return output

    ## @brief Make quote objects from futures data
    # date,product,specific_ticker,open,high,low,close,contract_volume,contract_oi,total_volume,total_oi
    #
    def process_etf_data(self, filesources):
        """Append every bar from each file to self.periodic_bars."""
        for filesource in filesources:
            self.periodic_bars.extend(self.get_minutebar(filesource))

    def seek_to_first_event_after(self, end_time):
        """Skip all bars timestamped <= end_time; return whether events remain."""
        while ((self.current_index < len(self.periodic_bars)) and
               (self.periodic_bars[self.current_index].ts <= end_time)):
            self.current_index += 1
        if self.current_index < len(self.periodic_bars):  # If there are more events
            source_has_events = True
            self.next_event_timestamp = self.periodic_bars[self.current_index].ts
        else:  # there are no more events
            source_has_events = False
            self.next_event_timestamp = 0  # Go to passive mode
        return source_has_events

    def process_all_events(self):
        """Dispatch every remaining bar to the watch and market book."""
        # If there are no events, then simply return
        if self.current_index == len(self.periodic_bars):
            self.next_event_timestamp = 0  # Go to passive mode
            return
        # Else process the events
        while self.current_index < len(self.periodic_bars):
            self.next_event_timestamp = self.periodic_bars[self.current_index].ts
            self.watch.on_new_market_event(self.next_event_timestamp)  # Notify the watch first
            # Notify the market book
            self.market_books[0].on_new_minute_bar(self.periodic_bars[self.current_index], self.periodic_bar_period)
            self.current_index += 1
        self.next_event_timestamp = 0  # Since we have processed all events, go to passive mode

    def process_events_till(self, end_time):
        """Dispatch bars timestamped <= end_time to the watch and market book."""
        # If there are no events, return
        if self.current_index == len(self.periodic_bars):
            self.next_event_timestamp = 0  # Go to passive mode
            return
        # Go through all the quotes which are timestamped <= end_time
        while (self.current_index < len(self.periodic_bars)) and (self.next_event_timestamp <= end_time):
            self.watch.on_new_market_event(self.next_event_timestamp)  # Notify the watch first
            # BUG FIX: an extra `self` was passed as the first argument here,
            # inconsistent with the identical call in process_all_events().
            self.market_books[0].on_new_minute_bar(self.periodic_bars[self.current_index],
                                                   self.periodic_bar_period)
            self.current_index += 1
            if self.current_index < len(self.periodic_bars):
                self.next_event_timestamp = self.periodic_bars[self.current_index].ts
        # If there are events
        if self.current_index < len(self.periodic_bars):
            self.next_event_timestamp = self.periodic_bars[self.current_index].ts
        else:  # There are no more events
            self.next_event_timestamp = 0  # Go to passive mode
|
import os
def renameFile():
    """Strip all digits from every filename in the ./prank subdirectory."""
    original_path = os.getcwd()
    print(original_path)
    # BUG FIX: os.path.join instead of "\prank" string concatenation — the
    # backslash form only worked on Windows (and only by accident, since
    # "\p" is not an escape sequence).
    prank_dir = os.path.join(original_path, "prank")
    os.chdir(prank_dir)
    file_list = os.listdir(prank_dir)
    print(file_list)
    # BUG FIX: str.translate(None, "0123456789") is the Python 2 API and raises
    # TypeError on Python 3 (previously swallowed by the bare except, so no file
    # was ever renamed). Build a deletion table with str.maketrans instead.
    strip_digits = str.maketrans('', '', '0123456789')
    # for each file, rename the file
    for file in file_list:
        print(file)
        try:
            print("renaming file: " + file)
            os.rename(file, file.translate(strip_digits))
        except OSError:
            # Narrowed from a bare except so real bugs are not hidden.
            print("Error renaming the file")

renameFile()
|
#!/usr/bin/env python3
"""
Purpose: Describe what is off the larboard bow using the correct a/an article.
"""
import argparse
def get_args():
    """Get command-line arguments"""
    parser = argparse.ArgumentParser(
        description="Warn the captain",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("name", metavar="name", help="Name of the creature")
    parsed = parser.parse_args()
    return parsed
def main():
    """Print a warning to the captain using the command line arguments."""
    creature = get_args().name
    # "an" before a vowel sound, "a" otherwise (judged by the first letter)
    first_letter = creature[0].lower()
    article = "an" if first_letter in "aeiou" else "a"
    print(f"Ahoy, Captain, {article} {creature} off the larboard bow!")
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
import pytest
from mongomap import *
@pytest.fixture(scope='module')
def schema():
    """Module-scoped fixture: a flat Schema with an int and a float field."""
    class ASchema(Schema):
        foo = Field(IntType)
        bar = Field(FloatType)
    return ASchema
@pytest.fixture(scope='module')
def nested_schema(schema):
    """Module-scoped fixture: a Schema embedding the flat `schema` fixture."""
    class BSchema(Schema):
        spam = Field(StringType)
        egg = Field(schema)
    return BSchema
def test_schema(schema):
    """A schema built from a dict exposes its values as attributes and is dirty."""
    instance = schema({'foo': 1, 'bar': 2.3})
    assert instance._dirty
    assert instance.foo == 1
    assert instance.bar == 2.3
def test_explicit_field_names():
    """A Field's explicit `name` drives both init kwargs and attribute access."""
    class ASchema(Schema):
        foo = Field(IntType, name='bar')

    instance = ASchema(bar=1)
    assert isinstance(ASchema.foo, Field)
    assert instance.bar == 1
    with pytest.raises(AttributeError):
        instance.foo
def test_schema_drop_extra(schema):
    """Keys not declared on the schema are dropped, not stored."""
    instance = schema(dict(spam=3))
    with pytest.raises(AttributeError):
        instance.spam
def test_schema_missing(schema):
    """Declared-but-unsupplied fields read as None."""
    instance = schema(dict(spam=3))
    assert instance.foo is None
def test_schema_marshal(schema):
    """marshal() round-trips the dict used to build the schema."""
    payload = {'foo': 1, 'bar': 2.3}
    assert schema(payload).marshal() == payload
def test_nested(nested_schema):
    """Nested schema values are reachable through chained attribute access."""
    payload = {'spam': 'egg', 'egg': {'foo': 1, 'bar': 2.3}}
    assert nested_schema(payload).egg.foo == 1
def test_nested_marshal(nested_schema):
    """marshal() reproduces nested payloads exactly."""
    payload = {'spam': 'egg', 'egg': {'foo': 1, 'bar': 2.3}}
    assert nested_schema(payload).marshal() == payload
def test_init_params(nested_schema):
    """Schemas can be built from keyword arguments as well as a dict."""
    payload = {'spam': 'egg', 'egg': {'foo': 1, 'bar': 2.3}}
    assert nested_schema(**payload).marshal() == payload
def test_empty_marshal(nested_schema):
    """An empty schema marshals to an all-None skeleton."""
    expected = {'spam': None, 'egg': {'bar': None, 'foo': None}}
    assert nested_schema().marshal() == expected
def test_attach_values_later(nested_schema):
    """Values assigned after construction appear in marshal()."""
    instance = nested_schema()
    instance.spam = 'egg'
    expected = {'spam': 'egg', 'egg': {'bar': None, 'foo': None}}
    assert instance.marshal() == expected
def test_attach_deep_values(nested_schema):
    """Assignments on a nested schema propagate into marshal()."""
    instance = nested_schema()
    instance.egg.foo = 42
    expected = {'spam': None, 'egg': {'bar': None, 'foo': 42}}
    assert instance.marshal() == expected
|
from setuptools import setup, find_packages
# Long description and license text come from the repo's markdown files.
with open('README.md', encoding='utf-8') as f:
    readme = f.read()

with open('LICENSE.md', encoding='utf-8') as f:
    # Renamed from `license` to avoid shadowing the builtin.
    license_text = f.read()

setup(
    name='hafalin',
    version='0.1.0',
    description='Hafalin API, an API to generate questions given document',
    long_description=readme,
    author='Geraldi Dzakwan; Ari Pratama Zhorifiandi',
    author_email='geraldi.dzakwan@gmail.com; arizho16@gmail.com',
    url='https://github.com/geraldzakwan/tajong.ai',
    license=license_text,
    packages=find_packages(exclude=('tests', 'docs'))
)
|
from graph import *
class WeightedEdge(Edge):
    """Directed edge carrying a numeric weight."""

    def __init__(self, src, dest, weight):
        self.src = src
        self.dest = dest
        self.weight = weight

    def getWeight(self):
        """Return this edge's weight."""
        return self.weight

    def __str__(self):
        return f"{self.src}->{self.dest} ({self.weight})"
from os import getenv
from gitWebScrapper import logger
# Base URL for GitLab's v4 REST API.
GITLAB_API_BASE_URL = 'https://gitlab.com/api/v4'
def check_token():
    """Read the GitLab access token from the environment, logging when absent.

    Returns the token string, or None if ACCESS_TOKEN_LAB is unset.
    """
    token = getenv('ACCESS_TOKEN_LAB')
    if token is not None:
        return token
    logger.error('ACCESS_TOKEN_LAB not found')
    return token
# Resolved once at import time; None when the environment variable is missing.
ACCESS_TOKEN = check_token()
# ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from prescient.engine.abstract_types import G, S
from collections import deque
import math
from prescient.engine.forecast_helper import get_forecastables
from . import SimulationState
from . import _helper
from .state_with_offset import StateWithOffset
from .time_interpolated_state import TimeInterpolatedState
class MutableSimulationState(SimulationState):
''' A simulation state that can be updated with data pulled from RUCs and sceds.
'''
    def __init__(self):
        ''' Start with empty state; data arrives via apply_ruc/apply_actuals/apply_sced. '''
        self._forecasts = []  # one deque of forecast values per forecastable
        self._actuals = []  # one deque of actual values per forecastable
        self._commits = {}  # generator -> deque of 0/1 commitment flags
        self._init_gen_state = {}  # generator -> status in the previous period
        self._init_power_gen = {}  # generator -> power output in the previous period
        self._init_soc = {}  # storage -> state of charge in the previous period

        # Timestep durations
        self._minutes_per_forecast_step = 60
        self._minutes_per_actuals_step = 60
        # How often a SCED is run
        self._sced_frequency = 60

        # The current simulation minute
        self._simulation_minute = 0
        # Next simulation minute when forecasts should be popped
        self._next_forecast_pop_minute = 0
        self._next_actuals_pop_minute = 0
@property
def timestep_count(self) -> int:
''' The number of timesteps we have data for '''
return len(self._forecasts[0]) if len(self._forecasts) > 0 else 0
    @property
    def minutes_per_step(self) -> int:
        ''' The duration of each time step in minutes '''
        # Mirrors the forecast step length; actuals may use a different cadence.
        return self._minutes_per_forecast_step
    def get_generator_commitment(self, g:G, time_index:int) -> Sequence[int]:
        ''' Get whether the generator is committed to be on (1) or off (0) for each time period
        '''
        # _commits[g] is a deque of 0/1 ints; this returns the flag at time_index.
        return self._commits[g][time_index]
    def get_initial_generator_state(self, g:G) -> float:
        ''' Get the generator's state in the previous time period '''
        # Saved from the first RUC and updated after each SCED.
        return self._init_gen_state[g]
    def get_initial_power_generated(self, g:G) -> float:
        ''' Get how much power was generated in the previous time period '''
        # Saved from the first RUC and updated after each SCED.
        return self._init_power_gen[g]
    def get_initial_state_of_charge(self, s:S) -> float:
        ''' Get state of charge in the previous time period '''
        # Updated after each SCED via apply_sced.
        return self._init_soc[s]
def get_current_actuals(self) -> Iterable[float]:
''' Get the current actual value for each forecastable.
This is the actual value for the current time period (time index 0).
Values are returned in the same order as forecast_helper.get_forecastables,
but instead of returning arrays it returns a single value.
'''
for forecastable in self._actuals:
yield forecastable[0]
def get_forecasts(self) -> Iterable[Sequence[float]]:
''' Get the forecast values for each forecastable
This is very similar to forecast_helper.get_forecastables(); the
function yields an array per forecastable, in the same order as
get_forecastables().
Note that the value at index 0 is the forecast for the current time,
not the actual value for the current time.
'''
for forecastable in self._forecasts:
yield forecastable
def get_future_actuals(self) -> Iterable[Sequence[float]]:
''' Warning: Returns actual values for the current time AND FUTURE TIMES.
Be aware that this function returns information that is not yet known!
The function lets you peek into the future. Future actuals may be used
by some (probably unrealistic) algorithm options, such as
'''
for forecastable in self._actuals:
yield forecastable
def apply_ruc(self, options, ruc:RucModel) -> None:
''' Incorporate a RUC instance into the current state.
This will save the ruc's forecasts, and for the very first ruc
this will also save initial state info.
If there is a ruc delay, as indicated by options.ruc_execution_hour and
options.ruc_every_hours, then the RUC is applied to future time periods,
offset by the ruc delay. This does not apply to the very first RUC, which
is used to set up the initial simulation state with no offset.
'''
ruc_delay = -(options.ruc_execution_hour % (-options.ruc_every_hours))
# If we've never stored forecasts before...
first_ruc = (len(self._forecasts) == 0)
if first_ruc:
# The is the first RUC, save initial state
for g, g_dict in ruc.elements('generator', generator_type='thermal'):
self._init_gen_state[g] = g_dict['initial_status']
self._init_power_gen[g] = g_dict['initial_p_output']
# Create a queue where we can store generator commitments
# Fixed length so we can have old values fall of the list
max_ruc_length = ruc_delay + options.ruc_horizon
self._commits[g] = deque(maxlen=max_ruc_length)
for s,s_dict in ruc.elements('storage'):
self._init_state_of_charge[s] = s_dict['initial_state_of_charge']
# If this is first RUC, also save data to indicate when to pop RUC-related state
self._minutes_per_forecast_step = ruc.data['system']['time_period_length_minutes']
self._next_forecast_pop_minute = self._minutes_per_forecast_step
self._sced_frequency = options.sced_frequency_minutes
# Now save all generator commitments
# Keep the first "ruc_delay" commitments from the prior ruc
for g, g_dict in ruc.elements('generator', generator_type='thermal'):
commits = self._commits[g]
# This puts the first "ruc_delay" items at the end of the list.
# As we add our new items, all other old items will roll off the end of the list.
commits.rotate(-ruc_delay)
# Put the new values into the new value queue
commits.extend(int(round(g_dict['commitment']['values'][t]))
for t in range(0,options.ruc_horizon))
# And finally, save forecastables
_save_forecastables(options, ruc, self._forecasts)
def apply_actuals(self, options, actuals) -> None:
    ''' Incorporate actuals into the current state.

    Saves the actuals RUC's forecastables. If there is a ruc delay (see
    options.ruc_execution_hour and options.ruc_every_hours), the values are
    applied to future time periods offset by that delay; the very first
    actuals RUC is used as-is to seed the initial simulation state.
    '''
    if not self._actuals:
        # First actuals seen: record the step cadence so apply_sced knows
        # when old actuals values should be popped off the queues.
        self._minutes_per_actuals_step = actuals.data['system']['time_period_length_minutes']
        self._next_actuals_pop_minute = self._minutes_per_actuals_step
        self._sced_frequency = options.sced_frequency_minutes
    _save_forecastables(options, actuals, self._actuals)
def apply_sced(self, options, sced) -> None:
    ''' Incorporate a sced's results into the current state, and move to the next time period.
    This saves the sced's first time period of data as initial state information,
    and advances the current time forward by one time period.
    '''
    # Snapshot generator status/output at the sced's first time period
    # (offset 0); these become the initial conditions for the next solve.
    for gen_state in _helper.get_generator_states_at_sced_offset(self, sced, 0):
        g = gen_state.generator
        self._init_gen_state[g] = gen_state.status
        self._init_power_gen[g] = gen_state.power_generated
    for s,soc in _helper.get_storage_socs_at_sced_offset(sced, 0):
        # NOTE(review): apply_ruc stores storage state in
        # _init_state_of_charge, but this writes _init_soc — confirm both
        # attributes are intended (possible naming inconsistency).
        self._init_soc[s] = soc
    # Advance time, dropping data if necessary
    self._simulation_minute += self._sced_frequency
    # Pop one forecast (and commitment) step each time simulated time
    # crosses a full forecast-step boundary.
    while self._next_forecast_pop_minute <= self._simulation_minute:
        for value_deque in self._forecasts:
            value_deque.popleft()
        for value_deque in self._commits.values():
            value_deque.popleft()
        self._next_forecast_pop_minute += self._minutes_per_forecast_step
    # Same for actuals, which may be stored at a different cadence.
    while self._simulation_minute >= self._next_actuals_pop_minute:
        for value_deque in self._actuals:
            value_deque.popleft()
        self._next_actuals_pop_minute += self._minutes_per_actuals_step
def get_state_with_step_length(self, minutes_per_step:int) -> SimulationState:
    ''' Return a view of this state whose time step is `minutes_per_step` minutes.

    Returns self when forecasts, actuals, and sced frequency already match
    the requested resolution; otherwise wraps this state in a
    TimeInterpolatedState view.
    '''
    same_resolution = (
        self._minutes_per_forecast_step == minutes_per_step
        and self._minutes_per_actuals_step == minutes_per_step
        and self._sced_frequency == minutes_per_step)
    if same_resolution:
        # Native resolution already matches; no interpolation needed.
        return self
    # How many minutes we are past the start of the current forecast and
    # actuals steps, needed so the view can interpolate correctly.
    forecast_offset = (self._simulation_minute - self._next_forecast_pop_minute
                      + self._minutes_per_forecast_step)
    actuals_offset = (self._simulation_minute - self._next_actuals_pop_minute
                     + self._minutes_per_actuals_step)
    return TimeInterpolatedState(self, self._minutes_per_forecast_step, forecast_offset,
                                 self._minutes_per_actuals_step, actuals_offset,
                                 minutes_per_step)
def _save_forecastables(options, ruc, where_to_store):
    """Append each forecastable series from `ruc` into the deques in `where_to_store`.

    On the first call the fixed-length deques are created; on later calls
    existing values are rotated so that the first `ruc_delay` retained values
    stay while new values push the oldest off the end.
    """
    is_first_ruc = not where_to_store
    ruc_delay = -(options.ruc_execution_hour % (-options.ruc_every_hours))
    max_length = ruc_delay + options.ruc_horizon
    # Save all forecastables, in forecastable order
    for idx, (vals,) in enumerate(get_forecastables(ruc)):
        if is_first_ruc:
            queue = deque(maxlen=max_length)
            where_to_store.append(queue)
        else:
            queue = where_to_store[idx]
        # Move the first `ruc_delay` retained items to the end; extending
        # below then rolls all remaining old items off the fixed-length deque.
        queue.rotate(-ruc_delay)
        queue.extend(vals)
|
from limits import limits
class TestGranularity:
    """Exercises the limits rate-limit item classes: expiry, repr, ordering."""

    def test_seconds_value(self):
        # (class, expected expiry in seconds)
        cases = [
            (limits.RateLimitItemPerSecond, 1),
            (limits.RateLimitItemPerMinute, 60),
            (limits.RateLimitItemPerHour, 60 * 60),
            (limits.RateLimitItemPerDay, 60 * 60 * 24),
            (limits.RateLimitItemPerMonth, 60 * 60 * 24 * 30),
            (limits.RateLimitItemPerYear, 60 * 60 * 24 * 30 * 12),
        ]
        for item_cls, expiry in cases:
            assert item_cls(1).get_expiry() == expiry

    def test_representation(self):
        # Each granularity spells out its unit in the string form.
        cases = [
            (limits.RateLimitItemPerSecond, "second"),
            (limits.RateLimitItemPerMinute, "minute"),
            (limits.RateLimitItemPerHour, "hour"),
            (limits.RateLimitItemPerDay, "day"),
            (limits.RateLimitItemPerMonth, "month"),
            (limits.RateLimitItemPerYear, "year"),
        ]
        for item_cls, unit in cases:
            assert "1 per 1 %s" % unit in str(item_cls(1))

    def test_comparison(self):
        # Items order by granularity, shortest window first.
        ordered = [
            limits.RateLimitItemPerSecond(1),
            limits.RateLimitItemPerMinute(1),
            limits.RateLimitItemPerHour(1),
            limits.RateLimitItemPerDay(1),
            limits.RateLimitItemPerMonth(1),
            limits.RateLimitItemPerYear(1),
        ]
        for smaller, larger in zip(ordered, ordered[1:]):
            assert smaller < larger
|
import numpy as np
from scipy.spatial.distance import cdist
from scipy.misc import imresize
def nearest_icons(icons, dataset, scale=None, grayscale=False, get_dist=False):
    """ Find nearest neighbour in the set for a given icon
    Args:
        icons: array of icons to which the nearest neighbour is to be found (N_i,y_res,x_res,chan)
                or a single icon (y_res,x_res,chan)
        dataset: array of original icons which is searched (N_s,y_res,x_res,chan)
        scale: integer resolution to which icons are rescaled before comparing (optional)
        grayscale: boolean if true, channel values are averaged before comparing
        get_dist: boolean if true, also return the distance to each nearest neighbour
    Returns list of nearest-neighbour indices (and their distances if get_dist) """
    if len(icons.shape) == 3:
        # Promote a single icon to a batch of one.
        icons = icons.reshape((1,) + icons.shape)
    if scale is not None:
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 —
        # confirm SciPy < 1.3 is pinned, or port this to PIL/skimage.
        icons = np.array([imresize(icon, (scale, scale)) for icon in icons])
        dataset = np.array([imresize(icon, (scale, scale)) for icon in dataset])
    if grayscale and (icons.shape[3] == 3):
        # Average the colour channels in float and flatten. Vectorized
        # replacement for the previous per-pixel Python loops; mean() also
        # avoids integer overflow when summing uint8 channel values.
        icons = icons.reshape((icons.shape[0], -1, 3)).mean(axis=2)
        dataset = dataset.reshape((dataset.shape[0], -1, 3)).mean(axis=2)
    else:
        # Flatten each icon to a single feature vector.
        icons = icons.reshape((icons.shape[0], -1))
        dataset = dataset.reshape((dataset.shape[0], -1))
    # array of shape (n_icons, n_data) containing the distances
    dist = cdist(icons, dataset, metric='euclidean')
    # get the index of the corresponding data_icon with lowest distance for each icon
    idxs = [np.argmin(d) for d in dist]
    if not get_dist:
        return idxs
    else:
        # range(len(dist)) selects each row; idxs picks the minimum per row
        return idxs, dist[range(len(dist)), idxs]
def icon_dist(icons1, icons2):
    """Pairwise Euclidean distances between two batches of icons.

    Each icon (y_res, x_res, chan) is flattened to a vector; the result has
    shape (len(icons1), len(icons2)).
    """
    flat1 = icons1.reshape((icons1.shape[0], -1))
    flat2 = icons2.reshape((icons2.shape[0], -1))
    return cdist(flat1, flat2, metric='euclidean')
|
import cv2
from lib.tracking_utils.timer import Timer
# --- Tracking demo configuration ---
# Name of the tracking model served by the inference server.
tracking_model_name = 'fair_mot'
# Inference server endpoint (host:port) and wire protocol.
url = '0.0.0.0:8001'
protocol = 'grpc'
batch_size = 1
# Use asynchronous, streaming requests against the server.
is_async = True
is_streaming = True
# Input video; the frame rate is read back from the container metadata.
cap = cv2.VideoCapture('./data/MOT16-03.mp4')
frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS)))
# Processed-frame counter.
count = 0
# Source resolution (w, h) and the model's expected input size (width, height).
w, h = 1920, 1080
input_size=(576, 320)
# Timer used to measure per-frame tracking latency.
timer = Timer()
|
import asyncio
import colorsys
from colorthief import ColorThief
from govee_api_laggat import Govee, GoveeAbstractLearningStorage, GoveeLearnedInfo
from os import getenv
from time import sleep
from typing import Dict
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from logipy import logi_led
# Async helper that recolours every Govee RGB device on the account.
async def change_color(api_key, rgb_color, learning_storage):
    """Turn on every Govee device and set it to `rgb_color`."""
    govee = await Govee.create(api_key, learning_storage=learning_storage)
    # The device list must be fetched before any control calls.
    devices, err = await govee.get_devices()
    for device in devices:
        success, err = await govee.turn_on(device.device)
        success, err = await govee.set_color(device.device, rgb_color)
    await govee.close()
# Minimal learning-storage stub required by the Govee client.
class LearningStorage(GoveeAbstractLearningStorage):
    """In-memory no-op storage; nothing is persisted between runs."""

    async def read(self) -> Dict[str, GoveeLearnedInfo]:
        # Nothing persisted yet; start with an empty mapping.
        return {}

    async def write(self, learned_info: Dict[str, GoveeLearnedInfo]):
        # A real implementation would persist this dictionary to disk.
        persist_this_somewhere = learned_info
# Pick a usable dominant colour from the current wallpaper.
def take_color():
    """Return an RGB tuple derived from the wallpaper's palette.

    Scans the top 20 palette colours for one with a clear tint (two
    channels differing by more than 70), falls back to near-white, then
    normalises via HSV to full saturation and 75% value.
    """
    palette = ColorThief(currentWallpaper).get_palette(color_count=20, quality=8)
    # Fallback when no sufficiently tinted colour is found.
    chosen = (228, 226, 226)
    for color in palette:
        # A colour counts as "tinted" when any two channels differ strongly.
        if max(abs(color[0] - color[1]),
               abs(color[1] - color[2]),
               abs(color[0] - color[2])) > 70:
            chosen = color
            break
    # Force 100% saturation / 75% value for vivid LED colours.
    hue = colorsys.rgb_to_hsv(chosen[0] / 255, chosen[1] / 255, chosen[2] / 255)[0]
    red, green, blue = colorsys.hsv_to_rgb(hue, 1, 0.75)
    return (int(red * 255), int(green * 255), int(blue * 255))
# Observer actions when the wallpaper changes
class Handler(FileSystemEventHandler):
    """Recolours Logitech and Govee lights when the wallpaper copy changes."""

    def on_modified(self, event):
        """React only to the specific wallpaper file; ignore other changes."""
        currentWallpaper = getenv('APPDATA') + "\\Microsoft\\Windows\\Themes\\WallpaperEngineOverride.jpg"
        if event.src_path != currentWallpaper:
            return
        # Take the color from wallpaper and set the lights
        final = take_color()
        logi_led.logi_led_set_lighting(final[0], final[1], final[2])  # Logitech
        # Govee needs its own asyncio loop in this watchdog thread.
        # Fixed: the loop was previously closed only on failure (leaking it
        # on success) and a bare `except:` silently swallowed every error.
        loop = asyncio.new_event_loop()
        try:
            loop.run_until_complete(change_color(api_key, final, learning_storage))
        except Exception as exc:
            # Best effort: a Govee failure shouldn't kill the watchdog thread.
            print("Govee update failed: %s" % exc)
        finally:
            loop.close()
if __name__ == "__main__":
    # Govee api_key for your account
    api_key = "YOUR API KEY HERE"
    # Learning-storage stub required by the Govee client
    learning_storage = LearningStorage()
    # Default location for the wallpaper copy on Windows
    currentWallpaper = getenv('APPDATA') + "\\Microsoft\\Windows\\Themes\\WallpaperEngineOverride.jpg"
    # Watch the Themes folder for updates to the wallpaper copy
    observer = Observer()
    observer.schedule(Handler(), getenv('APPDATA') + "\\Microsoft\\Windows\\Themes")
    observer.start()
    # Wait a bit for the system to start on startup
    sleep(15)
    # Initialize the Logitech LED SDK
    logi_led.logi_led_init()
    sleep(0.5)
    # Apply the wallpaper colour once at startup (Logitech first)
    final = take_color()
    logi_led.logi_led_set_lighting(final[0], final[1], final[2])  # Logitech
    # Push the same colour to the Govee lights.
    # Fixed: the loop was previously closed only on failure (leaking it on
    # success) and a bare `except:` hid every error.
    loop = asyncio.new_event_loop()
    try:
        loop.run_until_complete(change_color(api_key, final, learning_storage))
    except Exception as exc:
        print("Initial Govee update failed: %s" % exc)
    finally:
        loop.close()
    # Block until interrupted; the observer thread reacts to wallpaper changes
    try:
        while True:
            sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    # Wait for the observer thread to finish (join does not start it)
    observer.join()
|
from .shapenet import Dataset as ShapeNet
from .dfaust import Dataset as DFaust
|
import json
import os.path
def _load_items():
    """Load persisted (product_code, location_code) pairs from items.json.

    Falls back to an empty list when the file does not exist. Note that
    JSON round-trips tuples as lists, so loaded entries are 2-element lists.
    """
    global _items
    fname = 'items.json'
    if os.path.exists(fname):
        # Fixed: the original called `f.close` without parentheses, so the
        # handle was never closed; a context manager closes it reliably.
        with open(fname, "r") as f:
            _items = json.load(f)
    else:
        _items = []
def _save_items():
    """Persist the current item list to items.json.

    Fixed: the original called `f.close` without parentheses, so the file
    was never explicitly closed/flushed; `with` guarantees both.
    """
    global _items
    with open('items.json', "w") as f:
        json.dump(_items, f)
def init():
    """Initialize the module by loading persisted items from disk."""
    _load_items()
def items():
    """Return the module-level list of (product_code, location_code) pairs."""
    return _items
def add_item(product_code, location_code):
    """Record a product at a location and persist the updated list."""
    global _items
    entry = (product_code, location_code)
    _items.append(entry)
    _save_items()
def remove_item(product_code, location_code):
    """Remove the first matching (product_code, location_code) pair.

    Returns True (and persists the list) when a match was removed,
    False when no match exists.
    """
    global _items
    # enumerate() is the idiomatic replacement for range(len()) indexing;
    # returning immediately after the del keeps iteration safe.
    for i, (prod_code, loc_code) in enumerate(_items):
        if prod_code == product_code and loc_code == location_code:
            del _items[i]
            _save_items()
            return True
    return False
def set_products(products):
    """Replace the module-level product catalogue."""
    global _products
    _products = products
def products():
    """Return the module-level product catalogue."""
    return _products
def locations():
    """Return the module-level location catalogue."""
    return _locations
def set_locations(locations):
    """Replace the module-level location catalogue."""
    global _locations
    _locations = locations
|
# Generated by Django 2.1.8 on 2019-06-03 18:12
from django.db import migrations, models
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
    # Adds lateral-link fields (title + StreamField of page links) to the
    # `section` model.

    dependencies = [
        ('content', '0007_auto_20190603_1322'),
    ]

    operations = [
        migrations.AddField(
            model_name='section',
            name='lateral_link_title',
            field=models.TextField(blank=True),
        ),
        migrations.AddField(
            model_name='section',
            name='lateral_links',
            # default='' backfills existing rows; preserve_default=False
            # drops the default again after the migration runs.
            field=wagtail.core.fields.StreamField([('links', wagtail.core.blocks.PageChooserBlock())], default=''),
            preserve_default=False,
        ),
    ]
|
#!/usr/bin/env python
"""
build.py
Build HTML output
"""
from __future__ import absolute_import, division, print_function
import re
import os
import argparse
import datetime
import subprocess
import shutil
import lxml.html
def main():
    """Parse CLI options, regenerate the cookbook item tree, optionally build HTML."""
    p = argparse.ArgumentParser(usage=__doc__.rstrip())
    p.add_argument('--html', action='store_true', help="Build HTML output")
    p.add_argument('--no-clean', action='store_true', help="Skip removing old output")
    args = p.parse_args()
    # Run relative to this script's directory so the fixed paths below resolve.
    os.chdir(os.path.abspath(os.path.dirname(__file__)))
    dst_path = os.path.join('docs', 'items')
    dst_path_ipynb = os.path.join('docs', 'static', 'items')
    # Start from a clean output tree unless --no-clean was given.
    if os.path.isdir(dst_path) and not args.no_clean:
        shutil.rmtree(dst_path)
    if not os.path.isdir(dst_path):
        os.makedirs(dst_path)
    if os.path.isdir(dst_path_ipynb):
        shutil.rmtree(dst_path_ipynb)
    # Copy notebook attachments, then the .ipynb sources themselves.
    shutil.copytree(os.path.join('ipython', 'attachments'),
                    os.path.join(dst_path, 'attachments'))
    shutil.copytree('ipython', dst_path_ipynb,
                    ignore=lambda src, names: [x for x in names if not x.endswith('.ipynb')])
    # Tags from the legacy wiki, overridden by tags found in the notebooks.
    tags = parse_wiki_legacy_tags()
    titles, tags_new = generate_files(dst_path=dst_path)
    tags.update(tags_new)
    write_index(dst_path, titles, tags)
    if args.html:
        subprocess.check_call(['sphinx-build', '-b', 'html', '.', '_build/html'],
                              cwd='docs')
def write_index(dst_path, titles, tags):
    """
    Write index files under `dst_path`

    Parameters
    ----------
    dst_path : str
        Output directory; `index.txt` plus one `idx_*.rst` per tag group
        are written here.
    titles : dict
        Dictionary {file_basename: notebook_title, ...}
    tags : dict
        Dictionary {file_basename: iterable of tags, ...}; entries without
        tags are filled in with 'Other examples' (mutates the argument).
    """
    # Copy titles: section pseudo-entries are added below.
    titles = dict(titles)
    index_rst = os.path.join(dst_path, 'index.txt')

    toctree_items = []
    index_text = []

    # Fill in missing tags
    for fn in titles.keys():
        if fn not in tags or not tags[fn]:
            tags[fn] = ['Other examples']

    # Count tags so pages are grouped under their most common ones.
    tag_counts = {}
    for fn, tagset in tags.items():
        for tag in tagset:
            if tag not in tag_counts:
                tag_counts[tag] = 1
            else:
                tag_counts[tag] += 1
    # Force 'Outdated' to sort first in any page's tag list, so it always
    # becomes the page's sole group below.
    tag_counts['Outdated'] = 1e99

    def get_section_name(tag_id):
        # Sanitized, lowercase file basename for a tag group.
        return ("idx_" +
                re.sub('_+', '_', re.sub('[^a-z0-9_]', "_", "_" + tag_id.lower())).strip('_'))

    # Group pages under "primary / secondary" tag ids.
    tag_sets = {}
    for fn, tagset in tags.items():
        tagset = list(set(tagset))
        tagset.sort(key=lambda tag: -tag_counts[tag])
        if 'Outdated' in tagset:
            tagset = ['Outdated']
        tag_id = " / ".join(tagset[:2])
        tag_sets.setdefault(tag_id, set()).add(fn)
        if len(tagset[:2]) > 1:
            # Add sub-tag to the tree
            sec_name = get_section_name(tag_id)
            titles[sec_name] = tagset[1]
            tag_sets.setdefault(tagset[0], set()).add(sec_name)
    tag_sets = list(tag_sets.items())

    def tag_set_sort(item):
        # 'Outdated' groups sort last; ties fall back to the item itself.
        return (1 if 'Outdated' in item[0] else 0,
                item)

    tag_sets.sort(key=tag_set_sort)

    # Produce one .rst per group plus entries in the main index body.
    for tag_id, fns in tag_sets:
        fns = list(fns)
        fns.sort(key=lambda fn: titles[fn])
        section_base_fn = get_section_name(tag_id)
        section_fn = os.path.join(dst_path, section_base_fn + '.rst')
        if ' / ' not in tag_id:
            toctree_items.append(section_base_fn)
            non_idx_items = [fn for fn in fns if not fn.startswith('idx_')]
            if non_idx_items:
                index_text.append("\n{0}\n{1}\n\n".format(tag_id, "-"*len(tag_id)))
                for fn in non_idx_items:
                    index_text.append(":doc:`{0} <items/{1}>`\n".format(titles[fn], fn))
        with open(section_fn, 'w') as f:
            sec_title = titles.get(section_base_fn, tag_id)
            f.write("{0}\n{1}\n\n".format(sec_title, "="*len(sec_title)))
            sub_idx = [fn for fn in fns if fn.startswith('idx')]
            if sub_idx:
                f.write(".. toctree::\n"
                        "   :maxdepth: 1\n\n")
                for fn in sub_idx:
                    f.write("   {0}\n".format(fn))
                f.write("\n\n")
            f.write(".. toctree::\n"
                    "   :maxdepth: 1\n\n")
            for fn in fns:
                if fn in sub_idx:
                    continue
                f.write("   {0}\n".format(fn))

    # Write the top-level index. The `with` block closes the file; the
    # original additionally called f.close() redundantly inside it.
    with open(index_rst, 'w') as f:
        f.write(".. toctree::\n"
                "   :maxdepth: 1\n"
                "   :hidden:\n\n")
        for fn in toctree_items:
            f.write("   items/%s\n" % (fn,))
        f.write("\n\n")
        f.write('.. raw:: html\n\n   <div id="cookbook-index">\n\n')
        f.write("".join(index_text))
        f.write('\n\n.. raw:: html\n\n   </div>\n')
def generate_files(dst_path):
    """
    Read all .ipynb files and produce .rst under `dst_path`
    Returns
    -------
    titles : dict
        Dictionary {file_basename: notebook_title, ...}
    tags : dict
        Dictionary {file_basename: set([tag1, tag2, ...]), ...}
    """
    titles = {}
    tags = {}
    # Editor names and timestamps inherited from the legacy wiki.
    legacy_editors = parse_wiki_legacy_users()
    created, modified = parse_wiki_legacy_timestamps()
    for fn in sorted(os.listdir('ipython')):
        if not fn.endswith('.ipynb'):
            continue
        fn = os.path.join('ipython', fn)
        basename = os.path.splitext(os.path.basename(fn))[0]
        # Get old wiki editors
        editors = list(legacy_editors.get(basename, []))
        # Merge in author names and timestamps from Git history
        # (commits after ef45029096 only).
        created_stamp = created.get(basename, 0)
        modified_stamp = modified.get(basename, created_stamp)
        p = subprocess.Popen(['git', 'log', '--format=%at:%an', 'ef45029096..', fn],
                             stdout=subprocess.PIPE)
        names, _ = p.communicate()
        # NOTE(review): `names` is bytes on Python 3; this loop assumes
        # Python 2 str semantics — confirm the script runs under Python 2.
        for name in names.splitlines():
            timestamp, name = name.strip().split(':', 1)
            timestamp = int(timestamp)
            if name and name not in editors:
                editors.append(name)
            # NOTE(review): created_stamp defaults to 0 above, so this
            # `is None` branch never fires — possibly meant `== 0`?
            if created_stamp is None:
                created_stamp = timestamp
            if timestamp > modified_stamp:
                modified_stamp = timestamp
        # Continue: convert the notebook and collect its title/tags.
        title, tagset = convert_file(dst_path, fn, editors,
                                     created_stamp,
                                     modified_stamp)
        titles[basename] = title
        if tagset:
            tags[basename] = tagset
    return titles, tags
def convert_file(dst_path, fn, editors, created, modified):
    """
    Convert .ipynb to .rst, placing output under `dst_path`
    Returns
    -------
    title : str
        Title of the notebook
    tags : set of str
        Tags given in the notebook file
    """
    print(fn)
    # Render the notebook to HTML; the HTML is then embedded in an .rst
    # wrapper via a `.. raw:: html` directive below.
    subprocess.check_call(['jupyter', 'nbconvert', '--to', 'html',
                           '--output-dir', os.path.abspath(dst_path),
                           os.path.abspath(fn)],
                          cwd=dst_path, stderr=subprocess.STDOUT)
    basename = os.path.splitext(os.path.basename(fn))[0]
    rst_fn = os.path.join(dst_path, basename + '.rst')
    html_fn = os.path.join(dst_path, basename + '.html')
    title = None
    tags = set()
    # `editors` starts as the legacy-wiki editors; an AUTHORS: paragraph in
    # the notebook replaces them (see below).
    editors = list(editors)
    legacy_editors = True
    lines = []
    # Parse and munge HTML
    tree = lxml.html.parse(html_fn)
    os.unlink(html_fn)
    root = tree.getroot()
    head = root.find('head')
    container, = root.xpath("//div[@id='notebook-container']")
    # The first <h1> becomes the .rst title and is removed from the HTML body.
    headers = container.xpath('//h1')
    if headers:
        title = headers[0].text
        # NOTE(review): `unicode` and the `ur'...'` literals below are
        # Python 2 only; this function cannot run under Python 3 as-is.
        if isinstance(title, unicode):
            title = title.encode('utf-8')
        h1_parent = headers[0].getparent()
        h1_parent.remove(headers[0])
    lines.extend([u".. raw:: html", u""])
    # Keep <script> tags from the HTML head (e.g. MathJax).
    for element in head.getchildren():
        if element.tag in ('script',):
            text = lxml.html.tostring(element)
            lines.extend("    " + x for x in text.splitlines())
    text = lxml.html.tostring(container)
    # A "TAGS: a, b" paragraph provides tags and is removed from the output.
    m = re.search(ur'<p>TAGS:\s*(.*)\s*</p>', text)
    if m:
        tag_line = m.group(1).strip().replace(';', ',')
        if isinstance(tag_line, unicode):
            tag_line = tag_line.encode('utf-8')
        tags.update([x.strip() for x in tag_line.split(",")])
        text = text[:m.start()] + text[m.end():]
    # An "AUTHORS: a, b" paragraph overrides the legacy-wiki editor list.
    m = re.search(ur'<p>AUTHORS:\s*(.*)\s*</p>', text)
    if m:
        # Author lines override editors
        if legacy_editors:
            editors = []
            legacy_editors = False
        author_line = m.group(1).strip().replace(';', ',')
        if isinstance(author_line, unicode):
            author_line = author_line.encode('utf-8')
        for author in author_line.split(","):
            author = author.strip()
            if author and author not in editors:
                editors.append(author)
        text = text[:m.start()] + text[m.end():]
    # Point attachment links at the Sphinx download directory.
    text = text.replace(u'attachments/{0}/'.format(basename),
                        u'../_downloads/')
    lines.extend(u"    " + x for x in text.splitlines())
    lines.append(u"")
    # Produce output
    text = u"\n".join(lines).encode('utf-8')
    if not title:
        title = basename
    updateinfo = ""
    def fmt_time(timestamp):
        # Unix timestamp -> YYYY-MM-DD (local time).
        return datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d')
    # Zero timestamps mean "unknown"; include only the parts we have.
    if created != 0 and modified != 0:
        updateinfo = ":Date: {0} (last modified), {1} (created)".format(fmt_time(modified),
                                                                       fmt_time(created))
    elif created != 0:
        updateinfo = ":Date: {0} (created)".format(fmt_time(created))
    elif modified != 0:
        updateinfo = ":Date: {0} (last modified)".format(fmt_time(modified))
    authors = ", ".join(editors)
    text = "{0}\n{1}\n\n{2}\n\n{3}".format(title, "="*len(title),
                                           updateinfo,
                                           text)
    with open(rst_fn, 'w') as f:
        f.write(text)
        if authors:
            f.write("\n\n.. sectionauthor:: {0}".format(authors))
    del text
    # List downloadable attachments, if the notebook has any.
    attach_dir = os.path.join('ipython', 'attachments', basename)
    if os.path.isdir(attach_dir) and len(os.listdir(attach_dir)) > 0:
        with open(rst_fn, 'a') as f:
            f.write("""
.. rubric:: Attachments
""")
            for fn in sorted(os.listdir(attach_dir)):
                if os.path.isfile(os.path.join(attach_dir, fn)):
                    f.write('- :download:`%s <attachments/%s/%s>`\n' % (
                        fn, basename, fn))
    return title, tags
def parse_wiki_legacy_tags():
    """
    Parse wiki-legacy-tags.txt, mapping notebook basenames to legacy tag sets.

    The file uses wiki-style heading underlines (====, ----, \"\"\"\") to open
    tag levels; [[...]] links that follow inherit the currently open tags.

    Returns
    -------
    dict
        {notebook_basename: set of tags} for links whose .ipynb file exists.
    """
    # Currently open tag at each heading depth.
    tags = [None, None, None]
    items = {}
    with open('wiki-legacy-tags.txt', 'r') as f:
        prev_line = None
        for line in f:
            # Raw strings throughout: '\s' inside a plain literal is an
            # invalid escape (DeprecationWarning since Python 3.6, slated
            # to become an error).
            if re.match(r'^====+\s*$', line):
                tags[0] = prev_line.strip()
                tags[1] = None
                tags[2] = None
                continue
            if re.match(r'^----+\s*$', line):
                tags[1] = prev_line.strip()
                tags[2] = None
                continue
            if re.match(r'^""""+\s*$', line):
                tags[2] = prev_line.strip()
                continue
            prev_line = line
            m = re.search(r'\[\[(.*?)(?:\|.*)?\]\]', line)
            if m:
                # Normalize the wiki page name to the notebook file name.
                name = m.group(1).strip()
                name = re.sub('Cookbook/', '', name)
                name = re.sub('^/', '', name)
                name = name.replace('/', '_').replace(' ', '_')
                fn = os.path.join('ipython', name + '.ipynb')
                if os.path.isfile(fn):
                    basename = os.path.splitext(os.path.basename(fn))[0]
                    items.setdefault(basename, set()).update([x for x in tags if x])
                continue
    return items
def parse_wiki_legacy_users():
    """Parse wiki-legacy-users.txt into {page: [editor, ...]}.

    Lines look like ``page: Alice, Bob``; blank lines and '#'-comments
    are skipped.
    """
    editors_by_page = {}
    with open('wiki-legacy-users.txt', 'r') as f:
        for raw in f:
            raw = raw.strip()
            if not raw or raw.startswith('#'):
                continue
            page, rest = raw.split(':', 1)
            editors_by_page[page] = [name.strip() for name in rest.split(',')]
    return editors_by_page
def parse_wiki_legacy_timestamps():
    """Parse wiki-legacy-timestamps.txt into ({page: created}, {page: modified}).

    Each data line is ``page created modified`` (whitespace separated,
    integer Unix timestamps); blank lines and '#'-comments are skipped.
    """
    created = {}
    modified = {}
    with open('wiki-legacy-timestamps.txt', 'r') as f:
        for raw in f:
            raw = raw.strip()
            if not raw or raw.startswith('#'):
                continue
            page, created_ts, modified_ts = raw.split()
            created[page] = int(created_ts)
            modified[page] = int(modified_ts)
    return created, modified
if __name__ == "__main__":
    # Script entry point: build the cookbook output tree.
    main()
|
import random
import string
def strGen(length):
    """Return a random alphanumeric string of the given length.

    NOTE: the `random` module is not cryptographically secure; use the
    `secrets` module instead if these strings serve as tokens or passwords.
    """
    # PEP 8 (E731): prefer a def over assigning a lambda to a name.
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))
|
from .base import Base
from .models import Player
|
# encoding: utf-8
# =========================================================================
# ©2017-2018 北京国美云服科技有限公司
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import os
import sys
from .base import BaseAction
from ...misc.utils import prints_body, json_dumps
from ..constants import (
BUFSIZE,
HTTP_OK,
HTTP_OK_CREATED,
HTTP_OK_NO_CONTENT,
HTTP_OK_PARTIAL_CONTENT,
)
class CreateObjectAction(BaseAction):
    """Upload an object from a local file (-F) or inline data (-d)."""
    command = "create-object"
    usage = "%(prog)s -b <bucket> -k <key> -d <data> [-t <type> -f <conf_file>]"

    @classmethod
    def add_ext_arguments(cls, parser):
        """Register create-object specific CLI arguments."""
        parser.add_argument(
            "-b",
            "--bucket",
            dest="bucket",
            required=True,
            help="The bucket name"
        )
        parser.add_argument(
            "-k",
            "--key",
            dest="key",
            help="The object name"
        )
        parser.add_argument(
            "-F",
            "--file",
            dest="file",
            help="The object file"
        )
        parser.add_argument(
            "-d",
            "--data",
            dest="data",
            help="The object data"
        )
        parser.add_argument(
            "-t",
            "--type",
            dest="type",
            default="application/octet-stream",
            help="The object type"
        )
        return parser

    @classmethod
    def send_request(cls, options):
        """PUT the object; print the response body on failure."""
        fileobj = None
        if options.file:
            if not os.path.isfile(options.file):
                print("No such file: %s" % options.file)
                sys.exit(-1)
            # Default the key to the uploaded file's basename.
            key = options.key or os.path.basename(options.file)
            fileobj = data = open(options.file, "rb")
        elif options.data:
            key = options.key
            if not key:
                print("Must specify --key parameter")
                sys.exit(-1)
            data = options.data
        else:
            print("Must specify --file or --data parameter")
            sys.exit(1)
        headers = {}
        if options.type:
            headers["Content-Type"] = options.type
        try:
            resp = cls.conn.make_request("PUT", options.bucket, key,
                                         headers=headers, data=data)
        finally:
            # Fixed: the original never closed the file handle it opened.
            if fileobj is not None:
                fileobj.close()
        if resp.status != HTTP_OK_CREATED:
            prints_body(resp)
class GetObjectAction(BaseAction):
    """Download an object to a local file, optionally as a byte range."""
    command = "get-object"
    usage = "%(prog)s -b <bucket> -k <key> [-F <file> -B <bytes> -z <zone> -f <conf_file>]"

    @classmethod
    def add_ext_arguments(cls, parser):
        """Register get-object specific CLI arguments."""
        parser.add_argument(
            "-b",
            "--bucket",
            dest="bucket",
            required=True,
            help="The bucket name"
        )
        parser.add_argument(
            "-k",
            "--key",
            dest="key",
            required=True,
            help="The object name"
        )
        parser.add_argument(
            "-F",
            "--file",
            dest="file",
            help="The file that the object content should save to"
        )
        parser.add_argument(
            "-B",
            "--bytes",
            dest="bytes",
            help="The object data range"
        )
        return parser

    @classmethod
    def send_request(cls, options):
        """GET the object and stream it to disk; print the body on error."""
        # Resolve the destination path: a directory keeps the object name.
        if options.file:
            if os.path.isdir(options.file):
                path = "%s/%s" % (options.file, options.key)
            else:
                path = options.file
        else:
            path = "%s/%s" % (os.getcwd(), options.key)
        directory = os.path.dirname(path)
        if not os.path.isdir(directory):
            print("No such directory: %s" % directory)
            sys.exit(-1)
        headers = {}
        if options.bytes:
            headers["Range"] = "bytes=%s" % options.bytes
        resp = cls.conn.make_request("GET", options.bucket, options.key, headers=headers)
        if resp.status in (HTTP_OK, HTTP_OK_PARTIAL_CONTENT):
            # Fixed: binary mode. The response body is raw bytes; text mode
            # raises on Python 3 and corrupts data on Windows.
            with open(path, "wb") as f:
                while True:
                    buf = resp.read(BUFSIZE)
                    if not buf:
                        break
                    f.write(buf)
        else:
            prints_body(resp)
class DeleteObjectAction(BaseAction):
    """Delete a single object from a bucket."""
    command = "delete-object"
    usage = "%(prog)s -b <bucket> -k <key> [-f <conf_file>]"

    @classmethod
    def add_ext_arguments(cls, parser):
        """Register delete-object specific CLI arguments."""
        parser.add_argument(
            "-b",
            "--bucket",
            dest="bucket",
            type=str,
            required=True,
            help="The bucket name"
        )
        parser.add_argument(
            "-k",
            "--key",
            dest="key",
            required=True,
            help="The object name"
        )
        return parser

    @classmethod
    def send_request(cls, options):
        """Issue the DELETE; print the response body unless it's 204."""
        resp = cls.conn.make_request("DELETE", options.bucket, options.key)
        if resp.status != HTTP_OK_NO_CONTENT:
            prints_body(resp)
class HeadObjectAction(BaseAction):
    """Print an object's metadata (size, type, ETag, mtime) without its body."""
    command = "head-object"
    usage = "%(prog)s -b <bucket> -k <key> [-f <conf_file>]"

    @classmethod
    def add_ext_arguments(cls, parser):
        """Register head-object specific CLI arguments."""
        parser.add_argument(
            "-b",
            "--bucket",
            dest="bucket",
            action="store",
            type=str,
            required=True,
            help="The bucket name"
        )
        parser.add_argument(
            "-k",
            "--key",
            dest="key",
            required=True,
            help="The object name"
        )
        return parser

    @classmethod
    def send_request(cls, options):
        """HEAD the object and print selected response headers as JSON."""
        resp = cls.conn.make_request("HEAD", options.bucket, options.key)
        if resp.status != HTTP_OK:
            print("Error: %s %s" % (resp.status, resp.reason))
            return
        metadata = {
            "Content-Length": resp.getheader("content-length"),
            "Content-Type": resp.getheader("content-type"),
            "ETag": resp.getheader("etag"),
            "Last-Modified": resp.getheader("last-modified")
        }
        print(json_dumps(metadata, indent=2))
|
import pandas as pd
import re
import tempfile
import matplotlib.pyplot as plt
def read_log(path):
    """Read a log file into a DataFrame, flattening set-literal fields.

    Fields like ``{'a', 'b'}`` contain commas that would break CSV parsing;
    they are rewritten in place to ``a b`` before handing the text to pandas.
    """
    from io import StringIO

    edited_lines = []
    with open(path, 'r') as file:
        for line in file:
            # Extract set if necessary
            result = re.search(r'{(.*)}', line)
            if result:
                # Drop quotes and commas from the set body.
                edited_lines.append(re.sub(
                    r'{(.*)}',
                    result.group(1).replace('\'', '').replace(',', ''),
                    line))
            else:
                edited_lines.append(line)
    # Parse from memory. The original used tempfile.mktemp, which is both
    # insecure (race-prone) and leaked one temp file per call.
    return pd.read_csv(StringIO(''.join(edited_lines)))
def get_plot_data(log_path, header):
    """Return (elapsed_times, values) for column `header` of the given log.

    Times are shifted so the first log entry is at t=0.
    """
    frame = read_log(log_path)
    stamps = frame['# timestamp']
    elapsed = stamps - min(stamps)
    return elapsed, frame[header]
if __name__ == '__main__':
    # Compare reached-state growth with and without clustering for one problem.
    pn = 13
    path = f'Problem{pn}_mutation_cluster.log'
    times, counts = get_plot_data(path, ' state_count')
    path2 = f'Problem{pn}_mutation_nocluster.log'
    # Fixed: this previously re-read `path` instead of `path2`.
    times2, counts2 = get_plot_data(path2, ' state_count')
    # NOTE: the values above are recomputed from the raw logs below and are
    # effectively unused; kept for parity with get_plot_data.
    log1 = read_log(path)
    log2 = read_log(path2)
    # Elapsed seconds since the first entry of each log.
    times1 = log1['# timestamp']
    times1 = times1 - min(times1)
    times2 = log2['# timestamp']
    times2 = times2 - min(times2)
    states1 = log1[' state_count']
    states2 = log2[' state_count']
    alpha = 0.8
    plt.step(times1, states1, label='With clustering', alpha=alpha)
    plt.step(times2, states2, label='Without clustering', alpha=alpha)
    plt.legend()
    plt.title(f"Problem {pn}")
    if pn == 11:
        # Problem 11 keeps matplotlib's automatic x-limits.
        print('lol')
    else:
        # Clip to the shorter of the two runs.
        plt.xlim(left=plt.ylim()[0], right=min(max(times2), max(times1)))
    plt.xlabel('time(s)')
    plt.ylabel('reached states')
    plt.savefig(f'problem_{pn}_mutatingcomparison')
    plt.show()
|
#!/usr/bin/env pvbatch
# state file generated using paraview version 5.8.0
from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
import argparse
import numpy as np
import paratools
# Command line: pairs of input file lists (Aphros vorticity + LAMMPS
# polymers) and rendering quality knobs.
parser = argparse.ArgumentParser(
    description="Renders vorticity magnitude with LAMMPS polymers")
parser.add_argument('--lammps', nargs='+', help="List of data files 'a_*.vtk'")
parser.add_argument('--aphros',
                    nargs='+',
                    help="List of data files 'omm_*.xmf'")
parser.add_argument('--force',
                    action="store_true",
                    help="Overwrite existing files")
parser.add_argument('--draft',
                    action="store_true",
                    help="Fewer samples and lower resolution")
# NOTE(review): type=list turns a command-line string into a list of single
# characters (e.g. "--res 800" -> ['8', '0', '0']); only the default value
# behaves as intended.  Presumably `nargs=2, type=int` was meant — confirm.
parser.add_argument('--res',
                    type=list,
                    default=[1080, 1080],
                    help="Image resolution in pixels")
parser.add_argument('--samples',
                    type=int,
                    default=10,
                    help="Number of samples per pixel")
parser.add_argument('--colormap', type=str, default="blue_yellow_red")
args = parser.parse_args()
# Readers for the Aphros vorticity field (XDMF) and the LAMMPS polymer
# geometry (legacy VTK); paratools.ApplyForceTime wraps each source so its
# time steps can be driven explicitly.
sources_ft = []
timearrays = []
files_omm = args.aphros
source_omm = XDMFReader(FileNames=files_omm)
source_omm.CellArrayStatus = ['omm']
source_omm.GridStatus = ['Grid_10220']
(source_omm, ), (timearray, ) = paratools.ApplyForceTime([source_omm])
sources_ft.append(source_omm)
timearrays.append(timearray)
files_polymer = args.lammps
source_polymer = LegacyVTKReader(FileNames=files_polymer)
(source_polymer, ), (timearray, ) = paratools.ApplyForceTime([source_polymer])
sources_ft.append(source_polymer)
timearrays.append(timearray)
viewsize = args.res
if args.draft:
    # Draft mode: halve the resolution and cut the samples per pixel for
    # quick preview renders.
    viewsize = [max(1, s // 2) for s in viewsize]
    args.samples = min(10, args.samples // 5)
# Render view: fixed camera, parallel projection, OSPRay ray tracing on a
# white background.
renderView1 = CreateView('RenderView')
renderView1.AxesGrid = 'GridAxes3DActor'
renderView1.OrientationAxesVisibility = 0
renderView1.KeyLightWarmth = 0.5
renderView1.FillLightWarmth = 0.5
renderView1.CameraPosition = [
    3.2933362831074464, 2.111426020928463, 1.6757588679166233
]
renderView1.CameraFocalPoint = [
    0.502268266864121, 0.5000021504238248, 0.5027382206171751
]
renderView1.CameraViewUp = [
    -0.296198132726024, -0.17101007166283444, 0.9396926207859084
]
renderView1.CameraParallelScale = 0.709257758997805
renderView1.CameraParallelProjection = 1
renderView1.ViewSize = viewsize
renderView1.EnableRayTracing = 1
renderView1.BackEnd = 'OSPRay raycaster'
renderView1.Background = [1] * 3
# Draft mode renders with a single sample per pixel.
renderView1.SamplesPerPixel = 1 if args.draft else args.samples
# Polymers: render the LAMMPS polylines as thin semi-transparent black
# tubes, with sphere glyphs at the monomer positions.
tube1 = Tube(registrationName='Tube1', Input=source_polymer)
tube1.Scalars = ['POINTS', '']
tube1.Vectors = ['POINTS', '1']
tube1.Radius = 0.005
glyph1 = Glyph(registrationName='Glyph1',
               Input=source_polymer,
               GlyphType='Sphere')
glyph1.OrientationArray = ['POINTS', 'No orientation array']
glyph1.ScaleArray = ['POINTS', 'No scale array']
glyph1.GlyphMode = 'All Points'
glyph1.GlyphType.Radius = 0.1
glyph1.ScaleFactor = 0.09989973691408523
tube1Display = Show(tube1, renderView1, 'GeometryRepresentation')
tube1Display.Representation = 'Surface'
tube1Display.AmbientColor = [0.0, 0.0, 0.0]
tube1Display.ColorArrayName = [None, '']
tube1Display.DiffuseColor = [0.0, 0.0, 0.0]
tube1Display.Opacity = 0.5
tube1Display.Ambient = 0.25
glyph1Display = Show(glyph1, renderView1, 'GeometryRepresentation')
glyph1Display.Representation = 'Surface'
glyph1Display.ColorArrayName = [None, '']
# Vorticity magnitude `omm`: volume-rendered with a fixed (never rescaled)
# transfer function over the value range [3, 12].
ommDisplay = Show(source_omm, renderView1, 'UniformGridRepresentation')
ommLUT = GetColorTransferFunction('omm')
ommLUT.AutomaticRescaleRangeMode = 'Never'
ommLUT.RGBPoints = [
    3.0, 0.0, 1.0, 1.0, 7.05, 0.0, 0.0, 1.0, 7.5, 0.0, 0.0, 0.501960784314,
    7.95, 1.0, 0.0, 0.0, 12.0, 1.0, 1.0, 0.0
]
ommLUT.ColorSpace = 'RGB'
ommLUT.ScalarRangeInitialized = 1.0
# Opacity ramps linearly from transparent at 3.0 to opaque at 12.0.
ommPWF = GetOpacityTransferFunction('omm')
ommPWF.Points = [3.0, 0.0, 0.5, 0.0, 12.0, 1.0, 0.5, 0.0]
ommPWF.ScalarRangeInitialized = 1
ommDisplay.Representation = 'Volume'
ommDisplay.ColorArrayName = ['CELLS', 'omm']
ommDisplay.LookupTable = ommLUT
ommDisplay.SetScaleArray = [None, '']
ommDisplay.ScaleTransferFunction = 'PiecewiseFunction'
ommDisplay.OpacityArray = [None, '']
ommDisplay.OpacityTransferFunction = 'PiecewiseFunction'
ommDisplay.ScalarOpacityUnitDistance = 0.1
ommDisplay.ScalarOpacityFunction = ommPWF
ommDisplay.OpacityArrayName = ['CELLS', 'omm']
ommDisplay.Shade = 1
# FIXME: workaround, otherwise `omm` not shown in the first image
paratools.SetTimeStep(1, sources_ft, timearrays)
# Render one a_<step>.png per Aphros step; existing files are kept unless
# --force is given.
pattern = "a_{}.png"
steps = paratools.GetSteps(args.aphros)
paratools.SaveAnimation(steps,
                        renderView1,
                        sources_ft,
                        timearrays,
                        force=args.force,
                        pattern=pattern)
|
'''
Verify correct handling of session and transaction delay.
'''
# @file
#
# Copyright 2021, Verizon Media
# SPDX-License-Identifier: Apache-2.0
#
Test.Summary = '''
Verify correct handling of session and transaction delay.
'''
#
# Test 1: Run a few sessions and transactions with client-side delay.
#
# `Test`, `Testers` and the Add*Process helpers are injected by the autest
# framework at runtime.
r = Test.AddTestRun("Verify the handling of the client-side delay specification.")
client = r.AddClientProcess("client_client_delay", "client-side-delay.yaml")
server = r.AddServerProcess("server_client_delay", "client-side-delay.yaml")
# The test proxy is not featureful enough to handle both HTTP/1 and HTTP/2
# traffic. Thankfully this is easily addressed by running a separate process
# for each.
proxy = r.AddProxyProcess("proxy_http_client_delay", listen_port=client.Variables.http_port,
                          server_port=server.Variables.http_port)
proxy = r.AddProxyProcess("proxy_https_client_delay", listen_port=client.Variables.https_port,
                          server_port=server.Variables.https_port,
                          use_ssl=True, use_http2_to_2=True)
# Sanity checks on both sides: the replay file was parsed, the run reported
# timing data, and no verification rules were violated.
server.Streams.stdout += Testers.ContainsExpression(
    "Ready with 2 transactions.",
    "The server should have parsed 2 transactions.")
client.Streams.stdout += Testers.ContainsExpression(
    "2 transactions in 2 sessions .* in .* milliseconds",
    "The client should have reported running the transactions with timing data.")
client.Streams.stdout += Testers.ExcludesExpression(
    "Violation:",
    "There should be no verification errors because there are none added.")
server.Streams.stdout += Testers.ExcludesExpression(
    "Violation:",
    "There should be no verification errors because there are none added.")
#
# Test 2: Verify that the timing data indicates that the delays took place.
#
r = Test.AddTestRun("Verify the client-side delay replay took an expected amount of time to run.")
verifier_script = 'verify_duration.py'
# Feed the previous run's client output to verify_duration.py, which checks
# that the reported duration is at least the expected minimum delay (ms).
client_output = client.Streams.stdout.AbsTestPath
expected_min_delay_ms = "1500"
r.Processes.Default.Setup.Copy(verifier_script)
r.Processes.Default.Command = \
    f'python3 {verifier_script} {client_output} {expected_min_delay_ms}'
r.ReturnCode = 0
r.Streams.stdout += Testers.ContainsExpression(
    'Good',
    'The verifier script should report success.')
#
# Test 3: Run a few sessions and transactions with server-side delay.
#
r = Test.AddTestRun("Verify the handling of the server-side delay specification.")
client = r.AddClientProcess("client_server_delay", "server-side-delay.yaml")
server = r.AddServerProcess("server_server_delay", "server-side-delay.yaml")
# The test proxy is not featureful enough to handle both HTTP/1 and HTTP/2
# traffic. Thankfully this is easily addressed by running a separate process
# for each.
proxy = r.AddProxyProcess("proxy_http_server_delay", listen_port=client.Variables.http_port,
                          server_port=server.Variables.http_port)
proxy = r.AddProxyProcess("proxy_https_server_delay", listen_port=client.Variables.https_port,
                          server_port=server.Variables.https_port,
                          use_ssl=True, use_http2_to_2=True)
# Sanity checks: replay parsed, timing data reported, no violations.
server.Streams.stdout += Testers.ContainsExpression(
    "Ready with 2 transactions.",
    "The server should have parsed 2 transactions.")
client.Streams.stdout += Testers.ContainsExpression(
    "2 transactions in 2 sessions .* in .* milliseconds",
    "The client should have reported running the transactions with timing data.")
client.Streams.stdout += Testers.ExcludesExpression(
    "Violation:",
    "There should be no verification errors because there are none added.")
server.Streams.stdout += Testers.ExcludesExpression(
    "Violation:",
    "There should be no verification errors because there are none added.")
#
# Test 4: Verify that the timing data indicates that the delays took place.
#
r = Test.AddTestRun("Verify the server-side delay replay took an expected amount of time to run.")
# Reuse verify_duration.py; server-side delays only need to reach 1000 ms.
client_output = client.Streams.stdout.AbsTestPath
expected_min_delay_ms = "1000"
r.Processes.Default.Setup.Copy(verifier_script)
r.Processes.Default.Command = \
    f'python3 {verifier_script} {client_output} {expected_min_delay_ms}'
r.ReturnCode = 0
r.Streams.stdout += Testers.ContainsExpression(
    'Good',
    'The verifier script should report success.')
|
# -*- coding: utf-8 -*-
"""
d7sms
"""
import unittest
from ..http_response_catcher import HttpResponseCatcher
from d7sms.d_7_sms_client import D7smsClient
from d7sms.configuration import Configuration
class ControllerTestBase(unittest.TestCase):
    """Base class shared by all controller test classes.

    Centralizes construction of the API client and common configuration
    values.  Subclasses are expected to provide a ``controller`` attribute
    before ``setUp`` runs (presumably as a class attribute) — TODO confirm
    against the concrete subclasses.
    """

    @classmethod
    def setUpClass(cls):
        """Run once per test class: build the shared client and defaults."""
        cls.api_client = D7smsClient()
        cls.request_timeout = 30
        cls.assert_precision = 0.01

    def setUp(self):
        """Run before each test: record HTTP responses via a catcher."""
        catcher = HttpResponseCatcher()
        self.response_catcher = catcher
        self.controller.http_call_back = catcher
|
# PyDia Self Documentation Series - Part III : All Objects
# Copyright (c) 2007, Hans Breuer <hans@breuer.org>
#
# generates a new diagram which contains all the currently
# registered objects sorted to layers by their containing package
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sys, dia, string
def set_object_string(o):
    """Give every string/text property of *o* a readable placeholder value.

    Properties named 'name' or 'text' receive the object's type name; any
    other string/text property is filled with its own property name.
    """
    for key in list(o.properties.keys()):
        prop = o.properties[key]
        if prop.type not in ("string", "text"):
            continue
        if key in ("name", "text"):
            o.properties[key] = o.type.name
        else:
            o.properties[key] = key
def aobjects_cb(data, flags) :
    """Build a diagram containing one instance of every registered object
    type, grouped into one layer per package.

    NOTE: Python 2 only — relies on dict.has_key(), list-returning .keys()
    and string.split(); these do not exist in Python 3.
    """
    # copied from otypes.py
    if data :
        diagram = None # we may be running w/o GUI
    else :
        diagram = dia.new("All Objects.dia")
        data = diagram.data
    layer = data.active_layer
    otypes = dia.registered_types()
    keys = otypes.keys()
    keys.sort()
    # Group type names by package: names look like "<package> - <type>".
    packages = {}
    for s in keys :
        kt = string.split(s, " - ")
        if len(kt) == 2 :
            if len(kt[0]) == 0 :
                sp = "<unnamed>"
            else :
                sp = kt[0]
            st = kt[1]
        else :
            # No " - " separator: put the type into a catch-all package.
            sp = "<broken>"
            st = kt[0]
        if packages.has_key(sp) :
            packages[sp].append(s)
        else :
            packages[sp] = [s]
    for sp in packages.keys() :
        # add a layer per package
        layer = data.add_layer (sp)
        # Lay objects out on a grid, 10 per row; (cx, cy) is the cursor and
        # `my` tracks the tallest object in the current row.
        cx = 0.0
        cy = 0.0
        n = 0 # counting objects
        my = 0.0
        pkg = packages[sp]
        for st in pkg :
            if st == "Group" :
                continue # can't create empty group
            #print st
            o, h1, h2 = dia.get_object_type(st).create (cx, cy)
            # to make the resulting diagram more interesting we set every string property with its name
            set_object_string (o)
            w = o.bounding_box.right - o.bounding_box.left
            h = o.bounding_box.bottom - o.bounding_box.top
            o.move (cx, cy)
            cx += w * 1.5
            if h > my : my = h
            n += 1
            if n % 10 == 0 :
                # Start a new row below the tallest object of this row.
                cx = 0.0
                cy += my * 1.5
                my = 0
            layer.add_object (o)
        layer.update_extents()
    data.update_extents()
    if diagram :
        diagram.display()
        diagram.flush()
    return data
# Register the callback as an "All Objects" entry in Dia's Help menu.
dia.register_action ("HelpAObjects", "All Objects",
                     "/ToolboxMenu/Help/HelpExtensionStart",
                     aobjects_cb)
|
"""Turtle-graphics pen exercise: a Pluma (pen) that tracks its current and
previous positions and can move back to where it was."""
import turtle
class Pluma():
    """A pen with a colour, a stroke width and position memory.

    NOTE: positions are stored as lists and `self.previa` initially aliases
    the same list object as `self.posicion`; `mueve`/`regresa` rebind the
    attributes rather than mutating the lists, so the aliasing is harmless
    but intentional-looking — keep it in mind when editing.
    """
    def __init__(self, color, grosor):
        # Pen colour and stroke thickness; the pen starts at the origin.
        self.color = color
        self.grosor = grosor
        self.posicion = [0, 0]
        self.previa = self.posicion
    def mueve(self, x, y):
        """Move the pen to (x, y), remembering the previous position."""
        self.previa = self.posicion
        self.posicion = [x, y]
        turtle.goto(*self.posicion)
    def regresa(self):
        """Return the pen to its previous position (swap current/previous)."""
        self.posicion, self.previa = self.previa, self.posicion
        turtle.goto(*self.posicion)
# Exercise note: add to the Pluma class a function called 'regresa' that
# moves the pen back to its previous position (implemented above).
|
from django.contrib.localflavor.au.models import AUStateField, AUPostCodeField
from django.db import models
class AustralianPlace(models.Model):
    """Model exercising the Australian localflavor fields.

    Each AU-specific field appears in three flavours: optional
    (blank=True), required, and with a default value.
    """
    state = AUStateField(blank=True)
    state_required = AUStateField()
    state_default = AUStateField(default="NSW", blank=True)
    postcode = AUPostCodeField(blank=True)
    postcode_required = AUPostCodeField()
    postcode_default = AUPostCodeField(default="2500", blank=True)
    name = models.CharField(max_length=20)
    class Meta:
        # Attach the model to the localflavor test app.
        app_label = 'localflavor'
|
# Copyright (C) 2004-2018 by
# All rights reserved.
# MIT license.
#
# Author: Vadim Ivlev
# Some functions to show tree graphs.
# Can be used both in standalone programs
# and in Jupyter notebooks.
# Preconditions
# -------------
# The following libraries should be installed:
# `matplotlib, networkx, graphviz, pygraphviz`
# Please use conda or pip.
# Usage
# -----
# from showtree import show_binary_tree, show_tree_graph
import matplotlib.pyplot as plt
import matplotlib
import networkx as nx
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
def uid_gen():
    """Yield consecutive integer node ids, starting at 1."""
    counter = 0
    while True:
        counter += 1
        yield counter


# Module-wide id generator shared by the tree-building helpers.
uid = uid_gen()
# ------------------------------------------------------
def show_tree_graph(G, file_name=None):
    """
    Shows a tree graph.
    Parameters
    ----------
    G : NetworkX tree graph
        A tree graph created with NetworkX
    file_name: if specified the picture will be saved instead of showing.
    Examples
    --------
    >>> gg = nx.balanced_tree(3, 2)
    >>> show_tree_graph(gg)
    """
    plt.rcParams["figure.figsize"] = [10., 7.]
    # Hierarchical layout via graphviz; requires pygraphviz.
    pos = graphviz_layout(G, prog='dot')
    # Nodes whose 'label' attribute is the empty string are invisible
    # placeholders; draw only the others.  `G.nodes[x]` replaces the old
    # `G.node` attribute, which was removed in networkx 2.4.
    not_null_nodes = [
        x for x in G.nodes if G.nodes[x].get('label', str(x)) != '']
    node_lbls = nx.get_node_attributes(G, 'label')
    edge_lbls = nx.get_edge_attributes(G, 'label')
    nx.draw(G, pos, with_labels=True,
            nodelist=not_null_nodes if len(not_null_nodes) > 0 else None,
            labels=node_lbls if len(node_lbls) > 0 else None,
            width=1.0,
            linewidths=0.0,
            node_size=700,
            node_color="#1485CC",
            edge_color="#cccccc",
            font_size=12,
            label="BST",
            alpha=1.0
            )
    nx.draw_networkx_edge_labels(G, pos, font_size=8,
                                 edge_labels=edge_lbls
                                 )
    if not file_name:
        plt.show()
    else:
        plt.savefig(file_name)
    # Clear the figure so repeated calls do not draw on top of each other.
    plt.clf()
# ------------------------------------------------------------
def build_binary_tree_graph(nx_graph, parent_node_id, tree_node, label_attr='data', edge_label=None):
    """Recursively add *tree_node* and its children to *nx_graph*.

    A falsy *tree_node* becomes an empty-labelled placeholder node, so that
    parents with a single child still render symmetrically.

    Parameters
    ----------
    nx_graph : graph exposing ``add_node``/``add_edge`` (e.g. nx.DiGraph)
    parent_node_id : id of the parent graph node, or None for the root
    tree_node : node object with ``left``/``right`` and a label attribute
    label_attr : attribute of *tree_node* used as the node label
    edge_label : label for the edge from the parent ('L' or 'R')
    """
    node_id = next(uid)
    # Placeholder children get an empty label; real nodes use label_attr.
    label = getattr(tree_node, label_attr, '') if tree_node else ''
    nx_graph.add_node(node_id, label=label)
    if parent_node_id is not None:
        nx_graph.add_edge(parent_node_id, node_id, label=edge_label)
    if not tree_node:
        return
    # Recurse only when at least one child exists, so leaves do not grow a
    # pair of placeholder children.
    if tree_node.left or tree_node.right:
        build_binary_tree_graph(
            nx_graph, node_id, tree_node.left, label_attr, 'L')
        build_binary_tree_graph(
            nx_graph, node_id, tree_node.right, label_attr, 'R')
# -------------------------------------------------------
def show_binary_tree(root_node, label_attr='data', file_name=None):
    """Render a linked binary tree of nodes shaped like::

        class Node:
            def __init__(self, val=''):
                self.data = val
                self.left = None
                self.right = None

    Nodes are labeled with their ``data`` attribute; pass *label_attr* to
    use a different attribute instead.

    Parameters
    ----------
    root_node : the root node of the tree.
    label_attr : attribute name used for node labels.
    file_name : if given, save the picture to this file instead of showing.

    Examples
    --------
    >>> show_binary_tree(root)
    """
    graph = nx.DiGraph()
    build_binary_tree_graph(graph, None, root_node, label_attr)
    show_tree_graph(graph, file_name=file_name)
# TESTING -----------------------------------------------
if __name__ == '__main__':
    from random import sample, seed
    # Minimal BST node/insert helpers used only for this demo.
    class Node:
        def __init__(self, val=''):
            self.data = val
            self.left = None
            self.right = None
    def add_node(root, val):
        # Standard BST insert; duplicate values are silently ignored.
        if not root:
            return Node(val)
        if val < root.data:
            root.left = add_node(root.left, val)
        elif val > root.data:
            root.right = add_node(root.right, val)
        return root
    def build_bst(lst):
        bst = None
        for v in lst:
            bst = add_node(bst, v)
        return bst
    # Fixed seed so the demo tree is reproducible.
    seed(1)
    r = sample(range(11, 100), 20)
    show_binary_tree(build_bst(r))
    # show_binary_tree(build_bst(r), file_name='bst.png')
    gg = nx.balanced_tree(3, 2)
    show_tree_graph(gg)
    # show_tree_graph(gg, file_name='tree.png')
|
import lane_lines
from moviepy.editor import VideoFileClip
def process_image(image):
    """Annotate one video frame using the module-level lane finder."""
    return lane_lines_finder.run(image)
# Lane finder built from the saved camera-calibration pickle.
lane_lines_finder = lane_lines.LaneFinder('calibration_output/wide_dist_pickle.p')
video = 'project_video'
input_video = video + '.mp4'
output_video = 'out_videos/' + video + '_out.mp4'
white_output = output_video
# Run the lane finder over every frame and write the annotated video.
clip1 = VideoFileClip(input_video)
white_clip = clip1.fl_image(process_image)
white_clip.write_videofile(white_output, audio=False)
|
from django.conf.urls import url
from sound.views import AuthorDetail, AuthorList, AuthorUpdate, \
SoundDetail, SoundList, \
RequestDetail, RequestGetOut, RequestGetIn, RequestList, RequestCreate
# URL routes for the sound app: authors, requests (with join/leave voice
# actions) and sounds.  NOTE(review): django.conf.urls.url() was deprecated
# in Django 2.0 and removed in 4.0 (re_path/path replace it) — confirm the
# project's Django version before upgrading.
urlpatterns = [
    url(r'^author/(?P<pk>[\d]+)/edit/$', AuthorUpdate.as_view(), name="sound-author-update"),
    url(r'^author/(?P<pk>[\d]+)/$', AuthorDetail.as_view(), name="sound-author-detail"),
    url(r'^author/$', AuthorList.as_view(), name="sound-author-list"),
    url(r'^req/add/$', RequestCreate.as_view(), name="request-create"),
    url(r'^req/(?P<request_pk>[\d]+)/get-out/(?P<voice_pk>[\d]+)/$',
        RequestGetOut.as_view(),
        name="request-author-get-out"
        ),
    url(r'^req/(?P<request_pk>[\d]+)/get-in/(?P<voice_pk>[\d]+)/$',
        RequestGetIn.as_view(),
        name="request-author-get-in"
        ),
    url(r'^req/(?P<pk>[\d]+)/$', RequestDetail.as_view(), name="request-detail"),
    url(r'^req/$', RequestList.as_view(), name="request-list"),
    url(r'^(?P<pk>[\d]+)/$', SoundDetail.as_view(), name="sound-detail"),
    url(r'^$', SoundList.as_view(), name="sound-list"),
]
|
import pygame
class MeleeAttack():
    """Bare-fist melee attack for a two-player fighting game.

    Draws the fist sprite beside the attacking player and applies damage to
    the other player when they are within the attack range.

    NOTE(review): several `elif` branches below use chained comparisons that
    can never be true (e.g. ``-self.attackrange > dx > 0``); they look like
    sign mistakes made while mirroring the left/right cases — confirm the
    intended hit-boxes before "fixing" them.
    """
    def __init__(self, damage, player, handler):
        self.damage = damage
        # Handler gives access to both players; `player` is the attacker.
        self.handler = handler
        self.player = player
        # Horizontal reach of a punch, in pixels.
        self.attackrange = 100
        # Fist sprite rotated for a left-facing punch, mirrored for right.
        self.lfist = pygame.transform.rotate(pygame.image.load("media/Misc/fist.png").convert_alpha(), 90)
        self.rfist = pygame.transform.flip(self.lfist, True, False)
    def attack(self, screen):
        """Draw the punch for the attacking player and damage the opponent
        when inside range (facing: 1 = right, -1 = left)."""
        if self.player.facing == 1:
            # Attacker punches to the right.
            if self.handler.getPlayer1().name == self.player.name:
                screen.blit(self.rfist, [self.handler.getPlayer1().rect.x + self.handler.getPlayer1().width + 5, self.handler.getPlayer1().rect.y + 20])
                if 0 < self.handler.getPlayer2().rect.x - self.player.rect.x < self.attackrange and -self.player.height <= self.handler.getPlayer2().rect.y - self.player.rect.y < self.player.height:
                    self.handler.getPlayer2().takeDamage(self.damage)
                # NOTE(review): `-range > dx > 0` is unsatisfiable.
                elif -self.attackrange > self.handler.getPlayer2().rect.x - self.player.rect.x > 0 and 0 < self.handler.getPlayer2().rect.y - self.player.rect.y < self.player.height:
                    self.handler.getPlayer2().takeDamage(self.damage)
            if self.handler.getPlayer2().name == self.player.name:
                screen.blit(self.rfist, [self.handler.getPlayer2().rect.x + self.handler.getPlayer2().width + 5, self.handler.getPlayer2().rect.y + 20])
                if 0 < self.handler.getPlayer1().rect.x - self.player.rect.x < self.attackrange and -self.player.height <= self.handler.getPlayer1().rect.y - self.player.rect.y < self.player.height:
                    self.handler.getPlayer1().takeDamage(self.damage)
                # NOTE(review): `-range > dx > 0` is unsatisfiable.
                elif -self.attackrange > self.handler.getPlayer1().rect.x - self.player.rect.x > 0 and 0 < self.handler.getPlayer1().rect.y - self.player.rect.y < self.player.height:
                    self.handler.getPlayer1().takeDamage(self.damage)
        if self.player.facing == -1:
            # Attacker punches to the left.
            if self.handler.getPlayer1().name == self.player.name:
                screen.blit(self.lfist, [self.handler.getPlayer1().rect.x - 25, self.handler.getPlayer1().rect.y + 20])
                if 0 > self.handler.getPlayer2().rect.x - self.player.rect.x > -self.attackrange and -self.player.height <= self.handler.getPlayer2().rect.y - self.player.rect.y < self.player.height:
                    self.handler.getPlayer2().takeDamage(self.damage)
                # NOTE(review): `range < dx < 0` is unsatisfiable.
                elif self.attackrange < self.handler.getPlayer2().rect.x - self.player.rect.x < 0 and 0 > self.handler.getPlayer2().rect.y - self.player.rect.y > -self.player.height:
                    self.handler.getPlayer2().takeDamage(self.damage)
            if self.handler.getPlayer2().name == self.player.name:
                screen.blit(self.lfist, [self.handler.getPlayer2().rect.x - 25, self.handler.getPlayer2().rect.y + 20])
                if 0 > self.handler.getPlayer1().rect.x - self.player.rect.x > -self.attackrange and -self.player.height <= self.handler.getPlayer1().rect.y - self.player.rect.y < self.player.height:
                    self.handler.getPlayer1().takeDamage(self.damage)
                # NOTE(review): `range < dx < 0` is unsatisfiable, and the
                # y-condition differs from the mirrored branch above.
                elif self.attackrange < self.handler.getPlayer1().rect.x - self.player.rect.x < 0 and 0 < self.handler.getPlayer1().rect.y - self.player.rect.y < self.player.height:
                    self.handler.getPlayer1().takeDamage(self.damage)
|
import sys
import argparse
from sklearn.model_selection import KFold
from sklearn.externals import joblib
import numpy as np
import h5py
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.metrics import confusion_matrix, accuracy_score, matthews_corrcoef, \
classification_report
from keras.layers import Input, Activation, Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.models import Model, load_model
from keras.layers.advanced_activations import ELU
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
''' This code is based on Núñez-Marcos, A., Azkune, G., & Arganda-Carreras,
I. (2017). "Vision-Based Fall Detection with Convolutional Neural Networks"
Wireless Communications and Mobile Computing, 2017.
Also, new features were added by Gabriel Pellegrino Silva working in
Semantix.
'''
''' Documentation: class Result
This class has a few methods:
pre_result
result
check_videos
The methods that should be called outside of this class are:
result: show the results of a prediction based on a feed forward on the
classifier of this worker.
'''
class Result:
    """Fuse per-stream classifier predictions and report evaluation metrics.

    Each "stream" (e.g. spatial, temporal) has its own Keras classifier and
    HDF5 feature/label files on disk; ``result()`` combines the per-stream
    predictions with one of several strategies and prints the metrics.
    """

    def __init__(self, streams, classes, fid, cid):
        # Dataset keys inside the HDF5 files written by the feature extractor.
        self.features_key = 'features'
        self.labels_key = 'labels'
        self.samples_key = 'samples'
        self.num_key = 'num'
        self.classes = classes
        self.streams = streams
        # Identifiers used to locate the feature (fid) and classifier (cid)
        # artifacts on disk.
        self.fid = fid
        self.cid = cid
        # Number of frames stacked per sample by the extractor.
        self.sliding_height = 10

    def pre_result(self, stream):
        """Load the classifier and features for *stream* and return
        (features, labels, per-sample prediction vectors)."""
        self.classifier = load_model(stream + '_classifier_' + self.cid + '.h5')
        # Reading information extracted
        h5features = h5py.File(stream + '_features_' + self.fid + '.h5', 'r')
        h5labels = h5py.File(stream + '_labels_' + self.fid + '.h5', 'r')
        # all_features will contain all the feature vectors extracted from
        # optical flow images.  NOTE: the HDF5 files are intentionally left
        # open — self.all_features is a live dataset view into h5features.
        self.all_features = h5features[self.features_key]
        self.all_labels = np.asarray(h5labels[self.labels_key])
        predicteds = []
        # Feed-forward one feature vector at a time through the classifier.
        for data in self.all_features:
            pred = self.classifier.predict(np.asarray(data.reshape(1, -1)))
            pred = pred.flatten()
            predicteds.append(pred)
        return self.all_features, self.all_labels, np.asarray(predicteds)

    def evaluate_max(self, truth, avg_predicted):
        """Arg-max each averaged prediction vector, then evaluate."""
        # dtype=float: np.float was deprecated and removed in numpy >= 1.24.
        predicted = np.zeros(len(truth), dtype=float)
        for i in range(len(truth)):
            predicted[i] = np.argmax(avg_predicted[i])
        # BUG FIX: was `predited`, which raised NameError at runtime.
        return self.evaluate(truth, predicted)

    def evaluate(self, truth, predicted):
        """Print classification report, confusion matrix, accuracy and
        Matthews correlation; return *predicted* unchanged."""
        print("Classification report for classifier \n%s\n"
              % (classification_report(truth, predicted)))
        print("Confusion matrix:\n%s" % confusion_matrix(truth, predicted))
        # Compute metrics and print them
        # NOTE(review): `cm` (label-ordered confusion matrix) is computed
        # but never used — confirm whether it was meant to be printed.
        cm = confusion_matrix(truth, predicted, labels=[i for i in range(len(self.classes))])
        accuracy = accuracy_score(truth, predicted)
        print('Accuracy: {}'.format(accuracy))
        print('Matthews: {}'.format(matthews_corrcoef(truth, predicted)))
        return predicted

    def result(self, f_classif):
        """Fuse the per-stream predictions using strategy *f_classif*
        ('max_avg', 'svm_avg', 'svm_1' or 'svm_2'), print the metrics and
        a per-video breakdown."""
        predicteds = []
        len_STACK = 0
        Truth = 0
        # Fusion artifacts (SVMs) are keyed by the concatenated stream names.
        key = ''.join(self.streams)
        for stream in self.streams:
            X, Y, predicted = self.pre_result(stream)
            # Assumes every stream shares the same labels/sample count; the
            # last stream's values win.
            len_STACK = len(Y)
            Truth = Y
            predicteds.append(np.copy(predicted))
        predicteds = np.asarray(predicteds)
        # dtype=float: np.float was removed in numpy >= 1.24.
        cont_predicteds = np.zeros(shape=(len_STACK, len(self.classes)), dtype=float)
        if f_classif == 'max_avg':
            # Average the class scores across streams, then take the argmax.
            for j in range(len_STACK):
                for i in range(len(self.streams)):
                    for k in range(len(self.classes)):
                        cont_predicteds[j][k] += (predicteds[i][j][k] / len(self.streams))
            cont_predicteds = self.evaluate_max(Truth, cont_predicteds)
        elif f_classif == 'svm_avg':
            # Average the class scores, then classify the averages with a
            # pre-trained SVM.
            for j in range(len_STACK):
                for i in range(len(self.streams)):
                    for k in range(len(self.classes)):
                        cont_predicteds[j][k] += (predicteds[i][j][k] / len(self.streams))
            clf = joblib.load('svm_avg_' + key + '.pkl')
            print('EVALUATE WITH average and svm')
            cont_predicteds = clf.predict(cont_predicteds)
            cont_predicteds = self.evaluate(Truth, cont_predicteds)
        elif f_classif == 'svm_1':
            # Per-stream auxiliary SVMs first, then a fusion SVM on top.
            svm_cont_1_test_predicteds = []
            for i in range(len(self.streams)):
                aux_svm = joblib.load('svm_' + self.streams[i] + '_1_aux.pkl')
                svm_cont_1_test_predicteds.append(aux_svm.predict(predicteds[i]))
            svm_cont_1_test_predicteds = np.asarray(svm_cont_1_test_predicteds)
            svm_cont_1_test_predicteds = np.reshape(svm_cont_1_test_predicteds, svm_cont_1_test_predicteds.shape[::-1])
            clf = joblib.load('svm_' + key + '_cont_1.pkl')
            print('EVALUATE WITH continuous values and SVM 1')
            cont_predicteds = clf.predict(svm_cont_1_test_predicteds)
            cont_predicteds = self.evaluate(Truth, cont_predicteds)
        elif f_classif == 'svm_2':
            # Single SVM over the concatenated per-stream score vectors.
            clf = joblib.load('svm_' + key + '_cont_2.pkl')
            svm_cont_2_test_predicteds = np.asarray([list(predicteds[:, i, j]) for i in range(len(Truth)) for j in range(len(self.classes))])
            svm_cont_2_test_predicteds = svm_cont_2_test_predicteds.reshape(len(Truth), len(self.classes) * len(self.streams))
            print('EVALUATE WITH continuous values and SVM 2')
            cont_predicteds = clf.predict(svm_cont_2_test_predicteds)
            cont_predicteds = self.evaluate(Truth, cont_predicteds)
        else:
            print("FUNCAO CLASSIFICADORA INVALIDA!!!!")
            return
        self.check_videos(Truth, cont_predicteds, self.streams[0])

    def check_videos(self, _y2, predicted, stream):
        """Print per-video hit/miss statistics, grouping the per-stack
        predictions back into their source videos using the samples/num
        metadata files."""
        h5samples = h5py.File(stream + '_samples_' + self.fid + '.h5', 'r')
        h5num = h5py.File(stream + '_num_' + self.fid + '.h5', 'r')
        all_samples = np.asarray(h5samples[self.samples_key])
        all_num = np.asarray(h5num[self.num_key])
        # Cursors: stack index, class index and video index.
        stack_c = 0
        class_c = 0
        video_c = 0
        all_num = [y for x in all_num for y in x]
        for amount_videos in all_num:
            cl = self.classes[class_c]
            message = '###### ' + cl + ' videos ' + str(amount_videos)+' ######'
            print(message)
            for num_video in range(amount_videos):
                num_miss = 0
                FP = 0
                FN = 0
                # Walk the stacks belonging to this video and count
                # false positives / false negatives.
                for num_stack in range(stack_c, stack_c + all_samples[video_c+num_video][0]):
                    if num_stack >= len(predicted):
                        break
                    elif predicted[num_stack] != _y2[num_stack]:
                        if _y2[num_stack] == 0:
                            FN += 1
                        else:
                            FP += 1
                        num_miss+=1
                if num_miss == 0:
                    print("Hit video %3d [%5d miss %5d stacks %5d FP %5d FN]" %(num_video+1, num_miss, all_samples[video_c+num_video][0], FP, FN))
                else:
                    print("Miss video %3d [%5d miss %5d stacks %5d FP %5d FN]" %(num_video+1, num_miss, all_samples[video_c+num_video][0], FP, FN))
                stack_c += all_samples[video_c + num_video][0]
            video_c += amount_videos
            class_c += 1
if __name__ == '__main__':
    '''
    todo: make this weight_0 (w0) more general for multiple classes
    '''
    '''
    todo: verify if all these parameters are really required
    '''
    # Banner goes to stderr so stdout stays machine-parseable.
    print("***********************************************************",
        file=sys.stderr)
    print("             SEMANTIX - UNICAMP DATALAB 2018", file=sys.stderr)
    print("***********************************************************",
        file=sys.stderr)
    argp = argparse.ArgumentParser(description='Do result tasks')
    argp.add_argument("-class", dest='classes', type=str, nargs='+',
            help='Usage: -class <class0_name> <class1_name>..<n-th_class_name>',
            required=True)
    argp.add_argument("-streams", dest='streams', type=str, nargs='+',
            help='Usage: -streams spatial temporal (to use 2 streams example)',
            required=True)
    argp.add_argument("-fid", dest='fid', type=str, nargs=1,
            help='Usage: -id <identifier_to_features>',
            required=True)
    argp.add_argument("-cid", dest='cid', type=str, nargs=1,
            help='Usage: -id <identifier_to_classifier>',
            required=True)
    argp.add_argument("-f_classif", dest='f_classif', type=str, nargs=1,
            help='Usage: -f_classif <max_avg> or <svm_avg> or <svm_1> or <svm_2>',
            required=True)
    try:
        args = argp.parse_args()
    except:
        argp.print_help(sys.stderr)
        exit(1)
    result = Result(args.streams, args.classes, args.fid[0], args.cid[0])
    # Need to sort
    # NOTE: Result holds a reference to args.streams, so this in-place sort
    # (done after construction) is still seen by the Result instance.
    args.streams.sort()
    result.result(args.f_classif[0])
    '''
    todo: criar excecoes para facilitar o uso
    '''
    '''
    todo: nomes diferentes para classificadores
    '''
|
from setuptools import find_packages, setup

# Package version; bump on release.
VERSION = '0.0.9'

# Packaging metadata for the lywsd02 BLE sensor library.
setup(
    name='lywsd02',
    version=VERSION,
    packages=find_packages(exclude=("tests",)),
    url='https://github.com/h4/lywsd02',
    license='MIT',
    author='h4',
    classifiers=[
        'Programming Language :: Python :: 3.7',
    ],
    install_requires=['bluepy==1.3.0'],
    scripts=['scripts/lywsd02'],
    author_email='mikhail.baranov@gmail.com',
    # Typo fix: "sendor" -> "sensor" in the user-facing description.
    description='Lywsd02 BLE sensor Library',
    long_description_content_type='text/x-rst',
    long_description='Library to read data from Mi Temperature and Humidity Sensor (E-Inc version with Clock)',
)
|
from ..schema import types
from .extensions import SpecificationExtensions
# OpenAPI 'Contact' object schema: optional name, URL and e-mail address.
# Pattern properties allow the x-* specification extensions; any other
# unknown property is rejected (additional_properties=False).
Contact = types.Schema(
    name='Contact',
    pattern_properties=SpecificationExtensions,
    additional_properties=False,
    properties={
        'name': types.StringType(),
        'url': types.UrlType(),
        'email': types.EmailType()
    }
)
|
class vocab(object):
    """
    Base CRIPTs vocabulary object. Does nothing right now.

    NOTE: Python 2 only — uses dict.iteritems() and basestring, which do
    not exist in Python 3 (items()/str there).
    """
    @classmethod
    def values(cls, sort=False):
        """
        Get available values in a list.
        :param sort: Should the list be sorted.
        :type sort: bool
        :returns: list
        """
        l = []
        # Collect plain-string class attributes, skipping dunder names and
        # marker strings that contain 'vocabulary'.
        for k,v in cls.__dict__.iteritems():
            if ('__' not in k and
                isinstance(v, basestring) and
                '__' not in v and
                'vocabulary' not in v):
                l.append(v)
        if sort:
            l.sort()
        return l
|
# Python 2 script: quick sanity check that Theano's (deprecated) CUDA
# backend can see CuDNN.
print '***** Guided Proofreading *****'
from theano.sandbox.cuda import dnn
print 'CuDNN support:', dnn.dnn_available()
|
from flask_ember.util.string import dasherize
class ResourceGenerator:
    """Registers a placeholder Flask endpoint for a single resource class."""

    def __init__(self, ember, resource_class):
        # Keep references to the owning FlaskEmber instance and the
        # resource class to generate endpoints for.
        self.ember = ember
        self.resource_class = resource_class

    def generate(self, app):
        """Attach a URL rule for the resource to *app*.

        TODO: real generation of api endpoints etc — currently the view
        just echoes the resource name.
        """
        endpoint = self.resource_class.__qualname__
        app.add_url_rule('/' + dasherize(endpoint), endpoint, lambda: endpoint)
|
# The nearest friendly unit carries the roster of worker names.
workerNameList = hero.findNearest(hero.findFriends()).workerList
def build(construction, start):
    """Assign every third worker, beginning at index *start*, to build
    *construction* by saying "<name> - <construction>"."""
    # len() is standard Python; the original `.length` attribute only works
    # in CodeCombat's lenient transpiler, not in real Python.
    for i in range(start, len(workerNameList), 3):
        hero.say(workerNameList[i] + " - " + construction)
# Round-robin assignment: workers 1,4,7,... build towers; 0,3,6,... tents;
# 2,5,8,... fences.
build("tower", 1)
build("tent", 0)
build("fence", 2)
# Main combat loop: keep attacking the nearest enemy.
while True:
    enemy = hero.findNearestEnemy()
    if enemy:
        hero.attack(enemy)
|
#! /usr/bin/env python
import pysnmp
import paramiko
import my_func
def print_ver():
    # Python 2 print statements: show installed PySNMP/Paramiko versions.
    print "\nPySNMP Version: %s" % (pysnmp.__version__)
    print "Paramiko Version: %s\n" % (paramiko.__version__)
if __name__ == '__main__':
    print_ver()
|
#CONFIG FILE
#Made by Luka Dragar
#@thelukadragar
operatingmode="xyz"
#"xy"---move only in the xy plane
#"xyz"--move in xy and z
#----------------------------------------------
sendinginterval=0.15
#interval between sent points [seconds]
#try changing this if there are transmission problems
myIP= "127.0.0.1" # localhost or your ip "192.168.64.101"
myPort=55555
#limit coordinates for moving
#change these if the robot is out of reach
xmax=600
xmin=0
ymin=0
ymax=600
zmax=300
zmin=-500
zrange=1000 #difference between max and min
zoffsetcamera=-500#offset according to where you want Z0 to be
areatocompare=120000#the area of your hand is compared to this reference value
#live-feed window scale, in percent
livefeedwindowscalex=100
livefeedwindowscaley=100
#camera source: an IP camera can be used like this:
#cameratouse="http://192.168.64.102:8080/video" #for ip camera
cameratouse=0 #default (first local) camera
#coordinate multipliers (scale factors applied per axis)
cormultiplyx=1
cormultiplyy=1
cormultiplyz=1
#sensitivity: send a move only if the object moves by this many px (+-1 default)
sensitivity=1
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class BrokerZerodhaTxn(models.Model):
    """One executed-trade row imported from a Zerodha (broker) tradebook."""

    bzt_id = models.AutoField(primary_key=True)
    # DO_NOTHING: deleting the User leaves these rows pointing at a missing FK.
    bzt_user = models.ForeignKey(User, on_delete=models.DO_NOTHING)
    bzt_tdate = models.DateField(blank=True, null=True)
    bzt_tsymbol = models.TextField(blank=True, null=True)
    bzt_exchange = models.TextField(blank=True, null=True)
    bzt_segment = models.TextField(blank=True, null=True)
    bzt_trade_type = models.TextField(blank=True, null=True)
    bzt_quantity = models.FloatField(blank=True, null=True)
    bzt_price = models.FloatField(blank=True, null=True)
    bzt_order_id = models.IntegerField(blank=True, null=True)
    bzt_trade_id = models.IntegerField(blank=True, null=True)
    # NOTE(review): stored as text, not a DateTimeField — confirm downstream parsing.
    bzt_order_exec_time = models.TextField(blank=True, null=True)
    class Meta:
        db_table = 'broker_zerodha_txn'
        # NOTE(review): this permits only ONE transaction per symbol per day,
        # which looks too strict for a trade log — confirm intended constraint.
        unique_together = (('bzt_tsymbol', 'bzt_tdate'),)
|
from ice.jcache import JCache
import functools
import hashlib
import datetime
#Source: https://unterwaditzer.net/2016/sync-algorithm.html
class SyncMap:
    """
    Persistent two-way synchronization state map.

    For every item UID it stores the pair of etags [etaga, etagb] seen at the
    last successful sync of side A and side B, and derives from the triple
    (a, b, status) what has to be done next.
    Algorithm: https://unterwaditzer.net/2016/sync-algorithm.html
    """

    def __init__(self, identifier):
        """
        Create or restore the map for *identifier*.

        The identifier is e.g. the account + calendar name / address book
        name and must be unique, e.g. a CalDAV resource URL.
        """
        self.identifier = identifier
        self.cache = JCache()
        self.status = {}
        self.load(self.identifier)

    def __del__(self):
        # Best effort: persist the map when the object is garbage collected.
        self.save()

    def etag(self, uid, created, last_modified):
        """
        Generate an etag for an item from its UID and timestamps.

        The datetime values should include seconds (not a requirement).
        NOTE(review): the "%F %T" strftime codes are not portable to every
        platform (e.g. native Windows builds) — confirm the target OS.
        """
        etag_raw = uid + last_modified.strftime("%F %T") + created.strftime("%F %T")
        return hashlib.md5(etag_raw.encode('utf-8')).hexdigest()

    def delete_status(self, uid):
        """
        Delete the status etag entries for an item.
        Same as update_status(uid, None, None).
        """
        self.update_status(uid)

    def update(self, uid, etag):
        """
        Convenience wrapper for update_status(). Call after a successful sync
        of two items with the etag, which should be the same on both sides.
        """
        self.update_status(uid, etag)

    def update_status(self, uid, etaga=None, etagb=None):
        """
        Record the item's post-sync state. If both etags are None the status
        entry is deleted. After a successful two-way sync etaga and etagb are
        normally equal, so update_status(uid, etag) suffices.

        Raises Exception when asked to delete a UID that has no cached entry.
        """
        try:
            if uid and (etaga or etagb):
                # Mirror a missing etag from the other side. (The original
                # also had an `elif etaga is None and etagb is None` branch
                # here, which was unreachable under the enclosing guard.)
                if etaga is None:
                    etaga = etagb
                elif etagb is None:
                    etagb = etaga
                uid = str(uid)
                new_entry = [etaga, etagb]
                entry = self.status.setdefault(uid, new_entry)
                # Plain list equality replaces the original functools.reduce/
                # map element-wise check; equivalent for these [a, b] pairs.
                if entry != new_entry:
                    self.status[uid] = new_entry
                return
            elif uid:
                # Both etags absent: drop the status entry entirely.
                del self.status[str(uid)]
        except KeyError:
            raise Exception("Synchronization Error. There is no cached entry for item with UID " + str(uid))

    def load(self, identifier):
        """
        Restore the status map from persistent storage.
        Called from __init__(); any failure falls back to an empty map.
        """
        try:
            self.status = self.cache.fetch(identifier)
        except Exception:
            self.status = {}

    def save(self):
        """
        Save the status map to persistent storage.
        Also called from __del__().
        """
        self.cache.stash(self.identifier, self.status)

    def compile_instruction(self, uid=None, a=None, b=None):
        """
        @Parameters
        uid = universal identifier of the item
        a = etag of the local item, or None if it does not exist
        b = etag of the remote item, or None if it does not exist
        @Returns
        The instruction for the two corresponding items (same uid). "Cmd" is:
        - Copy (from 'Source' to each location in 'Target')
        - Delete (from each location in 'Source')
        - Conflict
        E.g. {"Cmd": "Copy", "Source": "A", "Target": ["B", "status"], ...}
        """
        statustags = self.status.get(uid)
        if a and not b and not statustags:
            # New on A only -> copy to B and remember it.
            return {"Cmd": "Copy", "Source": "A", "Target": ["B", "status"],
                    "Tasks": 2, "Done": 0}
        elif not a and b and not statustags:
            # New on B only -> copy to A and remember it.
            return {"Cmd": "Copy", "Source": "B", "Target": ["A", "status"],
                    "Tasks": 2, "Done": 0}
        elif a and not b and statustags:
            # Known item vanished on B -> delete from A and from status.
            return {"Cmd": "Delete", "Source": ["A", "status"],
                    "Tasks": 2, "Done": 0}
        elif not a and b and statustags:
            # Known item vanished on A -> delete from B and from status.
            return {"Cmd": "Delete", "Source": ["B", "status"],
                    "Tasks": 2, "Done": 0}
        elif a and b and not statustags:
            # Exists on both sides but was never synced -> conflict.
            return {"Cmd": "Conflict"}
        elif not a and not b and statustags:
            # Gone on both sides -> only the status entry remains to delete.
            return {"Cmd": "Delete", "Source": ["status"],
                    "Tasks": 1, "Done": 0}
        elif a and b and statustags:
            if statustags[0] != a and statustags[1] == b:
                # A changed since last sync -> copy A to B.
                return {"Cmd": "Copy", "Source": "A", "Target": ["B", "status"],
                        "Tasks": 2, "Done": 0}
            elif statustags[0] == a and statustags[1] != b:
                # B changed since last sync -> copy B to A.
                return {"Cmd": "Copy", "Source": "B", "Target": ["A", "status"],
                        "Tasks": 2, "Done": 0}
            elif statustags[0] != a and statustags[1] != b:
                # Both sides changed independently -> conflict.
                return {"Cmd": "Conflict", "Tasks": 1, "Done": 0}
            else:
                # NOTE(review): this branch is "unchanged on both sides"; the
                # historic behavior is to raise, though the message reads like
                # the opposite case — kept for compatibility, consider a no-op.
                raise Exception("Synchronisation Error. Modification of item with UID " + str(uid) + " locally and remote.")
        else:
            raise Exception("Synchronisation Error. Unseen case for item with UID " + str(uid))

    def getInstruction(self, uid, dtscreated, dtslastmodified):
        """
        Convenience method: same as compile_instruction(), but computes both
        etags itself from (created, last_modified) timestamp pairs.
        """
        etaga = self.etag(uid, dtscreated[0], dtslastmodified[0])
        etagb = self.etag(uid, dtscreated[1], dtslastmodified[1])
        return self.compile_instruction(uid, etaga, etagb)
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class Biography(object):
    """
    Swagger model holding a user's personal biography data.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        Biography - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Attribute name -> Swagger type; drives to_dict() traversal.
        self.swagger_types = {
            'biography': 'str',
            'interests': 'list[str]',
            'hobbies': 'list[str]',
            'spouse': 'str',
            'education': 'list[Education]'
        }
        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'biography': 'biography',
            'interests': 'interests',
            'hobbies': 'hobbies',
            'spouse': 'spouse',
            'education': 'education'
        }
        self._biography = None
        self._interests = None
        self._hobbies = None
        self._spouse = None
        self._education = None

    @property
    def biography(self):
        """
        Gets the biography of this Biography.
        Personal detailed description

        :return: The biography of this Biography.
        :rtype: str
        """
        return self._biography

    @biography.setter
    def biography(self, biography):
        """
        Sets the biography of this Biography.

        :param biography: The biography of this Biography.
        :type: str
        """
        self._biography = biography

    @property
    def interests(self):
        """
        Gets the interests of this Biography.

        :return: The interests of this Biography.
        :rtype: list[str]
        """
        return self._interests

    @interests.setter
    def interests(self, interests):
        """
        Sets the interests of this Biography.

        :param interests: The interests of this Biography.
        :type: list[str]
        """
        self._interests = interests

    @property
    def hobbies(self):
        """
        Gets the hobbies of this Biography.

        :return: The hobbies of this Biography.
        :rtype: list[str]
        """
        return self._hobbies

    @hobbies.setter
    def hobbies(self, hobbies):
        """
        Sets the hobbies of this Biography.

        :param hobbies: The hobbies of this Biography.
        :type: list[str]
        """
        self._hobbies = hobbies

    @property
    def spouse(self):
        """
        Gets the spouse of this Biography.

        :return: The spouse of this Biography.
        :rtype: str
        """
        return self._spouse

    @spouse.setter
    def spouse(self, spouse):
        """
        Sets the spouse of this Biography.

        :param spouse: The spouse of this Biography.
        :type: str
        """
        self._spouse = spouse

    @property
    def education(self):
        """
        Gets the education of this Biography.
        User education details

        :return: The education of this Biography.
        :rtype: list[Education]
        """
        return self._education

    @education.setter
    def education(self, education):
        """
        Sets the education of this Biography.

        :param education: The education of this Biography.
        :type: list[Education]
        """
        self._education = education

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested models.
        """
        result = {}
        # Native dict iteration replaces six.iteritems (Python 3 compatible).
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_json(self):
        """
        Returns the model as raw JSON
        """
        return json.dumps(sanitize_for_serialization(self.to_dict()))

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.
        Fix: guard on type so comparing against objects without __dict__
        (e.g. int, str) returns False instead of raising AttributeError.
        """
        return isinstance(other, Biography) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
# Read n button values from stdin. With an odd count the game is unfinished;
# otherwise print |v1 - v2 + v3 - ...| (alternating sum, absolute value).
n = int(input())
buttonList = list()
for _ in range(n):
    buttonList.append(int(input()))
if n % 2 != 0:
    # Odd number of presses -> no final score yet.
    print('still running')
else:
    syn = '+'  # sign applied to the next value
    Sum = 0
    for i in buttonList:
        if syn == '+':
            Sum += i
            syn = '-'
        else:
            Sum -= i
            syn = '+'
    print(abs(Sum))
|
from datetime import datetime
from .base import CpObject
class ApplepaySession(CpObject):
    """Data holder for an Apple Pay merchant session."""

    # NOTE(review): field meanings inferred from names; values are presumably
    # populated by CpObject deserialization — confirm against the base class.
    epoch_timestamp: datetime = None
    expires_at: datetime = None
    merchant_session_identifier: str = None
    nonce: str = None
    merchant_identifier: str = None
    domain_name: str = None
    display_name: str = None
    signature: str = None
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Add a fully-connected layer (TensorFlow 1.x graph API).
def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
    """Build a fully-connected layer and register TensorBoard histograms.

    NOTE(review): n_layer must be a string — it is concatenated directly into
    the layer name.
    """
    layer_name = 'layer' + n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
            tf.summary.histogram(layer_name + '/Weights', Weights) # lets TensorBoard show how values evolve
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
            tf.summary.histogram(layer_name + '/biases', biases) # NOTE: tf.histogram_summary became tf.summary.histogram
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b,)
        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs
# Synthetic dataset: y = x^2 - 0.5 plus gaussian noise.
x_data = np.linspace(-1, 1, 600)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
# Placeholders for network inputs and targets.
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_inputs')
    ys = tf.placeholder(tf.float32, [None, 1], name='y_inputs')
# First (hidden) layer.
l1 = add_layer(xs, 1, 10, '1', activation_function=tf.nn.relu)
# Output layer (second layer).
prediction = add_layer(l1, 10, 1, '2', activation_function=None)
# Loss: mean squared error.
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1], name='reduce_sum'), name='reduce_mean')
    tf.summary.scalar('loss', loss) # NOTE: tf.train.scalar_summary became tf.summary.scalar
# Gradient-descent optimizer.
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1, name='gradientDescentOptimizer').minimize(loss, name='minimize')
sess = tf.Session()
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('C:/tf/logs/tf06', sess.graph) # NOTE: tf.train.summary became tf.summary.FileWriter
sess.run(tf.global_variables_initializer())
feed_data = {xs: x_data, ys: y_data}
for i in range(1000):
    sess.run(train_step, feed_dict=feed_data)
    if i % 50 == 0:
        result = sess.run(merged, feed_dict=feed_data)
        writer.add_summary(result, i) # record summaries every 50 steps
        # prediction_value = sess.run(prediction, feed_dict=feed_data)
        # loss_value = sess.run(loss, feed_dict=feed_data)
        # for x in range(0, len(y_data)):
        # print('%0.3f' % y_data[x][0], ' --- %0.3f' % prediction_value[x][0], ' --- %0.3f' % loss_value)
        # print('')
writer.close()
sess.close()
# Run without quotes:
# tensorboard --logdir=C:/tf/logs --debug
# http://127.0.0.1:6006/
from .api.players import * |
"""Template tags provided by projectroles for use in other apps"""
import mistune
from importlib import import_module
from django import template
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.staticfiles import finders
from django.core.exceptions import ObjectDoesNotExist
from django.template.loader import get_template
from django.templatetags.static import static
from django.urls import reverse
import projectroles
from projectroles.app_settings import AppSettingAPI
from projectroles.models import Project, RemoteProject, SODAR_CONSTANTS
from projectroles.plugins import get_backend_api, BackendPluginPoint
from projectroles.utils import get_display_name as _get_display_name
app_settings = AppSettingAPI()
# Site package module, resolved from Django settings; used for version lookup.
site = import_module(settings.SITE_PACKAGE)
User = get_user_model()
# Template tag registry for this module.
register = template.Library()
# SODAR constants
SITE_MODE_SOURCE = SODAR_CONSTANTS['SITE_MODE_SOURCE']
SITE_MODE_TARGET = SODAR_CONSTANTS['SITE_MODE_TARGET']
SITE_MODE_PEER = SODAR_CONSTANTS['SITE_MODE_PEER']
@register.simple_tag
def site_version():
    """Return the site version, or '[UNKNOWN]' when the site package has none"""
    return getattr(site, '__version__', '[UNKNOWN]')
@register.simple_tag
def core_version():
    """Return the SODAR Core version (projectroles.__version__)"""
    return projectroles.__version__
@register.simple_tag
def check_backend(name):
    """Return True if backend app is available, else False"""
    return bool(get_backend_api(name))
@register.simple_tag
def get_project_by_uuid(sodar_uuid):
    """Return Project by sodar_uuid, or None if no such project exists"""
    try:
        return Project.objects.get(sodar_uuid=sodar_uuid)
    except Project.DoesNotExist:
        return None
@register.simple_tag
def get_user_by_username(username):
    """Return User by username, or None if no such user exists"""
    try:
        return User.objects.get(username=username)
    except User.DoesNotExist:
        return None
# Django helpers ---------------------------------------------------------------
@register.simple_tag
def get_django_setting(name, default=None, js=False):
    """
    Return value of Django setting by name or the default value if the setting
    is not found. Return a Javascript-safe value if js=True.
    """
    val = getattr(settings, name, default)
    # Booleans become 0/1 so they can be embedded in Javascript.
    return int(val) if js and isinstance(val, bool) else val
@register.simple_tag
def get_app_setting(app_name, setting_name, project=None, user=None):
    """Get a project/user specific app setting value from AppSettingAPI"""
    return app_settings.get_app_setting(app_name, setting_name, project, user)
@register.simple_tag
def static_file_exists(path):
    """Return True/False based on whether a static file exists"""
    return bool(finders.find(path))
@register.simple_tag
def template_exists(path):
    """Return True/False based on whether a template exists"""
    try:
        get_template(path)
    except template.TemplateDoesNotExist:
        return False
    return True
@register.simple_tag
def get_full_url(request, url):
    """Get full URL (scheme + host) based on a local URL"""
    return '{}://{}{}'.format(request.scheme, request.get_host(), url)
# Template rendering -----------------------------------------------------------
@register.simple_tag
def get_display_name(key, title=False, count=1, plural=False):
    """Return display name for a SODAR_CONSTANTS key"""
    return _get_display_name(key, title, count, plural)
@register.simple_tag
def get_role_display_name(role_as, title=False):
    """Return display name for a role assignment"""
    # Second word of the role name, e.g. "project owner" -> "owner".
    suffix = role_as.role.name.split(' ')[1]
    if title:
        suffix = suffix.title()
    project_part = _get_display_name(role_as.project.type, title=title)
    return '{} {}'.format(project_part, suffix)
@register.simple_tag
def get_project_title_html(project):
    """Return HTML version of the full project title including parents"""
    if not project.get_parents():
        return project.title
    parent_path = ' / '.join(project.full_title.split(' / ')[:-1])
    return parent_path + ' / ' + project.title
@register.simple_tag
def get_project_link(project, full_title=False, request=None):
    """Return link to project with a simple or full title"""
    remote_icon = get_remote_icon(project, request) if request else ''
    url = reverse(
        'projectroles:detail', kwargs={'project': project.sodar_uuid}
    )
    tooltip = project.description if project.description else ''
    title = project.full_title if full_title else project.title
    icon_suffix = ' ' + remote_icon if remote_icon else ''
    return (
        '<a href="{}" title="{}" data-toggle="tooltip" '
        'data-placement="top">{}</a>{}'.format(
            url, tooltip, title, icon_suffix
        )
    )
@register.simple_tag
def get_user_html(user):
    """Return standard HTML representation for a User object"""
    full_name = user.get_full_name()
    return (
        f'<a title="{full_name}" href="mailto:{user.email}" '
        f'data-toggle="tooltip" data-placement="top">{user.username}</a>'
    )
@register.simple_tag
def get_backend_include(backend_name, include_type='js'):
    """
    Return import string for backend app Javascript or CSS. Returns empty string
    if not found.
    """
    # TODO: Replace with get_app_plugin() and if None check
    # TODO: once get_app_plugin() can be used for backend plugins
    # TODO: Don't forget to remove ObjectDoesNotExist import
    try:
        plugin = BackendPluginPoint.get_plugin(backend_name)
    except ObjectDoesNotExist:
        return ''
    # Map include type to the plugin attribute and its HTML wrapper.
    templates = {
        'js': (
            'javascript_url',
            '<script type="text/javascript" src="{}"></script>',
        ),
        'css': (
            'css_url',
            '<link rel="stylesheet" type="text/css" href="{}"/>',
        ),
    }
    if include_type not in templates:
        return ''
    attr_name, include_string = templates[include_type]
    try:
        include = getattr(plugin, attr_name)
    except AttributeError:
        return ''
    if include and finders.find(include):
        return include_string.format(static(include))
    return ''
@register.simple_tag
def get_history_dropdown(obj, project=None):
    """Return link to object timeline events within project"""
    timeline = get_backend_api('timeline_backend')
    if not timeline:
        return ''
    return (
        '<a class="dropdown-item" href="{}">\n'
        '<i class="iconify" data-icon="mdi:clock-time-eight-outline"></i> '
        'History</a>\n'
    ).format(timeline.get_object_url(obj, project))
@register.simple_tag
def highlight_search_term(item, terms):
    """Return string with the search term wrapped in a highlight span"""
    # Skip highlighting for multiple terms (at least for now)
    if isinstance(terms, list) and len(terms) > 1:
        return item
    term = terms[0] if (isinstance(terms, list) and len(terms) == 1) else terms
    if not term:  # If something goes wrong and we end up with no search term
        return item
    needle = term.lower()
    term_len = len(term)
    parts = []
    rest = item
    # Iterative scan (replaces the recursive helper): wrap every
    # case-insensitive occurrence of the term in a highlight span.
    while True:
        pos = rest.lower().find(needle)
        if pos == -1:
            parts.append(rest)
            break
        parts.append(rest[:pos])
        parts.append(
            '<span class="sodar-search-highlight">'
            + rest[pos : pos + term_len]
            + '</span>'
        )
        rest = rest[pos + term_len :]
        if not rest:
            break
    return ''.join(parts)
@register.simple_tag
def get_info_link(content, html=False):
    """Return info popover link icon"""
    html_attr = 'data-html="true"' if html else ''
    return (
        '<a class="sodar-info-link" tabindex="0" data-toggle="popover" '
        'data-trigger="focus" data-placement="top" data-content="{}" {}>'
        '<i class="iconify text-info" data-icon="mdi:information"></i>'
        '</a>'.format(content, html_attr)
    )
@register.simple_tag
def get_remote_icon(project, request):
    """Get remote project icon HTML (superusers only)"""
    if not (project.is_remote() and request.user.is_superuser):
        return ''
    remote_project = RemoteProject.objects.filter(
        project=project, site__mode=SITE_MODE_SOURCE
    ).first()
    if not remote_project:
        return ''
    revoked = project.is_revoked()
    return (
        '<i class="iconify {} mx-1 '
        'sodar-pr-remote-project-icon" data-icon="mdi:cloud" '
        'title="{} project from '
        '{}" data-toggle="tooltip" data-placement="top">'
        '</i>'.format(
            'text-danger' if revoked else 'text-info',
            'REVOKED remote' if revoked else 'Remote',
            remote_project.site.name,
        )
    )
@register.simple_tag
def get_visible_projects(projects, can_view_hidden_projects=False):
    """
    Return all projects that are either visible by user display or by view
    hidden permission.
    """
    if can_view_hidden_projects:
        return list(projects)
    return [p for p in projects if p.site.user_display]
@register.simple_tag
def render_markdown(raw_markdown):
    """Render raw markdown into HTML using mistune"""
    return mistune.markdown(raw_markdown)
@register.filter
def force_wrap(s, length):
    """Force wrapping of string by inserting <wbr /> every *length* chars"""
    # If the string contains spaces or hyphens, leave wrapping to the browser.
    if {' ', '-'}.intersection(s) or len(s) <= length:
        return s
    segments = [s[i : i + length] for i in range(0, len(s), length)]
    return '<wbr />'.join(segments)
# General helpers -------------------------------------------------------------
@register.simple_tag
def get_class(obj, lower=False):
    """Return object class name as string"""
    name = type(obj).__name__
    return name.lower() if lower else name
|
import torch.nn as nn
import torch
from at_learner_core.models.wrappers.losses import get_loss
from at_learner_core.models.wrappers.simple_classifier_wrapper import SimpleClassifierWrapper
from at_learner_core.models.architectures import get_backbone_block
from ..architectures.transformer import TransformerEncoder
from collections import OrderedDict
class DLASWrapper(SimpleClassifierWrapper):
    """Multi-modal classifier: one backbone branch per input modality plus an
    aggregation ('agg') branch that accumulates the element-wise sum of the
    modality features after every backbone stage."""
    def __init__(self, wrapper_config):
        super().__init__(wrapper_config)
    def _init_modules(self, wrapper_config):
        # One backbone block 0..3 per modality; optical-flow stems take a
        # 2-channel input instead of the backbone default.
        self.input_modalities = wrapper_config.input_modalities
        for modal_key in self.input_modalities:
            for idx in range(0, 4):
                if 'optical_flow' in modal_key and idx == 0:
                    backbone, feature_size = get_backbone_block(wrapper_config.backbone, idx, get_feature_size=True,
                                                                in_size=2)
                else:
                    backbone, feature_size = get_backbone_block(wrapper_config.backbone, idx, get_feature_size=True)
                setattr(self, f'{modal_key}_block{idx}', backbone)
        # Aggregation branch: backbone blocks 1..3 plus 1x1-conv adapters.
        feature_sizes = []
        for idx in range(1, 4):
            backbone, feature_size = get_backbone_block(wrapper_config.backbone, idx, get_feature_size=True)
            feature_sizes.append(feature_size)
            setattr(self, f'agg_block{idx}', backbone)
        for idx in range(1, 4):
            planes = feature_sizes[idx-1]
            adaptive_block = nn.Sequential(nn.Conv2d(planes, planes, 1), nn.ReLU(inplace=True))
            setattr(self, f'agg_adaptive_block{idx}', adaptive_block)
        self.backbone_pooling = nn.AdaptiveAvgPool2d((1, 1))
        # feature_size here is the output size of the last (idx == 3) block.
        self.backbone_feature_size = feature_size
        # Three poolings over the (modality, feature) matrix: average, max,
        # and max of the negated features (i.e. -min).
        self.pooling = nn.AdaptiveAvgPool2d((1, feature_size))
        self.pooling2 = nn.AdaptiveMaxPool2d((1, feature_size))
        self.pooling3 = nn.AdaptiveMaxPool2d((1, feature_size))
        self.classifier = nn.Linear(3*feature_size, wrapper_config.nclasses)
    def forward(self, x):
        # x: dict of modality name -> (B, C, W, H) tensor, plus 'target' etc.
        B, C, W, H = x[self.input_modalities[0]].size()
        device = x[self.input_modalities[0]].device
        features_dict = OrderedDict()
        # Stage 0: per-modality stems; 'agg' starts as the sum of all stems.
        for modal_key in self.input_modalities:
            features_dict[modal_key] = getattr(self, f'{modal_key}_block0')(x[modal_key])
        features_agg = features_dict[self.input_modalities[0]]
        for modal_key in self.input_modalities[1:]:
            features_agg = features_agg + features_dict[modal_key]
        features_dict['agg'] = features_agg
        # Stages 1..3: advance every branch, then add the adapted modality sum
        # into the aggregation branch (residual-style accumulation).
        for idx in range(1, 4):
            for modal_key in self.input_modalities + ['agg']:
                features_dict[modal_key] = getattr(self, f'{modal_key}_block{idx}')(features_dict[modal_key])
            features_agg = features_dict[self.input_modalities[0]]
            for modal_key in self.input_modalities[1:]:
                features_agg = features_agg + features_dict[modal_key]
            features_dict['agg'] = features_dict['agg'] + getattr(self, f'agg_adaptive_block{idx}')(features_agg)
        # Global average pool each branch to a feature vector.
        # NOTE(review): .squeeze() drops the batch dim when B == 1 — confirm
        # batches of size 1 never reach this point.
        for modal_key in self.input_modalities + ['agg']:
            features_dict[modal_key] = self.backbone_pooling(features_dict[modal_key]).squeeze()
        # Stack branch vectors into a (B, M, feature_size) matrix.
        M = len(self.input_modalities) + 1
        features = torch.empty((B, M, self.backbone_feature_size)).to(device)
        for idx, key in enumerate(self.input_modalities + ['agg']):
            features[:, idx, :] = features_dict[key]
        features = features.view((B, M, -1))
        """
        results_dict = OrderedDict()
        for modal_key in self.input_modalities + ['agg']:
            results_dict[modal_key] = getattr(self, f'{modal_key}_clf')(features_dict[modal_key])
        """
        # Concatenate avg / max / -min poolings and classify.
        features1 = self.pooling(features)
        features2 = self.pooling2(features)
        features3 = self.pooling3(-features)
        features = torch.cat([features1, features2, features3], axis=2)
        features = features.squeeze()
        output = self.classifier(features)
        sigmoid_output = torch.sigmoid(output)
        if isinstance(self.loss, nn.modules.loss.CrossEntropyLoss):
            x['target'] = x['target'].squeeze()
        output_dict = {'output': sigmoid_output.detach().cpu().numpy(),
                       'target': x['target'].detach().cpu().numpy()}
        # Pass through any extra batch metadata (ids, paths, ...) unchanged.
        for k, v in x.items():
            if k not in ['data', 'target'] + self.input_modalities:
                output_dict[k] = v
        loss = self.loss(output, x['target'])
        return output_dict, loss
|
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c), Gianfranco Ulian and Giovanni Valdre'. #
# All rights reserved. #
# #
# This file is part of the Quantas code. #
# #
# For further information on the license, see the LICENSE file #
##############################################################################
import os
import sys
from setuptools import setup
from distutils.core import Extension
# Cython is required at build time to compile the .pyx extension modules.
try:
    from Cython.Build import cythonize
    from Cython.Distutils import build_ext
except ImportError:
    print('\nError: Cython package not found')
    print('\nCython is required to prooceed with the installation of Quantas')
    print("\nPlease, install Cython via 'pip install cython'")
    print('before installing this package')
    print('\nWill now exit')
    # Exit with a non-zero status so pip/CI register the aborted install as a
    # failure (the original sys.exit(0) reported success).
    sys.exit(1)
# PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
requirements = [
    'cython>=0.29',
    'click>=7.0',
    'numpy>=1.18',
    'scipy>=1.4',
    'pyyaml>=5.3',
    'h5py>=2.10'
]
packages = [
    'quantas',
    'quantas.cmdline',
    'quantas.cmdline.commands',
    'quantas.cmdline.utils',
    'quantas.core',
    'quantas.eosfit',
    'quantas.eosfit.commands',
    'quantas.eosfit.utils',
    'quantas.harmonic',
    'quantas.harmonic.commands',
    'quantas.harmonic.utils',
    'quantas.interfaces',
    'quantas.IO',
    'quantas.qha',
    'quantas.qha.commands',
    'quantas.qha.utils',
    'quantas.soec',
    'quantas.soec.commands',
    'quantas.soec.utils',
    'quantas.utils',
    'quantas.utils.chemistry',
    'quantas.utils.math',
    'quantas.utils.physics',
]
# NOTE(review): `directories` must stay parallel to `packages` — the loop
# below pairs them by index.
cwd = os.path.join(os.path.dirname(__file__),'quantas')
directories = [
    os.path.join(cwd, ''),
    os.path.join(cwd, 'cmdline'),
    os.path.join(cwd, 'cmdline', 'commands'),
    os.path.join(cwd, 'cmdline', 'utils'),
    os.path.join(cwd, 'core'),
    os.path.join(cwd, 'eosfit'),
    os.path.join(cwd, 'eosfit', 'commands'),
    os.path.join(cwd, 'eosfit', 'utils'),
    os.path.join(cwd, 'harmonic'),
    os.path.join(cwd, 'harmonic', 'commands'),
    os.path.join(cwd, 'harmonic', 'utils'),
    os.path.join(cwd, 'interfaces'),
    os.path.join(cwd, 'IO'),
    os.path.join(cwd, 'qha'),
    os.path.join(cwd, 'qha', 'commands'),
    os.path.join(cwd, 'qha', 'utils'),
    os.path.join(cwd, 'soec'),
    os.path.join(cwd, 'soec', 'commands'),
    os.path.join(cwd, 'soec', 'utils'),
    os.path.join(cwd, 'utils'),
    os.path.join(cwd, 'utils', 'chemistry'),
    os.path.join(cwd, 'utils', 'math'),
    os.path.join(cwd, 'utils', 'physics'),
]
# Build the package -> directory mapping for setup(package_dir=...).
dirs = {}
for i in range(len(packages)):
    dirs[packages[i]] = directories[i]
# Compiler flags for 'cythonized' modules
if sys.platform == 'win32':
    extra_compile_args = ['/openmp']
    extra_link_args = []
elif sys.platform == 'linux':
    extra_compile_args = ['-fopenmp']
    extra_link_args = ['-fopenmp']
else:
    # Other platforms (e.g. macOS): build without OpenMP.
    extra_compile_args = []
    extra_link_args = []
setup(name='quantas',
      version='0.9.0',
      description='QUANtistic Thermomechanical Analysis of Solids',
      long_description=long_description,
      classifiers=[
          'Development Status :: 4 - Beta',
          'License :: OSI Approved :: BSD License',
          'Programming Language :: Python :: 3 :: Only',
          'Programming Language :: Python :: 3.6',
          'Operating System :: OS Independent',
          'Environment :: Console',
          'Topic :: Scientific/Engineering :: Chemistry',
          'Topic :: Scientific/Engineering :: Physics',
          'Intended Audience :: Science/Research'
      ],
      url='https://github.com/gfulian/quantas',
      author='Gianfranco Ulian',
      author_email='gianfranco.ulian2@unibo.it',
      license='MIT',
      package_dir=dirs,
      packages=packages,
      entry_points={
          'console_scripts': [
              'quantas = quantas.cmdline.commands.cmd_quantas:cli'
          ]
      },
      ext_modules=cythonize([
          Extension('quantas.utils.physics.statistical_mechanics',
                    ['quantas/utils/physics/statistical_mechanics.pyx'],
                    extra_compile_args=extra_compile_args,
                    extra_link_args=extra_link_args,
                    ),
          Extension('quantas.utils.physics.thermodynamics',
                    ['quantas/utils/physics/thermodynamics.pyx'],
                    extra_compile_args=extra_compile_args,
                    extra_link_args=extra_link_args,
                    ),
          Extension('quantas.utils.math.fast_math',
                    ['quantas/utils/math/fast_math.pyx'],
                    extra_compile_args=extra_compile_args,
                    extra_link_args=extra_link_args,
                    ),
      ]),
      python_requires='>=3.5',
      install_requires=requirements,
      include_package_data=True,
      zip_safe=False)
|
# NOTE(review): disabled factorial exercise kept as a bare string literal
# (effectively dead code). Consider deleting it.
'''def fatorial(num = 1):
    f = 1
    for c in range(num, 0, -1):
        f *= c
    return f
n = int(input('Digite um número:'))
print(f'O fatoria de {n} é {fatorial(n)}')
f1 = fatorial(4)
f2 = fatorial(5)
f3 = fatorial()
print(f'Os resultados são {f1}, {f2} e {f3}')'''
def parOuImpar(n=0):
    """Return True when *n* is even ("par"), False when it is odd ("ímpar")."""
    # Direct boolean expression replaces the if/else returning True/False.
    return n % 2 == 0
# Ask the user for a number and report whether it is even (par) or odd.
num = int(input('Digite um número'))
if parOuImpar(num):
    print('É par')
else:
    print('Não é par')
"""
This script provides an example to wrap UER-py for feature extraction.
"""
import sys
import os
import torch
import torch.nn as nn
import argparse
import numpy as np
uer_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(uer_dir)
from uer.layers import *
from uer.encoders import *
from uer.targets import *
from uer.utils.constants import *
from uer.utils import *
from uer.utils.config import load_hyperparam
from uer.model_loader import load_model
from uer.opts import infer_opts, tokenizer_opts
def batch_loader(batch_size, src, seg):
    """Yield (src, seg) mini-batches of batch_size rows each; a final
    partial batch is yielded when the row count is not divisible."""
    total = src.size(0)
    full_rows = (total // batch_size) * batch_size
    for start in range(0, full_rows, batch_size):
        yield src[start : start + batch_size], seg[start : start + batch_size]
    if total > full_rows:
        yield src[full_rows:], seg[full_rows:]
def read_dataset(args, path):
    """
    Read a text file line by line and convert each non-empty line into a
    fixed-length (token_ids, segment_ids) pair of length args.seq_length.

    A CLS token is prepended; sequences are truncated or padded as needed.
    """
    dataset = []
    PAD_ID = args.tokenizer.vocab.get(PAD_TOKEN)
    with open(path, mode="r", encoding="utf-8") as f:
        for line in f:
            src = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(line))
            if len(src) == 0:
                continue
            src = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN]) + src
            seg = [1] * len(src)
            if len(src) > args.seq_length:
                src = src[:args.seq_length]
                seg = seg[:args.seq_length]
            while len(src) < args.seq_length:
                src.append(PAD_ID)
                # Fix: pad the segment vector with 0, not PAD_ID.
                # FeatureExtractor.forward masks hidden states by multiplying
                # with seg, so padded positions must carry 0; a non-zero PAD
                # token id would leave padding unmasked.
                seg.append(0)
            dataset.append((src, seg))
    return dataset
class FeatureExtractor(torch.nn.Module):
    """Embedding + encoder stack that pools per-token hidden states into one
    feature vector per sequence, using the strategy in args.pooling
    ("mean", "max", "last", or first/CLS token by default)."""
    def __init__(self, args):
        super(FeatureExtractor, self).__init__()
        self.embedding = str2embedding[args.embedding](args, len(args.tokenizer.vocab))
        self.encoder = str2encoder[args.encoder](args)
        self.pooling = args.pooling
    def forward(self, src, seg):
        emb = self.embedding(src, seg)
        output = self.encoder(emb, seg)
        # Zero out hidden states wherever seg == 0 (padded positions).
        seg = torch.unsqueeze(seg, dim=-1).type(torch.float)
        output = output * seg
        if self.pooling == "mean":
            # Average over real (seg == 1) positions only.
            output = torch.sum(output, dim=1)
            output = torch.div(output, torch.sum(seg, dim=1))
        elif self.pooling == "max":
            # Push padded positions far negative so they never win the max.
            output = torch.max(output + (seg - 1) * sys.maxsize, dim=1)[0]
        elif self.pooling == "last":
            # Hidden state of the last real token in each sequence.
            output = output[torch.arange(output.shape[0]), torch.squeeze(torch.sum(seg, dim=1).type(torch.int64) - 1), :]
        else:
            # Default: first-token (CLS) hidden state.
            output = output[:, 0, :]
        return output
class WhiteningHandle(torch.nn.Module):
    """
    Whitening operation (BERT-whitening): maps sentence vectors through a
    kernel/bias computed from their covariance so the outputs are
    decorrelated, optionally truncated and L2-normalized.
    @ref: https://github.com/bojone/BERT-whitening/blob/main/demo.py
    """
    def __init__(self, args, vecs):
        super(WhiteningHandle, self).__init__()
        # `args` is kept for interface compatibility; only `vecs` is used.
        self.kernel, self.bias = self._compute_kernel_bias(vecs)

    def forward(self, vecs, n_components=None, normal=True, pt=True):
        """Whiten *vecs*.

        :param vecs: Sequence of vectors (lists, numpy arrays or tensors).
        :param n_components: Optional output dimensionality (truncates kernel).
        :param normal: L2-normalize each output row when True.
        :param pt: Return a torch.Tensor when True, else a numpy array.
        """
        vecs = self._format_vecs_to_np(vecs)
        vecs = self._transform(vecs, n_components)
        vecs = self._normalize(vecs) if normal else vecs
        vecs = torch.tensor(vecs) if pt else vecs
        return vecs

    def _compute_kernel_bias(self, vecs):
        # W = U * diag(1/sqrt(S)) from the SVD of the covariance matrix;
        # the bias is the negative mean so (vecs + bias) is centered.
        vecs = self._format_vecs_to_np(vecs)
        mu = vecs.mean(axis=0, keepdims=True)
        cov = np.cov(vecs.T)
        u, s, vh = np.linalg.svd(cov)
        W = np.dot(u, np.diag(1 / np.sqrt(s)))
        return W, -mu

    def _transform(self, vecs, n_components):
        # Truncating the kernel columns yields a lower-dimensional output.
        w = self.kernel[:, :n_components] \
            if isinstance(n_components, int) else self.kernel
        return (vecs + self.bias).dot(w)

    def _normalize(self, vecs):
        # Row-wise L2 normalization.
        return vecs / (vecs**2).sum(axis=1, keepdims=True)**0.5

    def _format_vecs_to_np(self, vecs):
        """Convert a sequence of list/tensor/ndarray vectors to a 2-D array."""
        vecs_np = []
        for vec in vecs:
            if isinstance(vec, list):
                vec = np.array(vec)
            elif torch.is_tensor(vec):
                # .cpu() added so CUDA tensors can be converted too;
                # .numpy() raises on a GPU-resident tensor.
                vec = vec.detach().cpu().numpy()
            elif not isinstance(vec, np.ndarray):
                # (the redundant "ndarray -> itself" branch was dropped)
                raise Exception('Unknown vec type.')
            vecs_np.append(vec)
        return np.array(vecs_np)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Shared UER inference options (model path, seq_length, batch_size, ...).
    infer_opts(parser)
    parser.add_argument("--pooling", choices=["first", "last", "max", "mean"], \
                        default="first", help="Pooling Type.")
    parser.add_argument("--whitening_size", type=int, default=None, help="Output vector size after whitening.")
    tokenizer_opts(parser)
    args = parser.parse_args()
    # Merge CLI args with the hyperparameters from the config file.
    args = load_hyperparam(args)
    args.tokenizer = str2tokenizer[args.tokenizer](args)
    # Build feature extractor model.
    model = FeatureExtractor(args)
    model = load_model(model, args.load_model_path)
    # For simplicity, we use DataParallel wrapper to use multiple GPUs.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    if torch.cuda.device_count() > 1:
        print("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = nn.DataParallel(model)
    model.eval()
    dataset = read_dataset(args, args.test_path)
    src = torch.LongTensor([sample[0] for sample in dataset])
    seg = torch.LongTensor([sample[1] for sample in dataset])
    feature_vectors = []
    for i, (src_batch, seg_batch) in enumerate(batch_loader(args.batch_size, src, seg)):
        src_batch = src_batch.to(device)
        seg_batch = seg_batch.to(device)
        output = model(src_batch, seg_batch)
        # Move each batch back to CPU so GPU memory is freed between batches.
        feature_vectors.append(output.cpu().detach())
    feature_vectors = torch.cat(feature_vectors, 0)
    # Vector whitening.
    if args.whitening_size is not None:
        whitening = WhiteningHandle(args, feature_vectors)
        feature_vectors = whitening(feature_vectors, args.whitening_size, pt=True)
    print("The size of feature vectors (sentences_num * vector size): {}".format(feature_vectors.shape))
    torch.save(feature_vectors, args.prediction_path)
|
from w3lib.http import basic_auth_header
from scrapy.utils.project import get_project_settings
class ProxyMiddleware(object):
    """Scrapy downloader middleware that routes every request through the
    proxy configured in the project settings (PROXY_HOST / PROXY_PORT /
    PROXY_USER / PROXY_PASSWORD).
    """

    def __init__(self):
        # Read the (static) project settings once at middleware construction
        # instead of re-fetching them on every single request.
        settings = get_project_settings()
        self._proxy = settings.get('PROXY_HOST') + ':' + settings.get('PROXY_PORT')
        self._auth = basic_auth_header(settings.get('PROXY_USER'), settings.get('PROXY_PASSWORD'))

    def process_request(self, request, spider):
        """Attach the proxy URL and Proxy-Authorization header to *request*."""
        request.meta['proxy'] = self._proxy
        request.headers["Proxy-Authorization"] = self._auth
        spider.log('Proxy : %s' % request.meta['proxy'])
|
from __future__ import print_function
import subprocess
import logging
from ..address_translator import AT
l = logging.getLogger('cle.backends.symbol')
class Symbol:
    """
    Representation of a symbol from a binary file. Smart enough to rebase itself.
    There should never be more than one Symbol instance representing a single symbol. To make sure of this, only use
    the :meth:`cle.backends.Backend.get_symbol()` to create new symbols.
    :ivar owner: The object that contains this symbol
    :vartype owner: cle.backends.Backend
    :ivar str name: The name of this symbol
    :ivar int addr: The un-based address of this symbol, an RVA
    :ivar int size: The size of this symbol
    :ivar int type: The type of this symbol as one of SYMBOL.TYPE_*
    :ivar bool resolved: Whether this import symbol has been resolved to a real symbol
    :ivar resolvedby: The real symbol this import symbol has been resolved to
    :vartype resolvedby: None or cle.backends.Symbol
    :ivar str resolvewith: The name of the library we must use to resolve this symbol, or None if none is required.
    """
    # enum for symbol types
    TYPE_OTHER = 0
    TYPE_NONE = 1
    TYPE_FUNCTION = 2
    TYPE_OBJECT = 3
    TYPE_SECTION = 4
    def __init__(self, owner, name, relative_addr, size, sym_type):
        """
        Not documenting this since if you try calling it, you're wrong.
        """
        self.owner = owner
        self.name = name
        self.relative_addr = relative_addr
        self.size = size
        self.type = sym_type
        # Import symbols start unresolved; resolve() fills these two in.
        self.resolved = False
        self.resolvedby = None
        # would be nice if we could populate demangled_names here...
        #demangled = self.demangled_name
        #if demangled is not None:
        #    self.owner.demangled_names[self.name] = demangled
    def __repr__(self):
        if self.is_import:
            return '<Symbol "%s" in %s (import)>' % (self.name, self.owner.provides)
        else:
            return '<Symbol "%s" in %s at %#x>' % (self.name, self.owner.provides, self.rebased_addr)
    def resolve(self, obj):
        # Mark this import symbol as resolved by *obj* and record it on the
        # owner's list of resolved imports.
        self.resolved = True
        self.resolvedby = obj
        self.owner.resolved_imports.append(self)
    @property
    def rebased_addr(self):
        """
        The address of this symbol in the global memory space
        """
        return AT.from_rva(self.relative_addr, self.owner).to_mva()
    @property
    def linked_addr(self):
        # The symbol's address in the binary's linked address space (LVA).
        return AT.from_rva(self.relative_addr, self.owner).to_lva()
    @property
    def is_function(self):
        """
        Whether this symbol is a function
        """
        return self.type == Symbol.TYPE_FUNCTION
    # These may be overridden in subclasses
    is_static = False
    is_common = False
    is_import = False
    is_export = False
    is_local = False
    is_weak = False
    is_extern = False
    is_forward = False
    @property
    def demangled_name(self):
        """
        The name of this symbol, run through a C++ demangler
        Warning: this calls out to the external program `c++filt` and will fail loudly if it's not installed
        """
        # make sure it's mangled (Itanium-mangled C++ names start with _Z)
        if self.name.startswith("_Z"):
            name = self.name
            # strip a symbol-versioning suffix (name@@VERSION) before demangling
            if '@@' in self.name:
                name = self.name.split("@@")[0]
            args = ['c++filt']
            args.append(name)
            pipe = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            stdout, _ = pipe.communicate()
            # take the first output line (c++filt echoes one result per input)
            demangled = stdout.decode().split("\n")
            if demangled:
                return demangled[0]
        return self.name
    def resolve_forwarder(self):
        """
        If this symbol is a forwarding export, return the symbol the forwarding refers to, or None if it cannot be found.
        """
        return self
    # compatibility layer
    _complained_owner = False
    @property
    def owner_obj(self):
        # Deprecated accessor kept for backward compatibility; warn only once
        # per process via the class-level flag.
        if not Symbol._complained_owner:
            Symbol._complained_owner = True
            l.critical("Deprecation warning: use symbol.owner instead of symbol.owner_obj")
        return self.owner
|
import polars as pl
class GroupClassification:
    """Group a soil profile DataFrame into layers.

    Equal consecutive soil types are merged, layers thinner than
    ``min_thickness`` are dropped (with the remaining bounds re-adjusted),
    and the result is re-grouped once more. The grouped result is exposed
    as ``self.df_group``.
    """
    def __init__(self, zid, df, min_thickness):
        # zid: elevation of the field with respect to NAP; used by the
        # calculate_*_NAP helpers to convert depths to NAP coordinates.
        self.zid = zid
        start_depth = df["depth"][0]
        df_group = df[:, ["depth", "soil_type"]]
        self.df_group = (
            df_group.pipe(self.group_equal_layers, "soil_type", "depth", start_depth)
            .pipe(group_significant_layers, min_thickness, start_depth)
            .pipe(self.group_equal_layers, "layer", "zf", start_depth)
        )
    def group_equal_layers(self, df_group, column1, column2, start_depth):
        """
        Group equal layers by checking the difference between the original column of soil type and the shifted one and
        storing that in a boolean array, then it does a cumulative sum of the boolean array and it groups the
        result of the cumulative sum.
        :param df_group: Original DataFrame to group.
        :param column1: Column to group, it can be soil_type or layer.
        :param column2: Depth or zf (final z).
        :param start_depth: First value of depth.
        :return: Grouped dataframe.
        """
        # NOTE(review): the grouping described in the docstring is currently
        # commented out below; as written this method only builds the
        # layer/z_in/zf frame and derives the thickness/centre/NAP columns.
        # Confirm whether the grouping should be re-enabled.
        # df_group = (
        #     df_group.groupby((df_group[column1] != df_group[column1].shift(periods=1)).cumsum())
        #     .max()
        #     .reset_index(drop=True)
        # )
        # df_group = (
        #     df_group.groupby(column1)
        #     .agg(pl.last("*").exclude(column1).keep_name())
        #     .sort(column2)
        # )
        df_group = pl.DataFrame(
            {
                "layer": df_group[column1],
                # z_in is the previous row's depth; the first row is null.
                "z_in": df_group[column2].shift(periods=1),
                # TODO: .fillna(start_depth),
                "zf": df_group[column2],
            }
        )
        return (
            df_group.pipe(calculate_thickness)
            .pipe(calculate_z_centr)
            .pipe(calculate_z_in_NAP, self.zid)
            .pipe(calculate_zf_NAP, self.zid)
            .pipe(calculate_z_centr_NAP, self.zid)
        )
def group_significant_layers(df_group, min_thickness, start_depth):
    """
    Drop the layers with thickness < min_thickness and adjust the limits of the others.
    :param df_group: Original DataFrame.
    :param min_thickness: Minimum thickness.
    :param start_depth: First value of depth.
    :return: DataFrame without the dropped layers.
    """
    df_group = df_group[:, ["zf", "layer", "thickness"]]
    # Get the last zf value
    # NOTE(review): `depth` is captured here but the TODO below that would
    # write it back into the last row is disabled, so the profile's final
    # depth may be lost when the last layer is filtered out — confirm.
    depth = df_group["zf"].tail(length=1)[0]
    df_group = df_group.filter(pl.col("thickness") >= min_thickness)
    # Create a new column z_in by shifting zf and filling the empty first spot
    # with the starting depth
    df_group = df_group.with_column(
        pl.col("zf").shift(periods=1).fill_null(start_depth).alias("z_in")
    )
    # TODO: df_group[-1, df_group.columns.get_loc("zf")] = depth
    return df_group.pipe(calculate_thickness).pipe(calculate_z_centr)
def calculate_thickness(df):
    """Add a ``thickness`` column (``zf - z_in``) to *df* and return it.

    :param df: DataFrame with ``zf`` and ``z_in`` columns.
    :return: The same DataFrame, now carrying a ``thickness`` column.
    """
    layer_thickness = df["zf"] - df["z_in"]
    df["thickness"] = layer_thickness
    return df
def calculate_z_centr(df):
    """Add a ``z_centr`` column (midpoint of ``z_in`` and ``zf``) to *df*.

    :param df: DataFrame with ``zf`` and ``z_in`` columns.
    :return: The same DataFrame, now carrying a ``z_centr`` column.
    """
    midpoint = (df["zf"] + df["z_in"]) / 2
    df["z_centr"] = midpoint
    return df
def calculate_zf_NAP(df, z_id):
    """Add a ``zf_NAP`` column (``z_id - zf``) to *df*.

    :param df: DataFrame with a ``zf`` column.
    :param z_id: Elevation of the field with respect to NAP.
    :return: The same DataFrame, now carrying a ``zf_NAP`` column.
    """
    nap_values = z_id - df["zf"]
    df["zf_NAP"] = nap_values
    return df
def calculate_z_in_NAP(df, z_id):
    """Add a ``z_in_NAP`` column (``z_id - z_in``) to *df*.

    :param df: DataFrame with a ``z_in`` column.
    :param z_id: Elevation of the field with respect to NAP.
    :return: The same DataFrame, now carrying a ``z_in_NAP`` column.
    """
    nap_values = z_id - df["z_in"]
    df["z_in_NAP"] = nap_values
    return df
def calculate_z_centr_NAP(df, z_id):
    """Add a ``z_centr_NAP`` column (``z_id - z_centr``) to *df*.

    :param df: DataFrame with a ``z_centr`` column.
    :param z_id: Elevation of the field with respect to NAP.
    :return: The same DataFrame, now carrying a ``z_centr_NAP`` column.
    """
    nap_values = z_id - df["z_centr"]
    df["z_centr_NAP"] = nap_values
    return df
|
# Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Part of Lask, the Web Lab Task management system.
Support classes used by the various Lask server interfaces.
"""
import webapp2
import inspect
try: import simplejson as json
except ImportError: import json
class LaskRPC():
    """A simple RPC wrapper for Lask
    """
    @classmethod
    def request(cls, target, m, arguments, valid_methods, JSONin=False, m_name_for_messages=None, files=None, target_name_for_messages=None):
        """Attempts to call a method with the name provided in m on the object target,
        using the arguments dict to supply arguments. Only methods matching the names
        in the list valid_methods are callable.
        :target:
            The object or class on which to call a method.
        :m:
            The name of the method to call, as a string
        :arguments:
            A dictionary of name-value pairs for the method's arguments
        :valid_methods:
            A list of method names we're allowed to call
        :JSONin:
            Optional. Whether or not to run each argument value through json.loads() before
            passing it to the method.
            IMPORTANT: the request() method will NEVER JSON-encode the results of the method call.
            It is up to the application to properly encode the results.
        :m_name_for_messages:
            Optional. A name to use in place of the actual method name in messages (e.g. exceptions)
        * Raises an InvalidRPCTargetError if the target object is None
        * Raises an InvalidRPCMethodError if m is None, m is not in valid_methods,
          valid_methods is not a list, target has no attribute named m, or that
          attribute is not callable
        * Raises an InvalidRPCArgumentError if the method requires an argument which
          is not present in arguments (arguments present in the request which are not
          needed by the method are simply ignored)
        * Raises an RPCArgumentJSONDecodeError if JSONin==True and one or more of the argument values is an invalid JSON string
        """
        if m_name_for_messages is None:
            m_name_for_messages = m
        if target_name_for_messages is None:
            target_name_for_messages = ''
        else:
            target_name_for_messages = ' of %s' % (target_name_for_messages)
        if target is None:
            raise InvalidRPCTargetError('No target object specified')
        if m is None:
            raise InvalidRPCMethodError('No method specified')
        if arguments is None:
            arguments = {}
        if not isinstance(valid_methods, list):
            raise InvalidRPCMethodError('No valid methods specified')
        # membership test replaces the old any(...) == False scan
        if m not in valid_methods:
            raise InvalidRPCMethodError('Attempt to call an invalid method, called \'%s\'%s' % (m_name_for_messages, target_name_for_messages))
        # make sure that m is being used like a string
        m = str(m)
        # check target has the specified method
        try:
            m_attr = getattr(target, m)
        except AttributeError:
            raise InvalidRPCMethodError('Specified method does not exist')
        # and that the method is callable (as opposed to a property)
        if not callable(m_attr):
            raise InvalidRPCMethodError('Specified method is not callable')
        # check arguments to make sure everything required by m_attr is present.
        # getfullargspec replaces inspect.getargspec, which was deprecated and
        # removed in Python 3.11; the fields used here are name-compatible.
        arginfo = inspect.getfullargspec(m_attr)
        pargs = list()
        if arginfo.args is not None:
            # go through each argument to the function and make sure that the
            # caller has supplied a value for it, or that the method has a
            # default for it; collect the values (in order) in pargs.
            # Named defaults must come after positional arguments in a Python
            # signature, so the defaults cover the last len(defaults) args.
            defaults_start_at = len(arginfo.args)
            if arginfo.defaults is not None:
                defaults_start_at -= len(arginfo.defaults)
            for i, arg in enumerate(arginfo.args):
                if i > 0:  # skip the first arg (cls, self, etc.)
                    if arg in arguments:
                        if not JSONin or hasattr(arguments[arg], 'file'):
                            # append the argument as-is because either JSONin is False, or the parameter is a file
                            pargs.append(arguments[arg])
                        else:
                            # decode the JSON value and append that
                            try:
                                pargs.append(json.loads(arguments[arg]))
                            except TypeError:
                                raise RPCArgumentJSONDecodeError('The \'%s\' argument to the \'%s\' method%s was expected to be a value encoded as a JSON string, but instead it was a %s' % (arg, m_name_for_messages, target_name_for_messages, type(arguments[arg])))
                            except json.JSONDecodeError as jde:
                                raise RPCArgumentJSONDecodeError('The \'%s\' argument to the \'%s\' method%s was expected to be a value encoded as a JSON string, but it could not be decoded: %s' % (arg, m_name_for_messages, target_name_for_messages, jde))
                    else:
                        # couldn't find this argument in the supplied list
                        # so check if the method has a default for it
                        if arginfo.defaults is None or i < defaults_start_at:
                            # missing required argument with no default
                            raise InvalidRPCArgumentError('The \'%s\' argument was missing from the request, but it is required by the \'%s\' method%s' % (arg, m_name_for_messages, target_name_for_messages))
                        else:
                            pargs.append(arginfo.defaults[i - defaults_start_at])
        # execute and return
        return m_attr(*pargs)
class InvalidRPCMethodError(Exception):
    """Someone tried to call an invalid RPC method
    """
    # HTTP status the server should answer with for this error class.
    HTTP_status_code = 400
class InvalidRPCTargetError(Exception):
    """Tried to call an RPC method on an invalid (not allowed) object
    """
    # HTTP status the server should answer with for this error class.
    HTTP_status_code = 400
class InvalidRPCArgumentError(Exception):
    """Tried to make an RPC call with invalid argument(s) or no specified arguments
    """
    # HTTP status the server should answer with for this error class.
    HTTP_status_code = 400
class RPCArgumentJSONDecodeError(Exception):
    """Tried to JSON decode an argument which was not JSON encoded
    """
    # HTTP status the server should answer with for this error class.
    HTTP_status_code = 400
class Console():
    """A console for working live with LaskRPC
    """
    _target = None
    _m = None
    _m_attr = None
    _arginfo = None
    def __init__(self, target, m):
        """Bind the method named *m* on *target*, validating that it exists
        and is callable.

        * Raises an InvalidRPCMethodError if the method is missing or not callable.
        """
        self._target = target
        # make sure that m is being used like a string
        self._m = str(m)
        # check target has the specified method
        try:
            self._m_attr = getattr(self._target, self._m)
        except AttributeError:
            raise InvalidRPCMethodError('The "%s" method does not exist for %s' % (self._m, self._target))
        # and that the method is callable (as opposed to a property)
        if not callable(self._m_attr):
            raise InvalidRPCMethodError('The "%s" method is not callable for %s' % (self._m, self._target))
        # get info about the arguments to the method.
        # getfullargspec replaces inspect.getargspec, which was removed in
        # Python 3.11.
        self._arginfo = inspect.getfullargspec(self._m_attr)
    def get_html_docs(self):
        """Returns the docstring for the method being used by this Console
        """
        return inspect.getdoc(self._m_attr)
    def get_method_args(self):
        """Return the argument names of the bound method."""
        return self._arginfo.args
    # Backward-compatible alias for the original (misspelled) public name.
    get_mehod_args = get_method_args
|
## Train a D2v Model on 100% of data
import pandas as pd
import sanalytics.estimators.d2vestimator as sed
import logging
logging.basicConfig(level=logging.DEBUG)
## Read all data
# Concatenate the CV train and test splits so the final model is trained
# on all available data.
X = pd.read_parquet("datasets/model_selection_CV/train.parquet")
x = pd.read_parquet("datasets/model_selection_CV/test.parquet")
X = pd.concat([X,x], sort=False)
## Train Doc2Vec for each split
# Fit the project's Doc2Vec estimator wrapper and persist the trained model
# it holds. NOTE(review): assumes D2VEstimator exposes the fitted model via
# `.model` with a `.save()` method — confirm against the estimator class.
d2v = sed.D2VEstimator().fit(X)
model = d2v.model
model.save("datasets/kfold_d2v/d2v_all.model")
# -*- coding: utf-8 -*-
# Copyright 2020 The HuggingFace Inc. team and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Auto Model modules."""
import logging
import warnings
import os
import copy
from collections import OrderedDict
from tensorflow_tts.configs import (
FastSpeechConfig,
FastSpeech2Config,
MelGANGeneratorConfig,
MultiBandMelGANGeneratorConfig,
HifiGANGeneratorConfig,
Tacotron2Config,
ParallelWaveGANGeneratorConfig,
)
from tensorflow_tts.models import (
TFMelGANGenerator,
TFMBMelGANGenerator,
TFHifiGANGenerator,
TFParallelWaveGANGenerator,
)
from tensorflow_tts.inference.savable_models import (
SavableTFFastSpeech,
SavableTFFastSpeech2,
SavableTFTacotron2
)
from tensorflow_tts.utils import CACHE_DIRECTORY, MODEL_FILE_NAME, LIBRARY_NAME
from tensorflow_tts import __version__ as VERSION
from huggingface_hub import hf_hub_url, cached_download
TF_MODEL_MAPPING = OrderedDict(
[
(FastSpeech2Config, SavableTFFastSpeech2),
(FastSpeechConfig, SavableTFFastSpeech),
(MultiBandMelGANGeneratorConfig, TFMBMelGANGenerator),
(MelGANGeneratorConfig, TFMelGANGenerator),
(Tacotron2Config, SavableTFTacotron2),
(HifiGANGeneratorConfig, TFHifiGANGenerator),
(ParallelWaveGANGeneratorConfig, TFParallelWaveGANGenerator),
]
)
class TFAutoModel(object):
    """General model class for inferencing: resolves a config object to its
    matching TF model class (via TF_MODEL_MAPPING) and loads pretrained
    weights from a local .h5 file or the Hugging Face hub."""
    def __init__(self):
        raise EnvironmentError("Cannot be instantiated using `__init__()`")
    @classmethod
    def from_pretrained(cls, pretrained_path=None, config=None, **kwargs):
        """Build a model for *config* and load weights from *pretrained_path*.

        :param pretrained_path: Local .h5 file, or a hub repo id to download.
        :param config: Model config; required for local files, auto-loaded
            from the hub otherwise.
        :param kwargs: Extra keyword arguments forwarded to the model class.
        :return: The built model with weights loaded.
        :raises ValueError: When no mapping matches the config class.
        """
        # load weights from hf hub
        if pretrained_path is not None:
            if not os.path.isfile(pretrained_path):
                # retrieve correct hub url
                download_url = hf_hub_url(repo_id=pretrained_path, filename=MODEL_FILE_NAME)
                downloaded_file = str(
                    cached_download(
                        url=download_url,
                        library_name=LIBRARY_NAME,
                        library_version=VERSION,
                        cache_dir=CACHE_DIRECTORY,
                    )
                )
                # load config from repo as well
                if config is None:
                    from tensorflow_tts.inference import AutoConfig
                    config = AutoConfig.from_pretrained(pretrained_path)
                pretrained_path = downloaded_file
        assert config is not None, "Please make sure to pass a config along to load a model from a local file"
        for config_class, model_class in TF_MODEL_MAPPING.items():
            # Both checks are needed: subclass configs would satisfy
            # isinstance() for their base class, so the class name must
            # also appear in the config's repr.
            if isinstance(config, config_class) and str(config_class.__name__) in str(
                config
            ):
                model = model_class(config=config, **kwargs)
                model.set_config(config)
                model._build()
                if pretrained_path is not None and ".h5" in pretrained_path:
                    try:
                        model.load_weights(pretrained_path)
                    except Exception:
                        # Narrowed from a bare `except:` (which also caught
                        # KeyboardInterrupt/SystemExit). Fall back to loading
                        # by layer name, skipping mismatched shapes.
                        model.load_weights(
                            pretrained_path, by_name=True, skip_mismatch=True
                        )
                return model
        raise ValueError(
            "Unrecognized configuration class {} for this kind of TFAutoModel: {}.\n"
            "Model type should be one of {}.".format(
                config.__class__,
                cls.__name__,
                ", ".join(c.__name__ for c in TF_MODEL_MAPPING.keys()),
            )
        )
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from bs4 import BeautifulSoup
import numpy as np
import requests
# Sum the piece counts of the LEGO sets listed (one set number per line)
# by scraping each set's Brickset page.
setList = np.genfromtxt(r'20190926.txt')
session = requests.Session()
pieces = 0
unfound = 0
sets = len(setList)
for i, st in enumerate(setList):
    # Brickset set pages live at /sets/<number>-1.
    resp = session.get(f'https://brickset.com/sets/{int(st)}-1')
    if resp.status_code == 200:
        # Name the parser explicitly: letting BeautifulSoup pick one is
        # environment-dependent and emits a warning.
        soup = BeautifulSoup(resp.text, 'html.parser')
        # Find the index of the 'Pieces' <dt>; its value lives in the
        # <dd> at the same position.
        idx = 0
        for elem in soup.find_all('dt'):
            if elem.text == 'Pieces':
                break
            idx += 1
        pieces += int(soup.find_all('dd')[idx].text)
        print(f'Processing {i+1} of {sets}', end="\r", flush=True)
    else:
        unfound += 1
print(f'Total number of pieces owned {pieces}.')
if unfound > 0:
    print(f"I couldn't find {unfound} sets on Brickset")
|
'''
https://www.kdnuggets.com/2019/08/numpy-neural-networks-computational-graphs.html
neuron
z = w1⋅x1 + w2⋅x2 + b,
σ(z) = 1 / (1 + e^-z)
∂L / ∂ŷ = ∂(1/2(ŷ - y)^2) / ∂ŷ = -(y - ŷ)
dŷ / dz = ŷ⋅(1 - ŷ) => ∂L / ∂z = (∂L / ∂ŷ)⋅(∂ŷ / ∂z)
∂L / ∂w1 = (∂L / ∂z)⋅(∂z / ∂w1) = (∂L / ∂z)⋅x1
∂L / ∂w2 = (∂L / ∂z)⋅(∂z / ∂w2) = (∂L / ∂z)⋅x2
∂L / ∂b = 1
Ci = 1/2m Σ (yi - ŷi)^2, where i is i-th training example, Y = [y1, y2, ...]
C(Y, Ŷ) = 1/2m Σ (Y - Ŷ)⋅^2
= 1/2m [(y1 - ŷ1)^2, (y2 - ŷ2)^2, ...]
∂C/∂Ŷ = [∂C/ŷ1, ∂C/ŷ2, ...]
= - 1/m [y1 - ŷ1, y2 - ŷ2, ...]
= - 1/m (Y - Ŷ)
'''
import matplotlib.pyplot as plt
import numpy as np
from math import sin, pi
fig, ax = plt.subplots()  # Create a figure containing a single axes.
# FigureCanvas.set_window_title was deprecated in Matplotlib 3.4 and removed
# in 3.6; the window title now lives on the canvas manager.
fig.canvas.manager.set_window_title("plt.style.use('ggplot')")
s1 = np.linspace(-3*pi, 3*pi, 200)
# Vectorized replacement of the element-wise sin() loop.
s3 = np.sin(s1 * pi * 0.1)
# NOTE(review): style.use() after the figure is created only affects artists
# drawn afterwards; move it above plt.subplots() to style the whole figure.
plt.style.use('ggplot')
ax.plot(s1, np.sinc(s1), label='np.sinc')
ax.plot(s1, np.sin(s1), 'r-.', linewidth=.5, label='sin')
ax.plot(s1, s3, label='sin * 10', linewidth=.25)
plt.ylabel('y-Y')
plt.xlabel('... -- ...')
plt.legend()
plt.title('abc')
plt.grid(True)
plt.show()
print(plt.style.available)
print('OK!')
|
import argparse
import os
import re
import importlib
from core_data_modules.logging import Logger
from storage.google_cloud import google_cloud_utils
log = Logger(__name__)
def get_file_paths(dir_path):
    """Return the paths of the ``.gzip`` archive files in *dir_path*.

    :param dir_path: Directory to scan (non-recursively).
    :return: List of joined paths, one per ``.gzip`` file found.
    """
    # os.listdir() returns every entry in the directory, so keep only
    # the archive files.
    return [
        os.path.join(dir_path, entry)
        for entry in os.listdir(dir_path)
        if entry.endswith(".gzip")
    ]
def get_uploaded_file_dates(uploaded_files_list, date_pattern):
    """Extract the date string from each uploaded file name.

    :param uploaded_files_list: File names/paths to scan.
    :param date_pattern: Regex whose first match (e.g. YYYY-MM-DD) is extracted.
    :return: List of matched date strings; names without a match are skipped.
    """
    uploaded_file_dates = []
    for file_name in uploaded_files_list:
        date_match = re.search(date_pattern, file_name)
        # `is None` replaces the `== None` comparison; skip undated names.
        if date_match is not None:
            uploaded_file_dates.append(date_match.group())
    return uploaded_file_dates
def get_files_by_date(dir_path, uploaded_file_dates, date_pattern=r'\d{4}-\d{2}-\d{2}'):
    """Group not-yet-uploaded archive files in *dir_path* by their date.

    :param dir_path: Directory containing ``.gzip`` archives.
    :param uploaded_file_dates: Date strings already uploaded; files for
        these dates are skipped.
    :param date_pattern: Regex used to pull the date out of a file name.
        Previously this was read from a module-level global only defined in
        the script entry point (a NameError when imported); it is now a
        parameter with the same default value.
    :return: Dict mapping date string -> list of file paths for that date.
    """
    file_paths = get_file_paths(dir_path)
    files_by_date = {}
    if len(file_paths) > 0:
        for file in file_paths:
            file_date_match = re.search(date_pattern, file)
            file_date = file_date_match.group()
            if file_date in uploaded_file_dates:
                log.info(f" file already uploaded for {file_date}, skipping...")
            else:
                # Collect all files sharing this date for later selection.
                files_by_date.setdefault(file_date, []).append(file)
    else:
        log.info(f" No file found in {dir_path}!, skipping...")
    return files_by_date
def delete_old_archive_files(dir_path, uploaded_file_dates, date_pattern=r'\d{4}-\d{2}-\d{2}'):
    """Delete local archive files that are no longer needed.

    For dates already uploaded to the cloud, every file except the most
    recently modified archive overall is deleted. For dates whose upload
    failed, only the newest file per date is kept for a retry.

    :param dir_path: Directory containing ``.gzip`` archives.
    :param uploaded_file_dates: Date strings that already have an upload.
    :param date_pattern: Regex used to pull the date out of a file name.
        Previously read from a module-level global only defined in the
        script entry point; now a parameter with the same default value.
    """
    archive_file_paths = get_file_paths(dir_path)
    files_for_days_that_upload_failed = {}
    most_recent_file_path = None
    if len(archive_file_paths) > 0:
        most_recent_file_path = max(archive_file_paths, key=os.path.getmtime)
    for file_path in archive_file_paths:
        file_date_match = re.search(date_pattern, file_path)
        file_date = file_date_match.group()
        if file_date in uploaded_file_dates:
            # Already uploaded: keep only the single latest file on disk
            # for quick retrieval; everything else is redundant.
            if file_path == most_recent_file_path:
                log.info(f"Retaining latest modified file {file_path} for quick retrieval")
                continue
            log.warning(f"Deleting {file_path} because files for {file_date} already uploaded to cloud")
            os.remove(os.path.join(dir_path, file_path))
        else:
            # Not uploaded yet: remember the file so only the newest per
            # date survives for the retry.
            log.debug(f'Files for {file_date} not yet uploaded to cloud, '
                      f'will delete other files and retain the latest modified file for upload')
            if file_date not in files_for_days_that_upload_failed:
                files_for_days_that_upload_failed[file_date] = []
            files_for_days_that_upload_failed[file_date].append(file_path)
    # For each failed-upload date, retain only the most recently modified
    # file and delete the rest.
    for file_date in files_for_days_that_upload_failed:
        most_recent_file_path = max(files_for_days_that_upload_failed[file_date], key=os.path.getmtime)
        for file_path in files_for_days_that_upload_failed[file_date]:
            if file_path == most_recent_file_path:
                log.debug(f"Retaining {file_path}")
                continue
            log.warning(f"Deleting old file {file_path} for {file_date}")
            os.remove(os.path.join(dir_path, file_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Uploads pipeline archive files to g-cloud")
    parser.add_argument("user", help="User launching this program")
    parser.add_argument("google_cloud_credentials_file_path", metavar="google-cloud-credentials-file-path",
                        help="Path to a Google Cloud service account credentials file to use to access the "
                             "credentials bucket")
    parser.add_argument("configuration_module",
                        help="Configuration module to import e.g. 'configurations.test_config'. "
                             "This module must contain a PIPELINE_CONFIGURATION property")
    parser.add_argument("archive_dir_path", metavar="archive_dir_path",
                        help="Path to the data archive directory with file to upload")
    args = parser.parse_args()
    user = args.user
    # The configuration module is resolved at runtime by dotted name.
    pipeline_config = importlib.import_module(args.configuration_module).PIPELINE_CONFIGURATION
    google_cloud_credentials_file_path = args.google_cloud_credentials_file_path
    archive_dir_path = args.archive_dir_path
    # Matches dates of the form YYYY-MM-DD embedded in archive file names.
    date_pattern = r'\d{4}-\d{2}-\d{2}'
    # List what is already in the bucket so we only upload missing dates.
    uploaded_data_archives = google_cloud_utils.list_blobs(google_cloud_credentials_file_path,
                                                           pipeline_config.archive_configuration.archive_upload_bucket,
                                                           pipeline_config.archive_configuration.bucket_dir_path)
    uploaded_archive_dates = get_uploaded_file_dates(uploaded_data_archives, date_pattern)
    log.warning(f"Deleting old data archives files from local disk...")
    delete_old_archive_files(archive_dir_path, uploaded_archive_dates)
    log.info(f"Uploading archive files...")
    archive_files_by_date = get_files_by_date(archive_dir_path, uploaded_archive_dates)
    for file_date in archive_files_by_date:
        # Only the most recently modified archive per date is uploaded.
        latest_archive_file_path = max(archive_files_by_date[file_date], key=os.path.getmtime)
        archive_upload_location = f"{pipeline_config.archive_configuration.archive_upload_bucket}/" \
            f"{pipeline_config.archive_configuration.bucket_dir_path}/{os.path.basename(latest_archive_file_path)}"
        log.info(f"Uploading data archive from {latest_archive_file_path} to {archive_upload_location}...")
        with open(latest_archive_file_path, "rb") as f:
            google_cloud_utils.upload_file_to_blob(google_cloud_credentials_file_path, archive_upload_location, f)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.