"""Apply the mask to the nifiti data
Usage: roinii_fh roifile
"""
import re
import os
import sys
import nibabel as nb
from roi.pre import join_time
from fmrilearn.load import load_roifile
from fmrilearn.preprocess.nii import findallnii
from fmrilearn.preprocess.nii import masknii
def create(args):
"""Create a roi dataset using all Ploran 2007 (i.e. butterfly)
datasets.
Parameters
----------
mask - name of a roi found in the Harvard Oxford atlas (see roi
package for details)
newname - what (if anything) to rename the roi data as,
often the default name (i.e. mask) are rather long
subdatatable - a dictionary whose keys are subject numbers
and whose values are absolute paths to that Ss
whole brain (functional) data.
basepath - the top-level directory where all the Ss BOLD
(and other) data lives.
"""
    # Process args; a dumb way to make
    # this function compatible with pool.map()
mask, newname, subdatatable, basepath = args
print("Mask is {0}.".format(mask))
maskednames = []
for s in sorted(subdatatable.keys()):
print("Running subject {0}.".format(s))
# Look up the location of that Ss data,
# and mask it, finally save the masked
# file to disk
datadir = subdatatable[s]
saveas = os.path.join(basepath,
'roinii', "{0}_{1}.nii.gz".format(newname, s))
masknii(mask, datadir, save=saveas)
if __name__ == '__main__':
"""Command line invocation setup."""
from multiprocessing import Pool
# ----
# User parameters
basepath = os.getcwd()
ncore = 3
## Set ncore > 1 if you want to
## parallelize the roi extractions
# ----
# Create a place for the roi data to
# live if necessary.
try:
os.mkdir("./roinii")
except OSError:
pass
# ----
# Process argv
if len(sys.argv[1:]) != 1:
        raise ValueError("Exactly one argument is required.")
rois, names = load_roifile(sys.argv[1])
# ----
# Link subjects with paths to data
subdatatable = {9 : os.path.join(basepath, 'fh09', 'warfh.nii'),
11 : os.path.join(basepath, 'fh11', 'warfh.nii'),
13 : os.path.join(basepath, 'fh13', 'warfh.nii'),
14 : os.path.join(basepath, 'fh14', 'warfh.nii'),
15 : os.path.join(basepath, 'fh15', 'warfh.nii'),
17 : os.path.join(basepath, 'fh17', 'warfh.nii'),
19 : os.path.join(basepath, 'fh19', 'warfh.nii'),
21 : os.path.join(basepath, 'fh21', 'warfh.nii'),
23 : os.path.join(basepath, 'fh23', 'warfh.nii'),
24 : os.path.join(basepath, 'fh24', 'warfh.nii'),
25 : os.path.join(basepath, 'fh25', 'warfh.nii'),
26 : os.path.join(basepath, 'fh26', 'warfh.nii'),
27 : os.path.join(basepath, 'fh27', 'warfh.nii'),
28 : os.path.join(basepath, 'fh28', 'warfh.nii')}
# subdatatable = {
# 14 : os.path.join(basepath, 'fh14', 'warfh.nii')}
# Build up 4-tuples that contain all the args
# create needs, iterate over all the entries
# in the roifile
arglist = []
for roi, name in zip(rois, names):
arglist.append((roi, name, subdatatable, basepath))
# ---
# Go!
# Parallelize using the arglist and Pool
pool = Pool(ncore)
pool.map(create, arglist)
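# Example invocation (sketch; 'my_rois.txt' stands in for a real roi file):
#   python roinii_fh.py my_rois.txt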
|
"""
<name>Raw to Compositional Data</name>
<author>By Serge-Etienne Parent, firstly generated using Widget Maker written by Kyle R. Covington</author>
<description></description>
<RFunctions>compositions</RFunctions>
<tags>Compositions</tags>
<icon>raw2comp.png</icon>
<outputWidgets>plotting_plot, base_rViewer</outputWidgets>
"""
from OWRpy import *
from libraries.base.qtWidgets.lineEdit import lineEdit as redRlineEdit
from libraries.base.qtWidgets.radioButtons import radioButtons as redRradioButtons
from libraries.base.qtWidgets.comboBox import comboBox as redRcomboBox
from libraries.base.qtWidgets.checkBox import checkBox as redRcheckBox
from libraries.base.qtWidgets.textEdit import textEdit as redRtextEdit
import libraries.base.signalClasses as signals
class RedRcomp(OWRpy):
settingsList = []
def __init__(self, parent=None, signalManager=None):
OWRpy.__init__(self)
self.data = {}
self.setRvariableNames(['acomp', 'rcomp', 'aplus', 'rplus'])
self.RFunctionParam_X = ''
self.inputs.addInput("X", "X", signals.RDataFrame.RDataFrame, self.processX)
self.outputs.addOutput("acomp Output","acomp Output", signals.RMatrix.RMatrix)
self.outputs.addOutput("rcomp Output","rcomp Output", signals.RMatrix.RMatrix)
self.outputs.addOutput("rplus Output","rplus Output", signals.RMatrix.RMatrix)
self.outputs.addOutput("aplus Output","aplus Output", signals.RMatrix.RMatrix)
self.compositionType = redRradioButtons(self.controlArea, label = "Composition Type:", buttons = ['acomp', 'rcomp', 'aplus', 'rplus'], setChecked = "acomp", orientation='horizontal') # choose composition type
self.RFunctionParamparts_lineEdit = redRlineEdit(self.controlArea, label = "parts:", text = '')
self.RFunctionParamtotal_lineEdit = redRlineEdit(self.controlArea, label = "total:", text = '1')
redRCommitButton(self.bottomAreaRight, "Commit", callback = self.commitFunction)
def processX(self, data):
if not self.require_librarys(["compositions"]):
self.status.setText('R Libraries Not Loaded.')
return
if data:
self.RFunctionParam_X=data.getData()
#self.data = data
self.commitFunction()
else:
self.RFunctionParam_X=''
    def commitFunction(self):
        if str(self.RFunctionParam_X) == '': return
        # Collect the arguments for the R call: X plus the optional parts/total
        injection = ['X=' + str(self.RFunctionParam_X)]
        if str(self.RFunctionParamparts_lineEdit.text()) != '':
            injection.append('parts=' + str(self.RFunctionParamparts_lineEdit.text()))
        if str(self.RFunctionParamtotal_lineEdit.text()) != '':
            injection.append('total=' + str(self.RFunctionParamtotal_lineEdit.text()))
        args = ','.join(injection)
        # Run the selected composition function (acomp, rcomp, aplus or rplus),
        # send its result on the matching channel, and clear the other three
        selected = self.compositionType.getChecked()
        self.R(self.Rvariables[selected] + '<-' + selected + '(' + args + ')')
        newData = signals.RMatrix.RMatrix(data = self.Rvariables[selected], checkVal = False)
        for comp in ('acomp', 'rcomp', 'aplus', 'rplus'):
            self.rSend(comp + ' Output', newData if comp == selected else None)
|
import datetime
import hashlib
import httplib
import types
from ..service.jammer import Jammer
from ..service.speed import Speed
DATE_FMT = '%a, %d %b %Y %H:%M:%S GMT'
def _if_modified_since(if_modified_since, last_modified):
    # Compare only when both the header and a numeric timestamp are present;
    # callers pass the timestamp as a string of digits
    if not if_modified_since or not last_modified:
        return False
    modified = datetime.datetime.fromtimestamp(int(last_modified)).strftime(DATE_FMT)
    return modified == if_modified_since
class JamResponse(object):
def __init__(self, jam, headers=None, code=httplib.OK):
super(JamResponse, self).__init__()
self._jam = jam
self.headers = headers or {}
self.code = code
@property
def body(self):
return self._jam.contents if isinstance(self._jam, Jammer) else ''
def handle_jam_request(request_path, if_modified_since, require_dependencies=False, excluded_dependencies=None):
# !warning: if you feel the urge to make changes here, please consult pagespeed and yslow docs
# step 0. sometimes the dynamic mod time or dependency order can change depending on how the request uri was formed.
# if there is an old request out there, redirect to the correct url
    # step 1. check the dynamic last mod. if it's current, return the correct status code
# step 2a. if we can't return 304, build out the response by using jammer content
# step 2b. check the gel_page_cache for the generated file
# step 2c. add headers that browsers love for assets
# step 3. return the correct code, message, and body
# note 1. this bypasses a renderer and returns directly to dispatcher
# note 2. we gzip in nginx. in dev/debug, we manually deflate
# test cases:
# 1. /paste/ -> 400
# 2. /paste/<ver-1>/paste.js -> 302
# 3. /paste/<ver+1>/paste.js -> 404
# 4. /paste/paste.*js -> 302
# 5. /paste/paste.js -> 302
# 6. /paste/require/paste.event.js -> 302
# 7. /paste/require/paste.js?filter=paste -> 400
# 8. /paste/require/paste.event.js?filter=paste.oop -> 302
# 9. /paste/require/paste.event%2Cpaste.framerate.js -> 302
_jam = Jammer(request_path=request_path, require_dependencies=require_dependencies)
path_dependencies = _jam.parse_request_path_dependencies(request_path)
path_last_modified = _jam.parse_request_path_last_modified(request_path)
if isinstance(excluded_dependencies, types.StringTypes):
excluded_dependencies = set(dependency_name.strip() for dependency_name in excluded_dependencies.split(','))
if isinstance(excluded_dependencies, set):
_jam.filter_loaded(excluded_dependencies)
has_valid_jam = _jam is not None and _jam.uri
if not has_valid_jam:
return JamResponse(None, code=httplib.BAD_REQUEST)
last_modified_required = not _jam.is_debug
if (_jam.checksum != path_dependencies or
(last_modified_required and
(not path_last_modified or not path_last_modified.isdigit() or
int(path_last_modified) < _jam.last_modified))):
return JamResponse(
_jam,
code=httplib.FOUND,
headers={
'Access-Control-Allow-Origin': '*',
'Cache-Control': 'private, no-cache, no-store, max-age=0, must-revalidate',
'Expires': datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT'),
'ETag': hashlib.md5(_jam.checksum + '-' + str(_jam.last_modified)).hexdigest(),
'Location': _jam.uri
}
)
elif last_modified_required and _jam.last_modified != int(path_last_modified):
return JamResponse(None, code=httplib.NOT_FOUND)
elif _if_modified_since(if_modified_since, path_last_modified):
return JamResponse(None, code=httplib.NOT_MODIFIED)
else:
headers = {}
Speed.header_caching(request_path, headers.setdefault, _jam.last_modified,
hashlib.md5(_jam.checksum + '-' + str(_jam.last_modified)).hexdigest())
return JamResponse(
_jam,
code=httplib.OK,
headers=headers
)
|
# Write your x_length_words function here:
def x_length_words(sentence, x):
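    # Return True only when every word in the sentence has at least x characters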
words = sentence.split(" ")
for word in words:
if len(word) < x:
return False
return True
# Uncomment these function calls to test your function:
print(x_length_words("i like apples", 2))
# should print False
print(x_length_words("he likes apples", 2))
# should print True
|
import logging
import bspump.ssh
import datetime
import bspump
import bspump.mongodb
import bspump.common
import bspump.file
import bspump.trigger
###
L = logging.getLogger(__name__)
"""
Config file for SSH connection should look like this:
# SSH Connection
[connection:SSHConnection]
host=remotehost
port=22
user=remoteuser
password=p455w0rd
known_hosts=.ssh/known_hosts
client_host_keysign=/path/to/dir/with/ssh-keysign
client_host_keys=skey
..
If you do not know the known_hosts or client_host_keys, leave them empty or omit them from the config file.
If you do not know the path to the directory with the client_host_keysign keys, omit it from the config file or
set it to 0 (zero).
Add conf file to ../etc/
This example connects to MongoDB:
#host = mongodb://127.0.0.1:27017
#database=users
#collection=user_location
It queries the database according to the parameters passed into query_parms
and writes the result to a file through SFTP.
"""
class SamplePipeline(bspump.Pipeline):
def __init__(self, app, pipeline_id = None):
super().__init__(app, pipeline_id)
self.fileextjson = ".json"
# Set the file name as "todays-date.json" for sftpsink
self.jsonfilename = datetime.datetime.now().strftime('%d-%m-%Y') + self.fileextjson
print(self.jsonfilename)
query_parms = {
}
        # Run the application: bspump-es-source.py -c ./etc/site.config
self.build(
bspump.mongodb.MongoDBSource(app, self, "MongoDBConnection", query_parms=query_parms,
config={'database':'users',
'collection':'user_location',
            }).on(bspump.trigger.PeriodicTrigger(app, 5)),
bspump.common.DictToJsonBytesParser(app, self),
bspump.ssh.SFTPSink(app, self, "SSHConnection2" ,config={
'remote_path': '/tmp/bspump_ssh/',
'filename': 'testfile',
'mode': 'a',
})
)
if __name__ == '__main__':
app = bspump.BSPumpApplication()
svc = app.get_service("bspump.PumpService")
# Make connection to SSH
svc.add_connection(
bspump.ssh.SSHConnection(app, "SSHConnection2" ,config={
"host": "bandit.labs.overthewire.org",
"user": "bandit0",
"password": "bandit0",
"port": 2220,
})
)
# Make connection to localhost Mongo
svc.add_connection(bspump.mongodb.MongoDBConnection(app,"MongoDBConnection" ,config={
"host": "mongodb://127.0.0.1:27017"}))
# Construct and register Pipeline
pl = SamplePipeline(app, 'SamplePipeline')
svc.add_pipeline(pl)
pl.PubSub.publish("go!")
app.run()
|
# Dollars available
x = int(input())
# Number of games
n = int(input())
comprados = 0
# For each game
for i in range(0, n):
    precio = int(input())
    # Check whether we can afford it
    if x >= precio:
        # Buying it leaves us
        # with less money
        x = x - precio
        comprados = comprados + 1
print(comprados)
|
from sqlalchemy import (
Boolean,
CheckConstraint,
Column,
Enum,
Float,
ForeignKey,
Integer,
MetaData,
String,
Table,
UniqueConstraint,
)
from deckman.model.artist import ArtistStatus
metadata_obj = MetaData()
artists = Table(
"artists",
metadata_obj,
Column("id", Integer, primary_key=True),
Column("musicbrainz_id", String(36), unique=True),
Column("name", String(512), nullable=True),
Column("name_sort", String(512), nullable=True),
Column("image_url", String(512), nullable=True),
Column("description", String(2048), nullable=True),
Column("status", Enum(ArtistStatus, create_constraint=True)),
# Column("profile_id", ForeignKey("profile.id"), nullable=True),
)
settings_lossless = Table(
"settings_lossless",
metadata_obj,
Column("id", Integer, primary_key=True),
Column("name", String(128), unique=True),
Column("sample_rate_khz", Float),
Column("bit_depth", Integer),
Column("channels", Integer),
UniqueConstraint("sample_rate_khz", "bit_depth", "channels")
)
settings_lossy = Table(
"settings_lossy",
metadata_obj,
Column("id", Integer, primary_key=True),
Column("name", String(128), unique=True),
Column("bitrate", Integer, unique=True)
)
qualities = Table(
"qualities",
metadata_obj,
Column("id", Integer, primary_key=True),
Column("profile_id", ForeignKey("profiles.id")),
Column("position", Integer),
Column(
"settings_lossless_id",
ForeignKey("settings_lossless.id"),
nullable=True
),
Column(
"settings_lossy_id",
ForeignKey("settings_lossy.id"),
nullable=True
),
Column("finish", Boolean),
CheckConstraint(
"(settings_lossy_id IS NULL) <> (settings_lossless_id IS NULL)",
name="lossy_xor_lossless"
)
)
profiles = Table(
"profiles",
metadata_obj,
Column("id", Integer, primary_key=True),
Column("name", String(128), unique=True),
Column("tolerance", Float, default=0.2),
Column("dual_formats", Boolean, default=False),
)
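# A minimal usage sketch (not part of the original module): emit the schema
# above into an in-memory SQLite database; the engine URL is an assumption.
if __name__ == "__main__":
    from sqlalchemy import create_engine

    engine = create_engine("sqlite://", echo=True)
    metadata_obj.create_all(engine)  # creates all five tables in dependency order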
|
# Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import fnmatch
import functools
import multiprocessing.dummy as multiprocessing
import os
import re
import sys
import traceback
# import imageio
import numpy as np
import skimage.io
import tensorflow as tf
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer)
lock = multiprocessing.Lock()
def safe_print(*args, **kwargs):
with lock:
print(*args, **kwargs)
def create_reader(logdir):
reader = event_multiplexer.EventMultiplexer()
reader.AddRun(logdir, 'run')
reader.Reload()
return reader
def extract_values(reader, tag):
events = reader.Tensors('run', tag)
steps = [event.step for event in events]
times = [event.wall_time for event in events]
values = [tf.make_ndarray(event.tensor_proto) for event in events]
return steps, times, values
def export_scalar(basename, steps, times, values):
safe_print('Writing', basename + '.csv')
values = [value.item() for value in values]
with tf.gfile.Open(basename + '.csv', 'w') as outfile:
writer = csv.writer(outfile)
writer.writerow(('wall_time', 'step', 'value'))
for row in zip(times, steps, values):
writer.writerow(row)
def export_image(basename, steps, times, values):
tf.reset_default_graph()
tf_string = tf.placeholder(tf.string)
tf_tensor = tf.image.decode_image(tf_string)
with tf.Session() as sess:
for step, time_, value in zip(steps, times, values):
filename = '{}-{}-{}.png'.format(basename, step, time_)
width, height, string = value[0], value[1], value[2]
del width
del height
tensor = sess.run(tf_tensor, {tf_string: string})
# imageio.imsave(filename, tensor)
skimage.io.imsave(filename, tensor)
filename = '{}-{}-{}.npy'.format(basename, step, time_)
np.save(filename, tensor)
def process_logdir(logdir, args):
clean = lambda text: re.sub('[^A-Za-z0-9_]', '_', text)
basename = os.path.join(args.outdir, clean(logdir))
if len(tf.gfile.Glob(basename + '*')) > 0 and not args.force:
safe_print('Exists', logdir)
return
try:
safe_print('Start', logdir)
reader = create_reader(logdir)
for tag in reader.Runs()['run']['tensors']: # tensors -> scalars
if fnmatch.fnmatch(tag, args.tags):
steps, times, values = extract_values(reader, tag)
filename = '{}___{}'.format(basename, clean(tag))
export_scalar(filename, steps, times, values)
# for tag in tags['images']:
# if fnmatch.fnmatch(tag, args.tags):
# steps, times, values = extract_values(reader, tag)
# filename = '{}___{}'.format(basename, clean(tag))
# export_image(filename, steps, times, values)
del reader
safe_print('Done', logdir)
except Exception:
safe_print('Exception', logdir)
        safe_print(traceback.format_exc())
def main(args):
logdirs = tf.gfile.Glob(args.logdirs)
print(len(logdirs), 'logdirs.')
assert logdirs
tf.gfile.MakeDirs(args.outdir)
np.random.shuffle(logdirs)
pool = multiprocessing.Pool(args.workers)
worker_fn = functools.partial(process_logdir, args=args)
pool.map(worker_fn, logdirs)
if __name__ == '__main__':
boolean = lambda x: ['False', 'True'].index(x)
parser = argparse.ArgumentParser()
parser.add_argument(
'--logdirs', required=True,
help='glob for log directories to fetch')
parser.add_argument(
'--tags', default='trainer*score',
help='glob for tags to save')
parser.add_argument(
'--outdir', required=True,
help='output directory to store values')
parser.add_argument(
'--force', type=boolean, default=False,
help='overwrite existing files')
parser.add_argument(
'--workers', type=int, default=10,
help='number of worker threads')
args_, remaining = parser.parse_known_args()
args_.logdirs = os.path.expanduser(args_.logdirs)
args_.outdir = os.path.expanduser(args_.outdir)
remaining.insert(0, sys.argv[0])
tf.app.run(lambda _: main(args_), remaining)
|
from sqlalchemy import Column, ForeignKey, Integer, Table, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, relationship
Base = declarative_base()
metadata = Base.metadata
WinesRegion = Table(
'wines_region', metadata,
Column('wine_id', Integer, ForeignKey('wines.id'), nullable=False),
Column('region_id', Integer, ForeignKey('regions.id'), nullable=False)
)
WinesUser = Table(
'wines_user', metadata,
Column('wine_id', Integer, ForeignKey('wines.id'), nullable=False),
Column('user_id', Integer, ForeignKey('users.id'), nullable=False)
)
class Regions(Base):
__tablename__ = 'regions'
id = Column(Integer, primary_key=True)
name = Column(Text, nullable=False)
class Wines(Base):
__tablename__ = 'wines'
id = Column(Integer, primary_key=True)
variety = Column(Text)
description = Column(Text)
points = Column(Integer)
price = Column(Integer)
winery = Column(Text)
country = Column(Text)
province = Column(Text)
users = relationship('User', secondary=WinesUser, backref=backref('wines', lazy='dynamic'))
regions = relationship('Regions', secondary=WinesRegion, backref=backref('regions', lazy='dynamic'))
def to_dict(self):
return {
'id': self.id,
'variety': self.variety,
'description': self.description,
'points': self.points,
'price': self.price,
'winery': self.winery,
'country': self.country,
'province': self.province,
'regions': [region.name for region in self.regions]
}
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(Text)
username = Column(Text, unique=True)
password = Column(Text)
email = Column(Text, unique=True)
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
@staticmethod
def create(db_session, name, username, password, email):
user = User(name=name, username=username, password=password, email=email)
db_session.add(user)
db_session.flush()
db_session.commit()
db_session.refresh(user)
return user
|
# Define the loop condition
ungueltige_eingabe = True
# Start the while loop
while ungueltige_eingabe:
    # Try to read the user's input
    try:
        alter = int(input("Please enter your age: "))
        if alter >= 18:
            print("Your age is", alter, ".")
            ungueltige_eingabe = False
        else:
            print("Unfortunately you are not old enough.")
    # If the input is not an integer, print the message below and ask again
    except ValueError:
        print("Please enter whole numbers only.")
|
# Helper from bluezero
from bluezero import async_tools
from queue import Queue
from robotpacket import RobotPacket
from robotcommand import RobotCommand
from robotstate import RobotState
import blepy
class RobotPeripheral(blepy.Peripheral):
# Custom service UUID
CPU_TMP_SRVC = '12341000-1234-1234-1234-123456789abc'
def __init__(self, adapter_address, state: RobotState, event_queue: Queue):
self.notifiers = {
"Acceleration": None,
"Steering": None,
"Collision": None
}
self.event_queue = event_queue
self.services = [RobotService(True, state, event_queue, self.notifiers)]
super().__init__(self.services, adapter_address, "Robot", appearance=0)
def _on_connect_callback(self, device):
print("Connected to " + str(device.address))
packet = RobotPacket(RobotCommand.CONNECTED)
self.event_queue.put(packet)
def _on_disconnect_callback(self, adapter_address, device_address):
print("Disconnected from " + device_address)
packet = RobotPacket(RobotCommand.DISCONNECTED)
self.event_queue.put(packet)
def event_handler(self, packet: RobotPacket):
switcher = {
RobotCommand.ACCELERATION: self.notifiers["Acceleration"],
RobotCommand.STEERING: self.notifiers["Steering"],
RobotCommand.COLLISION: self.notifiers["Collision"]
}
notifier = switcher.get(packet.get_command())
if notifier is not None:
notifier.set_value(list(packet.get_parameter().to_bytes(1, byteorder='little', signed=True)))
class RobotService(blepy.Service):
# Custom service UUID
CPU_TMP_SRVC = '12341001-1234-1234-1234-123456789abc'
    def __init__(self, primary, state: RobotState, event_queue: Queue, notifiers: dict):
super().__init__(self.CPU_TMP_SRVC, primary)
self.characteristics = [
RobotCharacteristics.Acceleration(state, event_queue, notifiers),
RobotCharacteristics.Steering(state, event_queue, notifiers),
RobotCharacteristics.Collision(state, event_queue, notifiers),
RobotCharacteristics.Automove(state, event_queue, notifiers),
]
class RobotCharacteristics:
class Acceleration(blepy.Characteristic):
        def __init__(self, state: RobotState, event_queue: Queue, notifiers: dict):
super().__init__("12341002-1234-1234-1234-123456789abc", event_queue)
self.flags = ['write']
self.write_callback = self.write
self.state = state
def write(self, value, options):
packet = RobotPacket(RobotCommand.ACCELERATION, int.from_bytes(value, byteorder='little', signed=True))
self.event_queue.put(packet)
class Steering(blepy.Characteristic):
        def __init__(self, state: RobotState, event_queue: Queue, notifiers: dict):
super().__init__("12341003-1234-1234-1234-123456789abc", event_queue)
self.flags = ['write']
self.write_callback = self.write
self.state = state
def write(self, value, options):
packet = RobotPacket(RobotCommand.STEERING, int.from_bytes(value, byteorder='little', signed=True))
self.event_queue.put(packet)
class Collision(blepy.Characteristic):
        def __init__(self, state: RobotState, event_queue: Queue, notifiers: dict):
super().__init__("12341004-1234-1234-1234-123456789abc", event_queue)
self.flags = ['notify']
self.notify_callback = self.notify
self.notifiers = notifiers
def notify(self, notifying, characteristic):
self.notifiers["Collision"] = characteristic if notifying else None
return notifying
class Automove(blepy.Characteristic):
        def __init__(self, state: RobotState, event_queue: Queue, notifiers: dict):
super().__init__("12341005-1234-1234-1234-123456789abc", event_queue)
self.flags = ['write']
self.write_callback = self.write
self.notifiers = notifiers
def write(self, value, options):
packet = RobotPacket(RobotCommand.MODE)
self.event_queue.put(packet)
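# Example wiring (sketch; the adapter address is a placeholder and a consumer
# that drains RobotPacket events from the queue is assumed to exist):
#   state = RobotState()
#   events = Queue()
#   peripheral = RobotPeripheral('AA:BB:CC:DD:EE:FF', state, events)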
|
"""
models module for organising table and validation
"""
from django.db import models
class Contact(models.Model):
"""
A class for storing instance contact
"""
contact_id = models.AutoField(primary_key=True)
first_name = models.CharField(max_length=130, blank=True, null=True)
middle_name = models.CharField(max_length=130, blank=True, null=True)
last_name = models.CharField(max_length=130, blank=True, null=True)
email = models.EmailField(max_length=110, blank=True, null=True)
country = models.CharField(max_length=65, blank=True, null=True)
phone = models.CharField(max_length=19, null=True)
address = models.CharField(max_length=254, blank=True, null=True)
|
from pymongo import MongoClient
from pymongo.errors import PyMongoError
from bson.json_util import dumps
# client = MongoClient(port=27017)
# db = client.STUDENTMDB
# collections = db['STUDENT']
# collections.insert_one({"St_id": "21425454", "Name":"Ragul","Email":"ravi@gmail.com","Grade" :"11", "Stream":"Maths"})
class Database(object):
_instance = None
    database_error_msg = 'error raised in db'
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = object.__new__(cls)
return cls._instance
def __init__(self):
try:
self.client = MongoClient("mongodb://localhost:27017")
self.db = self.client['STUDENTMDB']
        except PyMongoError as e:
            print(e)
            raise
def insert_data(self, insert_collection, inserted_data):
try:
db_collection = self.db[f'{insert_collection}']
db_collection.insert_one(inserted_data)
except PyMongoError:
return {'message':self.database_error_msg}
else:
            return {'message': 'Successfully inserted the record'}
def get_data(self, get_collection, search_id=None):
data=[]
        if search_id is None:
            try:
                db_collection = self.db[f'{get_collection}']
                users = db_collection.find(projection={'_id': False})
                data = list(users)
return { 'request':{'message': 'All records received'}, 'response':data}
except PyMongoError:
return {'message':self.database_error_msg}
else:
try:
db_collection = self.db[f'{get_collection}']
data = db_collection.find_one(filter={'St_id':search_id}, projection= {'_id':False})
return { 'request':{'message': 'record received', 'Search Id':f'{search_id}'}, 'response':data}
except PyMongoError:
return {'message':self.database_error_msg}
def detele_data(self, del_collections, search_id):
try:
db_collection = self.db[f'{del_collections}']
db_collection.delete_one({'St_id': search_id})
except PyMongoError:
return {'message':self.database_error_msg}
else:
            return {'message': 'Successfully removed the user'}
def update_data(self, up_collections, search_id, updated_value):
try:
db_collection = self.db[f'{up_collections}']
db_collection.update_one({'St_id':search_id},{"$set":updated_value})
except PyMongoError:
return {'request': {'message':self.database_error_msg}}
else:
            return {'request': {'message': 'Successfully updated the record', 'updated': updated_value}}
def course_lookup_db(self, code):
resp=[]
pipeline = [
{'$match': {'Course_code': code}}
]
results = self.db['TEACHER'].aggregate(pipeline)
for result in results:
data = {'Name' : result['Name'], 'Subject': result['Subject']}
resp.append(data)
return resp
if __name__ == "__main__":
db = Database()
# data = {
# "St_id": "21425456",
# "Name":"ravi ram",
# "Email":"raja@gmail.com",
# "Grade" :"12",
# "Stream":"Bio"
# }
# db.insert_data('STUDENT', data)
|
#!/bin/python
import sys
def kangaroo(x1, v1, x2, v2):
    # The kangaroos meet iff x1 + t*v1 == x2 + t*v2 for some integer t >= 0,
    # i.e. the starting gap (x2 - x1) is closed in whole jumps of (v1 - v2)
    if v1 == v2:
        return 'YES' if x1 == x2 else 'NO'
    t, rem = divmod(x2 - x1, v1 - v2)
    return 'YES' if rem == 0 and t >= 0 else 'NO'

x1, v1, x2, v2 = map(int, input().strip().split())
result = kangaroo(x1, v1, x2, v2)
print(result)
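# Worked example: x1=0, v1=3, x2=4, v2=2 -> the 4-unit gap closes by 1 per
# jump, so both kangaroos land on position 12 after 4 jumps -> YES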
|
from scipy.stats import norm
import numpy as np
import pandas as pd
from . import common_args
from ..util import (read_param_file, compute_groups_matrix, ResultDict,
extract_group_names, _check_groups)
from types import MethodType
from multiprocessing import Pool, cpu_count
from functools import partial
from itertools import combinations, zip_longest
def analyze(problem, Y, calc_second_order=True, num_resamples=100,
conf_level=0.95, print_to_console=False, parallel=False,
n_processors=None, keep_resamples=False, seed=None):
"""Perform Sobol Analysis on model outputs.
Returns a dictionary with keys 'S1', 'S1_conf', 'ST', and 'ST_conf', where
each entry is a list of size D (the number of parameters) containing the
indices in the same order as the parameter file. If calc_second_order is
True, the dictionary also contains keys 'S2' and 'S2_conf'.
Compatible with
---------------
* `saltelli`
Parameters
----------
problem : dict
The problem definition
Y : numpy.array
A NumPy array containing the model outputs
calc_second_order : bool
Calculate second-order sensitivities (default True)
num_resamples : int
The number of resamples (default 100)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
keep_resamples : bool
Whether or not to store intermediate resampling results (default False)
References
----------
.. [1] Sobol, I. M. (2001). "Global sensitivity indices for nonlinear
mathematical models and their Monte Carlo estimates." Mathematics
and Computers in Simulation, 55(1-3):271-280,
doi:10.1016/S0378-4754(00)00270-6.
.. [2] Saltelli, A. (2002). "Making best use of model evaluations to
compute sensitivity indices." Computer Physics Communications,
145(2):280-297, doi:10.1016/S0010-4655(02)00280-1.
.. [3] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and
S. Tarantola (2010). "Variance based sensitivity analysis of model
output. Design and estimator for the total sensitivity index."
Computer Physics Communications, 181(2):259-270,
doi:10.1016/j.cpc.2009.09.018.
Examples
--------
>>> X = saltelli.sample(problem, 512)
>>> Y = Ishigami.evaluate(X)
>>> Si = sobol.analyze(problem, Y, print_to_console=True)
"""
if seed:
# Set seed to ensure CIs are the same
rng = np.random.default_rng(seed).integers
else:
rng = np.random.randint
# determining if groups are defined and adjusting the number
# of rows in the cross-sampled matrix accordingly
groups = _check_groups(problem)
if not groups:
D = problem['num_vars']
else:
_, D = extract_group_names(groups)
if calc_second_order and Y.size % (2 * D + 2) == 0:
N = int(Y.size / (2 * D + 2))
elif not calc_second_order and Y.size % (D + 2) == 0:
N = int(Y.size / (D + 2))
else:
raise RuntimeError("""
Incorrect number of samples in model output file.
Confirm that calc_second_order matches option used during sampling.""")
if not 0 < conf_level < 1:
raise RuntimeError("Confidence level must be between 0-1.")
# normalize the model output
Y = (Y - Y.mean()) / Y.std()
A, B, AB, BA = separate_output_values(Y, D, N, calc_second_order)
r = rng(N, size=(N, num_resamples))
Z = norm.ppf(0.5 + conf_level / 2)
if not parallel:
S = create_Si_dict(D, num_resamples, keep_resamples, calc_second_order)
for j in range(D):
S['S1'][j] = first_order(A, AB[:, j], B)
S1_conf_j = first_order(A[r], AB[r, j], B[r])
if keep_resamples:
S['S1_conf_all'][:, j] = S1_conf_j
S['S1_conf'][j] = Z * S1_conf_j.std(ddof=1)
S['ST'][j] = total_order(A, AB[:, j], B)
ST_conf_j = total_order(A[r], AB[r, j], B[r])
if keep_resamples:
S['ST_conf_all'][:, j] = ST_conf_j
S['ST_conf'][j] = Z * ST_conf_j.std(ddof=1)
# Second order (+conf.)
if calc_second_order:
for j in range(D):
for k in range(j + 1, D):
S['S2'][j, k] = second_order(
A, AB[:, j], AB[:, k], BA[:, j], B)
S['S2_conf'][j, k] = Z * second_order(A[r], AB[r, j],
AB[r, k], BA[r, j],
B[r]).std(ddof=1)
else:
tasks, n_processors = create_task_list(
D, calc_second_order, n_processors)
func = partial(sobol_parallel, Z, A, AB, BA, B, r)
pool = Pool(n_processors)
S_list = pool.map_async(func, tasks)
pool.close()
pool.join()
S = Si_list_to_dict(S_list.get(), D, num_resamples,
keep_resamples, calc_second_order)
# Add problem context and override conversion method for special case
S.problem = problem
S.to_df = MethodType(to_df, S)
# Print results to console
if print_to_console:
res = S.to_df()
for df in res:
print(df)
return S
def first_order(A, AB, B):
"""
First order estimator following Saltelli et al. 2010 CPC, normalized by
sample variance
"""
return np.mean(B * (AB - A), axis=0) / np.var(np.r_[A, B], axis=0)
def total_order(A, AB, B):
"""
Total order estimator following Saltelli et al. 2010 CPC, normalized by
sample variance
"""
return 0.5 * np.mean((A - AB) ** 2, axis=0) / np.var(np.r_[A, B], axis=0)
def second_order(A, ABj, ABk, BAj, B):
"""Second order estimator following Saltelli 2002"""
Vjk = np.mean(BAj * ABk - A * B, axis=0) / np.var(np.r_[A, B], axis=0)
Sj = first_order(A, ABj, B)
Sk = first_order(A, ABk, B)
return Vjk - Sj - Sk
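# For reference, with Var taken over the pooled sample [A, B], the three
# estimators above compute:
#   S1_j  = mean(B * (AB_j - A)) / Var                      (Saltelli et al. 2010)
#   ST_j  = 0.5 * mean((A - AB_j)**2) / Var                 (Saltelli et al. 2010)
#   S2_jk = mean(BA_j * AB_k - A * B) / Var - S1_j - S1_k   (Saltelli 2002)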
def create_Si_dict(D: int, num_resamples: int, keep_resamples: bool, calc_second_order: bool):
"""initialize empty dict to store sensitivity indices"""
S = ResultDict((k, np.zeros(D))
for k in ('S1', 'S1_conf', 'ST', 'ST_conf'))
if keep_resamples:
# Create entries to store intermediate resampling results
S['S1_conf_all'] = np.zeros((num_resamples, D))
S['ST_conf_all'] = np.zeros((num_resamples, D))
if calc_second_order:
S['S2'] = np.full((D, D), np.nan)
S['S2_conf'] = np.full((D, D), np.nan)
return S
def separate_output_values(Y, D, N, calc_second_order):
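    # Saltelli sampling interleaves the matrices row-wise: every block of
    # `step` rows holds [A, AB_1..AB_D, (BA_1..BA_D,) B], so each matrix is
    # recovered by slicing Y with stride `step`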
AB = np.zeros((N, D))
BA = np.zeros((N, D)) if calc_second_order else None
step = 2 * D + 2 if calc_second_order else D + 2
A = Y[0:Y.size:step]
B = Y[(step - 1):Y.size:step]
for j in range(D):
AB[:, j] = Y[(j + 1):Y.size:step]
if calc_second_order:
BA[:, j] = Y[(j + 1 + D):Y.size:step]
return A, B, AB, BA
def sobol_parallel(Z, A, AB, BA, B, r, tasks):
sobol_indices = []
for d, j, k in tasks:
if d == 'S1':
s = first_order(A, AB[:, j], B)
elif d == 'S1_conf':
s = Z * first_order(A[r], AB[r, j], B[r]).std(ddof=1)
elif d == 'ST':
s = total_order(A, AB[:, j], B)
elif d == 'ST_conf':
s = Z * total_order(A[r], AB[r, j], B[r]).std(ddof=1)
elif d == 'S2':
s = second_order(A, AB[:, j], AB[:, k], BA[:, j], B)
elif d == 'S2_conf':
s = Z * second_order(A[r], AB[r, j], AB[r, k],
BA[r, j], B[r]).std(ddof=1)
sobol_indices.append([d, j, k, s])
return sobol_indices
def create_task_list(D, calc_second_order, n_processors):
"""
Create list with one entry (key, parameter 1, parameter 2) per sobol
index (+conf.). This is used to supply parallel tasks to
multiprocessing.Pool
"""
tasks_first_order = [[d, j, None] for j in range(
D) for d in ('S1', 'S1_conf', 'ST', 'ST_conf')]
# Add second order (+conf.) to tasks
tasks_second_order = []
if calc_second_order:
tasks_second_order = [[d, j, k] for j in range(D) for k in
range(j + 1, D) for d in ('S2', 'S2_conf')]
if n_processors is None:
n_processors = min(cpu_count(), len(
tasks_first_order) + len(tasks_second_order))
if not calc_second_order:
tasks = np.array_split(tasks_first_order, n_processors)
else:
# merges both lists alternating its elements and splits the
# resulting lists into n_processors sublists
tasks = np.array_split([v for v in sum(
zip_longest(tasks_first_order[::-1], tasks_second_order), ())
if v is not None], n_processors)
return tasks, n_processors
def Si_list_to_dict(S_list, D: int, num_resamples: int, keep_resamples:bool, calc_second_order: bool):
"""Convert the parallel output into the regular dict format for
printing/returning"""
S = create_Si_dict(D, num_resamples, keep_resamples, calc_second_order)
L = []
for l in S_list: # first reformat to flatten
L += l
for s in L: # First order (+conf.)
if s[2] is None:
S[s[0]][s[1]] = s[3]
else:
S[s[0]][s[1], s[2]] = s[3]
return S
def Si_to_pandas_dict(S_dict):
"""Convert Si information into Pandas DataFrame compatible dict.
Parameters
----------
S_dict : ResultDict
Sobol sensitivity indices
See Also
----------
Si_list_to_dict
Returns
----------
tuple : of total, first, and second order sensitivities.
Total and first order are dicts.
Second order sensitivities contain a tuple of parameter name
combinations for use as the DataFrame index and second order
sensitivities.
If no second order indices found, then returns tuple of
(None, None)
Examples
--------
>>> X = saltelli.sample(problem, 512)
>>> Y = Ishigami.evaluate(X)
>>> Si = sobol.analyze(problem, Y, print_to_console=True)
>>> T_Si, first_Si, (idx, second_Si) = sobol.Si_to_pandas_dict(Si, problem)
"""
problem = S_dict.problem
total_order = {
'ST': S_dict['ST'],
'ST_conf': S_dict['ST_conf']
}
first_order = {
'S1': S_dict['S1'],
'S1_conf': S_dict['S1_conf']
}
idx = None
second_order = None
if 'S2' in S_dict:
groups = _check_groups(problem)
if groups:
names, _ = extract_group_names(groups)
else:
names = problem.get('names')
if len(names) > 2:
idx = list(combinations(names, 2))
else:
idx = (names, )
second_order = {
'S2': [S_dict['S2'][names.index(i[0]), names.index(i[1])]
for i in idx],
'S2_conf': [S_dict['S2_conf'][names.index(i[0]), names.index(i[1])]
for i in idx]
}
return total_order, first_order, (idx, second_order)
def to_df(self):
'''Conversion method to Pandas DataFrame. To be attached to ResultDict.
Returns
--------
List : of Pandas DataFrames in order of Total, First, Second
Example
-------
>>> Si = sobol.analyze(problem, Y, print_to_console=True)
>>> total_Si, first_Si, second_Si = Si.to_df()
'''
total, first, (idx, second) = Si_to_pandas_dict(self)
problem = self.problem
groups = _check_groups(problem)
if not groups:
names = problem.get('names')
else:
names, _ = extract_group_names(groups)
ret = [pd.DataFrame(total, index=names),
pd.DataFrame(first, index=names)]
if second:
ret += [pd.DataFrame(second, index=idx)]
return ret
def cli_parse(parser):
parser.add_argument('--max-order', type=int, required=False, default=2,
choices=[1, 2],
help='Maximum order of sensitivity indices to '
'calculate')
parser.add_argument('-r', '--resamples', type=int, required=False,
default=100,
help='Number of bootstrap resamples for Sobol '
'confidence intervals')
parser.add_argument('--parallel', action='store_true', help='Makes '
'use of parallelization.',
dest='parallel')
parser.add_argument('--processors', type=int, required=False,
default=None,
help='Number of processors to be used with the ' +
'parallel option.', dest='n_processors')
return parser
def cli_action(args):
problem = read_param_file(args.paramfile)
Y = np.loadtxt(args.model_output_file, delimiter=args.delimiter,
usecols=(args.column,))
analyze(problem, Y, (args.max_order == 2),
num_resamples=args.resamples, print_to_console=True,
parallel=args.parallel, n_processors=args.n_processors,
seed=args.seed)
if __name__ == "__main__":
common_args.run_cli(cli_parse, cli_action)
|
from ...common_descs import *
from ...hek.defs.objs.tag import HekTag
from supyr_struct.defs.tag_def import TagDef
animation_comment = """
Played immediately after the old unit's transform out animation.
The new actor is braindead during the animation, and is invincible until it ends.
"""
vitality_inheritance_overrides = (
'neither',
'both',
'shield_only',
'health_only'
)
keyframe_action = Struct("keyframe_action",
SEnum16("keyframe",
"primary",
"secondary",
"final",
),
SEnum16("rider_handling",
'none',
'kill',
'eject',
),
SEnum16("target",
'self',
'riders',
),
Pad(2),
dependency_os("damage_effect", "jpt!"),
dependency_os("effect", "effe"),
ascii_str32("effect_marker"),
SIZE=72
)
transform_in_target = Struct("transform_in_target",
ascii_str32("target_name"),
Bool16("flags",
'try_to_use_existing_unit',
'drop_weapon',
'inherit_seated_units',
'delete_attached_actors',
),
Pad(2),
QStruct("selection_chances",
Float("easy"), Float("normal"), Float("hard"), Float("imposs"),
ORIENT="h"
),
Struct("ai",
dependency_os("actor_variant", "actv"),
SEnum16("encounter_squad_handling",
'inherit_from_old_unit',
'inherit_from_attacker',
'free_actor',
),
Pad(2),
SEnum16("team_handling",
'inherit_from_old_unit',
'inherit_from_attacker',
'override',
),
SEnum16("team_override", *unit_teams),
SEnum16("initial_state_handling",
'inherit',
'override',
),
SEnum16("initial_state_override", *actor_states),
SEnum16("return_state_handling",
'inherit',
'override',
'actor_default',
),
SEnum16("return_state_override", *actor_states),
Pad(4),
),
Struct("animation",
ascii_str32("transform_in_anim"),
reflexive("keyframe_actions", keyframe_action, 9,
DYN_NAME_PATH='.effect_marker'),
COMMENT=animation_comment
),
Struct("vitality",
Pad(4),
SEnum16("inheritance", *vitality_inheritance_overrides),
SEnum16("override", *vitality_inheritance_overrides),
Float("shield_override"),
Float("health_override"),
),
SIZE=172
)
avti_body = Struct("tagdata",
reflexive("targets", transform_in_target, 16,
DYN_NAME_PATH='.target_name'),
SIZE=36
)
def get():
return avti_def
avti_def = TagDef("avti",
blam_header_os('avti', 1),
avti_body,
ext=".actor_variant_transform_in", endian=">", tag_cls=HekTag
)
|
"""
Reading the leaderboards with :class:`SteamLeaderboard` is as easy as iterating over a list.
"""
import logging
from steam.core.msg import MsgProto
from steam.enums import EResult, ELeaderboardDataRequest, ELeaderboardSortMethod, ELeaderboardDisplayType
from steam.enums.emsg import EMsg
from steam.util import _range, chunks
from steam.util.throttle import ConstantRateLimit
class Leaderboards(object):
def __init__(self, *args, **kwargs):
super(Leaderboards, self).__init__(*args, **kwargs)
def get_leaderboard(self, app_id, name):
""".. versionadded:: 0.8.2
Find a leaderboard
:param app_id: application id
:type app_id: :class:`int`
:param name: leaderboard name
:type name: :class:`str`
:return: leaderboard instance
:rtype: :class:`SteamLeaderboard`
:raises: :class:`LookupError` on message timeout or error
"""
message = MsgProto(EMsg.ClientLBSFindOrCreateLB)
message.header.routing_appid = app_id
message.body.app_id = app_id
message.body.leaderboard_name = name
message.body.create_if_not_found = False
resp = self.send_job_and_wait(message, timeout=15)
if not resp:
raise LookupError("Didn't receive response within 15seconds :(")
if resp.eresult != EResult.OK:
raise LookupError(EResult(resp.eresult))
return SteamLeaderboard(self, app_id, name, resp)
class SteamLeaderboard(object):
""".. versionadded:: 0.8.2
Steam leaderboard object.
Generated via :meth:`Leaderboards.get_leaderboard()`
Works more or less like a :class:`list` to access entries.
.. note::
Each slice will produce a message to steam.
Steam and protobufs might not like large slices.
Avoid accessing individual entries by index and instead use iteration or well sized slices.
Example usage:
.. code:: python
lb = client.get_leaderboard(...)
for entry in lb[:100]: # top 100
        print(entry)
"""
ELeaderboardDataRequest = ELeaderboardDataRequest
ELeaderboardSortMethod = ELeaderboardSortMethod
ELeaderboardDisplayType = ELeaderboardDisplayType
app_id = 0
name = '' #: leaderboard name
id = 0 #: leaderboard id
entry_count = 0
sort_method = ELeaderboardSortMethod.NONE #: :class:`steam.enums.common.ELeaderboardSortMethod`
display_type = ELeaderboardDisplayType.NONE #: :class:`steam.enums.common.ELeaderboardDisplayType`
data_request = ELeaderboardDataRequest.Global #: :class:`steam.enums.common.ELeaderboardDataRequest`
def __init__(self, steam, app_id, name, data):
self._steam = steam
self.app_id = app_id
for field in data.DESCRIPTOR.fields:
if field.name.startswith('leaderboard_'):
self.__dict__[field.name.replace('leaderboard_', '')] = getattr(data, field.name)
self.sort_method = ELeaderboardSortMethod(self.sort_method)
self.display_type = ELeaderboardDisplayType(self.display_type)
def __repr__(self):
return "<%s(%d, %s, %d, %s, %s)>" % (
self.__class__.__name__,
self.app_id,
repr(self.name),
len(self),
self.sort_method,
self.display_type,
)
def __len__(self):
return self.entry_count
def get_entries(self, start=0, end=0, data_request=None, steam_ids=None):
"""Get leaderboard entries.
:param start: start entry, not index (e.g. rank 1 is ``start=1``)
:type start: :class:`int`
:param end: end entry, not index (e.g. only one entry then ``start=1,end=1``)
:type end: :class:`int`
:param data_request: data being requested
:type data_request: :class:`steam.enums.common.ELeaderboardDataRequest`
:param steam_ids: list of steam ids when using :attr:`.ELeaderboardDataRequest.Users`
        :type steam_ids: :class:`list`
:return: a list of entries, see ``CMsgClientLBSGetLBEntriesResponse``
:rtype: :class:`list`
:raises: :class:`LookupError` on message timeout or error
"""
message = MsgProto(EMsg.ClientLBSGetLBEntries)
message.body.app_id = self.app_id
message.body.leaderboard_id = self.id
message.body.range_start = start
message.body.range_end = end
message.body.leaderboard_data_request = self.data_request if data_request is None else data_request
if steam_ids:
message.body.steamids.extend(steam_ids)
resp = self._steam.send_job_and_wait(message, timeout=15)
if not resp:
raise LookupError("Didn't receive response within 15seconds :(")
if resp.eresult != EResult.OK:
raise LookupError(EResult(resp.eresult))
if resp.HasField('leaderboard_entry_count'):
self.entry_count = resp.leaderboard_entry_count
return resp.entries
def __getitem__(self, x):
if isinstance(x, slice):
stop_max = len(self)
start = 0 if x.start is None else x.start if x.start >= 0 else max(0, x.start + stop_max)
stop = stop_max if x.stop is None else x.stop if x.stop >= 0 else max(0, x.stop + stop_max)
step = x.step or 1
if step < 0:
start, stop = stop, start
step = abs(step)
if start >= stop: return []
else:
if x < 0: x += self.entry_count
start, stop, step = x, x + 1, 1
if x < 0 or x >= self.entry_count:
raise IndexError('list index out of range')
entries = self.get_entries(start+1, stop)
if isinstance(x, slice):
return [entries[i] for i in _range(0, len(entries), step)]
else:
return entries[0]
def get_iter(self, times, seconds, chunk_size=2000):
"""Make a iterator over the entries
See :class:`steam.util.throttle.ConstantRateLimit` for ``times`` and ``seconds`` parameters.
:param chunk_size: number of entries per request
:type chunk_size: :class:`int`
:returns: generator object
:rtype: :class:`generator`
        The iterator essentially buffers ``chunk_size`` entries, and ensures
we are not sending messages too fast.
For example, the ``__iter__`` method on this class uses ``get_iter(1, 1, 2000)``
"""
def entry_generator():
with ConstantRateLimit(times, seconds, sleep_func=self._steam.sleep) as r:
for entries in chunks(self, chunk_size):
if not entries:
return
for entry in entries:
yield entry
r.wait()
return entry_generator()
def __iter__(self):
return self.get_iter(1, 1, 2000)
|
import os, argparse, time
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dork',help="Enter your dork")
parser.add_argument('-a','--amount',help="Enter amount of site to printout")
arg = parser.parse_args()
class Dorker:
def __init__(self, dork, amount):
self.dork = dork
self.amount = amount
def check_requirements(self):
try:
from googlesearch import search
            if self.dork is None or self.amount is None:
                print(f'Usage: python3 {os.path.basename(__file__)} -d <dork> -a <amount>')
                exit()
            else:
                self.searcher(search)
except ModuleNotFoundError:
print("Installing Required Module!..")
os.system("pip3 install googlesearch-python")
except Exception as err:
print("Error: {}".format(err))
def searcher(self,search):
count=0
print("="*14+"-Dorking-"+"="*14)
print()
for site in search(self.dork, num_results=int(self.amount)):
count+=1
print(f'({count}): {site}')
time.sleep(.1)
print()
print("="*14+"Done"+"="*14)
if __name__=="__main__":
print("""
888~-_ 888 _
888 \\ e88~-_ 888-~\\ 888 e~ ~ e88~~8e 888-~\\
888 | d888 i 888 888d8b d888 88b 888
888 | 8888 | 888 888Y88b 8888__888 888
888 / Y888 ' 888 888 Y88b Y888 , 888
888_-~ "88_-~ 888 888 Y88b "88___/ 888
DORKER BY: Anikin Luke
""")
dork = Dorker(arg.dork, arg.amount)
dork.check_requirements()
|
# This script's purpose is to train a preliminary CNN for tracking by Keras
# Author: Billy Li
# Email: li000400@umn.edu
# import starts
import sys
from pathlib import Path
import csv
import random
import pickle
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model, initializers, regularizers
from tensorflow.keras.layers import(
Input,
Dense,
Conv2D,
BatchNormalization,
MaxPool2D,Dropout,
Flatten,
TimeDistributed,
Embedding,
Reshape,
Softmax
)
from tensorflow.keras.optimizers import Adam
import unet05 as unet
util_dir = Path.cwd().parent.joinpath('util')
sys.path.insert(1, str(util_dir))
from Config import extractor_config as Config
from mu2e_output import *
from Loss import unmasked_cce
from Metric import *
### import ends
def photographic_train(C):
pstage("Start Training")
### load inputs
pinfo('Loading processed arrays')
X = np.load(C.X_npy)
Y = np.load(C.Y_npy)
    pinfo('Standardizing input arrays')
mean = X.mean()
std = X.std()
std_inv = 1/std
X = (X-mean)*std_inv
### outputs
pinfo('Configuring output paths')
cwd = Path.cwd()
data_dir = cwd.parent.parent.joinpath('data')
weights_dir = cwd.parent.parent.joinpath('weights')
model_weights = weights_dir.joinpath(C.model_name+'.h5')
record_file = data_dir.joinpath(C.record_name+'.csv')
### build the model
# input_layer = Input(shape=(X.shape[1],X.shape[2],1))
# x = Conv2D(512, (3, 3), padding='same', activation='relu', kernel_initializer=initializers.RandomNormal(stddev=0.01), kernel_regularizer=regularizers.l2(1e-8))(input_layer)
# x = BatchNormalization()(x)
# #x = MaxPool2D(pool_size=(2, 2), padding='same')(x)
# x = Dropout(0.3)(x)
#
# x = Conv2D(128, (3, 3), padding='same', activation='relu', kernel_initializer=initializers.RandomNormal(stddev=0.01), kernel_regularizer=regularizers.l2(1e-8))(x)
# x = BatchNormalization()(x)
# #x = MaxPool2D(pool_size=(2, 2), padding='same')(x)
# x = Dropout(0.3)(x)
#
# x = Conv2D(256, (3, 3), padding='same', activation='relu', kernel_initializer=initializers.RandomNormal(stddev=0.01), kernel_regularizer=regularizers.l2(1e-8))(x)
# x = BatchNormalization()(x)
# #x = MaxPool2D(pool_size=(2, 2), padding='same')(x)
# x = Dropout(0.3)(x)
#
# x = Conv2D(512, (3, 3), padding='same', activation='relu', kernel_initializer=initializers.RandomNormal(stddev=0.01), kernel_regularizer=regularizers.l2(1e-8))(x)
# x = BatchNormalization()(x)
# #x = MaxPool2D(pool_size=(2, 2), padding='same')(x)
# x = Dropout(0.3)(x)
#
# x = Conv2D(256, (3, 3), padding='same', activation='relu', kernel_initializer=initializers.RandomNormal(stddev=0.01), kernel_regularizer=regularizers.l2(1e-8))(x)
# x = BatchNormalization()(x)
# #x = MaxPool2D(pool_size=(2, 2), padding='same')(x)
# x = Dropout(0.3)(x)
#
# x = Conv2D(128, (3, 3), padding='same', activation='relu', kernel_initializer=initializers.RandomNormal(stddev=0.01), kernel_regularizer=regularizers.l2(1e-8))(x)
# x = BatchNormalization()(x)
# #x = MaxPool2D(pool_size=(2, 2), padding='same')(x)
# x = Dropout(0.3)(x)
# #
# #
# x = Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer=initializers.RandomNormal(stddev=0.01), kernel_regularizer=regularizers.l2(1e-8))(x)
# x = BatchNormalization()(x)
# #x = MaxPool2D(pool_size=(2, 2), padding='same')(x)
# x = Dropout(0.3)(x)
# #
# x = Conv2D(3, (3, 3), padding='same', activation='relu', kernel_initializer=initializers.RandomNormal(stddev=0.01), kernel_regularizer=regularizers.l2(1e-8))(x)
# x = BatchNormalization()(x)
# #x = MaxPool2D(pool_size=(2, 2), padding='same')(x)
# x = Dropout(0.3)(x)
#
#
#
# output_layer = Softmax()(x)
# model = Model(inputs=input_layer, outputs=output_layer)
input_shape = (X.shape[1], X.shape[2], 1)
model = unet.get_architecture(input_shape,3)
print(model.summary())
# setup loss
cce = unmasked_cce
# setup metric
ca = unmasked_categorical_accuracy
# setup optimizer
adam = Adam(1e-4)
# setup callback
CsvCallback = tf.keras.callbacks.CSVLogger(str(record_file), separator=",", append=False)
LRCallback = tf.keras.callbacks.ReduceLROnPlateau(
monitor='loss', factor=0.1, patience=10, verbose=0, mode='auto',
min_delta=1e-6, cooldown=0, min_lr=1e-7
)
# print(cnn.summary())
    model.compile(optimizer=adam,
                  metrics=[ca],
                  loss=cce)
model.fit(x=X, y=Y,\
validation_split=0.2,\
shuffle=True,\
batch_size=64, epochs=200,\
callbacks = [CsvCallback, LRCallback])
model.save(model_weights)
pcheck_point('Finished Training')
return C
if __name__ == "__main__":
pbanner()
psystem('Photographic track extractor')
pmode('Testing Feasibility')
pinfo('Input DType for testing: StrawDigiMC')
# load pickle
cwd = Path.cwd()
pickle_path = cwd.joinpath('photographic.train.config.pickle')
C = pickle.load(open(pickle_path,'rb'))
# initialize parameters
model_name = "photographic_05"
record_name = "photographic_record_05"
# setup parameters
C.set_outputs(model_name, record_name)
photographic_train(C)
|
henn1 = 12
henn2 = 14
henn3 = 124
if henn1 == henn2 and henn2 == henn3:
print(1)
elif henn1 == henn2 or henn2 == henn3 or henn1 == henn3:
print(2)
else:
print(3)
|
import importlib.resources as pkg_resources
import requests
from functools import cache, total_ordering
from typing import Any, Callable
import adventofcode2021.data
session = requests.Session()
cookie = pkg_resources.read_text(adventofcode2021.data, "cookie.txt").strip()
requests.utils.add_dict_to_cookiejar(session.cookies, {"session": cookie})
@cache
def get_input_data(day: int) -> list[str]:
url = f"https://adventofcode.com/2021/day/{day}/input"
text = session.get(url).text
return [line.strip() for line in text.split("\n") if line.strip() != '']
@total_ordering
class DayTemplate:
def __init__(self, day: int, process: Callable[[str], Any] = None):
"""
        Create a new day of the advent calendar.
`day`: an int representing which day this is, for sorting and dynamically grabbing data.
`process`: a function that can take a string and return any value, since all input data
is a list of strings and that might not be what we want.
"""
self.day = day
self.link = f"https://adventofcode.com/2021/day/{self.day}"
self._data = get_input_data(self.day)
self.process = process if process is not None else lambda x: x
@property
def data(self) -> list:
return [self.process(i) for i in self._data]
def part_1(self) -> Any:
print("Calculating part 1...")
def part_2(self) -> Any:
print("Calculating part 2...")
    def __lt__(self, other) -> bool:
        return self.day < other.day
    def __eq__(self, other) -> bool:
        return self.day == other.day
def __str__(self) -> str:
return f"Day {self.day} | {self.part_1()} | {self.part_2()}"
class ThisShouldNeverHappenExeception(Exception):
pass
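# A minimal usage sketch (hypothetical Day1, not part of the original module):
# parse each input line to int via `process` and override the part methods.
class Day1(DayTemplate):
    def __init__(self):
        super().__init__(day=1, process=int)

    def part_1(self) -> int:
        # count how many consecutive measurements increase
        data = self.data
        return sum(b > a for a, b in zip(data, data[1:]))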
|
import math
import astropy as ast
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import os
from astropy.time import Time
from decimal import *
from pylab import *
from astropy import units as u
import warnings
from scipy.stats import chi2,chisquare, norm
import pandas as pd
class PeriodicityTest():
'''
A class to apply and store the information of the periodicty tests.
Parameters
----------
pulsar_phases : PulsarPhases object
Attributes
----------
chisqr_test :
Results of the chi square tests. Format: [Statistic, p_value, nsigmas]
number : int
Number of phases used in the analysis
cos : list of float
Cosine moments of the list of phases
sin: list of float
Sine moments of the list of phases
Zntest_res:
Results of the Zn tests. Format: [Statistic, p_value, nsigmas]. Default is n=10
Htest_res:
Results of the H test. Format: [Statistic, p_value, nsigmas]
'''
def __init__(self,pulsar_phases):
self.apply_all_tests(pulsar_phases)
##############################################
#EXECUTION
#############################################
def apply_all_tests(self,pulsar_phases):
#Apply chi square test using Lightcurve Class
self.chisqr_res=pulsar_phases.histogram.chi_sqr_pulsar_test()
#Apply unbinned statistical tests
self.moments(pulsar_phases)
self.apply_moment_tests()
def apply_moment_tests(self):
#Default n=10 for zn test
self.zn_test(n=10)
self.Htest_res=self.H_test()
def moments(self,pulsar_phases,n=25):
#Transform list to be between 0 and 2pi
plist=(pulsar_phases.phases)*2*np.pi
#Calculate moments
k=np.arange(1,n+1)
        cos_moment=np.sum(np.cos(np.outer(plist,k)),axis=0)
        sin_moment=np.sum(np.sin(np.outer(plist,k)),axis=0)
#Store the information
self.number=len(plist)
self.cos=cos_moment
self.sin=sin_moment
return(self.number,self.cos,self.sin)
def zn_test(self,n=10):
        #Indices 0..n-1 hold the moments for harmonics k=1..n
        cos_moment=self.cos[0:n]
        sin_moment=self.sin[0:n]
self.Zn_n=n
#Calculate statistic and pvalue
        Zn=2/self.number*np.sum(np.power(cos_moment,2)+np.power(sin_moment,2))
pvalue_zn=chi2.sf(float(Zn),2*n)
sigmas_zn=norm.isf(pvalue_zn, loc=0, scale=1)
#Store information
self.Zntest_res=Zn,pvalue_zn,sigmas_zn
return(self.Zntest_res)
def H_test(self):
bn=0.398
h=[]
#Calculate statistic and pvalue
for m in np.arange(1,len(self.cos)):
            h.append(2/self.number*np.sum(np.power(self.cos[0:m],2)+np.power(self.sin[0:m],2))-4*m+4)
H=max(h)
pvalue_H=np.exp(-bn*H)
sigmas_H=norm.isf(float(pvalue_H), loc=0, scale=1)
#Store information
self.Htest_res=H,pvalue_H,sigmas_H
return(self.Htest_res)
##############################################
#RESULTS
#############################################
def show_Pstats(self):
        return(pd.DataFrame(data={'Chi_square_test':self.chisqr_res,'Zn_test':self.Zntest_res,'H_test':self.Htest_res},index=["Statistic", "p-value", r"Number of $\sigma$"]))
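# Standalone sketch of the Zn statistic on synthetic phases (my addition, not
# part of the class above): for uniform noise Zn follows a chi-square with 2n
# degrees of freedom, so the printed p-value should be roughly uniform in [0,1].
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    plist = rng.uniform(0, 1, 1000) * 2 * np.pi
    n = 10
    k = np.arange(1, n + 1)
    cos_m = np.sum(np.cos(np.outer(plist, k)), axis=0)
    sin_m = np.sum(np.sin(np.outer(plist, k)), axis=0)
    Zn = 2 / len(plist) * np.sum(cos_m**2 + sin_m**2)
    print(Zn, chi2.sf(Zn, 2 * n))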
|
from django import forms
from .models import AuthUser,Books,Storages,Comments,Reserves
class ProfileForm(forms.ModelForm):
class Meta:
model = AuthUser
# fields = [ 'id', 'password','last_login','is_superuser','username',
# 'first_name', 'last_name','email','is_staff','is_active','date_joined']
fields = [ 'first_name', 'last_name','email' ]
# class BookForm(forms.ModelForm):
# class Meta:
# model = Books
# fields = [ 'isbn', 'title', 'authors', 'publisher', 'price' ]
# class StorageForm(forms.ModelForm):
# class Meta:
# model = Storages
# fields = [ 'stono', 'isbn', 'lno']
# class AddBookForm(forms.ModelForm):
# class Meta:
# model = Books, Storages
# fields = [ 'isbn', 'title', 'authors', 'publisher', 'price', 'lno']
class AddBookForm(forms.Form):
isbn = forms.CharField(max_length=50)
title = forms.CharField(max_length=50)
authors = forms.CharField(max_length=50)
publisher = forms.CharField(max_length=50)
price = forms.FloatField()
lno = forms.ChoiceField(choices=(('1', 'Lee Shau Kee Library'),
('2', 'Liberal Arts Library'),
('3', 'Science Library'),
('4', 'Zhangjiang Library')))
#lno = forms.IntegerField(max_value=4,min_value=1)
lno.widget.attrs.update({'class':'dropdown-trigger waves-effect'})
class CommentForm(forms.ModelForm):
class Meta:
model = Comments
fields = [ 'text']
class ReserveForm(forms.Form):
lno = forms.ChoiceField(choices=(('1', 'Lee Shau Kee Library'),
('2', 'Liberal Arts Library'),
('3', 'Science Library'),
('4', 'Zhangjiang Library')))
#lno = forms.IntegerField(max_value=4,min_value=1)
lno.widget.attrs.update({'class':'dropdown-trigger waves-effect'})
class SearchBookForm(forms.Form):
text=forms.CharField(max_length=50, required=False)
|
import json
import re
from collections import ChainMap
from functools import partial
from pathlib import Path
from typing import Any, Callable, Dict
import yaml
from jinja2 import UndefinedError
from jinja2.sandbox import SandboxedEnvironment
from plumbum.cli.terminal import ask, choose, prompt
from plumbum.colors import bold, info, italics
from yamlinclude import YamlIncludeConstructor
from ..tools import get_jinja_env, printf_exception
from ..types import AnyByStrDict, Choices, OptStrOrPath, PathSeq, StrOrPath
from .objects import DEFAULT_DATA, EnvOps, UserMessageError
__all__ = ("load_config_data", "query_user_data")
class ConfigFileError(ValueError):
pass
class InvalidConfigFileError(ConfigFileError):
def __init__(self, conf_path: Path, quiet: bool):
msg = str(conf_path)
printf_exception(self, "INVALID CONFIG FILE", msg=msg, quiet=quiet)
super().__init__(msg)
class MultipleConfigFilesError(ConfigFileError):
def __init__(self, conf_paths: PathSeq, quiet: bool):
msg = str(conf_paths)
printf_exception(self, "MULTIPLE CONFIG FILES", msg=msg, quiet=quiet)
super().__init__(msg)
class InvalidTypeError(TypeError):
pass
def load_yaml_data(
conf_path: Path, quiet: bool = False, _warning: bool = True
) -> AnyByStrDict:
YamlIncludeConstructor.add_to_loader_class(
loader_class=yaml.FullLoader, base_dir=conf_path.parent
)
try:
with open(conf_path) as f:
return yaml.load(f, Loader=yaml.FullLoader)
except yaml.parser.ParserError as e:
raise InvalidConfigFileError(conf_path, quiet) from e
def parse_yaml_string(string: str) -> Any:
"""Parse a YAML string and raise a ValueError if parsing failed.
This method is needed because :meth:`prompt` requires a ``ValueError``
    to repeat failed questions.
"""
try:
return yaml.safe_load(string)
except yaml.error.YAMLError as error:
raise ValueError(str(error))
def load_config_data(
src_path: StrOrPath, quiet: bool = False, _warning: bool = True
) -> AnyByStrDict:
"""Try to load the content from a `copier.yml` or a `copier.yaml` file.
"""
conf_paths = [
p
for p in Path(src_path).glob("copier.*")
if p.is_file() and re.match(r"\.ya?ml", p.suffix, re.I)
]
if len(conf_paths) > 1:
raise MultipleConfigFilesError(conf_paths, quiet=quiet)
elif len(conf_paths) == 1:
return load_yaml_data(conf_paths[0], quiet=quiet, _warning=_warning)
else:
return {}
def load_answersfile_data(
dst_path: StrOrPath, answers_file: OptStrOrPath = None,
) -> AnyByStrDict:
"""Load answers data from a `$dst_path/$answers_file` file if it exists."""
try:
with open(Path(dst_path) / (answers_file or ".copier-answers.yml")) as fd:
return yaml.safe_load(fd)
except FileNotFoundError:
return {}
def cast_answer_type(answer: Any, type_fn: Callable) -> Any:
"""Cast answer to expected type."""
# Skip casting None into "None"
if type_fn is str and answer is None:
return answer
# Parse correctly bools as 1, true, yes...
if type_fn is bool and isinstance(answer, str):
return parse_yaml_string(answer)
try:
return type_fn(answer)
except (TypeError, AttributeError):
# JSON or YAML failed because it wasn't a string; no need to convert
return answer
def render_value(value: Any, env: SandboxedEnvironment, context: AnyByStrDict) -> str:
"""Render a single templated value using Jinja.
If the value cannot be used as a template, it will be returned as is.
"""
try:
template = env.from_string(value)
except TypeError:
# value was not a string
return value
try:
return template.render(**context)
except UndefinedError as error:
raise UserMessageError(str(error)) from error
def render_choices(
choices: Choices, env: SandboxedEnvironment, context: AnyByStrDict
) -> Choices:
"""Render a list or dictionary of templated choices using Jinja."""
render = partial(render_value, env=env, context=context)
if isinstance(choices, dict):
choices = {render(k): render(v) for k, v in choices.items()}
elif isinstance(choices, list):
for i, choice in enumerate(choices):
if isinstance(choice, (tuple, list)) and len(choice) == 2:
choices[i] = (render(choice[0]), render(choice[1]))
else:
choices[i] = render(choice)
return choices
def query_user_data(
questions_data: AnyByStrDict,
last_answers_data: AnyByStrDict,
forced_answers_data: AnyByStrDict,
ask_user: bool,
envops: EnvOps,
) -> AnyByStrDict:
"""Query the user for questions given in the config file."""
type_maps: Dict[str, Callable] = {
"bool": bool,
"float": float,
"int": int,
"json": json.loads,
"str": str,
"yaml": parse_yaml_string,
}
env = get_jinja_env(envops=envops)
result: AnyByStrDict = {}
defaults: AnyByStrDict = {}
_render_value = partial(
render_value,
env=env,
context=ChainMap(result, forced_answers_data, defaults, DEFAULT_DATA),
)
_render_choices = partial(
render_choices,
env=env,
context=ChainMap(result, forced_answers_data, defaults, DEFAULT_DATA),
)
for question, details in questions_data.items():
# Get question type; by default let YAML decide it
type_name = _render_value(details.get("type", "yaml"))
try:
type_fn = type_maps[type_name]
except KeyError:
raise InvalidTypeError()
# Get default answer
ask_this = ask_user
default = cast_answer_type(_render_value(details.get("default")), type_fn)
defaults[question] = default
try:
# Use forced answer
answer = forced_answers_data[question]
ask_this = False
except KeyError:
# Get default answer
answer = last_answers_data.get(question, default)
if ask_this:
# Generate message to ask the user
emoji = "🕵️" if details.get("secret", False) else "🎤"
message = f"\n{bold | question}? Format: {type_name}\n{emoji} "
if details.get("help"):
message = (
f"\n{info & italics | _render_value(details['help'])}{message}"
)
# Use the right method to ask
if type_fn is bool:
answer = ask(message, answer)
elif details.get("choices"):
choices = _render_choices(details["choices"])
answer = choose(message, choices, answer)
else:
answer = prompt(message, type_fn, answer)
if answer != details.get("default", default):
result[question] = cast_answer_type(answer, type_fn)
return result
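# Illustrative checks of the casting helpers above (my addition, not part of
# copier itself); guarded so importing the module stays side-effect free.
if __name__ == "__main__":
    assert cast_answer_type("yes", bool) is True   # "yes" is parsed as a YAML bool
    assert cast_answer_type(None, str) is None     # None is never cast to "None"
    assert cast_answer_type("3", int) == 3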
|
import matplotlib.pyplot as plt
import cv2
import numpy as np
def disp_to_color(D, max_disp):
D = D.astype(float)
D_nml = D.reshape(D.size) / max_disp
D_nml[D_nml>1] = 1
I = disp_map(D_nml).reshape(D.shape+(3,))
return I
def disp_map(I):
map = np.array([[0,0,0,114],[0,0,1,185],[1,0,0,114],[1,0,1,174]\
,[0,1,0,114],[0,1,1,185],[1,1,0,114],[1,1,1,0]], dtype=float)
bins = map[:-1,3]
cbins = bins.cumsum()
bins = bins / cbins[-1]
cbins = cbins[:-1] / cbins[-1]
    ind = np.sum(I.reshape(1,I.size) > cbins.reshape(cbins.size,1),axis=0)
ind[ind > 6.0] = 6.0
bins = 1.0 / (bins+1e-20)
cbins = np.append(np.array(0),cbins)
I = (I - cbins[ind.astype(int)]) * bins[ind.astype(int)]
I = map[ind,:3] * (1-I.reshape(I.size,1)) + map[ind+1,:3] * I.reshape(I.size,1)
I[I<0] = 0
I[I>1] = 1
return I
def error_colormap():
cols = np.array([ [0/3.0, 0.1875, 49, 54, 149],
[0.1875/3.0, 0.375, 69, 117, 180],
[0.375/3.0, 0.75, 116, 173, 209],
[0.75/3.0, 1.5, 171, 217, 233],
[1.5/3.0, 3, 224, 243, 248],
[3/3.0, 6, 254, 224, 144],
[6/3.0, 12, 253, 174, 97],
[12/3.0, 24, 244, 109, 67],
[24/3.0, 48, 215, 48, 39],
[48/3.0, np.inf, 165, 0, 38 ]])
cols[:,2:] = cols[:,2:] / 255
return cols
def disp_error_image(D_gt, D_est, tau, dilate_radius=1):
E_D_gt_val = disp_error_map(D_gt,D_est)
E0 = E_D_gt_val[0]
D_val = E_D_gt_val[1]
mask = E0/tau[0] > (E0 / abs(D_gt))/tau[1]
E = E0 / tau[0]
E[mask] = ((E0 / abs(D_gt))/tau[1])[mask]
    cols = error_colormap()
D_err = np.zeros(D_gt.shape+(3,))
for i in range(cols.shape[0]):
(v,u) = np.where((D_val>0)*(E>=cols[i,0])*(E<=cols[i,1]))
D_err[v,u,0] = cols[i,2]
D_err[v,u,1] = cols[i,3]
D_err[v,u,2] = cols[i,4]
    # Equivalent of MATLAB's imdilate(D_err, strel('disk', dilate_radius))
    # using an elliptical OpenCV kernel of the same radius.
    if dilate_radius > 0:
        ksize = 2 * dilate_radius + 1
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (ksize, ksize))
        D_err = cv2.dilate(D_err, kernel)
return D_err
def disp_error_map(D_gt, D_est):
D_gt_val = (D_gt>0)
E = abs(D_gt-D_est)
E[D_gt_val==0] = 0
return E, D_gt_val
# Metrics
def D1_error(D_gt,D_est,tau):
E = abs(D_gt-D_est)
n_err = ((D_gt>0)*(E>tau[0])*(E/(D_gt+1e-10)>tau[1])).sum().astype(float)
n_total = (D_gt>0).sum()
d_err = n_err/n_total
return d_err
def end_point_error(D_gt, D_est):
E = abs(D_gt-D_est)
n_total = (D_gt>0).sum()
E[D_gt == 0] = 0
return E.sum() / n_total
def N_pixel_error(D_gt, D_est, n):
E = abs(D_gt-D_est)
n_total = (D_gt>0).sum()
n_err = ((D_gt>0)*(E>n)).sum().astype(float)
return n_err / n_total
def A_percent_error_quantile(D_gt, D_est, A):
E = abs(D_gt-D_est).reshape(D_gt.size)
E.sort()
return E[int(A*E.size/100.0)]
def sizes_equal(size1, size2):
return size1[0] == size2[0] and size1[1] == size2[1]
def DisplayError(indices, errs, err_names):
for ind in indices:
print((err_names[ind]+(': %f' % errs[ind])))
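if __name__ == "__main__":
    # Tiny worked example (my addition): of the three valid GT pixels, only the
    # 20 px one is off by more than 3 px and more than 5 %, so D1 prints 1/3.
    D_gt = np.array([[10.0, 20.0], [40.0, 0.0]])   # 0 marks invalid ground truth
    D_est = np.array([[10.5, 25.0], [40.0, 7.0]])
    print(D1_error(D_gt, D_est, tau=(3, 0.05)))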
|
#!/usr/bin/env python3
''' tsacheck - Check integrity of downloaded files '''
import sys
import os
import sqlite3
import subprocess
import hashlib
# --------------------------------------------------------------------------- #
def check(argv):
'''Check integrity of downloaded files
:param argv: The command line arguments given by the user
:type argv: list
'''
    #Get directory
    checkFile = False
    try:
        if argv[1] == '-c':
            checkFile = True
            argv.pop(1)
        directory = os.path.normpath(os.path.abspath(argv[1]))
    except IndexError:
        #No directory given: fall back to the current one, keeping the -c flag
        directory = os.getcwd()
dbPath = os.path.join(directory, "archive.db")
if not os.path.isfile(dbPath):
print("ERROR: No archive database found!")
return
try:
#Connect to database
db = connectDB(dbPath)
r = db.execute("SELECT id,name,checksum FROM videos;")
for item in r.fetchall():
filePath = os.path.join(directory, item[1])
#Check if file exists
if not os.path.isfile(filePath):
print("ERROR: File {} not found".format(item[1]))
continue
#Check file integrity
if checkFile:
cmd = ["ffmpeg", "-v", "error", "-i", filePath, "-f", "null", "-"]
out, _ = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
if out:
print("ERROR: File \"{}\" corrupt!".format(item[1]))
else:
print("File \"{}\" check passed".format(item[1]))
#Calculate checksum
sha256 = hashlib.sha256()
with open(filePath, "rb") as vf:
for chunk in iter(lambda: vf.read(4096), b""):
sha256.update(chunk)
checksum = sha256.hexdigest()
if item[2]:
#Compare checksums
if checksum == item[2]:
print("File \"{}\" checksums match".format(item[1]))
else:
print("ERROR: File \"{}\" checksums mismatch".format(item[1]))
else:
print("File \"{}\" no checksum saved yet".format(item[1]))
db.execute("UPDATE videos SET checksum = ? WHERE id = ?;", (checksum, item[0]))
#Close database
closeDB(db)
except sqlite3.Error as e:
print("ERROR: " + e)
return
# ########################################################################### #
# --------------------------------------------------------------------------- #
def connectDB(path):
'''Connect to a database
:param path: The path of the database
:type path: string
    :raises sqlite3.Error: Unable to connect to database
:returns: Connection to the database
:rtype: sqlite3.Connection
'''
#Connect database
dbCon = sqlite3.connect(path)
#Return database connection
return dbCon
# ########################################################################### #
# --------------------------------------------------------------------------- #
def closeDB(dbCon):
'''Close the connection to a database
:param dbCon: Connection to the database
:type dbCon: sqlite3.Connection
    :raises sqlite3.Error: Unable to close database
'''
if dbCon:
dbCon.commit()
dbCon.close()
# ########################################################################### #
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
try:
check(sys.argv)
except KeyboardInterrupt:
print("Aborted!")
# ########################################################################### #
|
#!/usr/bin/env python
# encoding: utf-8
#
# Utility.py
# Copyright (c) 2012 Thorsten Philipp <kyrios@kyri0s.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import unittest
import tempfile
from subprocess import *
class Cmd(object):
"""Run a binary"""
def __init__(self,command,args=""):
super(Cmd, self).__init__()
self.command = command
self.arguments = args
self.isError = 0
self.error = []
self.response = []
def run(self):
"""Execute the command with all it's arguments."""
cmd = self.command + " " + self.arguments
process = Popen(cmd,stdout=PIPE,stderr=PIPE,shell="true")
stdout, stderr = process.communicate()
self.response = stdout.splitlines()
self.error = stderr.splitlines()
if len(self.error) > 0:
self.isError = 1
def response_asString(self):
"""return the response as a string (instead of line by line)"""
return("\n".join(self.response))
def filetemp():
(fd, fname) = tempfile.mkstemp()
return (os.fdopen(fd, "w+b"), fname)
class UtilityTests(unittest.TestCase):
def setUp(self):
pass
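    def test_echo(self):
        # Minimal check of Cmd (my addition): `echo` writes only to stdout,
        # so no error lines should be captured.
        cmd = Cmd("echo", "hello")
        cmd.run()
        self.assertEqual(cmd.response_asString(), "hello")
        self.assertEqual(cmd.isError, 0)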
if __name__ == '__main__':
unittest.main()
|
import os
import imutils
from flask import Flask, request, render_template, redirect, url_for, jsonify
from random import randint
import numpy as np
import secrets
from BGRemoval import *
from orientation import *
from smile import smile_inference
from crop_rotate import *
from poseNet import poseNet
template_path = os.path.join('templates')
static_path = os.path.join('static')
app = Flask(__name__, template_folder=template_path, static_folder=static_path)
dir_path = os.path.join('temp/')
@app.route('/')
def home():
return render_template('index.html')
@app.route('/start', methods=['POST'])
def start():
if 'files' not in request.files:
return "file input error"
img_path = dir_path + "temp.jpg"
uploaded_files = request.files.getlist("files")
file = uploaded_files[0]
file.save(img_path)
width = int(int(request.form['width']) * 3.7795275591)
height = int(int(request.form['height']) * 3.7795275591)
red = int(request.form['red'])
green = int(request.form['green'])
blue = int(request.form['blue'])
single_face = False
smile_check = False
    output = orientation(img_path)
    if output is None:
        # Without a detected face, `img` would be undefined below, so bail out.
        print("No human faces detected")
        return {"error": "No human faces detected"}
    img, b = output
    if not b:
        print("More than one face detected")
    else:
        single_face = True
        smile_status, value = smile_inference(img)
BG = BGRemove()
rgb = (red,green,blue)
image = BG.inference(img,rgb)
detector = MTCNN()
rects = detector.detect_faces(image)
aligned_face = AlignFace(image,rects)
crop_img = poseNet(aligned_face)
cv2.imwrite(dir_path + "crop_img.jpg", crop_img)
data = {}
    if not single_face:
        data["single_face"] = False
    else:
        data["single_face"] = True
        data["smile"] = [smile_status, value]
return data
if __name__ == "__main__":
port = 7000
host = '0.0.0.0'
app.run(host=host, port=port, threaded=True)
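# Example request against a local run (my addition; the field names match the
# form fields read in start() above, face.jpg is a hypothetical input file):
#   curl -F "files=@face.jpg" -F "width=35" -F "height=45" \
#        -F "red=255" -F "green=255" -F "blue=255" http://localhost:7000/start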
|
from grazyna.utils import register, init_plugin
from grazyna.utils.event_loop import loop
from grazyna import format
from grazyna_rpg.db import get_session, get_engine
from grazyna_rpg.randomized_map import RandomMap
from grazyna_rpg.world_manager import WorldManager, PathNotFound
from grazyna_rpg.enums import DirectionEnum
from json import load as load_json
@init_plugin
def on_load(plugin, protocol):
config = plugin.config
engine = get_engine(config['db_uri'])
plugin.temp['s'] = get_session(engine)
#with open(config['world_path']) as fp:
# json = load_json(fp)
plugin.temp['world'] = world = WorldManager(RandomMap())
world.create_connections_with_levels()
world.actual_level = world.seek_respawn()
@loop(time_config_key="tick_time", default=10)
def tick(protocol, plugin, config):
pass
@register(cmd='go')
def go(bot, where: DirectionEnum):
try:
bot.plugin.temp['world'].move(where)
except PathNotFound:
        bot.say("direction %s doesn't exist" % where.name)
else:
where_i_am(bot)
# shortcut commands, one per direction
for direction in DirectionEnum:
    x = register(
        reg='^%s *$' % direction.value,
        name='go_%s' % direction.name,
    )(lambda bot, direction=direction: go(bot, direction))  # default arg pins the current direction
    print(x.name)
@register(cmd='where')
def where_i_am(bot):
level = bot.plugin.temp['world'].actual_level
bot.say('{name}{type} dirs: {dirs}'.format(
name=format.bold(level.name),
type=format.bold(format.color(
' (%s)' % level.type.name if level.name else level.type.name,
format.color.white
)),
dirs=' '.join(
format.color(direction.value, format.color.light_green)
for direction in level.directions
),
))
@register(cmd='eat')
def eat(bot, item):
pass
@register(cmd='kill')
def kill(bot, who):
pass
@register(reg=r'^(\w+)(!{1,3})')
def shout(bot, spell, power: lambda x: len(x)):
pass
|
from __future__ import annotations
from discord.ui import View
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from imp.better import BetterBot
class BetterView(View):
def __init__(
self,
client: BetterBot,
timeout: Optional[int] = None
):
super().__init__(timeout=timeout)
self.client: BetterBot = client
|
from pathlib import Path
import io
import sys
import os
import math
import numpy as np
import requests
import json
import kornia.augmentation as K
from base64 import b64encode
from omegaconf import OmegaConf
import imageio
from PIL import ImageFile, Image
ImageFile.LOAD_TRUNCATED_IMAGES = True
from taming.models import cond_transformer, vqgan
import transformers
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
try:
import clip
except ImportError:
from CLIP import clip
from utils import *
from upscale_dream import ScaleImage
class ReplaceGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, x_forward, x_backward):
ctx.shape = x_backward.shape
return x_forward
@staticmethod
def backward(ctx, grad_in):
return None, grad_in.sum_to_size(ctx.shape)
class ClampWithGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, input, min, max):
ctx.min = min
ctx.max = max
ctx.save_for_backward(input)
return input.clamp(min, max)
@staticmethod
def backward(ctx, grad_in):
input, = ctx.saved_tensors
return grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0), None, None
class Text2Image:
'''
Deep dream text with torch, vqgan, esrgan, clip and diffusion.
Adjust settings for more illusive dreams.
'''
def __init__(self, settings={}):
self.settings = {
# required
'seed': -1,
'prompt': '',
'width': 256,
'height': 256,
'clip_model': 'ViT-B/32', # available ['RN50', 'RN101', 'RN50x4', 'RN50x16', 'ViT-B/32','ViT-B/16']
'vqgan_model': 'vqgan_imagenet_f16_16384', # available ["vqgan_imagenet_f16_16384", "vqgan_imagenet_f16_1024", "wikiart_16384", "coco", "sflckr"]
'initial_image': '',
'target_images': '',
'input_images': '',
'output_folder': 'vqgan-steps',
'output_name': '',
'noise_prompt_seeds': [],
'noise_prompt_weights': [],
'key_frames': True,
'generate_video': False,
'upscale_dream': False,
'upscale_strength': 2,
'video_length': 60, # seconds
'target_fps': 30,
'iterations_per_frame': 3,
'angle': 0,
'zoom': 1,
'translation_x': 0,
'translation_y': 0,
'display_frequency': 10,
# additional
'vq_init_weight': 0.0,
'vq_step_size': 0.1,
'vq_cutn': 64,
'vq_cut_pow': 1.0,
# model links
'pretrained_models': {
'vqgan_imagenet_f16_1024_ckpt': 'https://heibox.uni-heidelberg.de/f/140747ba53464f49b476/?dl=1',
'vqgan_imagenet_f16_1024_yaml': 'https://heibox.uni-heidelberg.de/f/6ecf2af6c658432c8298/?dl=1',
'vqgan_imagenet_f16_16384_ckpt': 'https://heibox.uni-heidelberg.de/f/867b05fc8c4841768640/?dl=1',
'vqgan_imagenet_f16_16384_yaml': 'https://heibox.uni-heidelberg.de/f/274fb24ed38341bfa753/?dl=1',
'coco_ckpt': 'https://dl.nmkd.de/ai/clip/coco/coco.ckpt',
'coco_yaml': 'https://dl.nmkd.de/ai/clip/coco/coco.yaml',
'wikiart_16384_ckpt': 'http://eaidata.bmk.sh/data/Wikiart_16384/wikiart_f16_16384_8145600.ckpt',
'wikiart_16384_yaml': 'http://eaidata.bmk.sh/data/Wikiart_16384/wikiart_f16_16384_8145600.yaml',
'sflckr_ckpt': 'https://heibox.uni-heidelberg.de/d/73487ab6e5314cb5adba/files/?p=%2Fcheckpoints%2Flast.ckpt&dl=1',
'sflckr_yaml': 'https://heibox.uni-heidelberg.de/d/73487ab6e5314cb5adba/files/?p=%2Fconfigs%2F2020-11-09T13-31-51-project.yaml&dl=1',
}
}
for key, value in settings.items():
self.settings[key] = value
self.dir_path = os.path.dirname(os.path.abspath(__file__))
os.makedirs(f"{self.dir_path}/{self.settings['output_folder']}/", exist_ok=True)
self.down_pretrained_models()
self.replace_grad = ReplaceGrad.apply
self.clamp_with_grad = ClampWithGrad.apply
self.model_name = self.settings['vqgan_model']
self.total_iterations = self.settings['video_length'] * self.settings['target_fps']
self.clean_cache()
if self.settings['seed'] == -1:
self.seed = None
else:
self.seed = self.settings['seed']
if self.settings['key_frames']:
try:
self.prompts = self.settings['prompt']
self.prompts_series = split_key_frame_text_prompts(
parse_key_frames(self.settings['prompt']),
self.total_iterations
)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `prompt` correctly for key frames.\n"
)
self.prompts = f"0: ({self.settings['prompt']}:1)"
                self.prompts_series = split_key_frame_text_prompts(
                    parse_key_frames(self.prompts),
                    self.total_iterations
                )
try:
self.target_images = self.settings['target_images']
self.target_images_series = split_key_frame_text_prompts(
parse_key_frames(self.settings['target_images']),
self.total_iterations
)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `target_images` correctly for key frames.\n"
)
self.target_images = f"0: ({self.settings['target_images']}:1)"
                self.target_images_series = split_key_frame_text_prompts(
                    parse_key_frames(self.target_images),
                    self.total_iterations
                )
try:
self.angle_series = get_inbetweens(parse_key_frames(self.settings['angle']), self.total_iterations)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `angle` correctly for key frames.\n"
)
self.angle = f"0: ({self.settings['angle']})"
                self.angle_series = get_inbetweens(parse_key_frames(self.angle), self.total_iterations)
try:
self.zoom_series = get_inbetweens(parse_key_frames(self.settings['zoom']), self.total_iterations)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `zoom` correctly for key frames.\n"
)
self.zoom = f"0: ({self.settings['zoom']})"
                self.zoom_series = get_inbetweens(parse_key_frames(self.zoom), self.total_iterations)
try:
self.translation_x_series = get_inbetweens(parse_key_frames(self.settings['translation_x']), self.total_iterations)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `translation_x` correctly for key frames.\n"
)
self.translation_x = f"0: ({self.settings['translation_x']})"
                self.translation_x_series = get_inbetweens(parse_key_frames(self.translation_x), self.total_iterations)
try:
self.translation_y_series = get_inbetweens(parse_key_frames(self.settings['translation_y']), self.total_iterations)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `translation_y` correctly for key frames.\n"
)
self.translation_y = f"0: ({self.settings['translation_y']})"
                self.translation_y_series = get_inbetweens(parse_key_frames(self.translation_y), self.total_iterations)
try:
self.iterations_per_frame_series = get_inbetweens(
parse_key_frames(self.settings['iterations_per_frame']), self.total_iterations, integer=True
)
except RuntimeError:
self.display_message(
"WARNING: You have selected to use key frames, but you have not "
"formatted `iterations_per_frame` correctly for key frames.\n"
)
self.iterations_per_frame = f"0: ({self.settings['iterations_per_frame']})"
                self.iterations_per_frame_series = get_inbetweens(
                    parse_key_frames(self.iterations_per_frame), self.total_iterations, integer=True
                )
else:
self.prompts = [phrase.strip() for phrase in self.settings['prompt'].split("|")]
if self.prompts == ['']:
self.prompts = []
self.target_images = self.settings['target_images']
if self.target_images == "None" or not self.target_images:
self.target_images = []
else:
self.target_images = self.target_images.split("|")
self.target_images = [image.strip() for image in self.target_images]
self.angle = float(self.settings['angle'])
self.zoom = float(self.settings['zoom'])
self.translation_x = float(self.settings['translation_x'])
self.translation_y = float(self.settings['translation_y'])
self.iterations_per_frame = int(self.settings['iterations_per_frame'])
self.clean_cache()
for var in ['device', 'model', 'perceptor', 'z']:
try:
del globals()[var]
            except KeyError:
pass
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.display_message('Deep dream initiated')
self.display_message(f'Using {self.device}')
if self.prompts:
self.display_message(f'I am dreaming about {self.prompts}')
if self.target_images:
self.display_message(f'Using dream state {self.target_images}')
        if self.seed is None:
self.seed = torch.seed()
torch.manual_seed(self.seed)
self.display_message(f'Dream seed {self.seed}')
# config
self.vqgan_config = f'{self.dir_path}/models/{self.model_name}.yaml'
self.vqgan_checkpoint = f'{self.dir_path}/models/{self.model_name}.ckpt'
self.model = self.load_vqgan_model(self.vqgan_config, self.vqgan_checkpoint)
if torch.cuda.device_count() > 1:
self.display_message(f"Let's use {torch.cuda.device_count()} GPUs!")
self.model = nn.DataParallel(self.model, device_ids=[_id for _id in range(torch.cuda.device_count())])
self.model.to(self.device)
self.model = self.model.module
else:
self.model.to(self.device)
self.perceptor = clip.load(self.settings['clip_model'], jit=False)[0].eval().requires_grad_(False).to(self.device)
self.cut_size = self.perceptor.visual.input_resolution
self.e_dim = self.model.quantize.e_dim
self.f = 2**(self.model.decoder.num_resolutions - 1)
self.make_cutouts = MakeCutouts(self.cut_size, self.settings['vq_cutn'], cut_pow=self.settings['vq_cut_pow'])
self.n_toks = self.model.quantize.n_e
self.toksX, self.toksY = self.settings['width'] // self.f, self.settings['height'] // self.f
self.sideX, self.sideY = self.toksX * self.f, self.toksY * self.f
self.z_min = self.model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]
self.z_max = self.model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]
self.next_loop_stop = False # ensure proper stop for GPU mem
for i in range(self.total_iterations):
if self.next_loop_stop:
break
if self.settings['key_frames']:
self.prompts = self.prompts_series[i]
self.prompts = [phrase.strip() for phrase in self.prompts.split("|")]
if self.prompts == ['']:
self.prompts = []
self.settings['prompt'] = self.prompts
self.target_images = self.target_images_series[i]
if self.target_images == "None" or not self.target_images:
self.target_images = []
else:
self.target_images = self.target_images.split("|")
self.target_images = [image.strip() for image in self.target_images]
self.settings['target_images'] = self.target_images
self.angle = self.angle_series[i]
self.zoom = self.zoom_series[i]
self.translation_x = self.translation_x_series[i]
self.translation_y = self.translation_y_series[i]
self.iterations_per_frame = self.iterations_per_frame_series[i]
if i == 0 and self.settings['initial_image'] != "":
self.img_0 = read_image_workaround(self.settings['initial_image'])
self.z, *_ = self.model.encode(TF.to_tensor(self.img_0).to(self.device).unsqueeze(0) * 2 - 1)
elif i == 0 and not os.path.isfile(f'{self.dir_path}/{self.settings["output_folder"]}/{i:04}.png'):
self.one_hot = F.one_hot(
torch.randint(self.n_toks, [self.toksY * self.toksX], device=self.device), self.n_toks
).float()
self.z = self.one_hot @ self.model.quantize.embedding.weight
self.z = self.z.view([-1, self.toksY, self.toksX, self.e_dim]).permute(0, 3, 1, 2)
else:
self.img_0 = read_image_workaround(f'{self.dir_path}/{self.settings["output_folder"]}/{i:04}.png')
self.center = (1 * self.img_0.shape[1]//2, 1 * self.img_0.shape[0]//2)
self.trans_mat = np.float32(
[[1, 0, self.translation_x],
[0, 1, self.translation_y]]
)
self.rot_mat = cv2.getRotationMatrix2D(self.center, self.angle, self.zoom)
self.trans_mat = np.vstack([self.trans_mat, [0,0,1]])
self.rot_mat = np.vstack([self.rot_mat, [0,0,1]])
self.transformation_matrix = np.matmul(self.rot_mat, self.trans_mat)
self.img_0 = cv2.warpPerspective(
self.img_0,
self.transformation_matrix,
(self.img_0.shape[1], self.img_0.shape[0]),
borderMode=cv2.BORDER_WRAP
)
self.z, *_ = self.model.encode(TF.to_tensor(self.img_0).to(self.device).unsqueeze(0) * 2 - 1)
i += 1
self.z_orig = self.z.clone()
self.z.requires_grad_(True)
self.opt = optim.Adam([self.z], lr=self.settings['vq_step_size'])
self.normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
std=[0.26862954, 0.26130258, 0.27577711])
self.pMs = []
for prompt in self.prompts:
txt, weight, stop = parse_prompt(prompt)
self.embed = self.perceptor.encode_text(clip.tokenize(txt).to(self.device)).float()
self.pMs.append(Prompt(self.embed, self.replace_grad, weight, stop).to(self.device))
for prompt in self.target_images:
path, weight, stop = parse_prompt(prompt)
self.img = resize_image(Image.open(path).convert('RGB'), (self.sideX, self.sideY))
self.batch = self.make_cutouts(TF.to_tensor(self.img).unsqueeze(0).to(self.device))
self.embed = self.perceptor.encode_image(self.normalize(self.batch)).float()
self.pMs.append(Prompt(self.embed, self.replace_grad, weight, stop).to(self.device))
for seed, weight in zip(self.settings['noise_prompt_seeds'], self.settings['noise_prompt_weights']):
gen = torch.Generator().manual_seed(seed)
self.embed = torch.empty([1, self.perceptor.visual.output_dim]).normal_(generator=gen)
self.pMs.append(Prompt(self.embed, self.replace_grad, weight).to(self.device))
try:
self.dream(i)
except KeyboardInterrupt:
self.next_loop_stop = True
pass
# upscale/gen video
if self.settings['upscale_dream']:
scale_settings = {
'input': f'{self.dir_path}/{self.settings["output_folder"]}',
'output': f'{self.dir_path}/{self.settings["output_folder"]}-upscaled',
'suffix': '',
'netscale': int(self.settings['upscale_strength']),
'outscale': int(self.settings['upscale_strength'])
}
out_folder = f'{self.dir_path}/{self.settings["output_folder"]}-upscaled'
ScaleImage(scale_settings)
else:
out_folder = f'{self.dir_path}/{self.settings["output_folder"]}'
if self.settings['generate_video']:
frames_to_video(out_folder, f'{self.dir_path}/out.mp4', self.settings['target_fps'])
def dream(self, i):
x = 0
while True:
if x >= self.iterations_per_frame:
break
else:
self.train(i)
x += 1
def train(self, i):
self.opt.zero_grad()
lossAll = self.ascend_txt(i, True)
if i % self.settings['display_frequency'] == 0:
self.checkin(i, lossAll)
loss = sum(lossAll)
loss.backward()
self.opt.step()
with torch.no_grad():
self.z.copy_(self.z.maximum(self.z_min).minimum(self.z_max))
def save_output(self, i, img):
if self.settings['output_name'] == '':
filename = f"{self.dir_path}/{self.settings['output_folder']}/{i:04}.png"
else:
filename = f"{self.dir_path}/{self.settings['output_folder']}/{self.settings['output_name']}.png"
imageio.imwrite(filename, np.array(img))
def ascend_txt(self, i, save):
out = self.synth(self.z)
iii = self.perceptor.encode_image(self.normalize(self.make_cutouts(out))).float()
result = []
if self.settings['vq_init_weight']:
result.append(F.mse_loss(self.z, self.z_orig) * self.settings['vq_init_weight'] / 2)
for prompt in self.pMs:
result.append(prompt(iii))
img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]
img = np.transpose(img, (1, 2, 0))
if save:
self.save_output(i, img)
return result
@torch.no_grad()
def checkin(self, i, losses):
losses_str = ', '.join(f'{loss.item():g}' for loss in losses)
self.display_message(f'i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}')
out = self.synth(self.z)
TF.to_pil_image(out[0].cpu()).save('progress.png')
def vector_quantize(self, x, codebook):
d = x.pow(2).sum(dim=-1, keepdim=True) + codebook.pow(2).sum(dim=1) - 2 * x @ codebook.T
indices = d.argmin(-1)
x_q = F.one_hot(indices, codebook.shape[0]).to(d.dtype) @ codebook
return self.replace_grad(x_q, x)
def synth(self, z):
z_q = self.vector_quantize(z.movedim(1, 3), self.model.quantize.embedding.weight).movedim(3, 1)
return self.clamp_with_grad(self.model.decode(z_q).add(1).div(2), 0, 1)
def display_message(self, msg):
print(msg)
def clean_cache(self):
torch.cuda.empty_cache()
with torch.no_grad():
torch.cuda.empty_cache()
def load_vqgan_model(self, config_path, checkpoint_path):
config = OmegaConf.load(config_path)
if config.model.target == 'taming.models.vqgan.VQModel':
model = vqgan.VQModel(**config.model.params)
model.eval().requires_grad_(False)
model.init_from_ckpt(checkpoint_path)
elif config.model.target == 'taming.models.cond_transformer.Net2NetTransformer':
parent_model = cond_transformer.Net2NetTransformer(**config.model.params)
parent_model.eval().requires_grad_(False)
parent_model.init_from_ckpt(checkpoint_path)
model = parent_model.first_stage_model
else:
raise ValueError(f'unknown model type: {config.model.target}')
del model.loss
return model
def stream_down(self, url, path):
        self.display_message(f'Downloading model {self.settings["vqgan_model"]}')
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(path, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
        self.display_message(f'Model {self.settings["vqgan_model"]} download complete')
def down_pretrained_models(self):
models_path = f'{self.dir_path}/models/'
os.makedirs(models_path, exist_ok=True)
        if not os.path.exists(models_path + self.settings['vqgan_model'] + '.ckpt'):
self.stream_down(
self.settings['pretrained_models'][f'{self.settings["vqgan_model"]}_ckpt'],
models_path + f'{self.settings["vqgan_model"]}.ckpt'
)
self.stream_down(
self.settings['pretrained_models'][f'{self.settings["vqgan_model"]}_yaml'],
models_path + f'{self.settings["vqgan_model"]}.yaml'
)
# total iterations = video_length * target_fps
# key_frames = True, allows setup such as 10: (Apple: 1| Orange: 0), 20: (Apple: 0| Orange: 1| Peach: 1)
# from frame 0 to frame 10 show Apple, from frame 10 to 20 show Orange & Peach
if __name__ == "__main__":
settings = {
'key_frames': True,
'generate_video': True,
'video_length': 6, # seconds
'target_fps': 30,
'upscale_dream': True,
'upscale_strength': 2, # available [2, 4] -> 2x or 4x the generated output
'initial_image': '', # start from image
'target_images': '', # target the shape
'prompt': '10: (Apple: 1| Orange: 0), 20: (Apple: 0| Orange: 1| Peach: 1)',
'width': 256,
'height': 256,
'angle': '10: (0), 30: (10), 50: (0)',
'zoom': '10: (1), 30: (1.2), 50: (1)',
'translation_x': '0: (0)',
'translation_y': '0: (0)',
'iterations_per_frame': '0: (1)',
'vqgan_model': 'vqgan_imagenet_f16_16384', # available ["vqgan_imagenet_f16_16384", "vqgan_imagenet_f16_1024", "wikiart_16384", "coco", "sflckr"]
'clip_model': 'ViT-B/32' # available ['RN50', 'RN101', 'RN50x4', 'RN50x16', 'ViT-B/32','ViT-B/16']
}
Text2Image(settings)
|
# SPDX-FileCopyrightText: 2021 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""Pin definitions for the Khadas VIM3."""
from adafruit_blinka.microcontroller.amlogic.a311d import pin
GPIOAO_0 = pin.GPIO496
GPIOAO_1 = pin.GPIO497
GPIOAO_2 = pin.GPIO498
GPIOAO_3 = pin.GPIO499
GPIOAO_4 = pin.GPIO500
GPIOAO_5 = pin.GPIO501
GPIOAO_6 = pin.GPIO502
GPIOAO_7 = pin.GPIO503
GPIOAO_8 = pin.GPIO504
GPIOAO_9 = pin.GPIO505
GPIOAO_10 = pin.GPIO506
GPIOAO_11 = pin.GPIO507
GPIOE_0 = pin.GPIO508
GPIOE_1 = pin.GPIO509
GPIOE_2 = pin.GPIO510
GPIOZ_0 = pin.GPIO427
GPIOZ_1 = pin.GPIO428
GPIOZ_2 = pin.GPIO429
GPIOZ_3 = pin.GPIO430
GPIOZ_4 = pin.GPIO431
GPIOZ_5 = pin.GPIO432
GPIOZ_6 = pin.GPIO433
GPIOZ_7 = pin.GPIO434
GPIOZ_8 = pin.GPIO435
GPIOZ_9 = pin.GPIO436
GPIOZ_10 = pin.GPIO437
GPIOZ_11 = pin.GPIO438
GPIOZ_12 = pin.GPIO439
GPIOZ_13 = pin.GPIO440
GPIOZ_14 = pin.GPIO441
GPIOZ_15 = pin.GPIO442
GPIOH_0 = pin.GPIO443
GPIOH_1 = pin.GPIO444
GPIOH_2 = pin.GPIO445
GPIOH_3 = pin.GPIO446
GPIOH_4 = pin.GPIO447
GPIOH_5 = pin.GPIO448
GPIOH_6 = pin.GPIO449
GPIOH_7 = pin.GPIO450
GPIOH_8 = pin.GPIO451
BOOT_0 = pin.GPIO452
BOOT_1 = pin.GPIO453
BOOT_2 = pin.GPIO454
BOOT_3 = pin.GPIO455
BOOT_4 = pin.GPIO456
BOOT_5 = pin.GPIO457
BOOT_6 = pin.GPIO458
BOOT_7 = pin.GPIO459
BOOT_8 = pin.GPIO460
BOOT_9 = pin.GPIO461
BOOT_10 = pin.GPIO462
BOOT_11 = pin.GPIO463
BOOT_12 = pin.GPIO464
BOOT_13 = pin.GPIO465
BOOT_14 = pin.GPIO466
BOOT_15 = pin.GPIO467
GPIOC_0 = pin.GPIO468
GPIOC_1 = pin.GPIO469
GPIOC_2 = pin.GPIO470
GPIOC_3 = pin.GPIO471
GPIOC_4 = pin.GPIO472
GPIOC_5 = pin.GPIO473
GPIOC_6 = pin.GPIO474
GPIOC_7 = pin.GPIO475
GPIOA_0 = pin.GPIO460
GPIOA_1 = pin.GPIO461
GPIOA_2 = pin.GPIO462
GPIOA_3 = pin.GPIO463
GPIOA_4 = pin.GPIO464
GPIOA_5 = pin.GPIO465
GPIOA_6 = pin.GPIO466
GPIOA_7 = pin.GPIO467
GPIOA_8 = pin.GPIO468
GPIOA_9 = pin.GPIO469
GPIOA_10 = pin.GPIO470
GPIOA_11 = pin.GPIO471
GPIOA_12 = pin.GPIO472
GPIOA_13 = pin.GPIO473
GPIOA_14 = pin.GPIO474
GPIOA_15 = pin.GPIO475
GPIOX_0 = pin.GPIO476
GPIOX_1 = pin.GPIO477
GPIOX_2 = pin.GPIO478
GPIOX_3 = pin.GPIO479
GPIOX_4 = pin.GPIO480
GPIOX_5 = pin.GPIO481
GPIOX_6 = pin.GPIO482
GPIOX_7 = pin.GPIO483
GPIOX_8 = pin.GPIO484
GPIOX_9 = pin.GPIO485
GPIOX_10 = pin.GPIO486
GPIOX_11 = pin.GPIO487
GPIOX_12 = pin.GPIO488
GPIOX_13 = pin.GPIO489
GPIOX_14 = pin.GPIO490
GPIOX_15 = pin.GPIO491
GPIOX_16 = pin.GPIO492
GPIOX_17 = pin.GPIO493
GPIOX_18 = pin.GPIO494
GPIOX_19 = pin.GPIO495
SCL = pin.GPIOX_18
SDA = pin.GPIOX_17
SCLK = pin.SPI0_SCLK
MCLK0 = pin.SPI0_MCLK0
MISO = pin.SPI0_SDO
MOSI = pin.SPI0_SDI
D0 = GPIOAO_10 # PIN_13
D1 = GPIOH_6 # PIN_15
D2 = GPIOH_7 # PIN_16
D3 = GPIOAO_1 # PIN_18
D4 = GPIOAO_2 # PIN_19
D5 = GPIOA_15 # PIN_22
D6 = GPIOA_14 # PIN_23
D7 = GPIOAO_2 # PIN_25
D8 = GPIOAO_3 # PIN_26
D9 = GPIOA_1 # PIN_29
D10 = GPIOA_0 # PIN_30
D11 = GPIOA_3 # PIN_31
D12 = GPIOA_2 # PIN_32
D13 = GPIOA_4 # PIN_33
D14 = GPIOH_5 # PIN_35
D15 = GPIOH_4 # PIN_37
D16 = GPIOZ_15 # PIN_39
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : receive.py
@Time : 2019/8/6 10:15
@Author : Crisimple
@Github : https://crisimple.github.io/
@Contact : Crisimple@foxmail.com
@License : (C)Copyright 2017-2019, Micro-Circle
@Desc : None
"""
import xml.etree.ElementTree as ET
def parse_xml(webData):
if len(webData) == 0:
return None
xmlData = ET.fromstring(webData)
msg_type = xmlData.find('MsgType').text
if msg_type == 'text':
return TextMsg(xmlData)
elif msg_type == 'image':
return ImageMsg(xmlData)
elif msg_type == 'voice':
return VoiceMsg(xmlData)
elif msg_type == 'video':
return VideoMsg(xmlData)
elif msg_type == 'shortvideo':
return ShortVideoMsg(xmlData)
elif msg_type == 'event':
return EventMsg(xmlData)
class Msg(object):
def __init__(self, xmlData):
self.ToUserName = xmlData.find('ToUserName').text
self.FromUserName = xmlData.find('FromUserName').text
self.CreateTime = xmlData.find('CreateTime').text
self.MsgType = xmlData.find('MsgType').text
# self.MsgId = xmlData.find('MsgId').text
class TextMsg(Msg):
def __init__(self, xmlData):
Msg.__init__(self, xmlData)
self.Content = xmlData.find('Content').text.encode('utf-8')
class ImageMsg(Msg):
def __init__(self, xmlData):
Msg.__init__(self, xmlData)
self.PicUrl = xmlData.find('PicUrl').text
self.MediaId = xmlData.find('MediaId').text
class VoiceMsg(Msg):
def __init__(self, xmlData):
Msg.__init__(self, xmlData)
self.MediaId = xmlData.find('MediaId').text
self.Format = xmlData.find('Format').text
class VideoMsg(Msg):
    def __init__(self, xmlData):
Msg.__init__(self, xmlData)
self.MediaId = xmlData.find('MediaId').text
self.ThumbMediaId = xmlData.find('ThumbMediaId').text
class ShortVideoMsg(Msg):
    def __init__(self, xmlData):
Msg.__init__(self, xmlData)
self.MediaId = xmlData.find('MediaId').text
self.ThumbMediaId = xmlData.find('ThumbMediaId').text
class EventMsg(Msg):
def __init__(self, xmlData):
Msg.__init__(self, xmlData)
self.Event = xmlData.find('Event').text
if self.Event.lower() in ['click']:
self.EventKey = xmlData.find('EventKey').text
elif self.Event == 'location_select':
self.Location_X = xmlData.find('SendLocationInfo/Location_X').text
self.Location_Y = xmlData.find('SendLocationInfo/Location_Y').text
self.Scale = xmlData.find('SendLocationInfo/Scale').text
self.Label = xmlData.find('SendLocationInfo/Label').text
self.Poiname = xmlData.find('SendLocationInfo/Poiname').text
elif self.Event == 'LOCATION':
self.Longitude = xmlData.find('Longitude').text
self.Latitude = xmlData.find('Latitude').text
self.Precision = xmlData.find('Precision').text
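if __name__ == "__main__":
    # Quick self-test (my addition): parse a minimal text-message payload of
    # the kind WeChat posts to the callback URL (field values are made up).
    sample = (b"<xml><ToUserName>gh_x</ToUserName><FromUserName>o_y</FromUserName>"
              b"<CreateTime>1565057700</CreateTime><MsgType>text</MsgType>"
              b"<Content>hello</Content></xml>")
    msg = parse_xml(sample)
    print(msg.MsgType, msg.Content)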
|
class User:
def __init__(self, id, first_name, last_name, email, account_creation_date):
self.id = id
self.first_name = first_name
self.last_name = last_name
self.email = email
self.account_creation_date = account_creation_date
|
from typing import Dict, Any
import json
import datetime
import xml.sax.saxutils
import re
from ask_sdk_model import (RequestEnvelope,
Application,
Session,
Context,
Request,
User,
Permissions,
Device,
SupportedInterfaces,
LaunchRequest,
IntentRequest,
Intent,
Slot,
IntentConfirmationStatus,
SlotConfirmationStatus,
# Response,
ResponseEnvelope)
from ask_sdk_model.interfaces.system.system_state import SystemState
from ask_sdk_model.interfaces.audioplayer.audio_player_interface import AudioPlayerInterface
from ask_sdk_model.slu.entityresolution.resolutions import Resolutions
from ask_sdk_model.slu.entityresolution.resolution import Resolution
from ask_sdk_model.slu.entityresolution.status import Status
from ask_sdk_model.slu.entityresolution.status_code import StatusCode
from ask_sdk_core.serialize import DefaultSerializer
from lxml import etree
import requests
SESSION_ID = "amzn1.echo-api.session.0000"
USER_ID = "amzn1.ask.account.0000"
DEVICE_ID = "amzn1.ask.device.0000"
APPLICATION_ID = "amzn1.ask.skill.6f9a57d5-4e2b-452c-9fd3-037240133075"
API_ENDPOINT = "https://api.amazonalexa.com"
REQUEST_ID_BASE = "amzn1.echo-api.request.0000"
RESOLUTION_AUTHORITY_BASE = "amzn1.er-authority.echo-sdk.amzn1.ask.skill.0000"
def create_request_envelope(session_attributes: Dict[str, Any],
request: Request) -> RequestEnvelope:
"""Creates a request envelope."""
application = Application(
application_id=APPLICATION_ID
)
user = User(
user_id=USER_ID,
access_token=None,
permissions=Permissions(
consent_token=None
)
)
request_envelope = RequestEnvelope(
version="1.0",
session=Session(
new=False,
session_id=SESSION_ID,
user=user,
attributes=session_attributes,
application=application
),
context=Context(
system=SystemState(
application=application,
user=user,
device=Device(
device_id=DEVICE_ID,
supported_interfaces=SupportedInterfaces(
audio_player=AudioPlayerInterface(),
display=None,
video_app=None
)
),
api_endpoint=API_ENDPOINT,
api_access_token=None
)
),
request=request
)
return request_envelope
def create_launch_request() -> LaunchRequest:
"""Creates an launch request."""
launch_request = LaunchRequest(
request_id='{}.0'.format(REQUEST_ID_BASE),
timestamp=datetime.datetime.utcnow(),
locale='en-US'
)
return launch_request
def create_intent_request(round_index: int,
user_utterance: str) -> IntentRequest:
"""Creates an intent request."""
intent_request = IntentRequest(
request_id='{}.{}'.format(REQUEST_ID_BASE,
round_index),
timestamp=datetime.datetime.utcnow(),
locale='en-US',
dialog_state=None,
intent=Intent(
name='ConverseIntent',
slots=dict(
Text=Slot(
name='Text',
value=user_utterance,
confirmation_status=SlotConfirmationStatus.NONE,
resolutions=Resolutions(
resolutions_per_authority=[
Resolution(
authority='{}.TEXT'.format(RESOLUTION_AUTHORITY_BASE),
status=Status(
code=StatusCode.ER_SUCCESS_NO_MATCH
)
)
]
)
)
),
confirmation_status=IntentConfirmationStatus.NONE
)
)
return intent_request
def send_request(endpoint_url: str,
request_envelope: RequestEnvelope) -> ResponseEnvelope:
"""Sends a request to the endpoint and returns the response."""
serializer = DefaultSerializer()
r = requests.post(
endpoint_url,
json=serializer.serialize(request_envelope),
)
response_envelope = serializer.deserialize(
payload=r.text,
obj_type=ResponseEnvelope
)
return response_envelope
def unescape_ssml(text: str) -> str:
"""Unescapes XML control characters in SSML.
See:
https://console.bluemix.net/docs/services/text-to-speech/http.html#escape
We first unescape the text in case it already contains escaped control
characters.
"""
    return xml.sax.saxutils.unescape(text, {"&quot;": '"', "&apos;": "'"})
def remove_ssml_tags(text: str) -> str:
root = etree.fromstring(text)
text = u' '.join(root.itertext())
return re.sub(r' +', ' ', text)
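if __name__ == "__main__":
    # Round-trip sketch (my addition): build one envelope for a single user
    # turn; the endpoint URL for send_request would come from the skill under test.
    envelope = create_request_envelope({}, create_intent_request(1, "hello there"))
    print(envelope.request.intent.slots["Text"].value)
    print(remove_ssml_tags("<speak>Hi <break time='1s'/> there</speak>"))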
|
import pandas as pd
import numpy as np
import os
# Manage login credentials
class Management:
def __init__(self, user_id, password):
self.user = user_id
self.password = password
self.file_name = './lib/User/userManagement.csv'
    # Decide whether the user is unregistered; register the name if it is not in use yet
def signup(self):
if os.path.exists(self.file_name):
df = pd.read_csv(self.file_name)
users = list(df['User'])
passwords = list(df['Password'])
if self.user in users:
flg = False
else:
self.registration(user=users, password=passwords)
flg = True
else:
self.registration(user=[], password=[])
flg = True
return flg
    # Determine whether the user is registered.
def checkLogin(self):
if os.path.exists(self.file_name):
df = pd.read_csv(self.file_name)
flg = self.checkUserPassword(df=df)
return flg
else:
return False
    # Update the registered-user file
def save_data(self, df):
df.to_csv(self.file_name, index=False)
    # Register a new user
def registration(self, user, password):
user.append(self.user)
password.append(self.password)
data = np.array([user, password]).T
        print('signup:', data)
df = pd.DataFrame(data=data, columns=['User','Password'])
self.save_data(df=df)
    # Check whether this user name is already in use.
def checkUserPassword(self, df):
users = list(df['User'])
if self.user in users:
password = list(df[df['User'] == self.user]['Password'])[0]
if password == self.password:
return True
else:
return False
else:
return False
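# Usage sketch (my addition, not part of the original module): it assumes the
# ./lib/User/ directory already exists so the CSV file can be written.
if __name__ == "__main__":
    m = Management(user_id="alice", password="secret")
    print(m.signup())      # True the first time "alice" is registered
    print(m.checkLogin())  # True when the stored password matches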
|
ORDER = ('six', 'five', 'four', 'three', 'two', 'one')
YIN_YANG = {'hhh': '----o----', 'hht': '---- ----',
'htt': '---------', 'ttt': '----x----'}
def oracle(arr):
return '\n'.join(YIN_YANG[''.join(sorted(a[1:]))] for a in
sorted(arr, key=lambda b: ORDER.index(b[0])))
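if __name__ == "__main__":
    # Example reading (my addition): each entry is a position name followed by
    # three coin tosses ('h'/'t'); lines render top ('six') to bottom ('one').
    print(oracle([('six', 'h', 'h', 'h'), ('five', 'h', 'h', 't'),
                  ('four', 'h', 't', 't'), ('three', 't', 't', 't'),
                  ('two', 'h', 'h', 't'), ('one', 'h', 'h', 'h')]))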
|
# Implementation of a singly linked list.
# Class Node creates the nodes used by class SinglyLinkedList.
#
# Node is kept outside the SinglyLinkedList class because several Node
# instances exist per list; a Node is created only when the user inserts data.
# Pointers used: 1. head: always points to the first node
#                2. current_node: a cursor that moves as required
# (a tail pointer is part of the design sketch but is not kept by this class)
class Node:
def __init__(self,key=None):
self.key=key
self.next=None
class SinglyLinkedList(object):
"""docstring for SinglyLinkedList"""
def __init__(self,head=None):
"""Initializing a linked list with the valid argument, if nothing given default is None. So the head points to None
Otherwise it points to the node.
"""
self.head = head
def pushFront(self,key):
"""When a element named key is pushed then it create a node to which the head pointer points to"""
node = Node(key)
#print(node.key)
if self.head is None:
self.head = node
else:
node.next = self.head
self.head = node
def topFront(self):
"""No parameter taken return"""
if self.head is None:
return "List is Empty"
return self.head.key
def popFront(self):
"""doc string"""
if self.head:
self.head = self.head.next
else:
print("list is empty")
def pushBack(self,key):
"""Find the last element and then add node after that"""
node = Node(key)
current_node = self.head
if self.head is None:
self.head = node
else:
while current_node.next:
current_node = current_node.next
current_node.next = node
def topBack(self):
""" returns the last element of the linked list"""
current_node = self.head
if self.head is None:
return "list is empty"
else:
while current_node.next:
current_node = current_node.next
return current_node.key
    def printD(self):
        """Print every key from head to tail."""
        current_node = self.head
        while current_node:
            print(current_node.key)
            current_node = current_node.next
    def popBack(self):
        """ Pop the end node """
        if self.head is None:
            print("list is empty")
            return None
        if self.head.next is None:
            # Single node: the head is also the tail
            self.head = None
            return None
        current_node = self.head
        while current_node.next.next:
            current_node = current_node.next
        current_node.next = None
    def find(self, key):
        """Return the 1-based position of the first occurrence of the key."""
        current_node = self.head
        c = 0
        if self.head is None:
            return "List is empty"
        while current_node:
            c += 1
            if current_node.key == key:
                return c
            current_node = current_node.next
        return None
# def erase(self,key):
# """doc string"""
# pass
# def isEmpty(self):
# """doc string"""
# pass
# def addBefore(self,key):
# """doc string"""
# pass
# def addAfter(self,key):
# """doc string"""
# pass
# def __str__(self):
# """doc string"""
# pass
# def __repr__(self):
# """doc string"""
# pass
s = SinglyLinkedList()
s.pushFront(1)
print("1______")
print(s.topFront())
print(s.topBack())
s.pushFront(2)
print("2___1_____")
print(s.topFront())
print(s.topBack())
s.pushBack(3)
s.pushBack(4)
s.pushFront(5)
print("5__2___1___3__4")
print(s.topFront())
print(s.topBack())
s.popBack()
print("5__2___1___3__")
print(s.topFront())
print(s.topBack())
s.popFront()
print("2___1_____3")
print(s.topFront())
print(s.topBack())
# print(s.topFront())
# print(s.topBack())
# print("I am 2 and key is {}".format(s.head.key))
# print("I am 2 and next key is {}".format(s.head.next.key))
# s.pushFront(3)
# print("I am 3 and key is {}".format(s.head.key))
# print("I am 3 and next key is {}".format(s.head.next.key))
# s.pushFront(4)
# print("I am 4 and key is {}".format(s.head.key))
# print("I am 4 and next key is {}".format(s.head.next.key))
# s.pushFront(5)
# print("I am 5 and key is {}".format(s.head.key))
# print("I am 5 and next key is {}".format(s.head.next.key))
|
from datetime import datetime
import json
from pathlib import Path
from datetime import timedelta
user_exp_database_filename = Path("database_files/user_exp_database.json")
user_exp_database = None
addedExp = 100
role_name_list =["goku", "vegeta", "frieza", "cell",
"boo", "gohan", "18", "17",
"trunks", "piccolo", "krillyn", "tien",
"yamacha", "yajirobe", "mortals"]
role_exp_amount = [90000, 78000, 67000, 57000,
48000, 40000, 32000, 26000,
20000, 14000, 10000, 6000,
3000, 1000, 0]
def get_user_exp_database():
global user_exp_database
return user_exp_database
def set_user_exp_database():
    global user_exp_database
    try:
        with open(user_exp_database_filename, 'r+') as f:
            user_exp_database = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        # Missing or empty file: start with a fresh database.
        user_exp_database = {}
    return
def save_user_exp_database():
local_user_exp_database = get_user_exp_database()
with open(user_exp_database_filename, 'w') as outfile:
json.dump(local_user_exp_database, outfile)
return
def check_user_in_exp_database(user_id):
local_user_exp_database = get_user_exp_database()
if user_id in local_user_exp_database:
return True
else:
return False
def add_new_user_to_exp_database(userID, userName):
local_user_exp_database = get_user_exp_database()
local_user_exp_database[userID] = {"name": userName,
"id": userID,
"exp": 0,
"last_given_exp": ""
}
save_user_exp_database()
return
def add_exp(userID):
local_user_exp_database = get_user_exp_database()
    currentExp = local_user_exp_database[userID]["exp"]
    local_user_exp_database[userID]["exp"] = currentExp + addedExp
local_user_exp_database[userID]["last_given_exp"] = datetime.today().strftime("%b-%d-%Y, %H:%M:%S")
save_user_exp_database()
return
def get_user_exp(userID):
local_user_exp_database = get_user_exp_database()
return local_user_exp_database[userID]["exp"]
def get_last_time_user_given_exp(userID):
local_user_exp_database = get_user_exp_database()
return local_user_exp_database[userID]["last_given_exp"]
def can_user_get_exp(userID):
    last_given_exp = get_last_time_user_given_exp(userID)
    if not last_given_exp:
        return True  # user has never been given exp
    last_given_exp = datetime.strptime(last_given_exp, '%b-%d-%Y, %H:%M:%S')
    if datetime.now() < last_given_exp + timedelta(minutes=2):
        return False
    else:
        return True
def ready():
set_user_exp_database()
return
"""
formatted = datetime.datetime.today().strftime("%b-%d-%Y, %H:%M:%S")
date = datetime.datetime.strptime(formatted, '%b-%d-%Y, %H:%M:%S')
print(date)
"""
|
mySocialMedia_posts = [
{'Likes': 21, 'Comments': 2},
{'Likes': 13, 'Comments': 2, 'Shares': 1},
{'Likes': 33, 'Comments': 8, 'Shares': 3},
{'Comments': 4, 'Shares': 2},
{'Comments': 1, 'Shares': 1},
{'Likes': 19, 'Comments': 3}
]
total_likes = 0
for n,post in enumerate(mySocialMedia_posts):
try:
# This is just a test. It would probably be better to use "post.get('Likes', 0)"
print(n, end='--')
total_likes = total_likes + post['Likes']
except KeyError as msg:
print(f"The post {post} doesn't have {msg}.", end='--')
continue
else:
continue
finally:
# Checking if finally really is executed before continuing.
print(n)
print("\n", total_likes, sep='')
|
############# Credits and version info #############
# Definition generated from Assembly XML tag def
# Date generated: 2018/12/03 04:56
#
# revision: 1 author: Assembly
# Generated plugin from scratch.
# revision: 2 author: -DeToX-
# Mapped out the Raw Entry Table struct
# revision: 3 author: Lord Zedd
# Matching Zonesets
# revision: 4 author: Moses_of_Egypt
# Cleaned up and converted to SuPyr definition
#
####################################################
from ..common_descs import *
from .objs.tag import *
from .play import play_body
from supyr_struct.defs.tag_def import TagDef
zone_resource_type = Struct("resource_type",
BytesRaw("guid", SIZE=16),
Array("unknown_array", SUB_STRUCT=SInt16("unknown"), SIZE=4),
h3_string_id("name"),
ENDIAN=">", SIZE=28
)
zone_resource_structure_type = Struct("resource_structure_type",
BytesRaw("guid", SIZE=16),
h3_string_id("name"),
ENDIAN=">", SIZE=20
)
zone_tag_resource_resource_fixup = Struct("resource_fixup",
SInt32("block_offset"),
h3_page_offset_info("address_info"),
ENDIAN=">", SIZE=8,
COMMENT="""
Block Offset: Offset in tag data sub-buffer.
Address: Fix-up address (either in the data sub-buffer, or a cache-resource offset)."""
)
zone_tag_resource_resource_definition_fixup = Struct("resource_definition_fixup",
h3_page_offset_info("offset_info"),
dyn_senum32("resource_structure_type",
DYN_NAME_PATH="........resource_structure_types.STEPTREE[DYN_I].name.string",
WIDGET_WIDTH=32
),
ENDIAN=">", SIZE=8
)
zone_tag_resource = Struct("tag_resource",
h3_dependency("parent_tag"),
UInt16("salt"),
dyn_senum8("resource_type",
DYN_NAME_PATH=".....resource_types.STEPTREE[DYN_I].name.string",
WIDGET_WIDTH=32
),
UInt8("flags"),
SInt32("fixup_info_offset"),
SInt32("fixup_info_size"),
SInt32("secondary_fixup_info_offset"),
Bool16("pages_used",
"primary",
"secondary",
),
SInt16("segment_index"),
h3_page_offset_info("root_definition_address_info"),
h3_reflexive("resource_fixups", zone_tag_resource_resource_fixup),
h3_reflexive("resource_definition_fixups", zone_tag_resource_resource_definition_fixup),
ENDIAN=">", SIZE=64,
)
zone_zoneset_raw_pool = Struct("raw_pool",
Bool32("active_members", *("active_member_%s" % i for i in range(32))),
ENDIAN=">", SIZE=4
)
zone_zoneset_resource_type = QStruct("resource_type",
UInt16("unknown0"),
UInt16("unknown1"),
UInt16("unknown2"),
UInt16("unknown3"),
UInt16("unknown4"),
UInt16("unknown5"),
UInt16("unknown6"),
UInt16("unknown7"),
UInt16("unknown8"),
UInt16("unknown9"),
#BytesRaw("unknown", SIZE=20, VISIBLE=False),
VISIBLE=False,
ENDIAN=">", SIZE=20
)
zone_zoneset = Struct("zoneset",
h3_reflexive("required_raw_pool", zone_zoneset_raw_pool),
BytesRaw("unknown_0", SIZE=12, VISIBLE=False),
h3_reflexive("optional_raw_pool", zone_zoneset_raw_pool),
h3_reflexive("optional_raw_pool_2", zone_zoneset_raw_pool),
BytesRaw("unknown_1", SIZE=20, VISIBLE=False),
h3_string_id("set_name"),
h3_reflexive("resource_types", zone_zoneset_resource_type),
h3_reflexive("required_tag_pool", zone_zoneset_raw_pool),
h3_reflexive("optional_tag_pool", zone_zoneset_raw_pool),
BytesRaw("unknown_2", SIZE=12, VISIBLE=False),
ENDIAN=">", SIZE=120
)
zone_scenario_zoneset_group = Struct("scenario_zoneset_group",
h3_string_id("name"),
SInt32("bsp_group_index"),
Bool32("import_loaded_bsps", *("bsp_%s" % i for i in range(32))),
Bool32("loaded_bsps", *("bsp_%s" % i for i in range(32))),
Bool32("loaded_designer_zonesets", *("set_%s" % i for i in range(32))),
Bool32("unknown_loaded_designer_zonesets", *("set_%s" % i for i in range(32))),
Bool32("unloaded_designer_zonesets", *("set_%s" % i for i in range(32))),
Bool32("loaded_cinematic_zonesets", *("set_%s" % i for i in range(32))),
SInt32("bsp_atlas_index"),
ENDIAN=">", SIZE=36
)
zone_scenario_bsp = Struct("scenario_bsp",
h3_dependency("bsp"),
ENDIAN=">", SIZE=16
)
zone_unknown_4 = QStruct("unknown_4",
UInt32("unknown0"),
UInt32("unknown1"),
UInt32("unknown2"),
UInt32("unknown3"),
UInt32("unknown4"),
#BytesRaw("unknown", SIZE=20, VISIBLE=False),
VISIBLE=False,
ENDIAN=">", SIZE=20
)
zone_prediction_a = QStruct("prediction_a",
UInt32("key"),
ENDIAN=">", SIZE=4
)
zone_prediction_b = QStruct("prediction_b",
SInt16("overall_index"),
SInt16("a_count"),
SInt32("a_index"),
ENDIAN=">", SIZE=8
)
zone_prediction_c = QStruct("prediction_c",
SInt16("overall_index"),
SInt16("b_index"),
ENDIAN=">", SIZE=4
)
zone_prediction_d_tag = QStruct("prediction_d_tag",
SInt16("c_count"),
SInt16("c_index"),
SInt16("a_count"),
SInt16("a_index"),
ENDIAN=">", SIZE=8
)
zone_prediction_d2_tag = Struct("prediction_d2_tag",
dependency_uint32("tag", VISIBLE=False),
SInt32("first_value"),
SInt32("second_value"),
ENDIAN=">", SIZE=12
)
zone_body = Struct("tagdata",
SEnum16("map_type", *zone_map_type),
SInt16("flags"),
h3_reflexive("resource_types", zone_resource_type,
DYN_NAME_PATH='.name.string'),
h3_reflexive("resource_structure_types", zone_resource_structure_type,
DYN_NAME_PATH='.name.string'),
Struct("play_data", INCLUDE=play_body),
h3_reflexive("tag_resources", zone_tag_resource),
Struct("zonesets",
h3_reflexive("designer", zone_zoneset),
h3_reflexive("global", zone_zoneset),
BytesRaw("unknown_0", SIZE=12, VISIBLE=False),
h3_reflexive("unattached", zone_zoneset),
h3_reflexive("disc_forbidden", zone_zoneset),
h3_reflexive("disc_always_streaming", zone_zoneset),
h3_reflexive("bsp_1", zone_zoneset),
h3_reflexive("bsp_2", zone_zoneset),
h3_reflexive("bsp_3", zone_zoneset),
h3_reflexive("cinematic", zone_zoneset),
h3_reflexive("scenario", zone_zoneset),
BytesRaw("unknown_1", SIZE=24, VISIBLE=False),
),
h3_reflexive("scenario_zoneset_groups", zone_scenario_zoneset_group),
h3_reflexive("scenario_bsps", zone_scenario_bsp),
BytesRaw("unknown_2", SIZE=36, VISIBLE=False),
h3_rawdata_ref("fixup_info"),
BytesRaw("unknown_3", SIZE=20, VISIBLE=False),
h3_reflexive("unknown_4", zone_unknown_4),
BytesRaw("unknown_5", SIZE=96, VISIBLE=False),
h3_reflexive("prediction_a", zone_prediction_a),
h3_reflexive("prediction_b", zone_prediction_b),
h3_reflexive("prediction_c", zone_prediction_c),
h3_reflexive("prediction_d_tags", zone_prediction_d_tag),
h3_reflexive("prediction_d2_tags", zone_prediction_d2_tag),
SInt32("campaign_id"),
SInt32("map_id"),
ENDIAN=">", SIZE=532
)
def get():
return zone_def
zone_def = TagDef("zone",
h3_blam_header('zone'),
zone_body,
ext=".%s" % h3_tag_class_fcc_to_ext["zone"], endian=">", tag_cls=H3Tag
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class LoanPayConsultOrder(object):
def __init__(self):
self._out_order_no = None
self._seller_user_id = None
@property
def out_order_no(self):
return self._out_order_no
@out_order_no.setter
def out_order_no(self, value):
self._out_order_no = value
@property
def seller_user_id(self):
return self._seller_user_id
@seller_user_id.setter
def seller_user_id(self, value):
self._seller_user_id = value
def to_alipay_dict(self):
params = dict()
if self.out_order_no:
if hasattr(self.out_order_no, 'to_alipay_dict'):
params['out_order_no'] = self.out_order_no.to_alipay_dict()
else:
params['out_order_no'] = self.out_order_no
if self.seller_user_id:
if hasattr(self.seller_user_id, 'to_alipay_dict'):
params['seller_user_id'] = self.seller_user_id.to_alipay_dict()
else:
params['seller_user_id'] = self.seller_user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = LoanPayConsultOrder()
if 'out_order_no' in d:
o.out_order_no = d['out_order_no']
if 'seller_user_id' in d:
o.seller_user_id = d['seller_user_id']
return o
|
"""
Given a set of distinct integers, nums, return all possible subsets (the power set).
Note: The solution set must not contain duplicate subsets.
"""
import itertools
def subsets(nums):
subsets = []
for i in range(len(nums)):
        combinations = [list(c) for c in itertools.combinations(nums, i)]
subsets.extend(combinations)
subsets.append(nums)
return subsets
answer = subsets([1, 2, 3])
print(answer)
assert answer == [[], [1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]
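# Equivalent one-pass construction (illustrative, not in the original):
# power_set = [list(c) for r in range(len(nums) + 1) for c in itertools.combinations(nums, r)]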
|
#!/usr/bin/env python
# pylint: disable=disallowed-name
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
# pylint: disable=no-self-use
import unittest
from paramobject import ParametrizedObject, parameter, Parameter
class TestParametrizedObject(unittest.TestCase):
def test_basic_values(self):
class ClassUnderTest(ParametrizedObject):
foo = Parameter(default=42)
bar = Parameter()
@parameter(stored=True, default=123)
def baz(self):
return self.params_storage['baz']
@parameter(stored=True)
def qux(self):
return self.params_storage['qux']
@parameter
def quux(self):
return 11 * self.bar
# missing mandatory parameter value must raise ValueError
self.assertRaises(ValueError, ClassUnderTest)
self.assertRaises(ValueError, ClassUnderTest, bar=1)
self.assertRaises(ValueError, ClassUnderTest, qux=2)
# check correct default values
obj = ClassUnderTest(bar=1, qux=2)
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.baz, 123)
# check correct values
obj = ClassUnderTest(foo=1, bar=2, baz=3, qux=4)
self.assertEqual(obj.foo, 1)
self.assertEqual(obj.bar, 2)
self.assertEqual(obj.baz, 3)
self.assertEqual(obj.qux, 4)
self.assertEqual(obj.quux, 22)
def test_wither(self):
class ClassUnderTest(ParametrizedObject):
foo = Parameter(default=42)
bar = Parameter(default=77)
@bar.wither
def with_bar(self, bar, as_string=False):
if as_string:
bar = str(bar)
return self.with_params(bar=bar)
# check values
obj = ClassUnderTest()
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 77)
# check default wither
obj = ClassUnderTest()
self.assertEqual(obj.with_foo(10).foo, 10)
# check custom wither
obj = ClassUnderTest()
self.assertEqual(obj.with_bar(10).bar, 10)
self.assertEqual(obj.with_bar(10, as_string=True).bar, '10')
def test_caster(self):
class ClassUnderTest(ParametrizedObject):
radius = Parameter(default=10)
@parameter
def diameter(self):
return 2 * self.radius
@radius.caster
def cast_radius(self, default, **kwargs):
radius = kwargs.get('radius', default)
if 'diameter' in kwargs:
radius = kwargs['diameter'] / 2
return radius
# check default
obj = ClassUnderTest()
self.assertEqual(obj.radius, 10)
self.assertEqual(obj.diameter, 20)
# check init with radius
obj = ClassUnderTest(radius=100)
self.assertEqual(obj.radius, 100)
self.assertEqual(obj.diameter, 200)
# check init with diameter
obj = ClassUnderTest(diameter=100)
self.assertEqual(obj.radius, 50)
self.assertEqual(obj.diameter, 100)
# check wither
obj = ClassUnderTest()
self.assertEqual(obj.with_radius(100).diameter, 200)
def test_subclassing(self):
class ClassUnderTest(ParametrizedObject):
foo = Parameter(default=42)
class SubClassUnderTest(ClassUnderTest):
bar = Parameter(default=77)
# check values
obj = SubClassUnderTest()
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 77)
def test_nested_parametrized_object(self):
class ContainedClassUnderTest(ParametrizedObject):
bar = Parameter(default=42)
class ClassUnderTest(ParametrizedObject):
foo = Parameter(default=ContainedClassUnderTest())
# check value
obj = ClassUnderTest()
self.assertEqual(obj.foo.bar, 42)
# check wither, contained class must return an instance of the
# containing class
obj = ClassUnderTest()
self.assertIsInstance(obj.foo.with_bar(10), ClassUnderTest)
self.assertEqual(obj.foo.with_bar(10).foo.bar, 10)
if __name__ == '__main__':
unittest.main()
|
from gevent import monkey
monkey.patch_all()
import click
from corgie import scheduling
from corgie.log import logger as corgie_logger
from corgie.log import configure_logger
@click.command()
@click.option('--lease_seconds', '-l', nargs=1, type=int, required=True)
@click.option('--queue_name', '-q', nargs=1, type=str, required=True)
@click.option('-v', '--verbose', count=True, help='Turn on debug logging')
def worker(lease_seconds, queue_name, verbose):
configure_logger(verbose)
executor = scheduling.Executor(queue_name=queue_name)
executor.execute(lease_seconds=lease_seconds)
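# Note (assumption): this click command is normally registered as a console-script
# entry point; to run this file directly one would add:
# if __name__ == '__main__':
#     worker()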
|
# coding: utf-8
# In[1]:
# Name: Alex Egg
# Email: eggie5@gmail.com
# PID: A53112354
from pyspark import SparkContext
sc = SparkContext()
# In[2]:
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint
from string import strip
from pyspark.mllib.tree import GradientBoostedTrees, GradientBoostedTreesModel
from pyspark.mllib.tree import RandomForest, RandomForestModel
from pyspark.mllib.util import MLUtils
# In[3]:
# Read the file into an RDD
# If doing this on a real cluster, you need the file to be available on all nodes, ideally in HDFS.
path='/HIGGS/HIGGS.csv'
inputRDD=sc.textFile(path)
# In[4]:
# Transform the text RDD into an RDD of LabeledPoints
Data=inputRDD.map(lambda line: [float(strip(x)) for x in line.split(',')]).map(lambda a: LabeledPoint(a[0], a[1:]))
# In[5]:
Data1=Data.sample(False,0.1).cache()
(trainingData,testData)=Data1.randomSplit([0.7,0.3])
# In[7]:
from time import time
errors={}
for depth in [10]:
start=time()
model=GradientBoostedTrees.trainClassifier(trainingData, categoricalFeaturesInfo={}, numIterations=10, maxDepth=depth,
learningRate=0.25, maxBins=35)
errors[depth]={}
dataSets={'train':trainingData,'test':testData}
for name in dataSets.keys(): # Calculate errors on train and test sets
data=dataSets[name]
Predicted=model.predict(data.map(lambda x: x.features))
LabelsAndPredictions = data.map(lambda lp: lp.label).zip(Predicted)
Err = LabelsAndPredictions.filter(lambda (v,p): v != p).count()/float(data.count())
errors[depth][name]=Err
print depth,errors[depth],int(time()-start),'seconds'
# Expected test error <= 27.5%
# Expected running time <= 350 seconds
# 10 {'test': 0.2806224626238058, 'train': 0.26497137638847573} 66 seconds
# 10 {'test': 0.27378476281278563, 'train': 0.24796675763802906} 67 seconds
|
################################################################
# Starts a specified WL managed server using the admin server
# The Admin server must be running
################################################################
import os
################################################################
#Functions
################################################################
def getServerStatus(svrName):
slrBean = cmo.lookupServerLifeCycleRuntime(svrName)
return slrBean.getState()
adminServerT3 = 't3://wladmin:7001'
adminUsername = 'weblogic'
adminPassword = os.environ.get("ADMIN_PASSWORD")
srvName = os.environ.get("HOSTNAME")
#Connect to admin server
connect(username=adminUsername, password=adminPassword, url=adminServerT3)
domainRuntime()
#Check if the server is already running
serverStatus = getServerStatus(srvName)
if (serverStatus == 'RUNNING'):
print 'Server is already running'
exit()
#Start the server async
start(srvName,'Server',block='false')
|
# Python Version: 3.x
# -*- coding: utf-8 -*-
import json
import posixpath
import re
import urllib.parse
from typing import *
import bs4
import requests
import onlinejudge.dispatch
import onlinejudge.implementation.logging as log
import onlinejudge.implementation.utils as utils
import onlinejudge.type
from onlinejudge.type import SubmissionError
# This is a workaround. AtCoder's servers sometime fail to send "Content-Type" field.
# see https://github.com/kmyk/online-judge-tools/issues/28 and https://github.com/kmyk/online-judge-tools/issues/232
def _request(*args, **kwargs):
resp = utils.request(*args, **kwargs)
log.debug('AtCoder\'s server said "Content-Type: %s"', resp.headers.get('Content-Type', '(not sent)'))
resp.encoding = 'UTF-8'
return resp
@utils.singleton
class AtCoderService(onlinejudge.type.Service):
def login(self, get_credentials: onlinejudge.type.CredentialsProvider, session: Optional[requests.Session] = None) -> bool:
session = session or utils.new_default_session()
url = 'https://practice.contest.atcoder.jp/login'
# get
resp = _request('GET', url, session=session, allow_redirects=False)
msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
for msg in msgs:
log.status('message: %s', msg)
if msgs:
return 'login' not in resp.url
# post
username, password = get_credentials()
resp = _request('POST', url, session=session, data={'name': username, 'password': password}, allow_redirects=False)
msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
AtCoderService._report_messages(msgs)
return 'login' not in resp.url # AtCoder redirects to the top page if success
def is_logged_in(self, session: Optional[requests.Session] = None) -> bool:
session = session or utils.new_default_session()
url = 'https://practice.contest.atcoder.jp/login'
resp = _request('GET', url, session=session, allow_redirects=False)
msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
return bool(msgs)
def get_url(self) -> str:
return 'https://atcoder.jp/'
def get_name(self) -> str:
return 'atcoder'
@classmethod
def from_url(cls, s: str) -> Optional['AtCoderService']:
# example: https://atcoder.jp/
# example: http://agc012.contest.atcoder.jp/
result = urllib.parse.urlparse(s)
if result.scheme in ('', 'http', 'https') \
and (result.netloc in ('atcoder.jp', 'beta.atcoder.jp') or result.netloc.endswith('.contest.atcoder.jp')):
return cls()
return None
@classmethod
def _get_messages_from_cookie(cls, cookies) -> List[str]:
msgtags = [] # type: List[str]
for cookie in cookies:
log.debug('cookie: %s', str(cookie))
if cookie.name.startswith('__message_'):
msg = json.loads(urllib.parse.unquote_plus(cookie.value))
msgtags += [msg['c']]
log.debug('message: %s: %s', cookie.name, str(msg))
msgs = [] # type: List[str]
for msgtag in msgtags:
soup = bs4.BeautifulSoup(msgtag, utils.html_parser)
msg = None
for tag in soup.find_all():
if tag.string and tag.string.strip():
msg = tag.string
break
if msg is None:
log.error('failed to parse message')
else:
msgs += [msg]
return msgs
@classmethod
def _report_messages(cls, msgs: List[str], unexpected: bool = False) -> bool:
for msg in msgs:
log.status('message: %s', msg)
if msgs and unexpected:
log.failure('unexpected messages found')
return bool(msgs)
class AtCoderProblem(onlinejudge.type.Problem):
def __init__(self, contest_id: str, problem_id: str):
self.contest_id = contest_id
self.problem_id = problem_id
self._task_id = None # type: Optional[int]
def download_sample_cases(self, session: Optional[requests.Session] = None) -> List[onlinejudge.type.TestCase]:
session = session or utils.new_default_session()
# get
resp = _request('GET', self.get_url(), session=session)
msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
if AtCoderService._report_messages(msgs, unexpected=True):
# example message: "message: You cannot see this page."
log.warning('are you logged in?')
return []
# parse
soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
samples = utils.SampleZipper()
lang = None
for pre, h3 in self._find_sample_tags(soup):
s = utils.textfile(utils.dos2unix(pre.string.lstrip()))
name = h3.string
l = self._get_tag_lang(pre)
if lang is None:
lang = l
elif lang != l:
log.info('skipped due to language: current one is %s, not %s: %s ', lang, l, name)
continue
samples.add(s, name)
return samples.get()
def _get_tag_lang(self, tag):
assert isinstance(tag, bs4.Tag)
for parent in tag.parents:
for cls in parent.attrs.get('class') or []:
if cls.startswith('lang-'):
return cls
def _find_sample_tags(self, soup) -> Generator[Tuple[bs4.Tag, bs4.Tag], None, None]:
for pre in soup.find_all('pre'):
log.debug('pre tag: %s', str(pre))
if not pre.string:
continue
prv = utils.previous_sibling_tag(pre)
# the first format: h3+pre
if prv and prv.name == 'h3' and prv.string:
yield (pre, prv)
else:
# ignore tags which are not samples
# example: https://atcoder.jp/contests/abc003/tasks/abc003_4
while prv is not None:
if prv.name == 'pre':
break
prv = utils.previous_sibling_tag(prv)
if prv is not None:
continue
# the second format: h3+section pre
if pre.parent and pre.parent.name == 'section':
prv = pre.parent and utils.previous_sibling_tag(pre.parent)
if prv and prv.name == 'h3' and prv.string:
yield (pre, prv)
def get_url(self) -> str:
return 'http://{}.contest.atcoder.jp/tasks/{}'.format(self.contest_id, self.problem_id)
def get_service(self) -> AtCoderService:
return AtCoderService()
@classmethod
def from_url(cls, s: str) -> Optional['AtCoderProblem']:
# example: http://agc012.contest.atcoder.jp/tasks/agc012_d
result = urllib.parse.urlparse(s)
dirname, basename = posixpath.split(utils.normpath(result.path))
if result.scheme in ('', 'http', 'https') \
and result.netloc.count('.') == 3 \
and result.netloc.endswith('.contest.atcoder.jp') \
and result.netloc.split('.')[0] \
and dirname == '/tasks' \
and basename:
contest_id = result.netloc.split('.')[0]
problem_id = basename
return cls(contest_id, problem_id)
# example: https://beta.atcoder.jp/contests/abc073/tasks/abc073_a
m = re.match(r'^/contests/([\w\-_]+)/tasks/([\w\-_]+)$', utils.normpath(result.path))
if result.scheme in ('', 'http', 'https') \
and result.netloc in ('atcoder.jp', 'beta.atcoder.jp') \
and m:
contest_id = m.group(1)
problem_id = m.group(2)
return cls(contest_id, problem_id)
return None
def get_input_format(self, session: Optional[requests.Session] = None) -> str:
session = session or utils.new_default_session()
# get
resp = _request('GET', self.get_url(), session=session)
msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
if AtCoderService._report_messages(msgs, unexpected=True):
return ''
# parse
soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
for h3 in soup.find_all('h3'):
if h3.string in ('入力', 'Input'):
tag = h3
for _ in range(3):
tag = utils.next_sibling_tag(tag)
if tag is None:
break
if tag.name in ('pre', 'blockquote'):
s = ''
for it in tag:
s += it.string or it # AtCoder uses <var>...</var> for math symbols
return s
return ''
def get_language_dict(self, session: Optional[requests.Session] = None) -> Dict[str, onlinejudge.type.Language]:
session = session or utils.new_default_session()
# get
url = 'http://{}.contest.atcoder.jp/submit'.format(self.contest_id)
resp = _request('GET', url, session=session)
msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
if AtCoderService._report_messages(msgs, unexpected=True):
return {}
# check whether logged in
path = utils.normpath(urllib.parse.urlparse(resp.url).path)
if path.startswith('/login'):
log.error('not logged in')
return {}
# parse
soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
select = soup.find('select', class_='submit-language-selector') # NOTE: AtCoder can vary languages depending on tasks, even in one contest. here, ignores this fact.
language_dict = {}
for option in select.find_all('option'):
language_dict[option.attrs['value']] = {'description': option.string}
return language_dict
def submit_code(self, code: bytes, language: str, session: Optional[requests.Session] = None) -> onlinejudge.type.DummySubmission:
assert language in self.get_language_dict(session=session)
session = session or utils.new_default_session()
# get
url = 'http://{}.contest.atcoder.jp/submit'.format(self.contest_id) # TODO: use beta.atcoder.jp
resp = _request('GET', url, session=session)
msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
if AtCoderService._report_messages(msgs, unexpected=True):
raise SubmissionError
# check whether logged in
path = utils.normpath(urllib.parse.urlparse(resp.url).path)
if path.startswith('/login'):
log.error('not logged in')
raise SubmissionError
# parse
soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
form = soup.find('form', action=re.compile(r'^/submit\?task_id='))
if not form:
log.error('form not found')
raise SubmissionError
log.debug('form: %s', str(form))
# post
task_id = self._get_task_id(session=session)
form = utils.FormSender(form, url=resp.url)
form.set('task_id', str(task_id))
form.set('source_code', code)
form.set('language_id_{}'.format(task_id), language)
resp = form.request(session=session)
resp.raise_for_status()
# result
msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
AtCoderService._report_messages(msgs)
if '/submissions/me' in resp.url:
# example: https://practice.contest.atcoder.jp/submissions/me#32174
# CAUTION: this URL is not a URL of the submission
log.success('success: result: %s', resp.url)
# NOTE: ignore the returned legacy URL and use beta.atcoder.jp's one
url = 'https://beta.atcoder.jp/contests/{}/submissions/me'.format(self.contest_id)
return onlinejudge.type.DummySubmission(url)
else:
log.failure('failure')
log.debug('redirected to %s', resp.url)
raise SubmissionError
def _get_task_id(self, session: Optional[requests.Session] = None) -> int:
if self._task_id is None:
session = session or utils.new_default_session()
# get
resp = _request('GET', self.get_url(), session=session)
msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
if AtCoderService._report_messages(msgs, unexpected=True):
raise SubmissionError
# parse
soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
submit = soup.find('a', href=re.compile(r'^/submit\?task_id='))
if not submit:
log.error('link to submit not found')
raise SubmissionError
m = re.match(r'^/submit\?task_id=([0-9]+)$', submit.attrs['href'])
assert m
self._task_id = int(m.group(1))
return self._task_id
class AtCoderSubmission(onlinejudge.type.Submission):
def __init__(self, contest_id: str, submission_id: int, problem_id: Optional[str] = None):
self.contest_id = contest_id
self.submission_id = submission_id
self.problem_id = problem_id
@classmethod
def from_url(cls, s: str, problem_id: Optional[str] = None) -> Optional['AtCoderSubmission']:
submission_id = None # type: Optional[int]
# example: http://agc001.contest.atcoder.jp/submissions/1246803
result = urllib.parse.urlparse(s)
dirname, basename = posixpath.split(utils.normpath(result.path))
if result.scheme in ('', 'http', 'https') \
and result.netloc.count('.') == 3 \
and result.netloc.endswith('.contest.atcoder.jp') \
and result.netloc.split('.')[0] \
and dirname == '/submissions':
contest_id = result.netloc.split('.')[0]
try:
submission_id = int(basename)
except ValueError:
                submission_id = None
if submission_id is not None:
return cls(contest_id, submission_id, problem_id=problem_id)
# example: https://beta.atcoder.jp/contests/abc073/submissions/1592381
m = re.match(r'^/contests/([\w\-_]+)/submissions/(\d+)$', utils.normpath(result.path))
if result.scheme in ('', 'http', 'https') \
                and result.netloc in ('atcoder.jp', 'beta.atcoder.jp') \
and m:
contest_id = m.group(1)
try:
submission_id = int(m.group(2))
except ValueError:
submission_id = None
if submission_id is not None:
return cls(contest_id, submission_id, problem_id=problem_id)
return None
def get_url(self) -> str:
return 'http://{}.contest.atcoder.jp/submissions/{}'.format(self.contest_id, self.submission_id)
def get_problem(self) -> AtCoderProblem:
if self.problem_id is None:
raise ValueError
return AtCoderProblem(self.contest_id, self.problem_id)
def get_service(self) -> AtCoderService:
return AtCoderService()
def download(self, session: Optional[requests.Session] = None) -> str:
session = session or utils.new_default_session()
# get
resp = _request('GET', self.get_url(), session=session)
msgs = AtCoderService._get_messages_from_cookie(resp.cookies)
if AtCoderService._report_messages(msgs, unexpected=True):
raise RuntimeError
# parse
soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
code = None
for pre in soup.find_all('pre'):
log.debug('pre tag: %s', str(pre))
prv = utils.previous_sibling_tag(pre)
if not (prv and prv.name == 'h3' and 'Source code' in prv.text):
continue
code = pre.string
if code is None:
log.error('source code not found')
raise RuntimeError
return code
onlinejudge.dispatch.services += [AtCoderService]
onlinejudge.dispatch.problems += [AtCoderProblem]
onlinejudge.dispatch.submissions += [AtCoderSubmission]
|
from . import resources, util
from .airbase import AirbaseClient, AirbaseRequest
__all__ = ["AirbaseClient", "AirbaseRequest", "resources", "util"]
|
import numpy as np
import tensorflow as tf
default_path = 'C:/Users/DimKa/Documents/'
def data(path=default_path):
log_root = path
(im_train, y_train), (im_test, y_test) = tf.keras.datasets.cifar10.load_data()
# Normalize to 0-1 range and subtract mean of training pixels
im_train = im_train / 255
im_test = im_test / 255
mean_training_pixel = np.mean(im_train, axis=(0, 1, 2))
x_train = im_train - mean_training_pixel
x_test = im_test - mean_training_pixel
image_shape = x_train[0].shape
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
return x_train, y_train, x_test, y_test, image_shape, log_root, labels
|
# -*- coding: utf-8 -*-
from cleo.commands import Command
class Foo5Command(Command):
def __init__(self):
pass
|
import os
from nltk import sent_tokenize
from utils.dataset import *
from dataset_loader.base_loader import LoaderBase
class Loader(LoaderBase):
def __init__(self, cfg):
super().__init__(cfg)
self.is_dialogue = False
def load(self):
for data_type in ['train', 'val', 'test']:
source_path = os.path.join(self.cfg.train.dataset_path, f"{data_type}.source")
target_path = os.path.join(self.cfg.train.dataset_path, f"{data_type}.target")
source = read_list_asline(source_path)
target = read_list_asline(target_path)
self.data[data_type] = [sent_tokenize(x) for x in source] # we need to split document into sentences
self.label[data_type] = target
return self.data, self.label
|
import pytest
from .. import *
def test_bytes_base32():
expr = Bytes("base32", "7Z5PWO2C6LFNQFGHWKSK5H47IQP5OJW2M3HA2QPXTY3WTNP5NU2MHBW27M")
assert expr.type_of() == TealType.bytes
expected = TealSimpleBlock([
TealOp(Op.byte, "base32(7Z5PWO2C6LFNQFGHWKSK5H47IQP5OJW2M3HA2QPXTY3WTNP5NU2MHBW27M)")
])
actual, _ = expr.__teal__()
assert actual == expected
def test_bytes_base32_empty():
expr = Bytes("base32", "")
assert expr.type_of() == TealType.bytes
expected = TealSimpleBlock([
TealOp(Op.byte, "base32()")
])
actual, _ = expr.__teal__()
assert actual == expected
def test_bytes_base64():
expr = Bytes("base64", "Zm9vYmE=")
assert expr.type_of() == TealType.bytes
expected = TealSimpleBlock([
TealOp(Op.byte, "base64(Zm9vYmE=)")
])
actual, _ = expr.__teal__()
assert actual == expected
def test_bytes_base64_empty():
expr = Bytes("base64", "")
assert expr.type_of() == TealType.bytes
expected = TealSimpleBlock([
TealOp(Op.byte, "base64()")
])
actual, _ = expr.__teal__()
assert actual == expected
def test_bytes_base16():
expr = Bytes("base16", "A21212EF")
assert expr.type_of() == TealType.bytes
expected = TealSimpleBlock([
TealOp(Op.byte, "0xA21212EF")
])
actual, _ = expr.__teal__()
assert actual == expected
def test_bytes_base16_prefix():
expr = Bytes("base16", "0xA21212EF")
assert expr.type_of() == TealType.bytes
expected = TealSimpleBlock([
TealOp(Op.byte, "0xA21212EF")
])
actual, _ = expr.__teal__()
assert actual == expected
def test_bytes_base16_empty():
expr = Bytes("base16", "")
assert expr.type_of() == TealType.bytes
expected = TealSimpleBlock([
TealOp(Op.byte, "0x")
])
actual, _ = expr.__teal__()
assert actual == expected
def test_bytes_utf8():
expr = Bytes("hello world")
assert expr.type_of() == TealType.bytes
expected = TealSimpleBlock([
TealOp(Op.byte, "\"hello world\"")
])
actual, _ = expr.__teal__()
assert actual == expected
def test_bytes_utf8_special_chars():
expr = Bytes("\t \n \r\n \\ \" \' 😀")
assert expr.type_of() == TealType.bytes
expected = TealSimpleBlock([
TealOp(Op.byte, "\"\\t \\n \\r\\n \\\\ \\\" \' \\xf0\\x9f\\x98\\x80\"")
])
actual, _ = expr.__teal__()
assert actual == expected
def test_bytes_utf8_empty():
expr = Bytes("")
assert expr.type_of() == TealType.bytes
expected = TealSimpleBlock([
TealOp(Op.byte, "\"\"")
])
actual, _ = expr.__teal__()
assert actual == expected
def test_bytes_invalid():
with pytest.raises(TealInputError):
Bytes("base23", "")
with pytest.raises(TealInputError):
Bytes("base32", "Zm9vYmE=")
with pytest.raises(TealInputError):
Bytes("base64", "?????")
with pytest.raises(TealInputError):
Bytes("base16", "7Z5PWO2C6LFNQFGHWKSK5H47IQP5OJW2M3HA2QPXTY3WTNP5NU2MHBW27M")
|
# -*- coding: utf-8 -*-
"""
.. module:: perform_saob
:synopsis: module performing a Systematic Analysis Of Biases
.. moduleauthor:: Aurore Bussalb <aurore.bussalb@mensiatech.com>
"""
import warnings
import pandas as pd
import numpy as np
import random
from sklearn.linear_model import LassoCV, LassoLarsIC
from sklearn.model_selection import LeaveOneOut
import statsmodels.api as sm
from sklearn import tree
import graphviz
def effect_size_within_subjects(mean_post_test_treatment, mean_pre_test_treatment, std_post_test_treatment, std_pre_test_treatment):
"""Computes effects sizes inside a treatment group, this effect size reflects the evolution inside a group between pre and post test. The
fomula used comes from Cohen J. (1988), *Statistical Power Analysis for the Behavioral Sciences*.
Parameters
----------
    mean_post_test_treatment: float
        Mean score after the treatment.
    mean_pre_test_treatment: float
        Mean score before the treatment.
    std_post_test_treatment: float
        Standard deviation of the score after the treatment.
    std_pre_test_treatment: float
        Standard deviation of the score before the treatment.
Returns
-------
effect_size: float
Value estimating the efficacy of the treatment.
If it's negative, the result is in favor of the treatment.
Notes
-----
    Effect sizes computed for each study correspond to the effect sizes within subjects. Thus, the effect size is
    computed from the pre and post test values of the treatment group, so a control group is not necessary here.
"""
Effect_size_treatment = (mean_post_test_treatment - mean_pre_test_treatment)/np.sqrt((std_pre_test_treatment**2 + std_post_test_treatment**2)/2)
return Effect_size_treatment
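# Worked example (illustrative, not from the original module): with a pre-test mean of
# 30 (SD 6) and a post-test mean of 24 (SD 6), the within-subjects effect size is
# (24 - 30) / sqrt((6**2 + 6**2) / 2) = -6 / 6 = -1.0, a large effect in favor of the
# treatment when lower scores indicate fewer symptoms.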
def normalize_severity_at_baseline(mean_pre_test_treatment, maximum_on_clinical_scale):
"""Normalizes the pre-test scores in order to include them in the SAOB analysis.
Parameters
----------
    mean_pre_test_treatment: float
        Mean score before the treatment.
maximum_on_clinical_scale: int
Maximum score possible to be obtained on the clinical scale.
Returns
-------
severity_at_baseline: float
Normalized pre-test score.
"""
severity_at_baseline = mean_pre_test_treatment/maximum_on_clinical_scale
return severity_at_baseline
def detect_and_reject_outliers(df, y):
"""Detects and rejects outliers in the distribution of within effect sizes.
Studies with a within effect size out of the bounds are excluded.
Parameters
----------
df: pandas.DataFrame
Dataframe containing all observations in rows, factors and also values to compute the effect size within subjects in columns.
It is obtained after the import of the csv file containing all data by ``import_csv_for_factors``.
y: pandas.Series
Effect size within subjects computed for each observation.
Returns
-------
df: pandas.DataFrame
Dataframe containing all observations with outliers excluded.
y: pandas.Series
Effect size within subjects with outliers excluded.
"""
# Compute mean and standard deviation of all the within effect sizes
mean_wES = y.mean()
std_wES = y.std()
# Compute the thresholds of acceptance
bound_inf = mean_wES - 3*std_wES
bound_sup = mean_wES + 3*std_wES
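    # i.e. the classic three-sigma rule: effect sizes more than three standard
    # deviations from the mean are treated as outliers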
# Detect outliers
df_outlier = df[ (y < bound_inf) | (y > bound_sup) ]
    # Reject outliers
df = df.drop(df_outlier.index.values, axis=0)
y = y.drop(df_outlier.index.values, axis=0)
return df, y
def preprocess_factors(df):
"""Preprocesses factors before running the SAOB.
Factors with too many missing values and with too many identical observations will be removed.
Besides, values can be standardized. The categorical variables will be coded as dummies.
Parameters
----------
df: pandas.DataFrame
Dataframe containing all observations in rows, factors and also values to compute the effect size within subjects in columns.
It is obtained after the import of the csv file containing all data by ``import_csv_for_factors`` and the outlier rejection.
Returns
-------
X: pandas.DataFrame
Preprocessed dataframe containing all observations in rows and factors in columns.
Factors with too many missing values and with too many identical observations have been removed.
Categorical variables are coded in dummies.
Values here are standardized.
X_non_standardized: pandas.DataFrame
Preprocessed dataframe containing all observations in rows and factors in columns.
Factors with too many missing values and with too many identical observations have been removed.
Categorical variables are coded in dummies.
Values here are not standardized.
"""
# Dataframe containing only factors
X = df.drop(['mean_post_test_treatment', 'mean_pre_test_treatment','n_treatment',
'raters', 'score_name', 'std_post_test_treatment',
'std_pre_test_treatment', 'effect_size_treatment', 'maximum_on_clinical_scale'], axis=1)
# Remove factors with too few observations
X_number_of_nans = X.isnull().sum()
columns_to_remove_nans = X_number_of_nans[(X_number_of_nans > round(len(X)*20/100) + 1)]
X = X.drop(columns_to_remove_nans.index.values, axis=1)
# Turn into dummy variables the categorical variables
categorical_factors = list(set(X.columns) - set(X._get_numeric_data().columns))
X = pd.get_dummies(X, columns=categorical_factors, drop_first=True)
# Remove categorical factors too homogeneous
bool_cols = [col for col in X if X[col].dropna().value_counts().index.isin([0,1]).all()]
X_categorical = X[bool_cols]
X_categorical_count = X_categorical.apply(pd.value_counts)
columns_to_remove_no = X_categorical_count.iloc[0][(X_categorical_count.iloc[0] > round(len(df)*80/100) + 1)]
columns_to_remove_yes = X_categorical_count.iloc[1][(X_categorical_count.iloc[1] > round(len(df)*80/100) + 1)]
columns_to_remove = columns_to_remove_no.index.tolist() + columns_to_remove_yes.index.tolist()
X = X.drop(columns_to_remove, axis=1)
# Put -1 instead of NaNs in continuous factors
X.fillna(value=-1, inplace=True)
# Standardization of the independent variables for WLS and Lasso
X_non_standardized = X
X = (X - X.mean())/X.std()
return X, X_non_standardized
def weighted_linear_regression(df, X, y):
"""Performs Weighted Least Squares.
Dependent variable = effect size within subjects; independent variable = factors.
P independent variables and n observations.
model: WXB = Wy; W (nxn) diagonal matrix of weights, y (nx1) column vector
of dependent variables, X (nxP) matrix of independent variables, B (Px1)
column vector of coefficients.
Parameters
----------
df: pandas.DataFrame
Dataframe containing all observations in rows, factors in columns and also values to compute the effect size within subjects.
It is obtained after the import of the csv file containing all data by ``import_csv_for_factors`` and the outlier rejection.
X: pandas.DataFrame
Preprocessed dataframe containing all observations in rows and factors in columns (the independent variables).
Factors with too many missing values and with too many identical observations have been removed. Besides, values have been standardized.
Categorical variables are coded in dummies.
This dataframe is obtained thanks to the ``preprocess_factors function``.
y: pandas.Series
Effect size within subjects computed for each observation (the dependent variable) obtained after the outlier rejection.
Returns
-------
summary: statsmodels.iolib.summary.Summary
Summary of the WLS regression.
In particular values of coefficients, associated pvalues, F-statistics, Prob(Omnibus), skew and kurtosis.
"""
# Find the number of scales per study
df['number_of_scales'] = df.index.value_counts()
# Compute the weight of each ES: number_of_treatment_patients_in_the_study/number_of_scales_in_the_study
df['weight'] = df['n_treatment']/df['number_of_scales']
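    # e.g. a study with 20 treated patients reporting on 2 scales contributes two rows,
    # each weighted 20 / 2 = 10, keeping the study's total weight proportional to its size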
W = np.diag(df['weight'])
# Get rank of the moment matrix and its condition number: it has to be full rank,
# to have eigen values > 0 and a high condition number to be invertible
rank_X = np.linalg.matrix_rank(X)
X_transpose = X.transpose()
W_transpose = W.transpose()
rank_W = np.linalg.matrix_rank(W)
moment_matrix = np.linalg.multi_dot([X_transpose,W_transpose,W,X])
moment_matrix_rank = np.linalg.matrix_rank(moment_matrix)
eigen_values_moment_matrix = np.linalg.eigvals(moment_matrix)
condition_number = np.linalg.cond(moment_matrix)
if moment_matrix.shape[0] == moment_matrix_rank:
print('Moment matrix is invertible')
else:
        warnings.warn('Moment matrix is not invertible, be careful while interpreting the results')
# WLS
weighted_regression = sm.WLS(y, sm.add_constant(X), weights=df['weight'])
weighted_regression_fit = weighted_regression.fit()
summary = weighted_regression_fit.summary()
return summary
def ordinary_linear_regression(X, y):
"""Performs Ordinary Least Squares.
Dependent variable = effect size within subjects; independent variable = factors.
P independent variables and n observations.
model: XB = y; y (nx1) column vector of dependent variables, X (nxP) matrix
of independent variables, B (Px1) column vector of coefficients.
Parameters
----------
X: pandas.DataFrame
Preprocessed dataframe containing all observations in rows and factors in columns (the independent variables).
Factors with too many missing values and with too many identical
observations have been removed. Besides, values have been standardized.
Categorical variables are coded in dummies.
This dataframe is obtained thanks to the ``preprocess_factors`` function.
y: pandas.Series
Effect size within subjects computed for each observation (the dependent variable) obtained after the outlier rejection.
Returns
-------
summary_ols: statsmodels.iolib.summary.Summary
Summary of the OLS regression.
In particular values of coefficients, associated pvalues, F-statistics, Prob(Omnibus), skew and kurtosis.
"""
# Get rank of the moment matrix and its condition number: it has to be full rank,
# to have eigen values > 0 and a high condition number to be invertible
rank_X = np.linalg.matrix_rank(X)
X_transpose = X.transpose()
moment_matrix = np.dot(X_transpose,X)
moment_matrix_rank = np.linalg.matrix_rank(moment_matrix)
eigen_values_moment_matrix = np.linalg.eigvals(moment_matrix)
condition_number = np.linalg.cond(moment_matrix)
if moment_matrix.shape[0] == moment_matrix_rank:
print('Moment matrix is invertible')
else:
        warnings.warn('Moment matrix is not invertible, be careful while interpreting the results')
# Run the OLS
regression = sm.OLS(y, sm.add_constant(X))
regression_fit = regression.fit()
summary_ols = regression_fit.summary()
return summary_ols
def regularization_lassocv(X, y):
"""Performs Lasso linear model with iterative fitting along a regularization path.
The best model is selected by cross-validation.
Dependent variable = effect size within subjects; independent variable = factors.
P independent variables and n observations.
Parameters
----------
X: pandas.DataFrame
Preprocessed dataframe containing all observations in rows and factors in columns (the independent variables).
Factors with too many missing values and with too many identical observations have been removed. Besides, values have been standardized.
Categorical variables are coded in dummies.
This dataframe is obtained thanks to the ``preprocess_factors`` function.
y: pandas.Series
Effect size within subjects computed for each observation (the dependent variable) obtained after the outlier rejection.
Returns
-------
coeff: pandas.DataFrame
Results of the Lasso.
Column with coefficients obtained after regularization and the names of the associated factors.
mse_test: numpy.ndarray
Mean square error for the test set on each fold, varying alpha.
Shape (n_alphas, n_folds).
alphas: numpy.ndarray
The grid of alphas used for fitting.
Shape (n_alphas).
alpha: float
The amount of penalization chosen by cross validation.
"""
# Cross validation (leave one out) to choose the tuning parameter alpha
loo = LeaveOneOut()
    lassocv = LassoCV(alphas=None, cv=loo)  # leave-one-out: LassoCV performs the CV internally on the data it receives
    # Fit on all the data; the best alpha is chosen during this fit
lassocv.fit(X, y)
alpha = lassocv.alpha_
alphas = lassocv.alphas_
mse_test = lassocv.mse_path_
# Coefficients that don't explain the model are now reduced to exactly zero
coeff = pd.DataFrame({'Factors':X.columns, 'Coefficients':lassocv.coef_})
return coeff, mse_test, alphas, alpha
def regularization_lassoAIC(X, y):
"""Performs Lasso model fit with Lars using AIC for model selection.
Dependent variable = effect size within subjects; independent variable = factors.
P independent variables and n observations.
model: XB = y; y (nx1) column vector of dependent variables, X (nxP) matrix
of independent variables, B (Px1) column vector of coefficients.
Parameters
----------
X: pandas.DataFrame
Preprocessed dataframe containing all observations in rows and factors in columns (the independent variables).
Factors with too many missing values and with too many identical
observations have been removed. Besides, values have been standardized.
Categorical variables are coded in dummies.
This dataframe is obtained thanks to the ``preprocess_factors`` function.
y: pandas.Series
Effect size within subjects computed for each observation (the dependent variable) obtained after the outlier rejection.
Returns
-------
coeff_aic: pandas.DataFrame
Results of the Lasso.
Column with coefficients obtained after regularization and the names of the associated factors.
"""
model = LassoLarsIC(criterion='aic')
model.fit(X, y)
coeff_aic = pd.DataFrame({'Factors':X.columns, 'Coefficients':model.coef_})
return coeff_aic
def decision_tree(X_non_standardized, y):
"""Computes a Decision tree.
Non linear and hierarchical method.
Parameters
----------
    X_non_standardized: pandas.DataFrame
        Preprocessed dataframe containing all observations in rows and factors in columns (the independent variables).
        Factors with too many missing values and with too many identical
        observations have been removed. Values here are not standardized.
        Categorical variables are coded in dummies.
        This dataframe is obtained thanks to the ``preprocess_factors`` function.
y: pandas.Series
Effect size within subjects computed for each observation (the dependent variable) obtained after the outlier rejection.
Returns
-------
decision_tree: pdf
Decision tree obtained.
"""
# Decision tree (criterion: mean square error)
clf = tree.DecisionTreeRegressor(criterion='mse', min_samples_leaf=8)
clf.fit(X_non_standardized, y)
score_decision_tree = clf.score(X_non_standardized, y)
print('R² decision tree', score_decision_tree)
# Visualization
dot_data = tree.export_graphviz(clf, feature_names=X_non_standardized.columns, out_file=None, rounded=True)
graph = graphviz.Source(dot_data)
graph.render('decision_tree', view=True)
|
# -*- coding: utf-8 -*-
# Redis configuration; replace with your own values
REDIS_HOST = 'your_redis_ip'
REDIS_PORT = 6379
REDIS_PASSWORD = 'your_redis_password'
REDIS_DB = 0
# Format of the Redis key; the user's open_id is interpolated as a variable
REDIS_KEY='wechat-dialog:demo:%(open_id)s'
# Initial routing: assign the matching dialog handler based on the user's message
ROUTER = {
    'text': [  # text messages, matched against the message content
        ('^累加器$', 'accumulator'),  # entries are (<pattern>, <handler>); the pattern is a regular expression
        ('^github$', 'show_links'),
        ('^会话记录$', 'context_origin'),  # uses is_replay to avoid re-running a piece of code
        ('^会话菜单$', 'context_menu'),  # raises UnexpectAnswer to treat an invalid input as the entry point of the next dialog
        ('.*', 'show_help'),  # default handler; be sure to specify one
    ],
    'event': [  # event messages, matched against the event type: follow (subscribe) and unfollow (unsubscribe)
        ('^subscribe$', 'show_welcome'),
        ('.*', 'show_help'),  # default handler; be sure to specify one
    ],
}
HELP = '''这个公众号是wechat-dialog项目的demo,可以用编写命令行程序的体验来编写公众号深度会话
源代码在GITHUB上,输入"github"(全小写)可以获得链接
共有三个DEMO:
- 回复"累加器"玩一玩累加,这是基本功能
- 回复"会话记录"看一下如何防止数据重复写入
- 回复"会话菜单"了解如何静默切换会话逻辑'''
# The functions below are dialog handlers - each one is a Python generator.
# A handler takes one argument, to_user: the user's open_id.
# It returns a tuple: (<MsgType>, <content>).
# Only text messages (TextMsg) and link messages (NewsMsg) have been tested so far.
# See show_help and show_links for examples.
# accumulator is the heart of the demo - it implements an accumulator through a question/answer dialog.
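# Minimal handler sketch following the protocol above (illustrative; `echo` is not part
# of the demo and is not wired into ROUTER):
def echo(to_user):
    yield None                           # initialization handshake
    msg_content, is_replay = yield None  # receive the user's message content
    return ('TextMsg', msg_content)      # echo it back and end the dialog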
def show_help(to_user):
    # The next two lines perform initialization; keep them
    yield None
    msg_content, is_replay = yield None  # receive the user's message content
    return ('TextMsg', HELP)  # return when there is no follow-up dialog; the format is (<MsgType>, <content>)
def show_welcome(to_user):
yield None
msg_content, is_replay = yield None
msg = '感谢关注!\n'+HELP
return ('TextMsg', msg)
def show_links(to_user):
yield None
msg_content, is_replay = yield None
return ('NewsMsg', [
{
'title': '项目源码',
'description': 'github上的项目源码,喜欢的话顺手点个STAR吧(๑•ᴗ•๑)',
'url': 'https://github.com/arthurmmm/wechat-dialog',
            # pic_url is optional; without it the default thumbnail is used,
            # which can be configured in reply.py.
'pic_url': 'https://help.github.com/assets/images/site/be-social.gif',
},
        # at most 8 links are supported
])
def accumulator(to_user):
yield None
msg_content, is_replay = yield None
num_count, is_replay = yield ('TextMsg', '您需要累加几个数字?')
try:
num_count = int(num_count)
except Exception:
return ('TextMsg', '输入不合法!我们需要一个整数,请输入"开始"重新开启累加器')
res = 0
for i in range(num_count):
num, is_replay = yield ('TextMsg', '请输入第%s个数字, 目前累加和:%s' % (i+1, res))
try:
num = int(num)
except Exception:
return ('TextMsg', '输入不合法!我们需要一个整数,请输入"开始"重新开启累加器')
res += num
    # Note: the final message must use return, not yield! return marks the end of the dialog.
return ('TextMsg', '累加结束,累加和: %s' % res)
context_start = None
def context_origin(to_user):
yield None
msg_content, is_replay = yield None
global context_start
from datetime import datetime
    if not is_replay:  # update context_start only once, avoiding duplicate work during replay
context_start = datetime.now()
context_start = datetime.strftime(context_start, '%Y-%m-%d %H:%M:%S')
msg_content, is_replay = yield ('TextMsg', '您在%s开启了这段对话,随便聊聊吧,回复“结束”结束对话' % context_start)
while True:
if msg_content == '结束':
return ('TextMsg', '会话结束,这段会话的开始时间是%s' % context_start)
else:
msgtime = datetime.now()
msgtime = datetime.strftime(msgtime, '%Y-%m-%d %H:%M:%S')
msg_content, is_replay = yield ('TextMsg', '%s:%s' % (msgtime, msg_content))
def context_menu(to_user):
yield None
msg_content, is_replay = yield None
msg_content, is_replay = yield ('TextMsg', '菜单:\n1. 苹果\n2. 香蕉\n回复数字选择,如果回复的内容不合法会直接跳转,比如回复"累加器"会直接跳转到累加器对话。')
if msg_content == '1':
return ('TextMsg', '您选择的是:苹果')
elif msg_content == '2':
return ('TextMsg', '您选择的是:香蕉')
else:
        # no special closing message needed; jump straight into the next dialog
from wechat.bot import UnexpectAnswer
raise UnexpectAnswer
|
import sys, os, subprocess, json, MySQLdb, time
service_root = os.path.dirname(__file__)
if service_root != '':
service_root = service_root + '/'
sys.path.insert(0, os.path.join(service_root, '../httpsqs'))
from httpsqs_client import httpsqs
converter = "pdf2swf %s -o %s -s /data/xpdf-chinese-simplified/"
def convert():
while True:
try:
sqs = httpsqs('127.0.0.1', '1218', 'pdf2swf')
result = sqs.gets('pdf2swf')
if result:
data = result['data']
if data != False and data != 'HTTPSQS_ERROR':
op = json.loads(data)
input_file = service_root + '../../data/' + op['folder'] + op['name']
output_file_folder = service_root + '../../public/attachments/' + op['folder']
output_file = output_file_folder + op['raw_name'] + '.swf'
try:
if not os.path.exists(output_file_folder):
os.makedirs(output_file_folder)
status = subprocess.call(converter % (input_file, output_file), shell = True)
sql = "UPDATE docs SET path = '%s', status = '%s' WHERE id=%d"
if status == 0:
sql = sql % (op['folder'] + op['raw_name'] + '.swf', 'success', op['id'])
else:
sql = sql % ('', 'fail', op['id'])
db = MySQLdb.connect(host='localhost',user='pdf2swf',passwd='VMeHUf5zS5XnKWh4',db='wenku-demo', read_default_file="/data/lampstack/mysql/my.cnf")
cursor = db.cursor()
cursor.execute(sql)
cursor.close()
db.close()
except Exception, e:
print e
except Exception, e:
print e
time.sleep(1)
if __name__ == "__main__":
convert()
|
from myutils import *
import numpy as np
from time import *
from pcheck import *
import sys
from copy import *
def filter_data(F, R, variants):
m = len(F)
n = len(F[0])
F1 = [[zero for _ in range(len(F[0]))] for _ in range(len(F))]
R1 = [[zero for _ in range(len(R[0]))] for _ in range(len(R))]
for i in range(0, m):
for j in range(0, n):
if F[i][j] >= 0.05:
F1[i][j] = F[i][j]
R1[i][j] = R[i][j]
else:
F1[i][j] = zero
R1[i][j] = zero
F1 = list(map(list, zip(*F1)))
R1 = list(map(list, zip(*R1)))
chosen_variants = []
removed_variants = []
F2 = []
R2 = []
for i in range(0, n):
seen_times = []
for j in range(0, m):
if F1[i][j] > zero:
seen_times.append(j)
if len(seen_times) >= 2 and seen_times == list(range(min(seen_times), max(seen_times) + 1)):
chosen_variants.append(variants[i])
F2.append(F1[i])
R2.append(R1[i])
else:
removed_variants.append(variants[i])
F2 = list(map(list, zip(*F2)))
R2 = list(map(list, zip(*R2)))
if chosen_variants:
F3, R3, rearranged_variants, _ = rearrange_penalty(F2, R2, chosen_variants)
return F3, R3, rearranged_variants, removed_variants
else:
return F2, R2, chosen_variants, removed_variants
def get_max_diff(l1, l2):
return max(map(lambda x, y: abs(x - y), l1, l2))
def diff_below_cutoff(l1, l2, cutoff):
if len(l1) != len(l2):
return False
for i in range(0, len(l1)):
if abs(l1[i] - l2[i]) > cutoff:
return False
return True
def average_column(l1, l2):
# Assume equal length
return list(map(lambda x, y: (x + y) / 2, l1, l2))
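# cluster_data below greedily merges variant columns: each column joins the first
# existing cluster whose representative differs by at most `threshold` in every row
# (see diff_below_cutoff); a merged cluster keeps the element-wise average of its
# columns and joins the variant names with underscores.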
def cluster_data(F, R, variants, threshold):
F_t = list(map(list, zip(*F)))
R_t = list(map(list, zip(*R)))
str_variants = list(map(str, variants))
clusters = [(str_variants[0], F_t[0], R_t[0])]
i = 1
while i < len(F_t):
new_cluster = True
for j in range(0, len(clusters)):
(header, column, r_column) = clusters[j]
if diff_below_cutoff(F_t[i], column, threshold):
clusters[j] = (
header + "_" + str_variants[i], average_column(F_t[i], column), average_column(R_t[i], r_column))
new_cluster = False
break
if new_cluster:
clusters.append((str_variants[i], F_t[i], R_t[i]))
i = i + 1
cluster_variants = []
cluster_F = []
cluster_R = []
for (cluster_variant, column, r_column) in clusters:
cluster_variants.append(cluster_variant)
cluster_F.append(column)
cluster_R.append(r_column)
cluster_F = list(map(list, zip(*cluster_F)))
cluster_R = list(map(list, zip(*cluster_R)))
return cluster_F, cluster_R, cluster_variants
def best_scoring_ancestor(F, S, order, parents, clone_index, prev_clone_arrival_time, curr_clone_arrival_time):
num_clones = len(order)
scores = [zero] * len(order)
row_index = prev_clone_arrival_time
for i in range(0, clone_index):
clone = order[i]
scores[clone] = F[row_index][clone]
children = parents_to_children(parents, num_clones)
new_scores = [zero] * len(order)
for i in range(0, clone_index):
clone = order[i]
children_sum = zero
cur_children = children[clone]
for child in cur_children:
children_sum = children_sum + F[row_index][child]
new_scores[clone] = scores[clone] - children_sum
penalized_scores = [zero] * len(order)
for i in range(0, clone_index):
clone = order[i]
cur_children = children[clone]
cur_children.append(order[clone_index])
penalty = zero
for t in range(curr_clone_arrival_time, len(F)):
children_sum = zero
children_variance = zero
for child in cur_children:
children_sum = children_sum + F[t][child]
children_variance = children_variance + (S[t][child] ** 2)
temp = F[t][clone] - children_sum
if temp < 0:
denominator = Decimal(sqrt((S[t][clone] ** 2) + children_variance))
# if denominator > zero:
penalty = penalty + (temp / denominator)
penalized_scores[clone] = new_scores[clone] + penalty
temp = []
for i in range(0, clone_index):
temp.append((order[i], penalized_scores[order[i]]))
ancestor = max(temp, key=lambda i: i[1])[0]
return ancestor, new_scores[ancestor], penalized_scores[ancestor] - new_scores[ancestor]
def tree_score(F, S, order, parents, arrival_times, upto):
num_clones = len(order)
children = parents_to_children(parents, num_clones)
final_score = one
final_penalty = zero
for clone_index in range(1, upto + 1):
prev_clone = order[clone_index - 1]
curr_clone = order[clone_index]
parent = parents[curr_clone]
prev_clone_arrival_time = arrival_times[prev_clone]
curr_clone_arrival_time = arrival_times[curr_clone]
row_index = prev_clone_arrival_time
score = F[row_index][parent]
children_sum = zero
cur_children = children[parent]
for child in cur_children:
children_sum = children_sum + F[row_index][child]
new_score = score - children_sum
penalty = zero
for t in range(curr_clone_arrival_time, len(F)):
children_sum = zero
children_variance = zero
for child in cur_children:
children_sum = children_sum + F[t][child]
children_variance = children_variance + (S[t][child] ** 2)
temp = F[t][parent] - children_sum
if temp < 0:
denominator = Decimal(sqrt((S[t][parent] ** 2) + children_variance))
if denominator > zero:
# print(str(temp) + "\t" + str(denominator) + "\t" + str(temp / denominator))
penalty = penalty + (temp / denominator)
# penalty = penalty + temp
final_penalty = final_penalty + penalty
final_score = final_score * new_score
return final_score, final_penalty
def new_algorithm(F, R, k=0):
# Empty check
if len(F) == 0:
return [], -1, [] # parents_vector, score
S = rd_to_sd(F, R)
# Add founder
my_F = add_founder(F)
my_S = add_founder_penalty(S)
num_clones = len(my_F[0])
# Get the step structure of the step matrix
steps, arrival_times = get_step_structure(my_F)
steps = correct_steps_for_known_founders(steps, k)
parents = {}
for clone in range(1, num_clones):
parents[clone] = -1
order = list(range(0, num_clones))
final_score = one
final_penalty = zero
clone_counter = 1
starting_step = 1
if k > 0:
for i in range(0, k):
parents[i + 1] = 0
if k > 1:
G, vertices = get_partial_order(my_F, [steps[1][0], steps[1][-1]])
ts = topological_sort(G, vertices)
order = [0] + ts + list(range(steps[1][-1] + 1, num_clones))
score, penalty = tree_score(my_F, my_S, order, parents, arrival_times, steps[1][-1])
max_score = score
max_penalty = penalty
max_order = order
for i in range(0, len(ts)):
for j in range(0, i):
new_ts = ts[:]
new_ts.remove(ts[i])
new_ts.insert(j, ts[i])
temp_order = [0] + new_ts + list(range(steps[1][-1] + 1, num_clones))
temp_score, temp_penalty = tree_score(my_F, my_S, temp_order, parents, arrival_times, i + 1)
if (temp_score + temp_penalty) > (max_score + max_penalty):
max_score = temp_score
max_penalty = temp_penalty
max_order = temp_order
order = max_order[:]
final_score = max_score
final_penalty = max_penalty
clone_counter = steps[1][-1] + 1
starting_step = 2
else:
clone_counter = 2
starting_step = 2
for step_counter in range(starting_step, len(steps)):
step = steps[step_counter]
if len(step) == 1:
clone = order[clone_counter]
parent, score, penalty = best_scoring_ancestor(my_F, my_S, order, parents, clone_counter,
arrival_times[order[clone_counter - 1]],
arrival_times[order[clone_counter]])
parents[clone] = parent
final_score = final_score * score
final_penalty = final_penalty + penalty
clone_counter = clone_counter + 1
else:
G, vertices = get_partial_order(my_F, [step[0], step[-1]])
ts = topological_sort(G, vertices)
new_order = order[:step[0]] + ts + order[step[-1] + 1:]
for i in range(0, len(ts)):
clone = new_order[clone_counter]
parent, score, penalty = best_scoring_ancestor(my_F, my_S, new_order, parents, clone_counter,
arrival_times[order[clone_counter - 1]],
arrival_times[order[clone_counter]])
max_parents = parents.copy()
max_parents[clone] = parent
max_score = final_score * score
max_penalty = final_penalty + penalty
max_order = new_order[:]
for j in range(0, i):
new_ts = ts[:]
new_ts.remove(ts[i])
new_ts.insert(j, ts[i])
common_ancestor = parents[new_ts[j + 1]]
temp_parents = parents.copy()
temp_parents[clone] = common_ancestor
temp_order = order[:step[0]] + new_ts + order[step[-1] + 1:]
temp_score, temp_penalty = tree_score(my_F, my_S, temp_order, temp_parents, arrival_times,
clone_counter)
if (temp_score + temp_penalty) > (max_score + max_penalty):
max_parents = temp_parents.copy()
max_score = temp_score
max_penalty = temp_penalty
max_order = temp_order
parents = max_parents.copy()
final_score = max_score
final_penalty = max_penalty
new_order = max_order[:]
clone_counter = clone_counter + 1
order = new_order[:]
parents_vector = [0]
for i in range(1, num_clones):
parents_vector.append(parents[i])
return parents_vector, final_score + final_penalty, order
def valid_parent_value(parents, F, fail_threshold):
# F may not be square
m = len(F)
vp = True
cur_parent = parents[-1]
children = get_children(parents, cur_parent)
for t in range(cur_parent, m):
children_sum = zero
for child in children:
children_sum += F[t][child]
if F[t][cur_parent] - children_sum < fail_threshold:
vp = False
return vp
return vp
def valid_parent_order(order_validity, parents):
cur_variant = len(parents) - 1
cur_path = deepcopy(order_validity[cur_variant])
temp = parents[cur_variant]
while temp != 0:
cur_path.discard(temp)
temp = parents[temp]
cur_path.discard(0)
return len(cur_path) == 0
def c_row(f_row, parents):
m = len(f_row)
C = []
for parent in range(0, m):
children = get_children(parents, parent)
children_sum = zero
for child in children:
children_sum += f_row[child]
C.append(f_row[parent] - children_sum)
return C
def smart_predict_original(F, m, fail_threshold, clones=[], S=[]):
# Assumes square F
if len(F) == 0:
return [], [], [], -1
# Add founder
my_F = add_founder(F)
# m = len(my_F)
choices_stack = [[0]]
chosen_parents = [0]
valid_parents = white_list(list(range(m)), clones)
order_validity = black_list(list(range(m)), clones)
success = False
while choices_stack:
chosen_parents = choices_stack.pop()
i = len(chosen_parents) - 1
if not valid_parent_value(chosen_parents, my_F, fail_threshold):
continue
if clones:
if not valid_parent_order(order_validity, chosen_parents):
continue
if i == (m - 1):
success = True
break
C_row = c_row(my_F[i], chosen_parents)
next_choices = list((np.array(C_row)).argsort())
for next_choice in next_choices:
if C_row[next_choice] > fail_threshold and next_choice <= i and next_choice in valid_parents[i + 1]:
temp = chosen_parents[:]
temp.append(next_choice)
choices_stack.append(temp)
if not success:
return [], my_F, [], -1
C = []
for i in range(0, m):
C.append(c_row(my_F[i], chosen_parents))
p = one
for i in range(1, m):
p = p * C[i - 1][chosen_parents[i]]
return C, my_F, chosen_parents, p
def variant_penalty1(parents, F, S):
# F may not be square
m = len(F)
pen = 0
cur_parent = parents[-1]
children = get_children(parents, cur_parent)
for t in range(cur_parent, m):
children_sum = zero
cs_sds = zero
for child in children:
children_sum += F[t][child]
cs_sds += (S[t][child] ** 2)
temp = F[t][cur_parent] - children_sum
if temp < 0:
denominator = Decimal(sqrt((S[t][cur_parent] ** 2) + cs_sds))
if denominator > zero:
pen += temp / denominator
return pen
def scores(F, parents, penalty, S=[]):
i = len(parents) - 1
C_row = c_row(F[i], parents)
for j in range(0, len(C_row)):
if S:
pen = penalty(parents + [j], F, S)
C_row[j] = C_row[j] + pen
return C_row
def smart_predict_penalty(F, m, fail_threshold, clones=[], S=[]):
# Assumes square F
if len(F) == 0:
return [], [], [], -1
# Add founder
my_F = add_founder(F)
my_S = []
if S:
my_S = add_founder_penalty(S)
# m = len(my_F)
choices_stack = [[0]]
chosen_parents = [0]
valid_parents = white_list(list(range(m)), clones)
order_validity = black_list(list(range(m)), clones)
success = False
while choices_stack:
# print(choices_stack)
chosen_parents = choices_stack.pop()
i = len(chosen_parents) - 1
if not S:
if not valid_parent_value(chosen_parents, my_F, fail_threshold):
continue
if clones:
if not valid_parent_order(order_validity, chosen_parents):
continue
if i == (m - 1):
success = True
break
sc = scores(my_F, chosen_parents, variant_penalty1, my_S)
next_choices = list((np.array(sc)).argsort())
for next_choice in next_choices:
if next_choice <= i and next_choice in valid_parents[i + 1]:
temp = chosen_parents[:]
temp.append(next_choice)
choices_stack.append(temp)
if not success:
return [], my_F, [], -1
C = []
for i in range(0, m):
C.append(c_row(my_F[i], chosen_parents))
p = one
for i in range(1, m):
p = p * C[i - 1][chosen_parents[i]]
return C, my_F, chosen_parents, p
def old_algorithm(F, R, smart_predict_algo):
# Empty check
if len(F) == 0:
return [], -1 # parents_vector, score
S = rd_to_sd(F, R)
num_clones = len(F[0])
order = []
steps, arrival_times = get_step_structure(F)
choices_stack = [(0, [0], [])]
(cur_score, cur_T, cur_P) = (0, [0], [])
success = False
while choices_stack:
(cur_score, cur_T, cur_P) = choices_stack.pop()
cur_m = 1
for item in cur_P:
cur_m += len(item)
i = len(cur_P)
if i == len(steps):
success = True
break
cur_step = steps[i]
temp = []
j = 0
# topo_permutations, F_mod = topological_valid_permutations(F_iter, cur_step)
# F_iter = deepcopy(F_mod)
# for permutation in topo_permutations:
for permutation in permutations(cur_step):
perm_ss = deepcopy(cur_P)
perm_ss.append(list(permutation))
perm_F = squarify(F, perm_ss)
perm_S = squarify(S, perm_ss)
_, _, perm_T, perm_score = smart_predict_algo(perm_F, cur_m + len(permutation), zero, clones=[], S=perm_S)
if perm_score != -1:
temp.append((perm_score, perm_T, perm_ss))
if temp:
temp_sorted = sorted(temp, key=lambda x: x[0])
choices_stack = choices_stack + temp_sorted
if success:
variants_in = list(range(1, num_clones + 1))
variants_out = reorder(variants_in, chain.from_iterable(cur_P))
parents_out = remap_parents(variants_out, cur_T)
return parents_out, cur_score
else:
return [], -1
def predict(F, variants, algo, R=[], filter=True, k=0):
start = process_time()
if len(R) == 0:
R = [[Decimal(sys.maxsize) for _ in range(len(F[0]))] for _ in range(len(F))]
if filter:
F1, R1, variants1, removed_variants = filter_data(F, R, variants)
else:
removed_variants = []
F1 = deepcopy(F)
R1 = deepcopy(R)
variants1 = deepcopy(variants)
F2, R2, _, removed_time_points = remove_redundant_time_points(F1, R1)
order = []
if variants:
# F2, R2, variants1 = cluster_data(F2, R2, variants1, 0.05)
# F2, R2, _, _ = remove_redundant_time_points(F2, R2)
parents = []
score = zero
if algo == 0:
parents, score = old_algorithm(F2, R2, smart_predict_original)
elif algo == 1:
parents, score, order = new_algorithm(F2, R2, k)
elif algo == 2:
parents, score = old_algorithm(F2, R2, smart_predict_penalty)
else:
exit("Invalid parameter for algo.")
else:
parents = []
score = zero
end = process_time()
return parents, score, variants1, removed_variants, len(F2), end - start, removed_time_points, F1, R1, order
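# Usage sketch (hedged): the helpers above expect F as rows of time points and
# columns of clones holding Decimal frequencies, and R as the matching read
# depths; variants is the list of clone labels. Something like:
#   F = [[Decimal("0.9"), Decimal("0.4")],
#        [Decimal("0.8"), Decimal("0.6")]]
#   R = [[Decimal(1000), Decimal(1000)],
#        [Decimal(1000), Decimal(1000)]]
#   parents, score, variants1, removed, n_rows, secs, removed_t, F1, R1, order = \
#       predict(F, ["v1", "v2"], algo=1, R=R)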
|
##parameters=text_format, text, SafetyBelt='', **kw
##
from Products.CMFDefault.exceptions import EditingConflict
from Products.CMFDefault.exceptions import ResourceLockedError
from Products.CMFDefault.utils import Message as _
if text_format != context.text_format or text != context.EditableBody():
try:
context.edit(text_format, text, safety_belt=SafetyBelt)
return context.setStatus(True, _(u'Document changed.'))
    except (ResourceLockedError, EditingConflict) as errmsg:
return context.setStatus(False, errmsg)
else:
return context.setStatus(False, _(u'Nothing to change.'))
|
version="0.2.2b-dev"
|
import Orange
from Orange.evaluation.testing import CrossValidation
from orangecontrib.recommendation import *
from sklearn.metrics import mean_squared_error, mean_absolute_error
import math
import time
def test_learners():
start = time.time()
# Load data
#data = Orange.data.Table('epinions_test.tab')
data = Orange.data.Table('filmtrust/ratings.tab')
trust = Orange.data.Table('filmtrust/trust.tab')
print('- Loading time: %.3fs' % (time.time() - start))
# Global average
start = time.time()
learner = GlobalAvgLearner()
recommender = learner(data)
print('- Time (GlobalAvgLearner): %.3fs' % (time.time() - start))
rmse = math.sqrt(mean_squared_error(data.Y, recommender(data)))
mae = mean_absolute_error(data.Y, recommender(data))
print('- RMSE (GlobalAvgLearner): %.3f' % rmse)
print('- MAE (GlobalAvgLearner): %.3f' % mae)
print('')
# Item average
start = time.time()
learner = ItemAvgLearner()
recommender = learner(data)
print('- Time (ItemAvgLearner): %.3fs' % (time.time() - start))
rmse = math.sqrt(mean_squared_error(data.Y, recommender(data)))
mae = mean_absolute_error(data.Y, recommender(data))
print('- RMSE (ItemAvgLearner): %.3f' % rmse)
print('- MAE (ItemAvgLearner): %.3f' % mae)
print('')
# User average
start = time.time()
learner = UserAvgLearner()
recommender = learner(data)
print('- Time (UserAvgLearner): %.3fs' % (time.time() - start))
rmse = math.sqrt(mean_squared_error(data.Y, recommender(data)))
mae = mean_absolute_error(data.Y, recommender(data))
print('- RMSE (UserAvgLearner): %.3f' % rmse)
print('- MAE (UserAvgLearner): %.3f' % mae)
print('')
# User-Item baseline
start = time.time()
learner = UserItemBaselineLearner()
recommender = learner(data)
print('- Time (UserItemBaselineLearner): %.3fs' % (time.time() - start))
rmse = math.sqrt(mean_squared_error(data.Y, recommender(data)))
mae = mean_absolute_error(data.Y, recommender(data))
print('- RMSE (UserItemBaselineLearner): %.3f' % rmse)
print('- MAE (UserItemBaselineLearner): %.3f' % mae)
print('')
# BRISMF
start = time.time()
learner = BRISMFLearner(num_factors=10, num_iter=15, learning_rate=0.01,
lmbda=0.1, verbose=0)
recommender = learner(data)
print('- Time (BRISMFLearner): %.3fs' % (time.time() - start))
rmse = math.sqrt(mean_squared_error(data.Y, recommender(data)))
mae = mean_absolute_error(data.Y, recommender(data))
print('- RMSE (BRISMFLearner): %.3f' % rmse)
print('- MAE (BRISMFLearner): %.3f' % mae)
print('')
# SVD++
start = time.time()
learner = SVDPlusPlusLearner(num_factors=10, num_iter=15,
learning_rate=0.01, lmbda=0.1, verbose=0)
recommender = learner(data)
print('- Time (SVDPlusPlusLearner): %.3fs' % (time.time() - start))
rmse = math.sqrt(mean_squared_error(data.Y, recommender(data)))
mae = mean_absolute_error(data.Y, recommender(data))
print('- RMSE (SVDPlusPlusLearner): %.3f' % rmse)
print('- MAE (SVDPlusPlusLearner): %.3f' % mae)
print('')
# TrustSVD
start = time.time()
learner = TrustSVDLearner(num_factors=10, num_iter=15, learning_rate=0.01,
lmbda=0.12, social_lmbda=0.9, trust=trust,
verbose=0)
recommender = learner(data)
print('- Time (TrustSVD): %.3fs' % (time.time() - start))
rmse = math.sqrt(mean_squared_error(data.Y, recommender(data)))
mae = mean_absolute_error(data.Y, recommender(data))
print('- RMSE (TrustSVD): %.3f' % rmse)
print('- MAE (TrustSVD): %.3f' % mae)
print('')
def test_CV():
# Load data
data = Orange.data.Table('filmtrust/ratings_small.tab')
trust = Orange.data.Table('filmtrust/trust_small.tab')
# Learners
global_avg = GlobalAvgLearner()
items_avg = ItemAvgLearner()
users_avg = UserAvgLearner()
useritem_baseline = UserItemBaselineLearner()
brismf = BRISMFLearner(num_factors=15, num_iter=10, learning_rate=0.07,
lmbda=0.1)
svdpp = SVDPlusPlusLearner(num_factors=15, num_iter=10, learning_rate=0.007,
lmbda=0.1)
trustsvd = TrustSVDLearner(num_factors=15, num_iter=10, learning_rate=0.007,
lmbda=0.1, social_lmbda=0.05, trust=trust)
learners = [global_avg, items_avg, users_avg, useritem_baseline,
brismf, svdpp, trustsvd]
res = CrossValidation(data, learners, k=5)
rmse = Orange.evaluation.RMSE(res)
r2 = Orange.evaluation.R2(res)
print("Learner RMSE R2")
for i in range(len(learners)):
print(
"{:8s} {:.2f} {:5.2f}".format(learners[i].name, rmse[i], r2[i]))
if __name__ == "__main__":
#pass
test_learners()
|
row, col = map(int, input().split())
n, m, d = map(int, input().split())
data = []
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]
dir_types = [0, 1, 2, 3]  # north, east, south, west
for _ in range(row):
    data.append(list(map(int, input().split())))
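# A minimal sketch (hedged assumption: this is the setup for a grid-walk
# simulation where an agent faces direction d and moves via the dx/dy tables):
#   nd = (d + 1) % 4                 # turn right (north -> east -> south -> west)
#   nx, ny = x + dx[nd], y + dy[nd]  # one step in the new direction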
|
from selenium import webdriver
if __name__ == "__main__":
driver = webdriver.Chrome()
base_url = 'https://www.baidu.com'
driver.get(base_url)
|
from django.urls import path
from . import views
urlpatterns = [
    # tarefas (tasks)
path('',views.tarefas,name='tarefas'),
path('tarefa/<int:categoria_id>/',views.tarefas_por_categoria,name='tarefas_por_categoria'),
path('tarefa/concluir/<int:tarefa_id>/',views.concluir_tarefa,name='concluir_tarefa'),
path('tarefa/cadastrar/', views.cadastrar_tarefa, name='cadastrar_tarefa'),
path('tarefa/excluir/<int:tarefa_id>/',views.excluir_tarefa, name='excluir_tarefa'),
path('tarefa/editar/<int:tarefa_id>/',views.editar_tarefa,name='editar_tarefa'),
    # categorias (categories)
path('categoria/cadastrar/',views.cadastrar_categoria,name='cadastrar_categoria'),
    path('categoria/listar/', views.listar_categorias, name='listar_categorias'),
path('categoria/excluir/<int:categoria_id>/',views.excluir_categoria, name='excluir_categoria'),
path('categoria/editar/<int:categoria_id>/',views.editar_categoria,name='editar_categoria'),
]
|
import os
import os.path
# from traitlets.config import Config
from traitlets import default, Unicode
from nbconvert.exporters.html import HTMLExporter
from traitlets.log import get_logger
class HideCodeHTMLExporter(HTMLExporter):
def __init__(self, config=None, **kw):
# self.register_preprocessor('hide_code.HideCodePreprocessor', True)
super(HideCodeHTMLExporter, self).__init__(config, **kw)
# self.preprocessors = ['hide_code.HideCodePreprocessor']
# self._init_preprocessors()
@default('template_file')
def _template_file_default(self):
return 'hide_code_full.tpl'
@property
def template_path(self):
"""
We want to inherit from HTML template, and have template under
`./templates/` so append it to the search path. (see next section)
"""
return super(HideCodeHTMLExporter, self).template_path + [os.path.join(os.path.dirname(__file__), "Templates")]
# return [os.path.join(os.path.dirname(__file__), "Templates")]
# @default('template_path')
# def _default_template_path(self):
# return os.path.join(os.path.dirname(__file__), "Templates")
|
import re
import os
import shutil
import urllib.parse
from zlib import compress
import base64
import string
plantuml_alphabet = string.digits + string.ascii_uppercase + string.ascii_lowercase + '-_'
base64_alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits + '+/'
b64_to_plantuml = bytes.maketrans(base64_alphabet.encode('utf-8'), plantuml_alphabet.encode('utf-8'))
class MarkdownResult:
def __init__(self):
self.title = ""
self.files = []
class MarkdownParser:
def __init__(self,parent,filename,token,user):
self.user = user
self.token = token
self.parent = parent
self.filename = filename
self.url = None
self.gistId = None
    @staticmethod
    def deflate_and_encode(plantuml_text):
"""zlib compress the plantuml text and encode it for the plantuml server.
"""
zlibbed_str = compress(plantuml_text.encode('utf-8'))
compressed_string = zlibbed_str[2:-4]
return base64.b64encode(compressed_string).translate(b64_to_plantuml).decode('utf-8')
def parse(self):
reUrl = re.compile(r'\[gist-sync-url\]:(.*)',re.I)
file_path = os.path.join(self.parent, self.filename)
with open(file_path, 'r') as f:
for line in f:
urlMatch = reUrl.match(line)
if urlMatch:
self.url = urlMatch.group(1)
urlret = urllib.parse.urlparse(self.url)
path = urlret.path
if path[-1] == '/':
path = path[:-1]
self.gistId = path.split('/')[-1]
# self.user = path.split('/')[-2]
break
        return self.gistId is not None and self.user is not None
def syncTo(self, path):
if not self.gistId or not self.user:
return None
reTitle = re.compile(r'\s?#\s+(.*)')
reImg = re.compile(r'.*!\[.*\]\((.*)\)')
reCode = re.compile(r'(\s*)```(\w*)')
retObj = MarkdownResult()
retObj.files.append("index.md")
mdPath = os.path.join(path, "index.md")
inCode = False
preSpace = ""
codeTxt = ""
with open(mdPath, 'w') as mdf:
file_path = os.path.join(self.parent, self.filename)
with open(file_path, 'r') as f:
for line in f:
codeMatch = reCode.match(line)
if codeMatch:
info = codeMatch.group(2)
if not inCode and (info == "puml" or info == "plantuml"):
inCode = True
codeTxt = ""
preSpace = codeMatch.group(1)
line = ""
                        elif inCode:
                            inCode = False
                            pumlCode = MarkdownParser.deflate_and_encode(codeTxt)
                            codeTxt = ""
                            # Replace the fenced PlantUML block with an image rendered
                            # by the public PlantUML server (the encoded diagram goes
                            # into the URL).
                            line = f'\n{preSpace}![](http://www.plantuml.com/plantuml/png/{pumlCode})\n'
if inCode:
codeTxt += line
continue
titleMatch = reTitle.match(line)
if titleMatch:
retObj.title = titleMatch.group(1)
imgMatch = reImg.match(line)
if imgMatch:
imgStr = imgMatch.group(1)
imgPath = imgStr.split()[0]
newFilename = self._convertImgFileName(imgPath)
print("find img:", imgStr)
if newFilename is not None:
oldFile = os.path.join(self.parent, imgPath)
newFile = os.path.join(path, newFilename)
shutil.copyfile(oldFile, newFile)
retObj.files.append(newFilename)
# The path ref https://gist.github.com/cben/46d9536baacb7c5d196c/
newPath = os.path.join(self.gistId, "raw" , newFilename)
line = line.replace(imgPath, newPath)
mdf.write(line)
for parent,dirnames,filenames in os.walk(path):
if ".git" in parent:
continue
for filename in filenames:
if filename not in retObj.files:
# print("remove file:" + filename)
os.remove(os.path.join(parent, filename))
return retObj
def _convertImgFileName(self, path):
if path.startswith("http"):
return None
newFilename = "z"+path.replace("/", "_").replace("..", "_")
return newFilename
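# Usage sketch (hedged; token/user are placeholder GitHub credentials, and the
# markdown file must contain a "[gist-sync-url]: https://gist.github.com/..." line):
#   parser = MarkdownParser("/path/to/notes", "post.md", "<token>", "<user>")
#   if parser.parse():
#       result = parser.syncTo("/path/to/local/gist-clone")
#       print(result.title, result.files)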
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
# (a)
n = 100
rng = np.random.default_rng(42)
mu = 5
data = rng.normal(mu, 1, size=n)
# (b)
mu_lim = (3, 7)
k = 5000
mus = np.linspace(*mu_lim, k)
likelihood = np.ones(k)
for x in data:
likelihood *= stats.norm.pdf(x, loc=mus)
mu_density = likelihood * 1  # flat prior on mu, so the posterior is proportional to the likelihood
mu_density /= mu_density.sum()
fig_b, ax_b = plt.subplots()
ax_b.set(xlabel="Mu", ylabel="Posterior density")
ax_b.plot(mus, mu_density)
fig_b.savefig("11-02b.png", bbox_inches="tight")
# (c)
drawn_mus = rng.choice(mus, size=1000, p=mu_density)
fig_c, ax_c = plt.subplots()
ax_c.set(xlabel="Mu", ylabel="Frequency")
ax_c.hist(drawn_mus, bins=150, range=mu_lim)
fig_c.savefig("11-02c.png", bbox_inches="tight")
# (d)
drawn_thetas = np.exp(rng.choice(mus, size=10000, p=mu_density))
theta_lims = (75, 220)
fig_d, axs_d = plt.subplots(1, 2, figsize=(14, 6))
axs_d[0].set(xlabel="Theta", ylabel="Frequency")
axs_d[0].hist(drawn_thetas, bins="auto", range=theta_lims)
thetas = np.geomspace(*theta_lims, k)
log_thetas = np.log(thetas)
diff = np.tile(log_thetas, n).reshape((n, -1)).transpose() - data
propto = np.exp(-0.5 * np.sum(diff ** 2, axis=1))
theta_density = propto / propto.sum()
axs_d[1].set(xlabel="Theta", ylabel="Posterior density")
axs_d[1].plot(thetas, theta_density)
fig_d.savefig("11-02d.png", bbox_inches="tight")
# (e)
mu_cdf = mu_density.cumsum()
alpha = 0.05
mu_left_idx = np.argmax(mu_cdf >= alpha / 2)
mu_right_idx = np.argmax(mu_cdf >= 1 - alpha / 2)
mu_pi = (mus[mu_left_idx], mus[mu_right_idx])
print(
"95% posterior interval for mu is given by",
f"[{mu_pi[0]:.3f}, {mu_pi[1]:.3f}]",
)
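# Sanity check (added sketch, not part of the original exercise): with a flat
# prior and unit variance, the posterior for mu is exactly N(mean(data), 1/n),
# so the grid-based interval above should agree with the closed form:
exact_lo, exact_hi = stats.norm.interval(0.95, loc=data.mean(), scale=1 / np.sqrt(n))
print(f"exact 95% interval for mu: [{exact_lo:.3f}, {exact_hi:.3f}]")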
# (f)
theta_cdf = theta_density.cumsum()
alpha = 0.05
theta_left_idx = np.argmax(theta_cdf >= alpha / 2)
theta_right_idx = np.argmax(theta_cdf >= 1 - alpha / 2)
theta_pi = (thetas[theta_left_idx], thetas[theta_right_idx])
print(
"95% posterior interval for theta is given by",
f"[{theta_pi[0]:.3f}, {theta_pi[1]:.3f}]",
)
|
from flask import Flask, Response, request
from flask_sqlalchemy import SQLAlchemy
import mysql.connector
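# A minimal sketch of how these imports are typically wired together; the
# database URI, the route below, and the credentials are placeholders, not
# from the original file:
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = (
    "mysql+mysqlconnector://user:password@localhost/dbname"
)
db = SQLAlchemy(app)

@app.route("/health")
def health():
    # Simple liveness endpoint returning plain text.
    return Response("ok", mimetype="text/plain")

if __name__ == "__main__":
    app.run(debug=True)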
|
"""This module is the Python part of the CAD Viewer widget"""
import base64
import json
from textwrap import dedent
import numpy as np
import ipywidgets as widgets
from ipywidgets.embed import embed_minimal_html, dependency_state
from traitlets import Unicode, Dict, List, Tuple, Integer, Float, Any, Bool, observe
from IPython.display import HTML, update_display
from pyparsing import ParseException
from .utils import get_parser, to_json, bsphere, normalize
class AnimationTrack:
"""
Defining a three.js animation track.
Parameters
----------
path : string
The path (or id) of the cad object for which this track is meant.
Usually of the form `/top-level/level2/...`
action : {"t", "tx", "ty", "tz", "q", "rx", "ry", "rz"}
The action type:
- "tx", "ty", "tz" for translations along the x, y or z-axis
- "t" to add a position vector (3-dim array) to the current position of the CAD object
- "rx", "ry", "rz" for rotations around x, y or z-axis
- "q" to apply a quaternion to the location of the CAD object
times : list of float or int
An array of floats describing the points in time where CAD object (with id `path`) should be at the location
defined by `action` and `values`
values : list of float or int
An array of same length as `times` defining the locations where the CAD objects should be according to the
`action` provided. Formats:
- "tx", "ty", "tz": float distance to move
- "t": 3-dim tuples or lists defining the positions to move to
- "rx", "ry", "rz": float angle in degrees
- "q" quaternions of the form (x,y,z,w) the represent the rotation to be applied
Examples
--------
```
AnimationTrack(
'/bottom/left_middle/lower', # path
'rz', # action
[0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0], # times (seconds)
[-15.0, -15.0, -15.0, 9.7, 20.0, 9.7, -15.0, -15.0, -15.0] # angles
)
AnimationTrack(
'base/link_4_6', # path
't', # action
[0.0, 1.0, 2.0, 3.0, 4.0], # times (seconds)
[[0.0, 0.0, 0.0], [0.0, 1.9509, 3.9049],
[0.0 , -3.2974, -16.7545], [0.0 , 0.05894 , -32.0217],
[0.0 , -3.2212, -13.3424]] # 3-dim positions
)
```
See also
--------
- [three.js NumberKeyframeTrack](https://threejs.org/docs/index.html?q=track#api/en/animation/tracks/NumberKeyframeTrack)
- [three.js QuaternionKeyframeTrack](https://threejs.org/docs/index.html?q=track#api/en/animation/tracks/QuaternionKeyframeTrack)
"""
def __init__(self, path, action, times, values):
if len(times) != len(values):
raise ValueError("Parameters 'times' and 'values' need to have same length")
self.path = path
self.action = action
self.times = times
self.values = values
self.length = len(times)
def to_array(self):
"""
Create an array representation of the animation track
Returns
-------
array-like
The 4 dim array comprising of the instance variables `path`, `action`, `times` and `values`
"""
def tolist(obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, (list, tuple)):
return [tolist(subarray) for subarray in obj]
else:
return obj
return [self.path, self.action, tolist(self.times), tolist(self.values)]
@widgets.register
class CadViewerWidget(widgets.Output): # pylint: disable-msg=too-many-instance-attributes
"""The CAD Viewer widget."""
_view_name = Unicode("CadViewerView").tag(sync=True)
_model_name = Unicode("CadViewerModel").tag(sync=True)
_view_module = Unicode("cad-viewer-widget").tag(sync=True)
_model_module = Unicode("cad-viewer-widget").tag(sync=True)
_view_module_version = Unicode("1.3.2").tag(sync=True)
_model_module_version = Unicode("1.3.2").tag(sync=True)
#
# Display traits
#
title = Unicode(allow_none=True).tag(sync=True)
"unicode string of the title of the sidecar to be used. None means CAD view will be opened in cell"
anchor = Unicode(allow_none=True).tag(sync=True)
"unicode string whether to add a view to the right sidebar ('right') or as a tab to the main window ('tab')"
cad_width = Integer().tag(sync=True)
"unicode string: Width of the canvas element"
height = Integer(allow_none=True).tag(sync=True)
"int: Height of the canvas element"
tree_width = Integer(allow_none=True).tag(sync=True)
"int: Width of the navigation tree element"
theme = Unicode(allow_none=True).tag(sync=True)
"unicode string: UI theme, can be 'dark' or 'light' (default)"
pinning = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to show the pin a png button or not"
#
# Viewer traits
#
shapes = Dict(allow_none=True).tag(sync=True, to_json=to_json)
"unicode: Serialized nested tessellated shapes"
states = Dict(Tuple(Integer(), Integer()), allow_none=True).tag(sync=True)
"dict: State of the nested cad objects, key = object path, value = 2-dim tuple of 0/1 (hidden/visible) for object and edges"
tracks = List(allow_none=True).tag(sync=True)
"unicode: Serialized list of animation track arrays, see [AnimationTrack.to_array](/widget.html#cad_viewer_widget.widget.AnimationTrack.to_array)"
timeit = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to output timing info to the browser console (True) or not (False)"
tools = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to show CAD tools (True) or not (False)"
glass = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to use the glass mode (CAD navigation as transparent overlay) or not"
ortho = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to use orthographic view (True) or perspective view (False)"
control = Unicode().tag(sync=True)
"unicode: Whether to use trackball controls ('trackball') or orbit controls ('orbit')"
axes = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to show coordinate axes (True) or not (False)"
axes0 = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to center coordinate axes at the origin [0,0,0] (True) or at the CAD object center (False)"
grid = Tuple(Bool(), Bool(), Bool(), allow_none=True).tag(sync=True)
"tuple: Whether to show the grids for `xy`, `xz`, `yz`."
ticks = Integer(allow_none=True).tag(sync=True)
"integer: Hint for the number of ticks for the grids (will be adjusted for nice intervals)"
transparent = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to show the CAD objects transparently (True) or not (False)"
black_edges = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to shows the edges in black (True) or not(False)"
collapse = Integer(allow_none=True, default_value=None).tag(sync=True)
"int: Collapse CAD tree (1: collapse nodes with single leaf, 2: collapse all nodes)"
normal_len = Float(allow_none=True).tag(sync=True)
"float: If > 0, the vertex normals will be rendered with the length given be this parameter"
default_edge_color = Unicode(allow_none=True).tag(sync=True)
"unicode: The default edge color in web format, e.g. '#ffaa88'"
default_opacity = Float(allow_none=True).tag(sync=True)
"unicode: The default opacity for transparent objects"
ambient_intensity = Float(allow_none=True).tag(sync=True)
"float: The intensity of the ambient light"
direct_intensity = Float(allow_none=True).tag(sync=True)
"float: The intensity of the 8 direct lights"
#
# Generic UI traits
#
tab = Unicode(allow_none=True).tag(sync=True)
"unicode: Whether to show the navigation tree ('tree') or the clipping UI ('clip')"
clip_intersection = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to use intersection clipping (True) or not (False)"
clip_planes = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to show colored clipping planes (True) or not (False)"
clip_normal_0 = Tuple(Float(), Float(), Float(), allow_none=True, default_value=None).tag(sync=True)
"tuple: Normal of clipping plane 1 as a 3-dim tuple of float (x,y,z)"
clip_normal_1 = Tuple(Float(), Float(), Float(), allow_none=True, default_value=None).tag(sync=True)
"tuple: Normal of clipping plane 2 as a 3-dim tuple of float (x,y,z)"
clip_normal_2 = Tuple(Float(), Float(), Float(), allow_none=True, default_value=None).tag(sync=True)
"tuple: Normal of clipping plane 3 as a 3-dim tuple of float (x,y,z)"
clip_slider_0 = Float(allow_none=True, default_value=None).tag(sync=True)
"float: Slider value of clipping plane 1"
clip_slider_1 = Float(allow_none=True, default_value=None).tag(sync=True)
"float: Slider value of clipping plane 2"
clip_slider_2 = Float(allow_none=True, default_value=None).tag(sync=True)
"float: Slider value of clipping plane 3"
reset_camera = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to reset camera (True) or not (False)"
position = Tuple(Float(), Float(), Float(), allow_none=True).tag(sync=True)
"tuple: Position of the camera as a 3-dim tuple of float (x,y,z)"
quaternion = Tuple(Float(), Float(), Float(), Float(), allow_none=True).tag(sync=True)
"tuple: Rotation of the camera as 4-dim quaternion (x,y,z,w)"
target = Tuple(Float(), Float(), Float(), allow_none=True).tag(sync=True)
"tuple: Camera target to look at as 3-dim tuple (x,y,z)"
zoom = Float(allow_none=True).tag(sync=True)
"float: Zoom value of the camera"
position0 = Tuple(Float(), Float(), Float(), allow_none=True).tag(sync=True)
"tuple: Initial position of the camera as a 3-dim tuple of float (x,y,z)"
quaternion0 = Tuple(Float(), Float(), Float(), Float(), allow_none=True).tag(sync=True)
"tuple: Initial rotation of the camera as 4-dim quaternion (x,y,z,w)"
zoom0 = Float(allow_none=True).tag(sync=True)
"float: Initial zoom value of the camera"
target0 = Tuple(Float(), Float(), Float(), allow_none=True).tag(sync=True)
"tuple: Initial camera target to look at as 3-dim tuple (x,y,z)"
zoom_speed = Float(allow_none=True).tag(sync=True)
"float: Speed of zooming with the mouse"
pan_speed = Float(allow_none=True).tag(sync=True)
"float: Speed of panning with the mouse"
rotate_speed = Float(allow_none=True).tag(sync=True)
"float: Speed of rotation with the mouse"
animation_speed = Float(allow_none=True).tag(sync=True)
"float: Animation speed"
state_updates = Dict(Tuple(Integer(), Integer()), allow_none=True).tag(sync=True)
"dict: Dict with paths as key and a 2-dim tuple of 0/1 (hidden/visible) for object and edges"
#
# Read only traitlets
#
lastPick = Dict(key_trait=Unicode(), value_trait=Any(), allow_none=True, read_only=True).tag(sync=True)
"dict: Describes the last picked element of the CAD view"
result = Unicode(allow_none=True).tag(sync=True)
"unicode string: JSON serialized result from Javascript"
#
# Internal traitlets
#
    disposed = Bool(allow_none=True, default_value=None).tag(sync=True)
    "bool: Whether the Javascript viewer is disposed"
initialize = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: internally used to control initialization of view. Do not use!"
js_debug = Bool(allow_none=True, default_value=None).tag(sync=True)
"bool: Whether to show infos in the browser console (True) or not (False)"
image_id = Unicode(allow_none=True).tag(sync=True)
"unicode string: the id of the image tag to use for pin as png"
@observe("result")
def func(self, change):
if change["new"] is not None:
data = json.loads(change["new"])
if data.get("display_id") is not None:
html = f"""<img src="{data['src']}" width="{data['width']}px" height="{data['height']}px"/>"""
update_display(HTML(html), display_id=data["display_id"])
else:
if self.test_func is not None and callable(self.test_func):
self.test_func(base64.b64decode(data["src"].split(",")[1])) # pylint: disable=not-callable
else:
with open(data["filename"], "wb") as fd:
fd.write(base64.b64decode(data["src"].split(",")[1]))
self.result = None
class CadViewer:
"""
The main class for the CAD Viewer encapsulating the three-cad-viewer Javascript module
Parameters
----------
cad_width : int, default: 800
Width of the canvas element
height : int, default: 600
Height of the canvas element
tree_width : int, default: 240
Width of the navigation tree element
    theme : string, default: 'browser'
        UI theme, can be 'dark' or 'light' ('browser' follows the browser setting)
tools : bool, default: True
Whether to show CAD tools (True) or not (False)
glass : bool, default: False
Whether to use glass mode (True) or not (False)
pinning: bool, default: False
Whether to allow replacing the CAD View by a canvas screenshot
See also
--------
- [three-cad-viewer](https://github.com/bernhard-42/three-cad-viewer) ([Demo](https://bernhard-42.github.io/three-cad-viewer/example.html))
- [threejs](https://threejs.org/docs/index.html#manual/en/introduction/Creating-a-scene)
"""
def __init__(
self,
cad_width=800,
height=600,
tree_width=240,
theme="browser",
glass=False,
tools=True,
pinning=False,
title=None,
anchor=None,
):
if cad_width < 400:
raise ValueError("Ensure cad_width >= 400")
if tree_width < 240:
raise ValueError("Ensure tree_width >= 240")
self.widget = CadViewerWidget(
cad_width=cad_width,
height=height,
tree_width=tree_width,
theme=theme,
glass=glass,
tools=tools,
pinning=pinning,
title=title,
anchor=anchor,
)
self.widget.test_func = None
self.msg_id = 0
self.parser = get_parser()
self.empty = True
self.tracks = []
def _parse(self, string):
try:
return self.parser.parseString(string).asList()
except ParseException:
return None
def dispose(self):
"""
Dispose the CAD Viewer
"""
self.execute("viewer.dispose")
def add_shapes(
self,
shapes,
states,
tracks=None,
# render options
normal_len=0,
default_edge_color="#707070",
default_opacity=0.5,
ambient_intensity=0.5,
direct_intensity=0.3,
# viewer options
tools=None,
glass=None,
cad_width=None,
tree_width=None,
height=None,
control="trackball",
ortho=True,
axes=False,
axes0=False,
grid=None,
ticks=10,
transparent=False,
black_edges=False,
collapse=0,
position=None,
quaternion=None,
target=None,
zoom=None,
reset_camera=True,
zoom_speed=1.0,
pan_speed=1.0,
rotate_speed=1.0,
timeit=False,
js_debug=False,
):
"""
Adding shapes to the CAD view
Parameters
----------
shapes : dict
Nested tessellated shapes
states : dict
State of the nested cad objects, key = object path, value = 2-dim tuple of 0/1 (hidden/visible) for object and edges
tracks : list or tuple, default None
List of animation track arrays, see [AnimationTrack.to_array](/widget.html#cad_viewer_widget.widget.AnimationTrack.to_array)
title: str, default: None
Name of the title view to display the shapes.
ortho : bool, default True
Whether to use orthographic view (True) or perspective view (False)
cad_width : int, default: None
Width of the canvas element
height : int, default: None
Height of the canvas element
tree_width : int, default: None
Width of the navigation tree element
tools : bool, default: None
Whether to show CAD tools (True) or not (False)
glass : bool, default: None
Whether to use glass mode (True) or not (False)
control : string, default 'trackball'
Whether to use trackball controls ('trackball') or orbit controls ('orbit')
axes : bool, default False
Whether to show coordinate axes (True) or not (False)
axes0 : bool, default False
Whether to center coordinate axes at the origin [0,0,0] (True) or at the CAD object center (False)
grid : 3-dim list of bool, default None
Whether to show the grids for `xy`, `xz`, `yz` (`None` means `(False, False, False)`)
ticks : int, default 10
Hint for the number of ticks for the grids (will be adjusted for nice intervals)
transparent : bool, default False
Whether to show the CAD objects transparently (True) or not (False)
black_edges : bool, default False
            Whether to show the edges in black (True) or not (False)
collapse : int, default 0
Collapse CAD tree (1: collapse nodes with single leaf, 2: collapse all nodes)
        normal_len : float, default 0
            If > 0, the vertex normals will be rendered with the length given by this parameter
default_edge_color : string, default "#707070"
The default edge color in web format, e.g. '#ffaa88'
        default_opacity : float, default 0.5
            The default opacity level for transparency, between 0.0 and 1.0
        ambient_intensity : float, default 0.5
            The intensity of the ambient light
        direct_intensity : float, default 0.3
            The intensity of the 8 direct lights
position : 3-dim list of float, default None
Position of the camera as a 3-dim tuple of float (x,y,z)
quaternion : 4-dim list of float, default None
Rotation of the camera as 4-dim quaternion (x,y,z,w)
target : 3-dim list of float, default None
Camera target to look at, default is the center of the object's bounding box
zoom : float, default None
Zoom value of the camera
        reset_camera : bool, default True
            Whether to reset the camera position and rotation when showing new shapes (True) or keep the current camera (False)
zoom_speed : float, default 1.0
Speed of zooming with the mouse
pan_speed : float, default 1.0
Speed of panning with the mouse
rotate_speed : float, default 1.0
Speed of rotation with the mouse
timeit : bool, default False
Whether to output timing info to the browser console (True) or not (False)
Examples
--------
A simple cube with edge len of 1 is tessellated like the `shape` element of the first (and only) element of
the `parts` list:
```
shapes = {
"name": "Group",
"id": "/Group",
"loc": None, # would be (<position>, <quaternion>), e.g. ([0,0,0), (0,0,0,1)]),
"bb": {
"xmin": -0.5, "xmax": 0.5,
"ymin": -0.5, "ymax": 0.5,
"zmin": -0.5, "zmax": 0.5
            },
"parts": [{
"name": "Part_0",
"id": "/Group/Part_0",
"type": "shapes",
"shape": {"vertices": [
[-0.5, -0.5, -0.5], [-0.5, -0.5, 0.5], [-0.5, 0.5, -0.5], [-0.5, 0.5, 0.5],
[0.5, -0.5, -0.5], [0.5, -0.5, 0.5], [0.5, 0.5, -0.5], [0.5, 0.5, 0.5],
[-0.5, -0.5, -0.5], [0.5, -0.5, -0.5], [-0.5, -0.5, 0.5], [0.5, -0.5, 0.5],
[-0.5, 0.5, -0.5], [0.5, 0.5, -0.5], [-0.5, 0.5, 0.5], [0.5, 0.5, 0.5],
[-0.5, -0.5, -0.5], [-0.5, 0.5, -0.5], [0.5, -0.5, -0.5], [0.5, 0.5, -0.5],
[-0.5, -0.5, 0.5], [-0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [0.5, 0.5, 0.5]],
"triangles": [
1, 2, 0, 1, 3, 2, 5, 4, 6, 5, 6, 7, 11, 8, 9, 11, 10, 8, 15, 13,
12, 15, 12, 14, 19, 16, 17, 19, 18, 16, 23, 21, 20, 23, 20, 22 ],
"normals": [
[-1, 0, 0], [-1, 0, 0], [-1, 0, 0], [-1, 0, 0],
[1, 0, 0], [1, 0, 0], [1, 0, 0], [1, 0, 0],
[0, -1, 0], [0, -1, 0], [0, -1, 0], [0, -1, 0],
[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0],
[0, 0, -1], [0, 0, -1], [0, 0, -1], [0, 0, -1],
[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]
],
"edges": [
[[-0.5, -0.5, -0.5], [-0.5, -0.5, 0.5]],
[[-0.5, -0.5, 0.5], [-0.5, 0.5, 0.5]],
[[-0.5, 0.5, -0.5], [-0.5, 0.5, 0.5]],
[[-0.5, -0.5, -0.5], [-0.5, 0.5, -0.5]],
[[0.5, -0.5, -0.5], [0.5, -0.5, 0.5]],
[[0.5, -0.5, 0.5], [0.5, 0.5, 0.5]],
[[0.5, 0.5, -0.5], [0.5, 0.5, 0.5]],
[[0.5, -0.5, -0.5], [0.5, 0.5, -0.5]],
[[-0.5, -0.5, -0.5], [0.5, -0.5, -0.5]],
[[-0.5, -0.5, 0.5], [0.5, -0.5, 0.5]],
[[-0.5, 0.5, -0.5], [0.5, 0.5, -0.5]],
[[-0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]
]},
"color": "#e8b024",
"renderback": false
}]
}
states = {'/Group/Part_0': [1, 1]}
```
A nested object (with shapes shortened) looks like:
```
{
'id': '/bottom', 'name': 'bottom', 'loc': ['<position>', '<quaternion>'],
'parts': [{
'id': '/bottom/bottom_0', 'name': 'bottom_0', 'type': 'shapes', 'color': '#bfbfbf',
'shape': {'vertices': [...], 'triangles': [...], 'normals': [...], 'edges': [...]},
}, {
'id': '/bottom/top', 'name': 'top', 'loc': ['<position>', '<quaternion>'],
'parts': [{
'id': '/bottom/top/top_0', 'name': 'top_0', 'type': 'shapes', 'color': '#bfbfbf',
'shape': {'vertices': [...], 'triangles': [...], 'normals': [...], 'edges': [...]},
}]
}, {
'id': '/bottom/front_stand', 'name': 'front_stand', 'loc': ['<position>', '<quaternion>'],
'parts': [{
'id': '/bottom/front_stand/front_stand_0', 'name': 'front_stand_0', 'type': 'shapes', 'color': '#7fcce5',
'shape': {'vertices': [...], 'triangles': [...], 'normals': [...], 'edges': [...]},
}]
}, {
'id': '/bottom/back_stand', 'name': 'back_stand', 'loc': ['<position>', '<quaternion>'],
'parts': [{
'id': '/bottom/back_stand/back_stand_0', 'name': 'back_stand_0', 'type': 'shapes', 'color': '#7fcce5',
'shape': {'vertices': [...], 'triangles': [...], 'normals': [...], 'edges': [...]},
}]
}, {
'id': '/bottom/right_back', 'name': 'right_back', 'loc': ['<position>', '<quaternion>'],
'parts': [{
'id': '/bottom/right_back/right_back_0', 'name': 'right_back_0', 'type': 'shapes', 'color': '#ffa500',
'shape': {'vertices': [...], 'triangles': [...], 'normals': [...], 'edges': [...]},
}, {
'id': '/bottom/right_back/lower', 'name': 'lower', 'loc': ['<position>', '<quaternion>'],
'parts': [{
'id': '/bottom/right_back/lower/lower_0', 'name': 'lower_0', 'type': 'shapes', 'color': '#ffa500',
'shape': {'vertices': [...], 'triangles': [...], 'normals': [...], 'edges': [...]},
}]
}]
},
...
]
}
states = {
'/bottom/bottom_0': [1, 1],
'/bottom/top/top_0': [1, 1],
'/bottom/front_stand/front_stand_0': [1, 1],
'/bottom/back_stand/back_stand_0': [1, 1],
'/bottom/right_back/right_back_0': [1, 1],
'/bottom/right_back/lower/lower_0': [1, 1],
...
}
```
Notes
-----
Vector : float[3] := [x, y, z]
        VectorList : Vector[n] := [ [x0, y0, z0], [x1, y1, z1], ... ]
        Index : int[m] := [ i0, i1, i2, ... ]
        Edge : Vector[2] := [ [x0, y0, z0], [x1, y1, z1] ]
        EdgeList : Edge[k] := [ [[x0, y0, z0], [x1, y1, z1]], [[x2, y2, z2], [x3, y3, z3]], ... ]
Shape, Faces := {
"id": "<str>",
"name": "<str>",
"type": "shapes",
"color": "#ffffff",
"renderback": false
"shape": {
"vertices": <VectorList>,
"triangles": <Index>,
"normals": <VectorList>,
"edges": <EdgeList>
}
}
Edges := {
"id": "</path/to/<name>>",
"name": "<name>",
"type": "edges",
"color": "#ffffff",
"width": 3,
"renderback": false
"shape": <EdgeList>
}
Vertices := {
"id": "</path/to/<name>>",
"name": "<name>",
"type": "vertices",
"color": "#ffffff",
"size": 6
"shape": <VectorList>
}
"""
if control == "orbit" and quaternion is not None:
raise ValueError("Camera quaternion cannot be used with Orbit camera control")
if control == "trackball" and position is not None and quaternion is None:
raise ValueError("For Trackball camera control, position paramater also needs quaternion parameter")
if grid is None:
grid = [False, False, False]
# If one changes the control type, override reset_camera with "True"
if self.empty or self.widget.control != control:
reset_camera = True
self.empty = False
# Don't show warning on first call
if self.widget.control != "":
print("Camera control changed, so camera was resetted")
if reset_camera:
center, radius = bsphere(shapes["bb"])
if position is None:
position = (normalize(np.array((1, 1, 1))) * 5.5 * radius + center).tolist()
if quaternion is None and control == "trackball":
quaternion = (
0.1759198966061612,
0.42470820027786693,
0.8204732385702833,
0.33985114297998736,
)
if target is None:
target = center.tolist()
if zoom is None:
zoom = 4 / 3
w = self.widget.cad_width if cad_width is None else cad_width
h = self.widget.height if height is None else height
if w >= h:
zoom *= h / w
else:
copy = lambda v: None if v is None else (*v,)
position = copy(self.widget.position)
quaternion = copy(self.widget.quaternion)
target = copy(self.widget.target)
zoom = self.widget.zoom
self.widget.initialize = True
with self.widget.hold_trait_notifications():
self.widget.shapes = shapes
self.widget.states = states
self.widget.default_edge_color = default_edge_color
self.widget.default_opacity = default_opacity
self.widget.ambient_intensity = ambient_intensity
self.widget.direct_intensity = direct_intensity
self.widget.normal_len = normal_len
self.widget.control = control
if tools is not None:
self.widget.tools = tools
if glass is not None:
self.widget.glass = glass
if cad_width is not None:
self.widget.cad_width = cad_width
if tree_width is not None:
self.widget.tree_width = tree_width
if height is not None:
self.widget.height = height
self.widget.axes = axes
self.widget.axes0 = axes0
self.widget.grid = grid
self.widget.ticks = ticks
self.widget.ortho = ortho
self.widget.transparent = transparent
self.widget.black_edges = black_edges
self.widget.collapse = collapse
self.widget.reset_camera = reset_camera
self.widget.position = position
self.widget.quaternion = quaternion
self.widget.target = target
self.widget.zoom = zoom
# If reset_camera, position0, quaternion0 and zoom0 need to be set
if reset_camera:
self.widget.position0 = (*position,)
if control == "trackball":
self.widget.quaternion0 = (*quaternion,)
self.widget.target0 = (*target,)
self.widget.zoom0 = zoom
self.widget.zoom_speed = zoom_speed
self.widget.pan_speed = pan_speed
self.widget.rotate_speed = rotate_speed
self.widget.timeit = timeit
self.widget.js_debug = js_debug
self.add_tracks(tracks)
self.widget.initialize = False
if tools is not None:
self.widget.tools = tools
if glass is not None:
self.widget.glass = glass
def update_states(self, states):
"""Set navigation tree states for a CAD view"""
all_paths = list(self.widget.states.keys())
for path, state in states.items():
            if path not in all_paths:
raise ValueError(f"Path {path} is not a valid state")
self.widget.state_updates = states
def update_camera_location(self):
"""Sync position, quaternion and zoom of camera to Python"""
self.execute("updateCamera", [])
def close(self):
"""
Close the underlying Javascript viewer
"""
self.widget.disposed = True
@property
def disposed(self):
"""
Whether the Javascript viewer is disposed
"""
return self.widget.disposed
#
# UI and scene accessors
#
@property
def ambient_intensity(self):
"""
Get or set the CadViewerWidget traitlet `ambient_intensity`.
see [CadViewerWidget.ambient_intensity](./widget.html#cad_viewer_widget.widget.CadViewerWidget.ambient_intensity)
"""
return self.widget.ambient_intensity
@ambient_intensity.setter
def ambient_intensity(self, value):
self.widget.ambient_intensity = value
@property
def direct_intensity(self):
"""
Get or set the CadViewerWidget traitlet `direct_intensity`
see [CadViewerWidget.direct_intensity](./widget.html#cad_viewer_widget.widget.CadViewerWidget.direct_intensity)
"""
return self.widget.direct_intensity
@direct_intensity.setter
def direct_intensity(self, value):
self.widget.direct_intensity = value
@property
def axes(self):
"""
Get or set the CadViewerWidget traitlet `axes`
see [CadViewerWidget.axes](./widget.html#cad_viewer_widget.widget.CadViewerWidget.axes)
"""
return self.widget.axes
@axes.setter
def axes(self, value):
self.widget.axes = value
@property
def axes0(self):
"""
Get or set the CadViewerWidget traitlet `axes0`
see [CadViewerWidget.axes0](./widget.html#cad_viewer_widget.widget.CadViewerWidget.axes0)
"""
return self.widget.axes0
@axes0.setter
def axes0(self, value):
self.widget.axes0 = value
@property
def grid(self):
"""
Get or set the CadViewerWidget traitlet `grid`
see [CadViewerWidget.grid](./widget.html#cad_viewer_widget.widget.CadViewerWidget.grid)
"""
return self.widget.grid
@grid.setter
def grid(self, value):
self.widget.grid = value
@property
def ortho(self):
"""
Get or set the CadViewerWidget traitlet `ortho`
see [CadViewerWidget.ortho](./widget.html#cad_viewer_widget.widget.CadViewerWidget.ortho)
"""
return self.widget.ortho
@ortho.setter
def ortho(self, value):
self.widget.ortho = value
@property
def transparent(self):
"""
Get or set the CadViewerWidget traitlet `transparent`
see [CadViewerWidget.transparent](./widget.html#cad_viewer_widget.widget.CadViewerWidget.transparent)
"""
return self.widget.transparent
@transparent.setter
def transparent(self, value):
self.widget.transparent = value
@property
def black_edges(self):
"""
Get or set the CadViewerWidget traitlet `black_edges`
see [CadViewerWidget.black_edges](./widget.html#cad_viewer_widget.widget.CadViewerWidget.black_edges)
"""
return self.widget.black_edges
@black_edges.setter
def black_edges(self, value):
self.widget.black_edges = value
@property
def normal_len(self):
"""
Get or set the CadViewerWidget traitlet `normal_len`
"""
        return self.widget.normal_len
@property
def default_edge_color(self):
"""
Get or set the CadViewerWidget traitlet `default_edge_color`
see [CadViewerWidget.default_edge_color](./widget.html#cad_viewer_widget.widget.CadViewerWidget.default_edge_color)
"""
return self.widget.default_edge_color
@default_edge_color.setter
def default_edge_color(self, value):
if value.startswith("#"):
self.widget.default_edge_color = value
else:
self.widget.default_edge_color = f"#{value}"
@property
def default_opacity(self):
"""
Get or set the CadViewerWidget traitlet `default_opacity`
see [CadViewerWidget.default_opacity](./widget.html#cad_viewer_widget.widget.CadViewerWidget.default_opacity)
"""
return self.widget.default_opacity
@default_opacity.setter
def default_opacity(self, value):
if value.startswith("#"):
self.widget.default_opacity = value
else:
self.widget.default_opacity = f"#{value}"
@property
def clip_intersection(self):
"""
Get or set the CadViewerWidget traitlet `clip_intersection`
see [CadViewerWidget.clip_intersection](./widget.html#cad_viewer_widget.widget.CadViewerWidget.clip_intersection)
"""
return self.widget.clip_intersection
@clip_intersection.setter
def clip_intersection(self, value):
self.widget.clip_intersection = value
@property
def clip_normal_0(self):
"""
Get or set the CadViewerWidget traitlet `clip_normal_0`
see [CadViewerWidget.clip_normal_0](./widget.html#cad_viewer_widget.widget.CadViewerWidget.clip_normal_0)
"""
return self.widget.clip_normal_0
@clip_normal_0.setter
def clip_normal_0(self, value):
self.widget.clip_normal_0 = value
@property
def clip_normal_1(self):
"""
Get or set the CadViewerWidget traitlet `clip_normal_1`
see [CadViewerWidget.clip_normal_1](./widget.html#cad_viewer_widget.widget.CadViewerWidget.clip_normal_1)
"""
return self.widget.clip_normal_1
@clip_normal_1.setter
def clip_normal_1(self, value):
self.widget.clip_normal_1 = value
@property
def clip_normal_2(self):
"""
Get or set the CadViewerWidget traitlet `clip_normal_2`
see [CadViewerWidget.clip_normal_2](./widget.html#cad_viewer_widget.widget.CadViewerWidget.clip_normal_2)
"""
return self.widget.clip_normal_2
@clip_normal_2.setter
def clip_normal_2(self, value):
self.widget.clip_normal_2 = value
@property
def clip_value_0(self):
"""
Get or set the CadViewerWidget traitlet `clip_slider_0`
see [CadViewerWidget.clip_slider_0](./widget.html#cad_viewer_widget.widget.CadViewerWidget.clip_slider_0)
"""
return self.widget.clip_slider_0
@clip_value_0.setter
def clip_value_0(self, value):
self.widget.clip_slider_0 = value
@property
def clip_value_1(self):
"""
Get or set the CadViewerWidget traitlet `clip_slider_1`
see [CadViewerWidget.clip_slider_1](./widget.html#cad_viewer_widget.widget.CadViewerWidget.clip_slider_1)
"""
return self.widget.clip_slider_1
@clip_value_1.setter
def clip_value_1(self, value):
self.widget.clip_slider_1 = value
@property
def clip_value_2(self):
"""
Get or set the CadViewerWidget traitlet `clip_slider_2`
see [CadViewerWidget.clip_slider_2](./widget.html#cad_viewer_widget.widget.CadViewerWidget.clip_slider_2)
"""
return self.widget.clip_slider_2
@clip_value_2.setter
def clip_value_2(self, value):
self.widget.clip_slider_2 = value
@property
def clip_planes(self):
"""
Get or set the CadViewerWidget traitlet `clip_planes`
see [CadViewerWidget.clip_planes](./widget.html#cad_viewer_widget.widget.CadViewerWidget.clip_planes)
"""
return self.widget.clip_planes
@clip_planes.setter
def clip_planes(self, value):
self.widget.clip_planes = value
@property
def js_debug(self):
"""
Get or set the CadViewerWidget traitlet `js_debug`
see [CadViewerWidget.js_debug](./widget.html#cad_viewer_widget.widget.CadViewerWidget.js_debug)
"""
return self.widget.js_debug
@js_debug.setter
def js_debug(self, value):
self.widget.js_debug = value
@property
def tools(self):
"""
Get or set the CadViewerWidget traitlet `tools`
see [CadViewerWidget.tools](./widget.html#cad_viewer_widget.widget.CadViewerWidget.tools)
"""
return self.widget.tools
@tools.setter
def tools(self, value):
self.widget.tools = value
@property
def glass(self):
"""
Get or set the CadViewerWidget traitlet `glass`
        see [CadViewerWidget.glass](./widget.html#cad_viewer_widget.widget.CadViewerWidget.glass)
"""
return self.widget.glass
@glass.setter
def glass(self, value):
self.widget.glass = value
@property
def cad_width(self):
"""
Get or set the CadViewerWidget traitlet `cad_width`
        see [CadViewerWidget.cad_width](./widget.html#cad_viewer_widget.widget.CadViewerWidget.cad_width)
"""
return self.widget.cad_width
@cad_width.setter
def cad_width(self, value):
self.widget.cad_width = value
@property
def tree_width(self):
"""
Get or set the CadViewerWidget traitlet `tree_width`
        see [CadViewerWidget.tree_width](./widget.html#cad_viewer_widget.widget.CadViewerWidget.tree_width)
"""
return self.widget.tree_width
@tree_width.setter
def tree_width(self, value):
self.widget.tree_width = value
@property
def height(self):
"""
Get or set the CadViewerWidget traitlet `height`
        see [CadViewerWidget.height](./widget.html#cad_viewer_widget.widget.CadViewerWidget.height)
"""
return self.widget.height
@height.setter
def height(self, value):
self.widget.height = value
@property
def pan_speed(self):
"""
Get or set the CadViewerWidget traitlet `pan_speed`
see [CadViewerWidget.pan_speed](./widget.html#cad_viewer_widget.widget.CadViewerWidget.pan_speed)
"""
return self.widget.pan_speed
@pan_speed.setter
def pan_speed(self, value):
self.widget.pan_speed = value
@property
def rotate_speed(self):
"""
Get or set the CadViewerWidget traitlet `rotate_speed`
see [CadViewerWidget.rotate_speed](./widget.html#cad_viewer_widget.widget.CadViewerWidget.rotate_speed)
"""
return self.widget.rotate_speed
@rotate_speed.setter
def rotate_speed(self, value):
self.widget.rotate_speed = value
@property
def zoom_speed(self):
"""
Get or set the CadViewerWidget traitlet `zoom_speed`
see [CadViewerWidget.zoom_speed](./widget.html#cad_viewer_widget.widget.CadViewerWidget.zoom_speed)
"""
return self.widget.zoom_speed
@zoom_speed.setter
def zoom_speed(self, value):
self.widget.zoom_speed = value
#
# Camera position handling
#
@property
def zoom(self):
"""
Get or set the CadViewerWidget traitlet `zoom`
see [CadViewerWidget.zoom](./widget.html#cad_viewer_widget.widget.CadViewerWidget.zoom)
"""
return self.widget.zoom
@zoom.setter
def zoom(self, value):
self.widget.zoom = value
@property
def position(self):
"""
Get or set the CadViewerWidget traitlet `position`
see [CadViewerWidget.position](./widget.html#cad_viewer_widget.widget.CadViewerWidget.position)
"""
return self.widget.position
@position.setter
def position(self, value):
self.widget.position = value
@property
def quaternion(self):
"""
Get or set the CadViewerWidget traitlet `quaternion`
see [CadViewerWidget.quaternion](./widget.html#cad_viewer_widget.widget.CadViewerWidget.quaternion)
"""
if self.widget.control == "orbit":
print("quaternion controlled internally for control=='orbit'")
return None
else:
return self.widget.quaternion
@quaternion.setter
def quaternion(self, value):
if self.widget.control == "orbit":
print("quaternion controlled internally for control=='orbit'")
else:
self.widget.quaternion = value
@property
def target(self):
"""
Get or set the CadViewerWidget traitlet `target`
        see [CadViewerWidget.target](./widget.html#cad_viewer_widget.widget.CadViewerWidget.target)
"""
# self.update_camera_location()
return self.widget.target
@target.setter
def target(self, value):
self.widget.target = value
@property
def last_pick(self):
"""
Get or set the CadViewerWidget traitlet `lastPick`
see [CadViewerWidget.lastPick](./widget.html#cad_viewer_widget.widget.CadViewerWidget.lastPick)
"""
return self.widget.lastPick
@property
def control(self):
"""
        Get the CadViewerWidget traitlet `control` (read-only)
see [CadViewerWidget.control](./widget.html#cad_viewer_widget.widget.CadViewerWidget.control)
"""
return self.widget.control
@property
def pinning(self):
"""
Get or set the CadViewerWidget traitlet `pinning`
see [CadViewerWidget.pinning](./widget.html#cad_viewer_widget.widget.CadViewerWidget.pinning)
"""
return self.widget.pinning
@pinning.setter
def pinning(self, flag):
self.widget.pinning = flag
#
# Animation handling
#
def clear_tracks(self):
"""
Remove animation tracks from CAD view
"""
self.tracks = []
self.widget.tracks = []
def _check_track(self, track):
paths = self.widget.states.keys()
if not any([(f"{path}/").startswith(f"{track.path}/") for path in paths]):
raise ValueError(f"{track.path} is not a valid subpath of any of {list(paths)}")
        actions = ["t", "tx", "ty", "tz", "q", "rx", "ry", "rz"]
        if track.action not in actions:
            raise ValueError(f"{track.action} is not a valid action, expected one of {actions}")
        if len(track.times) != len(track.values):
            raise ValueError("Track times and values need to have the same length")
        if not all([isinstance(t, (int, float)) for t in track.times]):
            raise ValueError("Time values need to be int or float")
if track.action in ["tx", "ty", "tz", "rx", "ry", "rz"]:
if not all([isinstance(t, (int, float)) for t in track.values]):
                raise ValueError(f"Values need to be int or float for action '{track.action}'")
if track.action in ["t", "q"]:
size = 3 if track.action == "t" else 4
if not all(
[
isinstance(v, (list, tuple)) and (len(v) == size) and all([isinstance(x, (int, float)) for x in v])
for v in track.values
]
):
                raise ValueError(
                    f"Values need to be {size}-dim lists of int or float for action '{track.action}'"
                )
return track
def add_track(self, track):
"""
Add an animation track to CAD view
Parameters
----------
track: AnimationTrack
Animation track, see [AnimationTrack](/widget.html#cad_viewer_widget.widget.AnimationTrack)
"""
self.tracks.append(self._check_track(track))
def add_tracks(self, tracks):
"""
Add a list of animation tracks to CAD view
Parameters
----------
tracks: list of AnimationTrack
List of Animation tracks, see [AnimationTrack](/widget.html#cad_viewer_widget.widget.AnimationTrack)
"""
checked_tracks = (
[] if tracks is None else [self._check_track(track) for track in tracks]
) # enforce a new array
self.tracks = checked_tracks
def animate(self, speed=1):
"""
Send animation tracks to CAD view
Parameters
----------
speed : float, default: 1
Animation speed, will be forwarded via `animation_speed` traitlet
"""
self.widget.tracks = [track.to_array() for track in self.tracks]
self.widget.animation_speed = speed
self.execute("animate")
# self.play()
def play(self):
"""
Start or unpause animation
"""
self.execute("viewer.controlAnimation", ["play"])
def stop(self):
"""
Stop animation
"""
self.execute("viewer.controlAnimation", ["stop"])
def pause(self):
"""
Pause or unpause animation
"""
self.execute("viewer.controlAnimation", ["pause"])
def pin_as_png(self):
"""
Pin CAD View as PNG
"""
self.execute("pinAsPng", None)
def export_png(self, filename):
"""
Save CAD View as PNG
"""
self.execute("saveAsPng", filename)
self.execute("viewer.update", True)
#
# Tab handling
#
def select_tree(self):
"""
Select Navigation tree tab
"""
self.widget.tab = "tree"
def select_clipping(self):
"""
Select Clipping tab
"""
self.widget.tab = "clip"
#
# Rotations
#
def set_camera(self, direction):
"""
Set camera to one of the predefined locations
Parameters
----------
direction : string
one of ["iso", "top", "bottom", "left", "right", "front", "rear"]
"""
self.execute("viewer.camera.presetCamera", [direction])
self.execute("viewer.update", [])
def rotate_x(self, angle):
"""
Rotate CAD obj around x-axis - trackball controls only
Parameters
----------
angle : float
The rotation angle in degrees
"""
if self.control != "trackball":
raise NameError("rotateX only works for trackball control")
self.execute("viewer.controls.rotateX", (angle,))
def rotate_y(self, angle):
"""
Rotate CAD obj around y-axis - trackball controls only
Parameters
----------
angle : float
The rotation angle in degrees
"""
if self.control != "trackball":
raise NameError("rotateY only works for trackball control")
self.execute("viewer.controls.rotateY", (angle,))
def rotate_z(self, angle):
"""
Rotate CAD obj around z-axis - trackball controls only
Parameters
----------
angle : float
The rotation angle in degrees
"""
if self.control != "trackball":
raise NameError("rotateZ only works for trackball control")
self.execute("viewer.controls.rotateZ", (angle,))
def rotate_up(self, angle):
"""
Rotate CAD obj up (positive angle) and down (negative angle) - orbit controls only
Parameters
----------
angle : float
The rotation angle in degrees
"""
if self.control != "orbit":
raise NameError("rotateUp only works for orbit control")
self.execute("viewer.controls.rotateUp", (angle,))
def rotate_left(self, angle):
"""
Rotate CAD obj to the left (positive angle) and right (negative angle) - orbit controls only
Parameters
----------
angle : float
The rotation angle in degrees
"""
if self.control != "orbit":
raise NameError("rotateLeft only works for orbit control")
self.execute("viewer.controls.rotateLeft", (angle,))
#
# Exports
#
def export_html(self, filename="cadquery.html", title="CadQuery"):
        if self.widget.title is not None:
            raise RuntimeError("export_html does not work with sidecar. Show the object again in a cell viewer")
pinning = self.pinning
self.pinning = False
embed_minimal_html(filename, title=title, views=[self.widget], state=dependency_state(self.widget))
self.pinning = pinning
#
# Custom message handling
#
def execute(self, method, args=None):
"""
Execute a method of a Javascript object
Parameters
----------
        method : string
            A JavaScript object path relative to the 'CadViewer' object, e.g. `abc.def[3].method` where
            `abc.def[3]` is the object notation relative to the 'CadViewer' object and `method` is the method to call
        args : list of any
            The arguments passed to `abc.def[3].method(args)`
"""
def wrapper(change=None):
if change is None:
self.msg_id += 1
path = self._parse(method)
content = {
"type": "cad_viewer_method",
"id": self.msg_id,
"method": json.dumps(path),
"args": json.dumps(args),
}
self.widget.send(content=content, buffers=None)
return self.msg_id
if args is not None and not isinstance(args, (tuple, list)):
args = [args]
return wrapper()
def remove_ui_elements(self, elements):
self.execute("viewer.trimUI", [elements, False])
def dump_model(self, shapes=False):
print(
dedent(
f"""
DISPLAY
title: {self.widget.title}
anchor: {self.widget.anchor}
cad_width: {self.widget.cad_width}
height: {self.widget.height}
tree_width: {self.widget.tree_width}
theme: {self.widget.theme}
pinning: {self.widget.pinning}
SHAPES
shapes: {self.widget.shapes if shapes else "... (set shapes=True)"}
states: {self.widget.states}
tracks: {self.widget.tracks}
RENDERER
normal_len: {self.widget.normal_len}
default_edge_color: {self.widget.default_edge_color}
default_opacity: {self.widget.default_opacity}
ambient_intensity: {self.widget.ambient_intensity}
direct_intensity: {self.widget.direct_intensity}
VIEWER
timeit: {self.widget.timeit}
tools: {self.widget.tools}
glass: {self.widget.glass}
ortho: {self.widget.ortho}
control: {self.widget.control}
axes: {self.widget.axes}
axes0: {self.widget.axes0}
grid: {self.widget.grid}
ticks: {self.widget.ticks}
transparent: {self.widget.transparent}
black_edges: {self.widget.black_edges}
collapse: {self.widget.collapse}
tab: {self.widget.tab}
clip_intersection: {self.widget.clip_intersection}
clip_planes: {self.widget.clip_planes}
clip_normal_0: {self.widget.clip_normal_0}
clip_normal_1: {self.widget.clip_normal_1}
clip_normal_2: {self.widget.clip_normal_2}
clip_slider_0: {self.widget.clip_slider_0}
clip_slider_1: {self.widget.clip_slider_1}
clip_slider_2: {self.widget.clip_slider_2}
reset_camera: {self.widget.reset_camera}
position: {self.widget.position}
quaternion: {self.widget.quaternion}
target: {self.widget.target}
zoom: {self.widget.zoom}
position0: {self.widget.position0}
quaternion0: {self.widget.quaternion0}
target0: {self.widget.target0}
zoom0: {self.widget.zoom0}
zoom_speed: {self.widget.zoom_speed}
pan_speed: {self.widget.pan_speed}
rotate_speed: {self.widget.rotate_speed}
animation_speed: {self.widget.animation_speed}
state_updates: {self.widget.state_updates}
lastPick: {self.widget.lastPick}
INTERNAL
result: {self.widget.result}
disposed: {self.widget.disposed}
initialize: {self.widget.initialize}
js_debug: {self.widget.js_debug}
image_id: {self.widget.image_id}
"""
)
)
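    # Usage sketch for the animation API above (illustrative only; the
    # AnimationTrack constructor signature and the path/values are assumptions,
    # not confirmed by this module):
    #
    #     track = AnimationTrack("/base/part", "rz", [0, 1, 2], [0, 90, 180])
    #     viewer.add_track(track)
    #     viewer.animate(speed=2)  # forwards tracks and speed to the widget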
|
# pylint: disable=consider-using-f-string
"""Wrapper over the FTrack API to make it more pythonic to use.
Supports the querying and creation of objects, and a way to build
event listeners.
It's designed to hide the SQL-like syntax in favour of an object-oriented
approach. Inspiration was taken from SQLAlchemy.
"""
__all__ = ['FTrackQuery', 'entity', 'and_', 'or_', 'not_', 'event',
'select', 'create', 'update', 'delete']
__version__ = '1.7.0'
import ftrack_api
from . import utils
from .abstract import AbstractStatement
from .query import Query, entity, and_, or_, not_
from .event import event
from .statement import select, create, update, delete
class FTrackQuery(ftrack_api.Session):
# pylint: disable=arguments-differ
"""Expansion of the ftrack_api.Session class."""
def __init__(self, **kwargs):
"""Attempt to initialise the connection.
If the debug argument is set, the connection will be ignored.
"""
self.debug = kwargs.pop('debug', False)
self._logger = kwargs.pop('logger', utils.logger)
self._logger.debug('Connecting...')
if not self.debug:
super(FTrackQuery, self).__init__(**kwargs)
self._logger.debug('New session initialised.')
def __getattribute__(self, attr):
"""Get an entity type if it exists.
The standard AttributeError will be raised if not.
"""
try:
return super(FTrackQuery, self).__getattribute__(attr)
except AttributeError:
if self.debug or attr in super(FTrackQuery, self).__getattribute__('types'):
return Query(self, attr)
raise
def close(self, *args, **kwargs):
"""Avoid error when closing session in debug mode."""
if not self.debug:
return super(FTrackQuery, self).close(*args, **kwargs)
def get(self, value, _value=None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg
"""Get any entity from its ID.
The _value argument is for compatibility with ftrack_api.Session.
"""
if _value is None:
entity = 'Context'
else:
entity, value = value, _value
self._logger.debug('Get: %s(%r)', entity, value)
return super(FTrackQuery, self).get(entity, value, *args, **kwargs)
def query(self, query, *args, **kwargs):
"""Create an FTrack query object from a string."""
query = str(query)
self._logger.debug('Query: %s', query)
return super(FTrackQuery, self).query(query, *args, **kwargs)
def create(self, entity, data, *args, **kwargs):
"""Create a new entity."""
if not kwargs.get('reconstructing', False):
self._logger.debug('Create: %s(%s)', entity, utils.dict_to_str(data))
return super(FTrackQuery, self).create(entity, data, *args, **kwargs)
def delete(self, entity, *args, **kwargs):
"""Delete an FTrack entity."""
self._logger.debug('Delete: %r', entity)
return super(FTrackQuery, self).delete(entity, *args, **kwargs)
def where(self, *args, **kwargs):
"""Set entity type as TypedContext if none provided."""
return self.TypedContext.where(*args, **kwargs)
def commit(self, *args, **kwargs):
"""Commit changes."""
self._logger.debug('Changes saved.')
return super(FTrackQuery, self).commit(*args, **kwargs)
def rollback(self, *args, **kwargs):
"""Rollback changes."""
self._logger.debug('Changes discarded.')
return super(FTrackQuery, self).rollback(*args, **kwargs)
def populate(self, entities, projections):
"""Populate new values."""
if isinstance(projections, (list, tuple, set)):
projections = ','.join(map(str, projections))
return super(FTrackQuery, self).populate(entities, projections)
def execute(self, stmt):
"""Execute a statement."""
if isinstance(stmt, AbstractStatement):
return stmt.execute(self)
raise NotImplementedError(type(stmt))
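# Usage sketch (illustrative; assumes valid FTrack credentials are configured
# for ftrack_api.Session, and that the entity/attribute names exist on the
# target server):
#
#     with FTrackQuery() as session:
#         # Attribute access returns a Query object for that entity type
#         project = session.Project.where(name='my_project').first()
#         # Statements built with the imported helpers run via execute()
#         tasks = session.execute(select('Task').where(name='lighting'))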
|
#! -*- coding: utf-8 -*-
# BERT for a Seq2Seq task, using the UNILM scheme
# Write-up: https://kexue.fm/archives/6933
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer, load_vocab
from bert4torch.snippets import sequence_padding, text_segmentate
from bert4torch.snippets import AutoRegressiveDecoder, Callback, ListDataset
import torch
from torchinfo import summary
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import glob
# Basic parameters
maxlen = 256
batch_size = 16
epochs = 10000
# BERT configuration
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Load and simplify the vocabulary, then build the tokenizer
token_dict, keep_tokens = load_vocab(
dict_path=dict_path,
simplified=True,
startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
)
tokenizer = Tokenizer(token_dict, do_lower_case=True)
def collate_fn(batch):
    """Single-sample format: [CLS]content[SEP]title[SEP]
    """
batch_token_ids, batch_segment_ids = [], []
    for txt in batch:
        with open(txt, encoding='utf-8') as f:
            text = f.read()
        text = text.split('\n')
        if len(text) > 1:
            title = text[0]
            content = '\n'.join(text[1:])
token_ids, segment_ids = tokenizer.encode(content, title, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], [batch_token_ids, batch_segment_ids]
train_dataloader = DataLoader(ListDataset(glob.glob('F:/Projects/data/corpus/sentence_classification/THUCNews/*/*.txt')),
batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
model = build_transformer_model(
config_path,
checkpoint_path,
with_mlm='linear',
application='unilm',
    keep_tokens=keep_tokens,  # keep only the tokens in keep_tokens, simplifying the original vocab
).to(device)
summary(model, input_data=[next(iter(train_dataloader))[0]])
class CrossEntropyLoss(nn.CrossEntropyLoss):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, outputs, target):
        '''
        y_pred: [btz, seq_len, vocab_size]
        targets: y_true, y_segment
        UNILM style: the non-seq2seq part has to be masked out manually
        '''
_, y_pred = outputs
y_true, y_mask = target
        y_true = y_true[:, 1:]  # target token_ids
        y_mask = y_mask[:, 1:]  # segment_ids, which conveniently mark the positions to predict
        y_pred = y_pred[:, :-1, :]  # predicted sequence, shifted by one position
y_pred = y_pred.reshape(-1, y_pred.shape[-1])
y_true = (y_true*y_mask).flatten()
return super().forward(y_pred, y_true)
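# A shape walk-through (added sketch): for a sample "[CLS] c1 c2 [SEP] t1 t2 [SEP]"
# the segment_ids are [0, 0, 0, 0, 1, 1, 1]. After the one-position shift above,
# y_true * y_mask zeroes out every content token, and because the loss below is
# compiled with ignore_index=0, only the title tokens (and the closing [SEP])
# contribute to the cross entropy.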
model.compile(loss=CrossEntropyLoss(ignore_index=0), optimizer=optim.Adam(model.parameters(), 1e-5))
class AutoTitle(AutoRegressiveDecoder):
    """Seq2seq decoder
    """
@AutoRegressiveDecoder.wraps(default_rtype='logits')
def predict(self, inputs, output_ids, states):
token_ids, segment_ids = inputs
token_ids = torch.cat([token_ids, output_ids], 1)
segment_ids = torch.cat([segment_ids, torch.ones_like(output_ids, device=device)], 1)
_, y_pred = model.predict([token_ids, segment_ids])
return y_pred[:, -1, :]
def generate(self, text, topk=1, topp=0.95):
max_c_len = maxlen - self.maxlen
token_ids, segment_ids = tokenizer.encode(text, maxlen=max_c_len)
        output_ids = self.beam_search([token_ids, segment_ids], topk=topk)  # beam-search decoding
return tokenizer.decode(output_ids.cpu().numpy())
autotitle = AutoTitle(start_id=None, end_id=tokenizer._token_end_id, maxlen=32, device=device)
def just_show():
s1 = u'夏天来临,皮肤在强烈紫外线的照射下,晒伤不可避免,因此,晒后及时修复显得尤为重要,否则可能会造成长期伤害。专家表示,选择晒后护肤品要慎重,芦荟凝胶是最安全,有效的一种选择,晒伤严重者,还请及 时 就医 。'
s2 = u'8月28日,网络爆料称,华住集团旗下连锁酒店用户数据疑似发生泄露。从卖家发布的内容看,数据包含华住旗下汉庭、禧玥、桔子、宜必思等10余个品牌酒店的住客信息。泄露的信息包括华住官网注册资料、酒店入住登记的身份信息及酒店开房记录,住客姓名、手机号、邮箱、身份证号、登录账号密码等。卖家对这个约5亿条数据打包出售。第三方安全平台威胁猎人对信息出售者提供的三万条数据进行验证,认为数据真实性非常高。当天下午 ,华 住集 团发声明称,已在内部迅速开展核查,并第一时间报警。当晚,上海警方消息称,接到华住集团报案,警方已经介入调查。'
for s in [s1, s2]:
        print(u'Generated title:', autotitle.generate(s))
class Evaluator(Callback):
    """Evaluate and save the best model
    """
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, steps, epoch, logs=None):
        # save the best model
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
# model.save_weights('./best_model.pt')
        # show demo output
just_show()
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(
train_dataloader,
steps_per_epoch=100,
epochs=epochs,
callbacks=[evaluator]
)
else:
model.load_weights('./best_model.pt')
|
import toml
class TomlError(Exception):
pass
# Read configuration file
def load_config(filename):
config = toml.load(filename)
# Check the necessary fields are provided in the toml
    if config.get('contract') is None or config['contract'].get('pricefeeds') is None or config['contract'].get('wrb') is None:
        raise TomlError("Please specify pricefeeds and wrb as \n[contract]\npricefeeds=0xaaaa\nwrb=0xbbbb")
    elif config.get('account') is None or config['account'].get('address') is None:
        raise TomlError("Please specify account as \n[account]\naddress=0xaaaa")
    elif config.get('network') is None or config['network'].get('provider') is None:
        raise TomlError("Please specify network as \n[network]\nprovider=127.0.0.1:8545")
else:
return config
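# Example configuration accepted by load_config (a sketch; the addresses and
# provider below are placeholders):
#
#     [contract]
#     pricefeeds = "0xaaaa"
#     wrb = "0xbbbb"
#
#     [account]
#     address = "0xcccc"
#
#     [network]
#     provider = "127.0.0.1:8545"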
|
def format_error(error_message):
return {"err": error_message}
|
#!/usr/bin/env python
"""Tests for `pyargcbr` package."""
import os
from typing import List, Dict
import pytest
from pyargcbr.agents.metrics import levenshtein_distance as cmp
from pyargcbr.cbrs.domain_cbr import DomainCBR
from pyargcbr.knowledge_resources.domain_case import DomainCase
from pyargcbr.knowledge_resources.domain_context import DomainContext
from pyargcbr.knowledge_resources.justification import Justification
from pyargcbr.knowledge_resources.premise import Premise
from pyargcbr.knowledge_resources.problem import Problem
from pyargcbr.knowledge_resources.similar_domain_case import SimilarDomainCase
def similar_domain_case_comparison(case1: SimilarDomainCase, case2: SimilarDomainCase):
premises1 = list(case1.case.problem.context.premises.values())
premises2 = list(case2.case.problem.context.premises.values())
solutions1 = case1.case.solutions
solutions2 = case2.case.solutions
if case1.similarity == case2.similarity \
and cmp(case1.case.justification.description, case2.case.justification.description) == 0 \
and len(premises1) == len(premises2) \
and len(solutions1) == len(solutions2):
for i in range(0, len(premises1)):
if premises1[i] != premises2[i]:
return False
for i in range(0, len(solutions1)):
if solutions1[i].conclusion.id != solutions2[i].conclusion.id \
or cmp(solutions1[i].conclusion.description, solutions2[i].conclusion.description) > 0:
return False
        return True
    return False
class TestDomainCBR:
cbr: DomainCBR = None
def retrieval_accuracy(self):
for a_case in self.cbr.get_all_cases_list():
retrieved_cases = self.cbr.retrieve_and_retain(a_case, 1.0)
for sim_case in retrieved_cases:
equal = True
sim_premises = list(sim_case.case.problem.context.premises.values())
a_premises = list(a_case.problem.context.premises.values())
for i in range(0, len(a_premises)):
if sim_premises[i] != a_premises[i]:
equal = False
assert equal
def retrieval_consistency(self):
all_cases = self.cbr.get_all_cases()
all_cases2: List[List[DomainCase]] = []
for cases_list in all_cases:
all_cases2.append(cases_list)
for cases_list in all_cases2:
for a_case in cases_list:
# Query the domainCBR for the list similarCases1
similar_cases1 = self.cbr.retrieve_and_retain(a_case, 0.0)
# Query again the domainCBR for the list of similarCases2
similar_cases2 = self.cbr.retrieve_and_retain(a_case, 0.0)
                for case1 in similar_cases1:
                    assert any(
                        similar_domain_case_comparison(case1, case2)
                        for case2 in similar_cases2
                    )
def case_duplication(self):
all_cases = self.cbr.get_all_cases()
all_cases2: List[List[DomainCase]] = []
for cases_list in all_cases:
all_cases2.append(cases_list)
for cases_list in all_cases2:
for a_case in cases_list:
similar_cases = self.cbr.retrieve_and_retain(a_case, 0.0)
for i in range(len(similar_cases)):
case1 = similar_cases.pop(0)
assert case1 not in similar_cases
def operating(self):
first_case: DomainCase = self.cbr.get_all_cases_list()[0]
similar_to_first_case = self.cbr.retrieve_and_retain(first_case, 0.0)
assert similar_domain_case_comparison(similar_to_first_case[0],
SimilarDomainCase(first_case, similar_to_first_case[0].similarity))
premises: Dict[int, Premise] = {}
for premise in first_case.problem.context.premises.values():
premises[premise.id] = premise
first_premise = list(premises.values())[0]
first_premise.content += "aa"
solutions = first_case.solutions
justification = Justification("justification")
dom_case = DomainCase(problem=Problem(
context=DomainContext(premises=premises)), solutions=solutions, justification=justification)
self.cbr.add_case(dom_case)
similar_cases = self.cbr.retrieve_and_retain(dom_case, 0.0)
assert similar_domain_case_comparison(similar_cases[0],
SimilarDomainCase(dom_case, similar_cases[0].similarity))
similar_to_first_case = self.cbr.retrieve_and_retain(first_case, 0.0)
assert similar_domain_case_comparison(similar_to_first_case[0],
SimilarDomainCase(first_case, similar_to_first_case[0].similarity))
@pytest.fixture
def domain_cbr_setup(self):
file = os.path.abspath("tests/domain_cases_py.dat")
self.cbr = DomainCBR(file, "/tmp/null", -1)
def test_content(self, domain_cbr_setup):
self.retrieval_accuracy()
self.retrieval_consistency()
self.case_duplication() # This part is really slow
self.operating()
|
"""System Module"""
from django.test import TestCase
class UrlWeatherTest(TestCase):
"""URL Loading Test"""
def test_weather_url(self):
"""Test to observe if weather page URL is loading correctly"""
response = self.client.get('')
self.assertEqual(response.status_code, 200)
|
from pathlib import Path
from typing import Iterable
import git
import pytest
from commodore.component import Component
def setup_components_upstream(tmp_path: Path, components: Iterable[str]):
# Prepare minimum component directories
upstream = tmp_path / "upstream"
component_urls = {}
component_versions = {}
for component in components:
repo_path = upstream / component
        component_urls[component] = f"file://{repo_path.resolve()}"
component_versions[component] = "master"
repo = git.Repo.init(repo_path)
class_dir = repo_path / "class"
class_dir.mkdir(parents=True, exist_ok=True)
(class_dir / "defaults.yml").touch(exist_ok=True)
repo.index.add(["class/defaults.yml"])
repo.index.commit("component defaults")
return component_urls, component_versions
def _setup_component(tmp_path: Path, cn: str):
urls, _ = setup_components_upstream(tmp_path, [cn])
return Component(cn, repo_url=urls[cn], directory=tmp_path / "test-component")
@pytest.mark.bench
def bench_component_checkout(benchmark, tmp_path: Path):
c = _setup_component(tmp_path, "test-component")
benchmark(c.checkout)
|
import numpy as np
import matplotlib.pyplot as plt
import time
import sys
sys.path.append('../')
from FollyHighLevelControllerV2 import FollyHighLevelControllerV2
# Initialize Molly position
molly_position = np.array([-4, -8])
# Initialize Folly position
folly_position = np.array([-2, -7.5, 0])
# object length
object_length = 2
# line path constraint
path_constraints = np.array([[4, 10, 80]])
# simulation
T = 150 # Total time
dt = 0.5 # time per iteration
sigma = 0.0 # simulation noise standard deviation
# initialize MPC Alg
follyHLC = FollyHighLevelControllerV2(molly_position,
folly_position,
object_length,
                                      path_constraints,  # path_constraints
10, # horizon length
dt, # step time
0.1 # maximum speed
)
# constant molly velocity
molly_velocity_command = np.array([0.01, 0.05])
# total deviation
deviation = 0
# path_constraints plot function
def plot_constraints(path_constraints, x_domain=np.linspace(-10, 0, 10)):
for n in range(path_constraints.shape[0]):
plt.plot(x_domain, (-path_constraints[n, 2] - path_constraints[n, 0] * x_domain) / path_constraints[n, 1])
prev_time = time.time() # time iteration for real time plot
new_time = time.time()
plt.show(block=False)
for t in range(T):
load_length_deviation = np.linalg.norm(folly_position[0:2] - molly_position) - object_length
deviation += np.abs(load_length_deviation)
plt.clf()
# plot current state
plot_constraints(path_constraints) # constraint path
plt.plot([molly_position[0], folly_position[0]], [molly_position[1], folly_position[1]], 'b-', linewidth=2,
label='Load: dev {:.1f}cm'.format(load_length_deviation * 100)) # object lifted
plt.plot(molly_position[0], molly_position[1], '.', color='olive', label='Molly Position') # MollyPosition
plt.plot(folly_position[0], folly_position[1], 'r.', label='Folly Position') # Folly Actual Positions
plt.legend()
plt.axis('equal')
plt.title(
'Folly High Level Controller V2 {0}/{1} Iteration Time {2:.2f}s'.format(t, T,
follyHLC.optimizer.m.options.SOLVETIME))
# plt.title(
# 'Folly High Level Controller V2 {0}/{1} Iteration Time {2:.2f}s'.format(t, T, new_time - prev_time))
plt.xlabel('[m]')
plt.ylabel('[m]')
plt.pause(0.01)
time.sleep(np.maximum(dt - (new_time - prev_time), 0.01)) # pause to match real time
prev_time = new_time
new_time = time.time()
# actuate molly command with noise
molly_position = molly_position + dt * molly_velocity_command + dt * np.random.normal(0, sigma, 2)
# compute folly command
folly_velocity_command = follyHLC.update_then_calculate_optimal_actuation(molly_position, folly_position,
molly_velocity_command)
# actuate optimal command with noise
folly_position = folly_position + dt * folly_velocity_command + dt * np.random.normal(0, sigma, 3)
print('The average deviation was {:.1f}cm.'.format(deviation / T * 100))
|
# Same problem setup as in `dq_darcy_stokes.py` except mixed
# formulation is used to solve the Darcy subproblem and thus
# we have a Lagrange multiplier on the interface to enforce the
# coupling (mass conservation in particular)
from utils import rotate
import sympy as sp
from dolfin import *
from xii import *
import ulfy
def setup_problem(i, mms, parameters, stokes_CR):
    '''Solution of the Darcy-immersed-Stokes test case'''
meshD, boundariesD = mms['get_geometry'](i, 'inner')
meshS, boundariesS = mms['get_geometry'](i, 'outer')
interface, subdomainsI = mms['get_geometry'](i, 'interface')
dsD = Measure('ds', domain=meshD, subdomain_data=boundariesD)
nD = FacetNormal(meshD)
dsS = Measure('ds', domain=meshS, subdomain_data=boundariesS)
nS = FacetNormal(meshS)
tS = rotate(nS)
dx_ = Measure('dx', domain=interface, subdomain_data=subdomainsI)
nD_ = OuterNormal(interface, [0.5, 0.5])
nS_ = -nD_ # We have nS as master
tS_ = rotate(nS_)
# And now for the fun stuff
if not stokes_CR:
VS = VectorFunctionSpace(meshS, 'CG', 2)
QS = FunctionSpace(meshS, 'CG', 1)
else:
VS = VectorFunctionSpace(meshS, 'CR', 1)
QS = FunctionSpace(meshS, 'DG', 0)
VD = FunctionSpace(meshD, 'RT', 1)
QD = FunctionSpace(meshD, 'DG', 0)
M = FunctionSpace(interface, 'DG', 0)
W = [VS, VD, QS, QD, M]
uS, uD, pS, pD, l = map(TrialFunction, W)
vS, vD, qS, qD, m = map(TestFunction, W)
TuS, TvS = (Trace(x, interface) for x in (uS, vS))
TuD, TvD = (Trace(x, interface) for x in (uD, vD))
# Material parameters
mu, K, alpha = (Constant(parameters[key]) for key in ('mu', 'K', 'alpha'))
a = block_form(W, 2)
a[0][0] = (Constant(2*mu)*inner(sym(grad(uS)), sym(grad(vS)))*dx +
alpha*inner(dot(TuS, tS_), dot(TvS, tS_))*dx_)
# Stabilization for CR
if stokes_CR:
hA = FacetArea(meshS)
a[0][0] += 2*mu/avg(hA)*inner(jump(uS, nS), jump(vS, nS))*dS
a[0][2] = -inner(pS, div(vS))*dx
a[0][4] = inner(l, dot(TvS, nS_))*dx_
a[1][1] = (1/K)*inner(uD, vD)*dx
a[1][3] = -inner(pD, div(vD))*dx
a[1][4] = inner(l, dot(TvD, nD_))*dx_
a[2][0] = -inner(qS, div(uS))*dx
a[3][1] = -inner(qD, div(uD))*dx
a[4][0] = inner(m, dot(TuS, nS_))*dx_
a[4][1] = inner(m, dot(TuD, nD_))*dx_
# We will have 7, 8 as Neumann boundaries for Stokes and 5, 6 for Dirichlet
lm_tags = (1, 2, 3, 4)
L = block_form(W, 1)
L[0] = (inner(mms['fS'], vS)*dx
# Contribution from Neumann bcs on the boundary
+ sum(inner(mms['traction_S'][tag], vS)*dsS(tag) for tag in (7, 8))
# Multiplier contrib from sigma.n.t
- sum(inner(mms['g_t'][tag], dot(vS, tS))*dsS(tag) for tag in lm_tags))
# Multiplier contrib from sigma.n.n
L[1] = sum(inner(mms['g_n'][tag], dot(vD, nD))*dsD(tag) for tag in lm_tags)
L[3] = -inner(mms['fD'], qD)*dx
# Interface mass conservation
L[4] = sum(inner(mms['g_u'][tag], m)*dx_(tag) for tag in lm_tags)
VS_bcs = [DirichletBC(VS, mms['velocity_S'][tag], boundariesS, tag) for tag in (5, 6)]
W_bcs = [VS_bcs, [], [], [], []]
return a, L, W, W_bcs
# --------------------------------------------------------------------
if __name__ == '__main__':
from common import ConvergenceLog, H1_norm, L2_norm, Hdiv_norm, broken_norm
from dq_darcy_stokes import setup_mms
import sys, argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Decide material parameters ...
parser.add_argument('--param_mu', type=float, default=1, help='Stokes viscosity')
parser.add_argument('--param_K', type=float, default=1, help='Darcy conductivity')
parser.add_argument('--param_alpha', type=float, default=1, help='BJS')
# ... and whether to use CR-P0 discretization for Stokes
parser.add_argument('--stokes_CR', type=int, default=0, choices=[0, 1])
args, _ = parser.parse_known_args()
# Reduce verbosity
set_log_level(40)
# For checking convergence we pick the solution of the test case ...
material_params = {k.split('_')[-1] : v for k, v in vars(args).items() if k.startswith('param_')}
mms = setup_mms(material_params)
uS_true, uD_true, pS_true, pD_true = (mms['solution'][k] for k in ('uS', 'uD', 'pS', 'pD'))
lm_true = mms['solution']['lm']
clog = ConvergenceLog({'uS': (uS_true, H1_norm, '1'),
'uD': (uD_true, Hdiv_norm, 'div'),
'pS': (pS_true, L2_norm, '0'),
'pD': (pD_true, L2_norm, '0'),
# Multiplier is defined piecewise
'lm': (lm_true.expressions, broken_norm(lm_true.subdomains, L2_norm), '0')
})
print(clog.header())
for i in range(6):
a, L, W, bcs = setup_problem(i, mms, parameters=material_params,
stokes_CR=args.stokes_CR)
# Use direct solver to get the solution
A, b = map(ii_assemble, (a, L))
A, b = apply_bc(A, b, bcs)
A, b = map(ii_convert, (A, b))
wh = ii_Function(W)
LUSolver(A, 'mumps').solve(wh.vector(), b)
uSh, uDh, pSh, pDh, lmh = wh
clog.add((uSh, uDh, pSh, pDh, lmh))
print(clog.report_last(with_name=False))
rates = tuple(clog[var].get_rate()[0] for var in ('uS', 'uD', 'pS', 'pD', 'lm'))
if args.stokes_CR:
expected = (1, )*5
else:
expected = (2, 1, 2, 1, 1)
passed = all(abs(r-e) < 0.1 for r, e in zip(rates, expected))
sys.exit(int(passed))
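# Example invocation (a sketch; substitute this script's actual file name):
#
#     python darcy_stokes_mixed.py --param_K 1e-2 --stokes_CR 1
#
# Note the script exits with status int(passed), i.e. 1 when every observed
# convergence rate matches the expected order, so a harness should test for 1.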
|
#! python3
# -*- encoding: utf-8 -*-
def test_repr_str():
from class_object import Pair
p = Pair(3, 4)
print(p)
print('p is {0!r}'.format(p))
print('p is {0}'.format(p))
def test_custom_date_format():
from class_object import Date
from datetime import date
d = date.today()
d = Date(d.year, d.month, d.day)
print(format(d))
print(format(d, 'mdy'))
print('The date is {:ymd}'.format(d))
def test_lazy_connection():
from functools import partial
from class_object import LazyConnection
    conn = LazyConnection(('www.python.org', 80))
with conn as s:
s.send(b'GET /index.html HTTP/1.0\r\n')
s.send(b'HOST: www.python.org\r\n')
s.send(b'\r\n')
resp = b''.join(iter(partial(s.recv, 8192), b''))
print(resp)
def test_circle():
from class_object import Circle
c = Circle(4.0)
print(c.radius)
print(c.area)
print(c.perimeter)
def test_extend():
# print(B.__mro__)
from class_object import SubPerson
s = SubPerson('Guido')
print(s)
def test_descriptor():
from class_object import Stock
s = Stock('a', 10, 15.0)
print(s)
def test_lazyproperty():
from class_object import Circle
c = Circle(4.0)
print(vars(c))
print(c.area)
print(vars(c))
del c.area
print(vars(c))
print(c.area)
def test_sorteditems():
from class_object import SortedItems
items = SortedItems([5, 1, 3])
print(list(items))
items.add(2)
print(list(items))
def test_proxy():
class Spam:
def __init__(self, x):
self.x = x
def bar(self, y):
print('Spam.bar:', self.x, y)
s = Spam(2)
from class_object import Proxy
p = Proxy(s)
print(p.x)
def test_mixin():
from class_object import LoggedMappingMixin
class LoggedDict(LoggedMappingMixin, dict):
pass
d = LoggedDict()
d['x'] = 23
print(d['x'])
del d['x']
from collections import defaultdict
from class_object import SetOnceMappingMixin
class SetOnceDefaultDict(SetOnceMappingMixin, defaultdict):
pass
d = SetOnceDefaultDict(list)
d['x'].append(2)
d['x'].append(3)
print(d)
def test_state():
from class_object import Connection1
c = Connection1()
print(c._state)
c.open()
print(c._state)
print(c.read())
print(c.write('hello'))
c.close()
print(c._state)
def test_getattr():
from class_object import Point
p = Point(2, 3)
d = getattr(p, 'distance')(0, 0)
print(d)
import operator
d = operator.methodcaller('distance', 0, 0)(p)
print(d)
points = [
Point(1, 2),
Point(3, 0),
Point(10, -3),
Point(-5, -7),
Point(-1, 8),
Point(3, 2)
]
points.sort(key=operator.methodcaller('distance', 0, 0))
print(points)
def test_visitor():
from class_object import Number
from class_object import Add
from class_object import Evaluator
a = Number(0)
for n in range(1, 100000):
a = Add(a, Number(n))
e = Evaluator()
print(e.visit(a))
def test_comparable():
from class_object import House
from class_object import Room
h1 = House('h1', 'Cape')
h1.add_room(Room('Master Bedroom', 14, 21))
h1.add_room(Room('Living Room', 18, 20))
h1.add_room(Room('Kitchen', 12, 16))
h1.add_room(Room('Office', 12, 12))
h2 = House('h2', 'Ranch')
h2.add_room(Room('Master Bedroom', 14, 21))
h2.add_room(Room('Living Room', 18, 20))
h2.add_room(Room('Kitchen', 12, 16))
h3 = House('h3', 'Split')
h3.add_room(Room('Master Bedroom', 14, 21))
h3.add_room(Room('Living Room', 18, 20))
h3.add_room(Room('Office', 12, 16))
h3.add_room(Room('Kitchen', 15, 17))
houses = [h1, h2, h3]
print('Is h1 bigger than h2?', h1 > h2)
print('Is h2 smaller than h3?', h2 < h3)
print('Which one is biggest?', max(houses))
def test_cache_object():
import weakref
class CachedSpamManager:
def __init__(self):
self._cache = weakref.WeakValueDictionary()
        def get_spam(self, name):
if name not in self._cache:
s = Spam._new(name)
self._cache[name] = s
else:
s = self._cache[name]
return s
def clear(self):
self._cache.clear()
class Spam:
# _spam_cache = weakref.WeakValueDictionary()
manager = CachedSpamManager()
# def __new__(cls, name):
# if name in cls._spam_cache:
# return cls._spam_cache[name]
#
# else:
# self = super().__new__(cls)
# cls._spam_cache[name] = self
# def __init__(self, name):
# print('Initializing Spam')
# self.name = name
# def get_spam(self, name):
# return Spam.manager.get_spam(name)
@classmethod
def _new(cls, name):
self = cls.__new__(cls)
self.name = name
return self
c = CachedSpamManager()
s = c.get_spam('Dave')
print(s)
t = c.get_spam('Dave')
print(t)
print(s is t)
if __name__ == '__main__':
test_cache_object()
|
# Generated by Django 3.2.9 on 2021-11-24 21:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('SocialLinks', '0005_csseditor_title'),
]
operations = [
migrations.AddField(
model_name='csseditor',
name='link_hover_color',
field=models.CharField(default='#000000', max_length=7),
),
]
|
"""Test a specific audio driver (either for platform or silent). Only checks the use of the
interface. Any playback is silent."""
from __future__ import absolute_import, print_function
from tests import mock
import queue
import pytest
import time
import pyglet
_debug = False
pyglet.options['debug_media'] = _debug
pyglet.options['debug_media_buffers'] = _debug
import pyglet.app
from pyglet.media.drivers import silent
from pyglet.media.drivers.silent import SilentAudioDriver
from pyglet.media.sources import SourceGroup
from pyglet.media.sources.procedural import Silence
def _delete_driver():
if hasattr(pyglet.media.drivers._audio_driver, 'delete'):
pyglet.media.drivers._audio_driver.delete()
pyglet.media.drivers._audio_driver = None
def test_get_platform_driver():
driver = pyglet.media.drivers.get_audio_driver()
assert driver is not None
assert not isinstance(driver, SilentAudioDriver), 'Cannot load audio driver for your platform'
_delete_driver()
def test_get_silent_driver():
driver = pyglet.media.drivers.get_silent_audio_driver()
assert driver is not None
assert isinstance(driver, SilentAudioDriver)
_delete_driver()
class MockPlayer(object):
def __init__(self, event_loop):
self.queue = queue.Queue()
self.event_loop = event_loop
def dispatch_event(self, event_type, *args):
if _debug:
print('MockPlayer: event {} received @ {}'.format(event_type, time.time()))
self.queue.put((event_type, args))
self.event_loop.interrupt_event_loop()
def wait_for_event(self, timeout, *event_types):
end_time = time.time() + timeout
try:
while time.time() < end_time:
if _debug:
print('MockPlayer: run for {} sec @ {}'.format(end_time-time.time(),
time.time()))
self.event_loop.run_event_loop(duration=end_time-time.time())
event_type, args = self.queue.get_nowait()
if event_type in event_types:
return event_type, args
        except queue.Empty:
            pass
        return None, None
def wait_for_all_events(self, timeout, *expected_events):
if _debug:
print('Wait for events @ {}'.format(time.time()))
end_time = time.time() + timeout
expected_events = list(expected_events)
received_events = []
while expected_events:
event_type, args = self.wait_for_event(timeout, *expected_events)
if _debug:
print('MockPlayer: got event {} @ {}'.format(event_type, time.time()))
if event_type is None and time.time() >= end_time:
pytest.fail('Timeout before all events have been received. Still waiting for: '
+ ','.join(expected_events))
elif event_type is not None:
if event_type in expected_events:
expected_events.remove(event_type)
received_events.append((event_type, args))
return received_events
def wait(self, timeout):
end_time = time.time() + timeout
while time.time() < end_time:
duration = max(.01, end_time-time.time())
self.event_loop.run_event_loop(duration=duration)
#assert time.time() - end_time < .1
@pytest.fixture
def player(event_loop):
return MockPlayer(event_loop)
class SilentTestSource(Silence):
def __init__(self, duration, sample_rate=44800, sample_size=16):
super(Silence, self).__init__(duration, sample_rate, sample_size)
self.bytes_read = 0
def get_audio_data(self, nbytes):
data = super(Silence, self).get_audio_data(nbytes)
if data is not None:
self.bytes_read += data.length
return data
def has_fully_played(self):
return self.bytes_read == self._max_offset
def get_drivers():
drivers = [silent]
ids = ['Silent']
try:
from pyglet.media.drivers import pulse
drivers.append(pulse)
ids.append('PulseAudio')
    except ImportError:
        pass
try:
from pyglet.media.drivers import openal
drivers.append(openal)
ids.append('OpenAL')
    except ImportError:
        pass
try:
from pyglet.media.drivers import directsound
drivers.append(directsound)
ids.append('DirectSound')
    except ImportError:
        pass
return {'params': drivers, 'ids': ids}
@pytest.fixture(**get_drivers())
def driver(request):
if _debug:
print('Create driver @ {}'.format(time.time()))
driver = request.param.create_audio_driver()
assert driver is not None
def fin():
driver.delete()
request.addfinalizer(fin)
return driver
def _create_source_group(*sources):
source_group = SourceGroup(sources[0].audio_format, None)
for source in sources:
source_group.queue(source)
return source_group
def test_create_destroy(driver):
driver.delete()
def test_create_audio_player(driver, player):
source_group = _create_source_group(Silence(1.))
audio_player = driver.create_audio_player(source_group, player)
audio_player.delete()
def test_audio_player_play(driver, player):
source = SilentTestSource(.1)
source_group = _create_source_group(source)
audio_player = driver.create_audio_player(source_group, player)
try:
audio_player.play()
player.wait_for_all_events(1., 'on_eos', 'on_source_group_eos')
assert source.has_fully_played(), 'Source not fully played'
finally:
audio_player.delete()
def test_audio_player_play_multiple(driver, player):
sources = (SilentTestSource(.1), SilentTestSource(.1))
source_group = _create_source_group(*sources)
audio_player = driver.create_audio_player(source_group, player)
try:
audio_player.play()
player.wait_for_all_events(1., 'on_eos', 'on_eos', 'on_source_group_eos')
for source in sources:
assert source.has_fully_played(), 'Source not fully played'
finally:
audio_player.delete()
def test_audio_player_add_to_paused_group(driver, player):
"""This is current behaviour when adding a sound of the same format as the previous to a
player paused due to end of stream for previous sound."""
source = SilentTestSource(.1)
source_group = _create_source_group(source)
if _debug:
print('Create player @ {}'.format(time.time()))
audio_player = driver.create_audio_player(source_group, player)
try:
audio_player.play()
player.wait_for_all_events(1., 'on_eos', 'on_source_group_eos')
source2 = SilentTestSource(.1)
source_group.queue(source2)
audio_player.play()
player.wait_for_all_events(1., 'on_eos', 'on_source_group_eos')
assert source2.has_fully_played(), 'Source not fully played'
finally:
audio_player.delete()
def test_audio_player_delete_driver_with_players(driver, player):
"""Delete a driver with active players. Should not cause problems."""
source = SilentTestSource(10.)
source_group = _create_source_group(source)
audio_player = driver.create_audio_player(source_group, player)
audio_player.play()
def test_audio_player_clear(driver, player):
"""Test clearing all buffered data."""
source = SilentTestSource(10.)
source_group = _create_source_group(source)
audio_player = driver.create_audio_player(source_group, player)
try:
audio_player.play()
player.wait(.5)
assert 0. < audio_player.get_time() < 5.
audio_player.stop()
source.seek(5.)
audio_player.clear()
audio_player.play()
player.wait(.3)
assert 5. <= audio_player.get_time() < 10.
finally:
audio_player.delete()
def test_audio_player_time(driver, player):
"""Test retrieving current timestamp from player."""
source = SilentTestSource(10.)
source_group = _create_source_group(source)
audio_player = driver.create_audio_player(source_group, player)
try:
audio_player.play()
last_time = audio_player.get_time()
# Needs to run until at least the initial buffer is processed. Ideal time is 1 sec, so run
# more than 1 sec.
for _ in range(15):
player.wait(.1)
assert last_time < audio_player.get_time()
last_time = audio_player.get_time()
if _debug:
print('='*80)
print('Time:', last_time)
print('Bytes read:', source.bytes_read)
print('='*80)
finally:
audio_player.delete()
|
from django.db import models
from json import JSONEncoder
from django.core.serializers.json import DjangoJSONEncoder
import datetime
# Create your models here.
class Encoder(DjangoJSONEncoder):
    def default(self, value):
        if isinstance(value, datetime.date):
            return DjangoJSONEncoder.default(self, value)
        return super().default(value)
class Pilot(models.Model):
firstname = models.CharField(max_length=60)
lastname = models.CharField(max_length=60)
starting_date = models.DateField()
class Airplane(models.Model):
modelname = models.CharField(max_length=60)
builddate = models.DateField()
class Airport(models.Model):
name = models.CharField(max_length=60)
code = models.CharField(max_length=60)
city = models.CharField(max_length=60)
class Flight(models.Model):
pilot = models.ForeignKey(Pilot, on_delete=models.CASCADE)
airplane = models.ForeignKey(Airplane, on_delete=models.CASCADE)
departure_airport = models.ForeignKey(Airport, on_delete=models.CASCADE, related_name='+')
arrival_airport = models.ForeignKey(Airport, on_delete=models.CASCADE, related_name='+')
departure_time = models.DateTimeField()
arrival_time = models.DateTimeField()
|
from rply.token import BaseBox
from Main.Errors import error, errors
import sys
class BinaryOp(BaseBox):
def __init__(self, left, right):
self.left = left
self.right = right
if self.right.kind == "string" or self.left.kind == "string":
self.kind = "string"
else:
self.kind = self.left.kind
class Sum(BinaryOp):
def eval(self):
try:
return self.left.eval() + self.right.eval()
        except Exception:
try:
return self.left.sum(self.right)
            except Exception:
error(errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Addition",
"values": [self.left.eval(), self.right.eval()],
"types": [self.left.kind.tostr(), self.right.kind.tostr()]
})
sys.exit(1)
class Sub(BinaryOp):
def eval(self):
try:
return self.left.eval() - self.right.eval()
        except Exception:
try:
return self.left.sub(self.right)
            except Exception:
error(errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Substraction",
"values": [self.left.eval(), self.right.eval()],
"types": [self.left.kind.tostr(), self.right.kind.tostr()]
})
sys.exit(1)
class Mul(BinaryOp):
def eval(self):
try:
return self.left.eval() * self.right.eval()
        except Exception:
try:
return self.left.mul(self.right)
            except Exception:
error(errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Multiplication",
"values": [self.left.eval(), self.right.eval()],
"types": [self.left.kind.tostr(), self.right.kind.tostr()]
})
sys.exit(1)
class Div(BinaryOp):
def eval(self):
try:
return self.left.eval() / self.right.eval()
        except Exception:
try:
return self.left.div(self.right)
            except Exception:
error(errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Division",
"values": [self.left.eval(), self.right.eval()],
"types": [self.left.kind.tostr(), self.right.kind.tostr()]
})
sys.exit(1)
class DivEu(BinaryOp):
def eval(self):
try:
return self.left.eval() // self.right.eval()
        except Exception:
try:
return self.left.diveu(self.right)
            except Exception:
error(errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Euclidean Division",
"values": [self.left.eval(), self.right.eval()],
"types": [self.left.kind.tostr(), self.right.kind.tostr()]
})
sys.exit(1)
class Pow(BinaryOp):
def eval(self):
try:
return self.left.eval() ** self.right.eval()
        except Exception:
try:
return self.left.pow(self.right)
            except Exception:
error(errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Power",
"values": [self.left.eval(), self.right.eval()],
"types": [self.left.kind.tostr(), self.right.kind.tostr()]
})
sys.exit(1)
class Mod(BinaryOp):
def eval(self):
try:
return self.left.eval() % self.right.eval()
        except Exception:
try:
return self.left.mod(self.right)
            except Exception:
error(errors.IMPOSSIBLEOPERATION, "", {
"type": "values, types, operationtype",
"operationtype": "Modulo",
"values": [self.left.eval(), self.right.eval()],
"types": [self.left.kind.tostr(), self.right.kind.tostr()]
})
sys.exit(1)
|
import csv
import web
import json
import datetime
import energyServer
urls = ("/vehicleData", "vehicles",
# "/subwayTotal", "subwayTotal",
# "/subwayChanges", "subwayChanges",
"/", "check")
class check:
def GET(self):
return "200 OK"
def getTime():
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
now = datetime.datetime.now()
hour = now.hour
minute = now.minute
return (hour, minute)
class vehicles:
def GET(self):
hour, minute = getTime()
B = energyServer.S.vehicleChangesList
jsonDump = json.dumps(B)
#energyServer.S.clearList()
return jsonDump
# class subwayTotal:
# def GET(self):
# B = energyServer.S.totalChangesList
# jsonDump = json.dumps(B)
# return jsonDump
# class subwayChanges:
# def GET(self):
# data = web.input()
# BBL = data.BBL
# populationDifference = energyServer.S.totalChanges[BBL]
# return json.dumps(populationDifference)
vehicleData = web.application(urls, locals())
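# To serve the endpoints above with web.py's built-in server (a sketch,
# assuming this module is run directly):
#
#     if __name__ == "__main__":
#         vehicleData.run()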
|
class TermsOfDeliveryService(object):
"""
:class:`fortnox.TermsOfDeliveryService` is used by :class:`fortnox.Client` to make
actions related to TermsOfDelivery resource.
Normally you won't instantiate this class directly.
"""
"""
Allowed attributes for TermsOfDelivery to send to Fortnox backend servers.
"""
OPTS_KEYS_TO_PERSIST = ['Code', 'Description']
SERVICE = "TermsOfDelivery"
def __init__(self, http_client):
"""
:param :class:`fortnox.HttpClient` http_client: Pre configured high-level http client.
"""
self.__http_client = http_client
@property
def http_client(self):
return self.__http_client
def list(self, **params):
"""
Retrieve all TermsOfDelivery
Returns all TermsOfDelivery available to the Company, according to the parameters provided
:calls: ``get /termsofdeliveries``
:param dict params: (optional) Search options.
        :return: List of dictionaries that support attribute-style access, which represent a collection of TermsOfDelivery.
:rtype: list
"""
_, _, terms_of_deliveries = self.http_client.get("/termsofdeliveries", params=params)
return terms_of_deliveries
def retrieve(self, code):
"""
Retrieve a single TermsOfDelivery
Returns a single TermsOfDelivery according to the unique TermsOfDelivery ID provided
If the specified TermsOfDelivery does not exist, this query returns an error
:calls: ``get /termsofdeliveries/{code}``
        :param code: Unique identifier (code) of a TermsOfDelivery.
        :return: Dictionary that supports attribute-style access and represents a TermsOfDelivery resource.
:rtype: dict
"""
_, _, terms_of_delivery = self.http_client.get("/termsofdeliveries/{code}".format(code=code))
return terms_of_delivery
def create(self, *args, **kwargs):
"""
Create a TermsOfDelivery
Creates a new TermsOfDelivery
**Notice** the TermsOfDelivery's name **must** be unique within the scope of the resource_type
:calls: ``post /termsofdeliveries``
:param tuple *args: (optional) Single object representing TermsOfDelivery resource.
:param dict **kwargs: (optional) terms_of_delivery attributes.
        :return: Dictionary that supports attribute-style access and represents the newly created TermsOfDelivery resource.
:rtype: dict
"""
if not args and not kwargs:
raise Exception('attributes for TermsOfDelivery are missing')
attributes = args[0] if args else kwargs
attributes = dict((k, v) for k, v in attributes.items() if k in self.OPTS_KEYS_TO_PERSIST)
attributes.update({'service': self.SERVICE})
_, _, terms_of_delivery = self.http_client.post("/termsofdeliveries", body=attributes)
return terms_of_delivery
def update(self, code, *args, **kwargs):
"""
Update a TermsOfDelivery
Updates a TermsOfDelivery's information
If the specified TermsOfDelivery does not exist, this query will return an error
**Notice** if you want to update a TermsOfDelivery, you **must** make sure the TermsOfDelivery's name is unique within the scope of the specified resource
:calls: ``put /termsofdeliveries/{code}``
        :param code: Unique identifier (code) of a TermsOfDelivery.
:param tuple *args: (optional) Single object representing TermsOfDelivery resource which attributes should be updated.
:param dict **kwargs: (optional) TermsOfDelivery attributes to update.
        :return: Dictionary that supports attribute-style access and represents the updated TermsOfDelivery resource.
:rtype: dict
"""
if not args and not kwargs:
raise Exception('attributes for TermsOfDelivery are missing')
attributes = args[0] if args else kwargs
attributes = dict((k, v) for k, v in attributes.items())
attributes.update({'service': self.SERVICE})
_, _, terms_of_delivery = self.http_client.put("/termsofdeliveries/{code}".format(code=code), body=attributes)
return terms_of_delivery
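# Usage sketch (illustrative; assumes an already configured fortnox HTTP
# client -- the code/description values are placeholders):
#
#     service = TermsOfDeliveryService(http_client)
#     service.create(Code='FOB', Description='Free on board')
#     print(service.retrieve('FOB'))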
|
import torch
from torch import nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms, models
from collections import OrderedDict
from torch import optim
from torch.autograd import Variable
class PreTrainedCNN:
def __init__(self, arch, gpu, hidden_units, pretrained):
self.model = getattr(models, arch)(pretrained=pretrained)
self.gpu = gpu
self.arch = arch
self.hidden_units = hidden_units
for param in self.model.parameters():
param.requires_grad = False
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(25088, hidden_units)),
('relu', nn.ReLU()),
('fc2', nn.Linear(hidden_units, 500)),
('relu', nn.ReLU()),
('fc3', nn.Linear(500, 102)),
('output', nn.LogSoftmax(dim=1))
]))
self.model.classifier = classifier
self.criterion = nn.NLLLoss()
self.loading_shapes = ['|', '/', '-', '\\']
self.loading_idx = 0
        self.loading_shapes_len = 4  # cache the length to avoid a len() call per update
if gpu:
self.model.cuda()
def save(self, save_dir, class_to_idx):
to_serialize = {
"arch": self.arch,
"hidden_units": self.hidden_units,
"state_dict": self.model.state_dict(),
"class_to_idx": class_to_idx
}
torch.save(to_serialize, save_dir + '/checkpoint.pth')
def train(self, loader, lr, epochs):
steps = 0
running_loss = 0
print_every = 100
self.optimizer = optim.Adam(self.model.classifier.parameters(), lr)
print("Training started")
for e in range(epochs):
self.model.train()
for ii, (inputs, labels) in enumerate(loader):
self.loading()
steps += 1
self.optimizer.zero_grad()
inputs, labels = Variable(inputs), Variable(labels)
if self.gpu:
inputs, labels = inputs.cuda(), labels.cuda()
outputs = self.model.forward(inputs)
loss = self.criterion(outputs, labels)
loss.backward()
self.optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
print("Epoch: {}/{}... ".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/print_every))
running_loss = 0
        print("Training complete")
def validate(self, loader):
self.model.eval()
accuracy = 0
test_loss = 0
steps = 0
print("Validating model")
for ii, (inputs, labels) in enumerate(loader):
self.loading()
            steps += 1
inputs, labels = Variable(inputs), Variable(labels)
if self.gpu:
inputs, labels = inputs.cuda(), labels.cuda()
output = self.model.forward(inputs)
test_loss += self.criterion(output, labels).data.item()
ps = torch.exp(output).data
equality = (labels.data == ps.max(1)[1])
accuracy += equality.type_as(torch.FloatTensor()).mean()
        print("Test Loss: {:.3f}.. ".format(test_loss / len(loader)),
              "Test Accuracy: {:.3f}".format(accuracy / len(loader)))
def loading(self, index=None, tot=None):
perc = ""
        if index is not None and tot is not None:
            n = round(index / tot * 100)
            perc = ' {0:02d}%'.format(n)
print("\r" + self.loading_shapes[self.loading_idx % self.loading_shapes_len] + perc, end = '\r')
self.loading_idx += 1
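# Usage sketch (illustrative; assumes a VGG-style torchvision arch whose
# flattened feature size is 25088, e.g. 'vgg16', plus DataLoaders named
# train_loader/valid_loader and a dataset exposing class_to_idx):
#
#     net = PreTrainedCNN(arch='vgg16', gpu=torch.cuda.is_available(),
#                         hidden_units=4096, pretrained=True)
#     net.train(train_loader, lr=0.001, epochs=3)
#     net.validate(valid_loader)
#     net.save('checkpoints', train_dataset.class_to_idx)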
|
"""
Taylor Polynomial Approximations of
1. SiLU
2. ReLU
https://en.wikipedia.org/wiki/Taylor_series#Definition
"""
import torch
import torch.nn as nn
class SiLUTaylorApprox(nn.Module):
def __init__(self, order=2):
super(SiLUTaylorApprox, self).__init__()
self.coeffs = torch.tensor([0.0, 1/2., 1/4., 0.0, -1/48, 0.0, 1/480, 0.0, -17/80640., 0.0, 31/1451520])
self.order = order
def forward(self, x):
out = 0
for i in range(self.order+1):
out = out + self.coeffs[i] * x**i
return out
class ReLUTaylorApprox(nn.Module):
def __init__(self, order=2):
super(ReLUTaylorApprox, self).__init__()
self.coeffs = torch.tensor([0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.order = order
def forward(self, x):
out = 0
for i in range(self.order+1):
out = out + self.coeffs[i] * x**i
return out
if __name__ == "__main__":
s = SiLUTaylorApprox()
r = ReLUTaylorApprox()
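    # Quick sanity check (added sketch): compare the default order-2 expansions
    # to the exact activations near zero. Note ReLU is not differentiable at 0,
    # so its coefficient table is a smooth surrogate rather than a true Taylor
    # series.
    x = torch.linspace(-1.0, 1.0, 5)
    print('SiLU approx:', s(x))
    print('SiLU exact: ', nn.functional.silu(x))
    print('ReLU approx:', r(x))
    print('ReLU exact: ', torch.relu(x))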
|
import json
from datetime import datetime
from datetime import date
from powerbi.enums import DataSourceType
from typing import Union
from enum import Enum
# https://docs.microsoft.com/en-us/dotnet/framework/data/adonet/entity-data-model-primitive-data-types
# https://docs.microsoft.com/en-us/power-bi/developer/automation/api-dataset-properties#data-type-restrictions
# https://docs.microsoft.com/en-us/analysis-services/multidimensional-models/mdx/mdx-cell-properties-format-string-contents?view=asallproducts-allversions
class PowerBiEncoder(json.JSONEncoder):
def __init__(self, *args, **kwargs):
json.JSONEncoder.__init__(self, *args, **kwargs)
    def default(self, obj):
        if isinstance(obj, Columns):
            return obj.columns
        elif isinstance(obj, Measures):
            return obj.measures
        elif isinstance(obj, Column):
            return obj.column
        elif isinstance(obj, Measure):
            return obj.measure
        elif isinstance(obj, Dataset):
            return obj.push_dataset
        elif isinstance(obj, Tables):
            return obj.tables
        elif isinstance(obj, Table):
            return obj.table
        elif isinstance(obj, Relationships):
            return obj.relationships
        elif isinstance(obj, Relationship):
            return obj.relationship
        elif isinstance(obj, DataSources):
            return obj.datasources
        elif isinstance(obj, DataSource):
            return obj.data_source
        return json.JSONEncoder.default(self, obj)
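# Usage sketch: the encoder lets the builder objects in this module be
# serialized directly (Table/Column etc. are defined below; `table` here is a
# hypothetical instance):
#
#     payload = json.dumps(table, cls=PowerBiEncoder)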
class Column():
"""
### Overview
----
Represents a column inside of a `PowerBiTable`
object.
"""
def __init__(self, name: str, data_type: Union[str, Enum]) -> None:
"""Initializes a new `Column` object.
### Parameters
----
name : str
The column name.
data_type : Union[str, Enum]
One of the allowed data types.
"""
if isinstance(data_type, Enum):
data_type = data_type.value
self.column = {
'name': name,
'dataType': data_type,
'dataCategory': '',
'formatString': '',
'isHidden': False,
'sortByColumn': None,
'summarizeBy': None
}
@property
def name(self) -> str:
"""The column name.
### Returns
----
str :
The column name.
"""
return self.column.get('name', None)
@name.setter
def name(self, name: str) -> None:
"""Sets the column name.
### Parameters
----
name : str
The name you want the column to be.
"""
self.column.update({'name': name})
@property
def data_type(self) -> str:
"""Returns the data type of column.
### Returns
----
str :
One of the allowed data types.
"""
return self.column.get('dataType', None)
@data_type.setter
def data_type(self, data_type: Union[str, Enum]) -> None:
"""Sets the column data type.
### Parameters
----
data_type : Union[str, Enum]
One of the allowed data types.
"""
if isinstance(data_type, Enum):
data_type = data_type.value
self.column.update({'dataType': data_type})
@property
def format_string(self) -> str:
"""Returns the format string of the column.
### Returns
----
str :
The format of the column as specified in
FORMAT_STRING.
"""
return self.column.get('formatString', None)
@format_string.setter
def format_string(self, format_string: str) -> None:
"""Sets the optional format of the column.
### Parameters
----
format_string : str
The format of the column as specified in FORMAT_STRING.
"""
self.column.update({'formatString': format_string})
@property
def data_category(self) -> str:
"""Returns the data category of the column,
if any is specified.
### Returns
----
str :
String value to be used for the data category
which describes the data within this column.
"""
return self.column.get('dataCategory', None)
@data_category.setter
def data_category(self, data_category: str) -> None:
"""Sets the data category of the column.
### Parameters
----
data_category : str
Value to be used for the data category which
describes the data within this column. Some
common values include: `[Address, City, Continent,
Country, Image, ImageUrl, Latitude, Longitude,
Organization, Place, PostalCode, StateOrProvince,
WebUrl]`
"""
self.column.update({'dataCategory': data_category})
@property
def is_hidden(self) -> bool:
"""Returns the property indicating if the column
is hidden from view.
### Returns
----
bool :
If `True` the column is hidden, `False`
otherwise.
"""
return self.column.get('isHidden', None)
@is_hidden.setter
def is_hidden(self, is_hidden: bool) -> None:
"""Sets the `isHidden` property of the column.
### Parameters
----
is_hidden : bool
Property indicating if the column is hidden from view.
Default is `False`.
"""
self.column.update({'isHidden': is_hidden})
@property
def sort_by_column(self) -> str:
"""Returns the property indicating the column
that the table is ordered by.
### Returns
----
str :
String name of a column in the same table to be
used to order the current column.
"""
return self.column.get('sortByColumn', None)
@sort_by_column.setter
def sort_by_column(self, sort_by_column: str) -> None:
"""Sets the `sortByColumn` property of the column.
### Parameters
----
sort_by_column : str
String name of a column in the same table to be
used to order the current column.
"""
self.column.update({'sortByColumn': sort_by_column})
@property
def summarize_by(self) -> str:
"""Returns the property indicating how the column
is aggregated.
### Returns
----
str :
Aggregate Function to use for summarizing this
column.
"""
return self.column.get('summarizeBy', None)
@summarize_by.setter
def summarize_by(self, summarize_by: Union[str, Enum]) -> None:
"""Sets the `summarizeBy` property of the column.
### Parameters
----
summarize_by : Union[str, Enum]
Aggregate Function to use for summarizing this
column.
"""
if isinstance(summarize_by, Enum):
summarize_by = summarize_by.value
self.column.update({'summarizeBy': summarize_by})
def to_dict(self) -> dict:
"""Returns the column properties.
### Returns
----
dict
A dictionary containing each of the column
properties.
"""
return self.column
class Measure():
"""
### Overview
----
    Represents a `Measure` inside of a `PowerBiTable`
object.
"""
def __init__(self, name: str, expression: str) -> None:
"""Initializes a new `Measure` object.
### Parameters
-----
name : str
The measure name.
expression : str
A valid DAX expression.
"""
self.measure = {
'name': name,
'expression': expression,
'formatString': '',
'isHidden': False,
}
@property
def name(self) -> str:
"""The measure name.
### Returns
----
str :
The measure name.
"""
return self.measure.get('name', None)
@name.setter
def name(self, name: str) -> None:
"""Sets the measure name.
### Parameters
----
name : str
The name you want the measure to be.
"""
self.measure.update({'name': name})
@property
def expression(self) -> str:
"""Returns the measure DAX expression.
### Returns
----
str :
A valid DAX expression.
"""
        return self.measure.get('expression', None)
@expression.setter
def expression(self, expression: str) -> None:
"""Sets the measure DAX expression.
### Parameters
----
expression : str
A valid DAX expression.
"""
self.measure.update({'expression': expression})
@property
def format_string(self) -> str:
"""Returns the format string of the measure.
### Returns
----
str :
The format of the measure as specified in
FORMAT_STRING.
"""
return self.measure.get('formatString', None)
@format_string.setter
def format_string(self, format_string: str) -> None:
"""Sets the optional format of the measure.
### Parameters
----
format_string : str
The format of the measure as specified in
FORMAT_STRING.
"""
self.measure.update({'formatString': format_string})
@property
def is_hidden(self) -> bool:
"""Returns the property indicating if the measure
is hidden from client tools.
### Returns
----
bool :
If `True` the measure is hidden, `False`
otherwise.
"""
return self.measure.get('isHidden', None)
@is_hidden.setter
def is_hidden(self, is_hidden: bool) -> None:
"""Sets the `isHidden` property of the column.
### Parameters
----
is_hidden : bool
Property indicating if the measure is hidden from
client tools. Default is `False`.
"""
self.measure.update({'isHidden': is_hidden})
def to_dict(self) -> dict:
"""Returns the measure properties.
### Returns
----
dict
A dictionary containing each of the measure
properties.
"""
return self.measure
def to_json(self) -> str:
"""Returns the measure properties as
a JSON formatted string.
### Returns
----
str
A string that contains the measure
properties.
"""
return json.dumps(obj=self.measure, indent=4)
class Relationship():
"""
### Overview
----
Represents a `Relationship` inside of a `PowerBiModel`
object.
"""
def __init__(self, name: str, from_table: str, to_table: str, from_column: str, to_column: str) -> None:
"""Initializes a new `Relationship` object.
### Parameters
-----
name : str
The measure name.
expression : str
A valid DAX expression.
"""
self.relationship = {
'name': name,
'fromColumn': from_column,
'toColumn': to_column,
'fromTable': from_table,
'toTable': to_table,
'crossFilteringBehavior': 'OneDirection'
}
@property
def name(self) -> str:
"""The relationship name.
### Returns
----
str :
The relationship name.
"""
return self.relationship.get('name', None)
@name.setter
def name(self, name: str) -> None:
"""Sets the relationship name.
### Parameters
----
name : str
The name you want the relationship
to be.
"""
self.relationship.update({'name': name})
@property
def from_table(self) -> str:
"""Returns the `fromTable` property.
### Returns
----
str :
Name of the foreign key table.
"""
return self.relationship.get('fromTable', None)
@from_table.setter
def from_table(self, from_table: str) -> None:
"""Sets the `fromTable` propery.
### Parameters
----
from_table : str
Name of the foreign key table.
"""
self.relationship.update({'fromTable': from_table})
@property
def to_table(self) -> str:
"""Returns the `toTable` property.
### Returns
----
str :
Name of the primary key table.
"""
return self.relationship.get('toTable', None)
@to_table.setter
def to_table(self, to_table: str) -> None:
"""Sets the `toTable` propery.
### Parameters
----
to_table : str
Name of the primary key table.
"""
self.relationship.update({'toTable': to_table})
@property
def from_column(self) -> str:
"""Returns the `toColumn` property.
### Returns
----
str :
Name of the foreign key column.
"""
return self.relationship.get('fromColumn', None)
@from_column.setter
def from_column(self, from_column: str) -> None:
"""Sets the `fromColumn` propery.
### Parameters
----
from_column : str
Name of the foreign key column.
"""
self.relationship.update({'fromColumn': from_column})
@property
def to_column(self) -> str:
"""Returns the `toColumn` property.
### Returns
----
str :
Name of the primary key column.
"""
return self.relationship.get('toColumn', None)
@to_column.setter
def to_column(self, to_column: str) -> None:
"""Sets the `toColumn` propery.
### Parameters
----
to_column : str
Name of the primary key column.
"""
self.relationship.update({'toColumn': to_column})
@property
def cross_filtering_behavior(self) -> str:
"""Returns the `crossFilteringBehavior` property.
### Returns
----
str :
The filter direction of the relationship: [`OneDirection`,
`BothDirections`, `Automatic`].
"""
return self.relationship.get('crossFilteringBehavior', None)
@cross_filtering_behavior.setter
def cross_filtering_behavior(self, cross_filtering_behavior: str = 'OneDirection') -> None:
"""Sets the `crossFilteringBehavior` propery.
### Parameters
----
cross_filtering_behavior : str (optional, Default='OneDirection')
The filter direction of the relationship: [`OneDirection`,
`BothDirections`, `Automatic`].
"""
self.relationship.update(
{'crossFilteringBehavior': cross_filtering_behavior})
def to_dict(self) -> dict:
"""Returns the relationship properties.
### Returns
----
dict
A dictionary containing each of the relationship
properties.
"""
return self.relationship
class Columns():
"""
### Overview
----
Represents a collection of `Column` objects
that are found inside of a `PowerBiTable` object.
"""
def __init__(self) -> None:
self.columns = []
def __setitem__(self, index: int, data: Column) -> None:
self.columns.append(data)
def __getitem__(self, index: int) -> Column:
return self.columns[index]
def __delitem__(self, index: int) -> None:
del self.columns[index]
def __len__(self) -> int:
return len(self.columns)
def __iter__(self):
return iter(self.columns)
class Measures():
"""
### Overview
----
Represents a collection of `Measure` objects
that are found inside of a `PowerBiTable` object.
"""
def __init__(self) -> None:
self.measures = []
    def __setitem__(self, index: int, data: Measure) -> None:
        self.measures.append(data)
    def __getitem__(self, index: int) -> Measure:
        return self.measures[index]
def __delitem__(self, index: int) -> None:
del self.measures[index]
def __len__(self) -> int:
return len(self.measures)
def __iter__(self):
return iter(self.measures)
class Relationships():
"""
### Overview
----
Represents a collection of `Relationship` objects
that are found inside of a `PowerBiDataset` object.
"""
def __init__(self) -> None:
self.relationships = []
    def __setitem__(self, index: int, data: Relationship) -> None:
        self.relationships.append(data)
    def __getitem__(self, index: int) -> Relationship:
        return self.relationships[index]
def __delitem__(self, index: int) -> None:
del self.relationships[index]
def __len__(self) -> int:
return len(self.relationships)
def __iter__(self):
return iter(self.relationships)
class Tables():
"""
### Overview
----
Represents a collection of `Table` objects
that are found inside of a `PowerBiDataset`
object.
"""
def __init__(self) -> None:
self.tables = []
    def __setitem__(self, index: int, data: 'Table') -> None:
        self.tables.append(data)
    def __getitem__(self, index: int) -> 'Table':
        return self.tables[index]
def __delitem__(self, index: int) -> None:
del self.tables[index]
def __len__(self) -> int:
return len(self.tables)
def __iter__(self):
return iter(self.tables)
class DataSources():
"""
### Overview
----
Represents a collection of `Datasource` objects
that are found inside of a `PowerBiDataset`
object.
"""
def __init__(self) -> None:
self.datasources = []
def __setitem__(self, index: int, data: object) -> None:
self.datasources.append(data)
def __getitem__(self, index: int) -> object:
return self.datasources[index]
def __delitem__(self, index: int) -> None:
del self.datasources[index]
def __len__(self) -> int:
return len(self.datasources)
def __iter__(self):
return iter(self.datasources)
class Table():
"""
### Overview
----
Represents a Table inside of a PowerBi
dataset.
"""
def __init__(self, name: str) -> None:
"""Initializes the `Table` object.
### Parameters
----
name : str
User defined name of the table.
It is also used as the identifier
of the table.
"""
self._columns = Columns()
self._measures = Measures()
self.table = {
'name': name,
'columns': self._columns,
'measures': self._measures,
'rows': []
}
def __repr__(self) -> str:
"""Represents the string representation of the
table object.
### Returns
----
str
A JSON formatted string.
"""
return json.dumps(obj=self.table, indent=4, cls=PowerBiEncoder)
def __getitem__(self, index: int) -> object:
return self.table[index]
def __delitem__(self, index: int) -> None:
del self.table[index]
def __iter__(self):
return iter(self.table)
@property
def name(self) -> str:
"""The table name.
### Returns
----
str :
User defined name of the table.
It is also used as the identifier
of the table.
"""
return self.table.get('name', None)
@name.setter
def name(self, name: str) -> None:
"""Sets the table name.
### Parameters
----
name : str
User defined name of the table.
It is also used as the identifier
of the table.
"""
self.table.update({'name': name})
@property
    def columns(self) -> Columns:
        """Gets the `columns` property.
        ### Returns
        ----
        Columns :
            Collection of `Column` objects.
        """
return self._columns
def add_column(self, column: Column) -> None:
"""Adds a new `Column` to the `Columns`
collection.
### Parameters
----
column : Column
A `Column` object with the properties
set.
"""
self._columns[len(self._columns)] = column
def get_column(self, index: int) -> Column:
"""Gets a `Column` from the `Columns`
collection.
### Parameters
----
index : int
The index of the column you want
to return from the collection.
### Returns
----
Column :
A `PowerBiColumn` object.
"""
return self._columns[index]
def del_column(self, index: int) -> None:
"""Deletes a `Column` to the `Columns`
collection.
### Parameters
----
index : int
The index of the column you want
to delete from the collection.
"""
del self._columns[index]
@property
    def measures(self) -> Measures:
        """Gets the `measures` property.
        ### Returns
        ----
        Measures :
            Collection of `Measure` objects.
        """
return self._measures
    def add_measure(self, measure: Measure) -> None:
        """Adds a `Measure` to the `measures` collection.
        ### Parameters
        ----
        measure : Measure
            A `Measure` object with the properties
            set.
        """
        self._measures[len(self._measures)] = measure
    def del_measure(self, index: int = 0) -> None:
        """Deletes a `Measure` in the `measures` collection.
        ### Parameters
        ----
        index : int (optional, Default=0)
            The index of the `Measure` object
            that you wish to delete.
        """
        del self._measures[index]
    def get_measure(self, index: int = 0) -> Measure:
        """Gets a `Measure` in the `measures` collection by
        indexing it.
        ### Parameters
        ----
        index : int (optional, Default=0)
            The index of the `Measure` object
            that you wish to get.
        """
        return self._measures[index]
@property
    def rows(self) -> list:
        """Gets the `rows` property.
        ### Returns
        ----
        list :
            Collection of row objects.
        """
return self.table.get('rows', [])
def add_row(self, row: Union[list, dict]) -> None:
"""Adds a `Row` object to the `rows` collection.
### Parameters
----
        row : Union[list, dict]
            A row `dict` with the properties set,
            or a list of such rows.
"""
rows = self.table.get('rows', [])
if isinstance(row, dict):
rows.append(row)
elif isinstance(row, list):
rows.extend(row)
def del_row(self, index: int = 0) -> None:
"""Deletes a `Row` in the `rows` collection.
### Parameters
----
        index : int (optional, Default=0)
The index of the `Row` object
that you wish to delete.
"""
rows = self.table.get('rows', [])
rows.pop(index)
def get_row(self, index: int = 0) -> dict:
"""Gets a `Row` in the `rows` collection by
indexing it.
### Parameters
----
        index : int (optional, Default=0)
The index of the `Row` object
that you wish to get.
"""
return self.table.get('rows', [])[index]
def as_dict(self) -> dict:
return self.table
class Dataset():
"""
### Overview
----
Represents a `PowerBiDataset` object with
different tables, relationships, and data
sources.
"""
    def __init__(self, name: str, tables: Tables = None) -> None:
"""Initializes the `Dataset` object.
### Parameters
----
name : str
User defined name of the dataset.
It is also used as the identifier
of the dataset.
        tables : Tables (optional, Default=None)
A collection of `Table` objects
you want to be part of the dataset.
"""
        if tables is None or len(tables) == 0:
self._tables = Tables()
else:
self._tables = tables
self._relationships = Relationships()
self._data_sources = DataSources()
self.push_dataset = {
'name': name,
'tables': self._tables,
'datasources': self._data_sources,
'defaultMode': '',
'relationships': self._relationships
}
def __repr__(self) -> str:
"""Represents the string representation of the
table object.
### Returns
----
str
A JSON formatted string.
"""
return json.dumps(obj=self.push_dataset, indent=4, cls=PowerBiEncoder)
def __getitem__(self, index: int) -> object:
return self.push_dataset[index]
def __delitem__(self, index: int) -> None:
del self.push_dataset[index]
@property
def name(self) -> str:
"""The dataset name.
### Returns
----
str :
The dataset name.
"""
return self.push_dataset.get('name', None)
@name.setter
def name(self, name: str) -> None:
"""Sets the dataset name.
### Parameters
----
name : str
The name you want the dataset to be.
"""
self.push_dataset.update({'name': name})
@property
def default_mode(self) -> str:
"""Gets the `defaultMode` property.
### Returns
----
str :
The dataset mode or type.
"""
return self.push_dataset.get('defaultMode', None)
@default_mode.setter
def default_mode(self, default_mode: str) -> None:
"""Sets the `defaultMode` property.
### Parameters
----
default_mode : str
The dataset mode or type.
"""
self.push_dataset.update({'defaultMode': default_mode})
@property
def tables(self) -> Tables:
"""Returns the `Tables` collection from
the dataset.
### Returns
----
Tables
The dataset's `Tables` collection.
"""
return self._tables
def add_table(self, table: Table) -> None:
"""Adds a new `Table` object to the `Tables`
collection.
### Parameters
----
table : Table
A table object with the properties set.
"""
self._tables[len(self._tables)] = table
def del_table(self, index: int) -> None:
"""Deletes a `Table` to the `Tables`
collection.
### Parameters
----
index : int
The index of the table you want
to delete from the collection.
"""
del self._tables[index]
def get_table(self, index: int) -> Table:
"""Gets a `Table` to the `Tables`
collection.
### Parameters
----
index : int
The index of the table you want
to get from the collection.
"""
return self._tables[index]
@property
def relationships(self) -> Relationships:
"""Returns the `Relationships` collection from
the dataset.
### Returns
----
Relationships
The dataset's `Relationships` collection.
"""
return self._relationships
def add_relationship(self, relationship: Relationship) -> None:
"""Adds a `Relationship` to the `Relationships`
collection.
### Parameters
----
relationship : Relationship
The relationship object you want to add
to the collection.
"""
self._relationships[len(self._relationships)] = relationship
def del_relationship(self, index: int) -> None:
"""Deletes a `Relationship` to the `Relationships`
collection.
### Parameters
----
index : int
The index of the relationship you want
to delete from the collection.
"""
del self._relationships[index]
def get_relationship(self, index: int) -> Relationship:
"""Gets a `Relationship` to the `Relationships`
collection.
### Parameters
----
index : int
The index of the relationship you want
to get from the collection.
"""
return self._relationships[index]
@property
def data_sources(self) -> DataSources:
"""Returns the `DataSources` collection from
the dataset.
### Returns
----
Datasources
The dataset's `DataSources` collection.
"""
return self._data_sources
def add_data_source(self, data_source: object) -> None:
"""Adds a `DataSource` to the `DataSources`
collection.
### Parameters
----
data_source : DataSource
The data source object you want to add
to the collection.
"""
self._data_sources[len(self._data_sources)] = data_source
def del_data_source(self, index: int) -> None:
"""Deletes a `DataSource` to the `DataSources`
collection.
### Parameters
----
index : int
The index of the data source you want
to delete from the collection.
"""
del self._data_sources[index]
def get_data_source(self, index: int) -> object:
"""Adds a `DataSource` to the `DataSources`
collection.
### Parameters
----
index : int
The index of the data source you want
to add to the collection.
"""
return self._data_sources[index]
def _prep_for_post(self) -> dict:
"""Preps the `Dataset` object so it's
valid JSON for the PostDataset endpoint.
### Returns
----
dict
A dataset with valid keys.
"""
copy_push_dataset = self.push_dataset.copy()
del copy_push_dataset['datasources']
for table in copy_push_dataset['tables']:
del table['rows']
return copy_push_dataset
class DataSource():
"""
### Overview
----
Represents a `DataSource` object that is part
of a `PowerBiDataset` object.
"""
def __init__(self, data_source_type: Union[str, Enum]) -> None:
"""Initializes the `DataSource` object.
### Parameters
----
data_source_type : Union[str, Enum]
The datasource type, can also be a `DataSourceType`
enum.
"""
if isinstance(data_source_type, Enum):
data_source_type = data_source_type.value
        # Build the backing dict directly; the `data_source_type` property
        # setter writes into this dict, so it must exist before being used.
        self.data_source = {
            'datasourceType': data_source_type,
'connectionDetails': {},
'dataSourceId': '',
'gatewayId': ''
}
@property
def data_source_type(self) -> str:
"""Gets the `dataSourceType` property.
### Returns
----
str :
The `dataSourceType` property.
"""
return self.data_source.get('datasourceType', None)
@data_source_type.setter
def data_source_type(self, data_source_type: str) -> None:
"""Sets the `dataSourceType` property.
### Parameters
----
data_source_type : str
The `dataSourceType` with the properties set.
"""
self.data_source.update({'datasourceType': data_source_type})
@property
    def connection_details(self) -> dict:
        """Gets the `connectionDetails` property.
        ### Returns
        ----
        dict :
            The `connectionDetails` property.
        """
        return self.data_source.get('connectionDetails', None)
    @connection_details.setter
    def connection_details(self, connection_details: dict) -> None:
        """Sets the `connectionDetails` property.
        ### Parameters
        ----
        connection_details : dict
            The `connectionDetails` with the properties set.
        """
"""
self.data_source.update({'connectionDetails': connection_details})
class ConnectionDetails():
def __init__(self) -> None:
pass
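# Illustrative usage (a minimal sketch; the table, column, and row values
# below are example data, and 'String' is one of the allowed data types):
if __name__ == '__main__':
    sales = Table(name='Sales')
    sales.add_column(Column(name='Region', data_type='String'))
    sales.add_row({'Region': 'West'})
    dataset = Dataset(name='SalesDataset')
    dataset.add_table(sales)
    print(dataset)  # serialized to JSON via PowerBiEncoder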
|
# Copyright 2015 NICTA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib import admin
from django.contrib import messages
from dashboard_loader.models import Loader
from dashboard_loader.management.update_data import update
from dashboard_loader.loader_utils import LoaderException
@admin.register(Loader)
class LoaderAdmin(admin.ModelAdmin):
list_display=("app", "refresh_rate", "suspended", "last_locked", "last_run", "last_loaded")
list_editable=("refresh_rate", "suspended")
fields = ('app', 'refresh_rate', 'suspended', 'last_loaded', 'last_run', "last_locked", "locked_by_process", "locked_by_thread", "last_api_access")
list_filter=("suspended",)
readonly_fields=("app", "last_loaded", "last_run", "locked_by_process", "locked_by_thread")
actions = [ 'update_data' ]
def update_data(self, request, queryset):
infos = []
errors = []
for l in queryset:
try:
infos.extend(update(l, verbosity=3, force=True, async=False))
            except LoaderException as e:
errors.append(unicode(e))
for err in errors:
self.message_user(request, err, level=messages.ERROR)
for msg in infos:
self.message_user(request, msg)
update_data.short_description = "Update data for Loader"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Don Welch
#
__version__ = '0.1'
__title__ = 'Device Setup Utility'
__mod__ = 'hp-devicesettings'
__doc__ = "Device settings utility for HPLIP supported printers. (Note: Not all printers require the use of this utility)."
#Std Lib
import sys
import re
import getopt
import time
import operator
import os
# Local
from base.g import *
from base import device, utils, maint, tui, module
from prnt import cups
try:
from importlib import import_module
except ImportError as e:
log.debug(e)
from base.utils import dyn_import_mod as import_module
try:
mod = module.Module(__mod__, __title__, __version__, __doc__, None,
(GUI_MODE,), (UI_TOOLKIT_QT4, UI_TOOLKIT_QT5))
mod.setUsage(module.USAGE_FLAG_DEVICE_ARGS,
see_also_list=['hp-toolbox'])
opts, device_uri, printer_name, mode, ui_toolkit, lang = \
mod.parseStdOpts()
device_uri = mod.getDeviceUri(device_uri, printer_name,
filter={'power-settings': (operator.gt, 0)})
if not device_uri:
sys.exit(1)
log.info("Using device : %s\n" % device_uri)
if not utils.canEnterGUIMode4():
log.error("%s -u/--gui requires Qt4 GUI support. Exiting." % __mod__)
sys.exit(1)
# try:
# from PyQt4.QtGui import QApplication
# from ui4.devicesetupdialog import DeviceSetupDialog
# except ImportError:
# log.error("Unable to load Qt4 support. Is it installed?")
# sys.exit(1)
QApplication, ui_package = utils.import_dialog(ui_toolkit)
ui = import_module(ui_package + ".devicesetupdialog")
app = QApplication(sys.argv)
dlg = ui.DeviceSetupDialog(None, device_uri)
dlg.show()
try:
log.debug("Starting GUI loop...")
app.exec_()
except KeyboardInterrupt:
sys.exit(0)
except KeyboardInterrupt:
log.error("User exit")
log.info("")
log.info("Done.")
|
from ....extensions import ExtensionMixin
from ...flarum.core.forum import Forum
class ForumMixin(Forum):
@property
def markdown_mdarea(self) -> bool:
"""
Whether or not the MDArea is enabled for markdown.
"""
return self.attributes.get("flarum-markdown.mdarea", False)
class MarkdownExtension(ExtensionMixin):
"""
https://packagist.org/packages/flarum/markdown
"""
AUTHOR = 'flarum'
NAME = 'markdown'
@classmethod
def mixin(cls):
super().mixin(Forum, ForumMixin)
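# Illustrative registration (an assumption about the surrounding library's
# API; the forum URL is an example value):
#   user = FlarumUser(forum_url='https://discuss.flarum.org', extensions=[MarkdownExtension])
#   print(user.forum.markdown_mdarea)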
|
from manpy.simulation.applications.MilkPlant.imports import *
from manpy.simulation.imports import ExcelHandler, ExitJobShop
from manpy.simulation.Globals import runSimulation
import time
start = time.time()
# how many liters is one milk pack
milkUnit = 1.0
T1 = MilkTank("T1", "T1", capacity=3200 / float(milkUnit))
T2 = MilkTank("T2", "T2", capacity=2600 / float(milkUnit))
T3 = MilkTank("T3", "T3", capacity=7000 / float(milkUnit))
TBM2 = MilkTank("TBM2", "Tank Before F", capacity=float("inf"))
TAM2 = MilkTank("TAM2", "Tank After F", capacity=float("inf"))
TBM3 = MilkTank("TBM3", "Tank Before W", capacity=float("inf"))
TAM3 = MilkTank("TAM3", "Tank After W", capacity=float("inf"))
Tr1 = MilkTransport("Tr1", "Tr1")
Tr2 = MilkTransport("Tr2", "Tr2")
Tr3 = MilkTransport("Tr3", "Tr3")
Tr4 = MilkTransport("Tr4", "Tr4")
Tr5 = MilkTransport("Tr5", "Tr5")
M2 = MilkProcess("M2", "F")
M3 = MilkProcess("M3", "W")
E = MilkExit("E", "Exit")
route1 = [
{"stationIdsList": ["T1"]},
{
"stationIdsList": ["Tr1"],
"processingTime": {"Fixed": {"mean": 0.034682 * milkUnit}},
},
]
route2 = [
{"stationIdsList": ["T3"]},
{
"stationIdsList": ["Tr3"],
"processingTime": {"Fixed": {"mean": 0.222222 * milkUnit}},
},
]
commonRoute = [
{"stationIdsList": ["T2"]},
{
"stationIdsList": ["Tr2"],
"processingTime": {"Fixed": {"mean": 0.03 * milkUnit}},
"volume": 1000,
},
{"stationIdsList": ["TBM2"]},
{
"stationIdsList": ["M2"],
"processingTime": {"Fixed": {"mean": 180}},
"volume": 1000,
},
{"stationIdsList": ["TAM2"]},
{
"stationIdsList": ["Tr4"],
"processingTime": {"Fixed": {"mean": 0.06 * milkUnit}},
"volume": 1000,
},
{"stationIdsList": ["TBM3"]},
{
"stationIdsList": ["M3"],
"processingTime": {"Fixed": {"mean": 20}},
"volume": 1000,
},
{"stationIdsList": ["E"]},
]
MPList = []
for i in range(int(865 / float(milkUnit))):
MP = MilkPack(
"MT_A" + str(i),
"MT_A" + str(i),
route=route1 + commonRoute,
liters=milkUnit,
fat=3.8 * milkUnit,
productId="Product X",
)
MPList.append(MP)
for i in range(int(135 / float(milkUnit))):
MP = MilkPack(
"MT_B" + str(i),
"MT_B" + str(i),
route=route2 + commonRoute,
liters=milkUnit,
fat=0.1 * milkUnit,
productId="Product X",
)
MPList.append(MP)
runSimulation(
[T1, T2, T3, TBM2, TAM2, TBM3, TAM3, Tr1, Tr2, Tr3, Tr4, Tr5, M2, M3, E] + MPList,
1000,
trace="Yes",
)
ExcelHandler.outputTrace("MilkPlant2")
for productId in E.finishedProductDict:
volume = E.finishedProductDict[productId]["volume"]
totalFat = E.finishedProductDict[productId]["totalFat"]
exitTime = E.finishedProductDict[productId]["exitTime"]
fat = totalFat / float(volume)
    print(
        "from", productId, volume, "liters were produced of",
        fat, "% fat. Product ready at t=", exitTime,
    )
print(("Execution Time=", time.time() - start))
|
# SheetManager Exceptions
class GoogleConnectionException(Exception):
pass
class InitSheetManagerException(Exception):
pass
class AuthSheetManagerException(Exception):
pass
class CreateSheetException(Exception):
pass
class OpenSheetException(Exception):
pass
class WriteTableException(Exception):
pass
class ProtectColumnsException(Exception):
pass
class FetchTableException(Exception):
pass
class CheckinProcessException(Exception):
pass
class TableCheckinException(Exception):
pass
class FatalCheckinException(Exception):
pass
class UpdateStatusException(Exception):
pass
class UpdateRowException(Exception):
pass
class UpdateTableException(Exception):
pass
# Holdings Exceptions
class HoldingsQueryException(Exception):
pass
class BadBibstemException(Exception):
pass
#Tasks Exceptions
class DBCommitException(Exception):
"""Non-recoverable Error with making database commits."""
pass
class DBReadException(Exception):
"""Non-recoverable Error with making database selection."""
pass
class DBClearException(Exception):
"""Non-recoverable Error with clearing a full table (prior to reload)."""
pass
class InvalidTableException(Exception):
pass
class WriteDataToSheetException(Exception):
pass
class TableCheckoutException(Exception):
pass
class ClearTableException(Exception):
pass
#Utils Exceptions
class ReadBibstemException(Exception):
pass
class ReadCompletenessException(Exception):
pass
class ReadCanonicalException(Exception):
pass
class ReadEncodingException(Exception):
pass
class RequestsException(Exception):
pass
class ReadRefsourcesException(Exception):
pass
class ExportBibstemsException(Exception):
pass
class ExportISSNException(Exception):
pass
class BackupFileException(Exception):
pass
|
import qrcode
from urllib.parse import quote
location_in = input("Enter Location: ")
qr = qrcode.QRCode(version=1, box_size=10, border=5)
# URL-encode the location so spaces and special characters survive in the link
qr.add_data('https://www.google.co.in/maps/place/' + quote(location_in))
qr.make(fit=True)
# qrcode's Pillow image factory expects fill_color/back_color keyword arguments
img = qr.make_image(fill_color="black", back_color="white")
img.save("1.png")
|
from django.db import models
from home.models import FeatureContent
# Create your models here.
class ApplContent(FeatureContent):
button_text = models.CharField(blank=True, max_length=50)
load_file = models.FileField(blank=True, null=True)
|
from importlib import reload
import numpy as np
import itertools as it
from tumorstoppy import distances, measures, data, knn
data_points = 979
Data = data.Data
cdr3 = data.CDR3_13
cut_cdr3 = Data(
[
Data(['./data/processed_data/TumorCDR3s_test/TumorCDR3s_test_13.txt',
'./data/processed_data/NonTumorCDR3s_test/NonTumorCDR3s_test_13.txt'],
['tumorous', 'benign'], data_points),
Data(['./data/processed_data/TumorCDR3s_training/TumorCDR3s_training_13.txt',
'./data/processed_data/NonTumorCDR3s_training/NonTumorCDR3s_training_13.txt'],
['tumorous', 'benign'], data_points),
],
['test', 'training'],
)
error_evaluation = measures.error_evaluation
distance = distances.blosum62_distance
#print(error_evaluation(None, cdr3['training'], verbose=True))
weights1 = np.array([8.31503303, 6.3603994 , 2.26271026, -1.51866793, 7.5740573 ,
7.66745514, 4.72895374, 1.29182651, 6.50981513, 3.79679703,
6.12735646, 1.11650591, 1.27719528])
weights2 = np.array([7.21814821, 5.19300206, 3.53396301, 3.07099894, 3.13081365,
3.36293533, 3.70343292, 2.65573896, 3.89459912, 2.69011136,
4.32294453, 5.33310198, 6.140785])
def dist(s1, s2):
return distance(s1, s2, weights2)
results_cancer = []
for seq in cut_cdr3['test','tumorous']:
print('Testing ' + seq)
results_cancer.append(knn.nearest_neighbor(seq, *cut_cdr3['training'], dist))
print('Finished tumorous tests')
results_cancer = np.array(results_cancer)
results_non_cancer = []
for seq in cut_cdr3['test','benign']:
print('Testing ' + seq)
results_non_cancer.append(knn.nearest_neighbor(seq, *cut_cdr3['training'], dist))
results_non_cancer = np.array(results_non_cancer)
print('Finished benign tests')
with open('results.txt', 'w') as FILE:
FILE.write(str(results_cancer))
FILE.write('\n\n')
FILE.write(str(results_non_cancer))
|
import math
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd.profiler as profiler
from pykeops.torch import LazyTensor
from geometry_processing import (
curvatures,
mesh_normals_areas,
tangent_vectors,
atoms_to_points_normals,
)
from helper import soft_dimension, diagonal_ranges
from benchmark_models import DGCNN_seg, PointNet2_seg, dMaSIFConv_seg
def knn_atoms(x, y, x_batch, y_batch, k):
    """For each point in x, returns the indices and squared distances of its
    k nearest neighbours in y, respecting batch boundaries."""
    N, D = x.shape
x_i = LazyTensor(x[:, None, :])
y_j = LazyTensor(y[None, :, :])
pairwise_distance_ij = ((x_i - y_j) ** 2).sum(-1)
pairwise_distance_ij.ranges = diagonal_ranges(x_batch, y_batch)
# N.B.: KeOps doesn't yet support backprop through Kmin reductions...
# dists, idx = pairwise_distance_ij.Kmin_argKmin(K=k,axis=1)
# So we have to re-compute the values ourselves:
idx = pairwise_distance_ij.argKmin(K=k, axis=1) # (N, K)
x_ik = y[idx.view(-1)].view(N, k, D)
dists = ((x[:, None, :] - x_ik) ** 2).sum(-1)
return idx, dists
def get_atom_features(x, y, x_batch, y_batch, y_atomtype, k=16):
    """Gathers the atom-type features of the k nearest atoms to each point,
    concatenated with the inverse squared distances."""
    idx, dists = knn_atoms(x, y, x_batch, y_batch, k=k)  # (num_points, k)
num_points, _ = idx.size()
idx = idx.view(-1)
dists = 1 / dists.view(-1, 1)
_, num_dims = y_atomtype.size()
feature = y_atomtype[idx, :]
feature = torch.cat([feature, dists], dim=1)
feature = feature.view(num_points, k, num_dims + 1)
return feature
class Atom_embedding(nn.Module):
def __init__(self, args):
super(Atom_embedding, self).__init__()
self.D = args.atom_dims
self.k = 16
self.conv1 = nn.Linear(self.D + 1, self.D)
self.conv2 = nn.Linear(self.D, self.D)
self.conv3 = nn.Linear(2 * self.D, self.D)
self.bn1 = nn.BatchNorm1d(self.D)
self.bn2 = nn.BatchNorm1d(self.D)
self.relu = nn.LeakyReLU(negative_slope=0.2)
def forward(self, x, y, y_atomtypes, x_batch, y_batch):
fx = get_atom_features(x, y, x_batch, y_batch, y_atomtypes, k=self.k)
fx = self.conv1(fx)
fx = fx.view(-1, self.D)
fx = self.bn1(self.relu(fx))
fx = fx.view(-1, self.k, self.D)
fx1 = fx.sum(dim=1, keepdim=False)
fx = self.conv2(fx)
fx = fx.view(-1, self.D)
fx = self.bn2(self.relu(fx))
fx = fx.view(-1, self.k, self.D)
fx2 = fx.sum(dim=1, keepdim=False)
fx = torch.cat((fx1, fx2), dim=-1)
fx = self.conv3(fx)
return fx
class AtomNet(nn.Module):
def __init__(self, args):
super(AtomNet, self).__init__()
self.args = args
self.transform_types = nn.Sequential(
nn.Linear(args.atom_dims, args.atom_dims),
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(args.atom_dims, args.atom_dims),
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(args.atom_dims, args.atom_dims),
nn.LeakyReLU(negative_slope=0.2),
)
self.embed = Atom_embedding(args)
def forward(self, xyz, atom_xyz, atomtypes, batch, atom_batch):
# Run a DGCNN on the available information:
atomtypes = self.transform_types(atomtypes)
return self.embed(xyz, atom_xyz, atomtypes, batch, atom_batch)
class Atom_embedding_MP(nn.Module):
def __init__(self, args):
super(Atom_embedding_MP, self).__init__()
self.D = args.atom_dims
self.k = 16
self.n_layers = 3
self.mlp = nn.ModuleList(
[
nn.Sequential(
nn.Linear(2 * self.D + 1, 2 * self.D + 1),
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(2 * self.D + 1, self.D),
)
for i in range(self.n_layers)
]
)
self.norm = nn.ModuleList(
[nn.GroupNorm(2, self.D) for i in range(self.n_layers)]
)
self.relu = nn.LeakyReLU(negative_slope=0.2)
def forward(self, x, y, y_atomtypes, x_batch, y_batch):
idx, dists = knn_atoms(x, y, x_batch, y_batch, k=self.k) # N, 9, 7
num_points = x.shape[0]
num_dims = y_atomtypes.shape[-1]
point_emb = torch.ones_like(x[:, 0])[:, None].repeat(1, num_dims)
for i in range(self.n_layers):
features = y_atomtypes[idx.reshape(-1), :]
features = torch.cat([features, dists.reshape(-1, 1)], dim=1)
features = features.view(num_points, self.k, num_dims + 1)
features = torch.cat(
[point_emb[:, None, :].repeat(1, self.k, 1), features], dim=-1
) # N, 8, 13
messages = self.mlp[i](features) # N,8,6
messages = messages.sum(1) # N,6
point_emb = point_emb + self.relu(self.norm[i](messages))
return point_emb
class Atom_Atom_embedding_MP(nn.Module):
def __init__(self, args):
super(Atom_Atom_embedding_MP, self).__init__()
self.D = args.atom_dims
self.k = 17
self.n_layers = 3
self.mlp = nn.ModuleList(
[
nn.Sequential(
nn.Linear(2 * self.D + 1, 2 * self.D + 1),
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(2 * self.D + 1, self.D),
)
for i in range(self.n_layers)
]
)
self.norm = nn.ModuleList(
[nn.GroupNorm(2, self.D) for i in range(self.n_layers)]
)
self.relu = nn.LeakyReLU(negative_slope=0.2)
def forward(self, x, y, y_atomtypes, x_batch, y_batch):
idx, dists = knn_atoms(x, y, x_batch, y_batch, k=self.k) # N, 9, 7
idx = idx[:, 1:] # Remove self
dists = dists[:, 1:]
k = self.k - 1
num_points = y_atomtypes.shape[0]
out = y_atomtypes
for i in range(self.n_layers):
_, num_dims = out.size()
features = out[idx.reshape(-1), :]
features = torch.cat([features, dists.reshape(-1, 1)], dim=1)
features = features.view(num_points, k, num_dims + 1)
features = torch.cat(
[out[:, None, :].repeat(1, k, 1), features], dim=-1
) # N, 8, 13
messages = self.mlp[i](features) # N,8,6
messages = messages.sum(1) # N,6
out = out + self.relu(self.norm[i](messages))
return out
class AtomNet_MP(nn.Module):
def __init__(self, args):
super(AtomNet_MP, self).__init__()
self.args = args
self.transform_types = nn.Sequential(
nn.Linear(args.atom_dims, args.atom_dims),
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(args.atom_dims, args.atom_dims),
)
self.embed = Atom_embedding_MP(args)
self.atom_atom = Atom_Atom_embedding_MP(args)
def forward(self, xyz, atom_xyz, atomtypes, batch, atom_batch):
# Run a DGCNN on the available information:
atomtypes = self.transform_types(atomtypes)
atomtypes = self.atom_atom(
atom_xyz, atom_xyz, atomtypes, atom_batch, atom_batch
)
atomtypes = self.embed(xyz, atom_xyz, atomtypes, batch, atom_batch)
return atomtypes
def combine_pair(P1, P2):
    """Merges two protein dicts into one batched dict, offsetting the batch
    indices of the second protein so the two stay distinguishable."""
    P1P2 = {}
for key in P1:
v1 = P1[key]
v2 = P2[key]
if v1 is None:
continue
if key == "batch" or key == "batch_atoms":
v1v2 = torch.cat([v1, v2 + v1[-1] + 1], dim=0)
elif key == "triangles":
# v1v2 = torch.cat([v1,v2],dim=1)
continue
else:
v1v2 = torch.cat([v1, v2], dim=0)
P1P2[key] = v1v2
return P1P2
def split_pair(P1P2):
    """Inverse of combine_pair: splits a batched dict back into the two
    original protein dicts using the batch indices."""
batch_size = P1P2["batch_atoms"][-1] + 1
p1_indices = P1P2["batch"] < batch_size // 2
p2_indices = P1P2["batch"] >= batch_size // 2
p1_atom_indices = P1P2["batch_atoms"] < batch_size // 2
p2_atom_indices = P1P2["batch_atoms"] >= batch_size // 2
P1 = {}
P2 = {}
for key in P1P2:
v1v2 = P1P2[key]
if (key == "rand_rot") or (key == "atom_center"):
n = v1v2.shape[0] // 2
P1[key] = v1v2[:n].view(-1, 3)
P2[key] = v1v2[n:].view(-1, 3)
elif "atom" in key:
P1[key] = v1v2[p1_atom_indices]
P2[key] = v1v2[p2_atom_indices]
elif key == "triangles":
continue
# P1[key] = v1v2[:,p1_atom_indices]
# P2[key] = v1v2[:,p2_atom_indices]
else:
P1[key] = v1v2[p1_indices]
P2[key] = v1v2[p2_indices]
P2["batch"] = P2["batch"] - batch_size + 1
P2["batch_atoms"] = P2["batch_atoms"] - batch_size + 1
return P1, P2
def project_iface_labels(P, threshold=2.0):
queries = P["xyz"]
batch_queries = P["batch"]
source = P["mesh_xyz"]
batch_source = P["mesh_batch"]
labels = P["mesh_labels"]
x_i = LazyTensor(queries[:, None, :]) # (N, 1, D)
y_j = LazyTensor(source[None, :, :]) # (1, M, D)
D_ij = ((x_i - y_j) ** 2).sum(-1).sqrt() # (N, M)
D_ij.ranges = diagonal_ranges(batch_queries, batch_source)
nn_i = D_ij.argmin(dim=1).view(-1) # (N,)
nn_dist_i = (
D_ij.min(dim=1).view(-1, 1) < threshold
).float() # If chain is not connected because of missing densities MaSIF cut out a part of the protein
query_labels = labels[nn_i] * nn_dist_i
P["labels"] = query_labels
class dMaSIF(nn.Module):
def __init__(self, args):
super(dMaSIF, self).__init__()
# Additional geometric features: mean and Gauss curvatures computed at different scales.
self.curvature_scales = args.curvature_scales
self.args = args
I = args.in_channels
O = args.orientation_units
E = args.emb_dims
H = args.post_units
# Computes chemical features
self.atomnet = AtomNet_MP(args)
self.dropout = nn.Dropout(args.dropout)
if args.embedding_layer == "dMaSIF":
# Post-processing, without batch norm:
self.orientation_scores = nn.Sequential(
nn.Linear(I, O),
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(O, 1),
)
# Segmentation network:
self.conv = dMaSIFConv_seg(
args,
in_channels=I,
out_channels=E,
n_layers=args.n_layers,
radius=args.radius,
)
# Asymmetric embedding
if args.search:
self.orientation_scores2 = nn.Sequential(
nn.Linear(I, O),
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(O, 1),
)
self.conv2 = dMaSIFConv_seg(
args,
in_channels=I,
out_channels=E,
n_layers=args.n_layers,
radius=args.radius,
)
elif args.embedding_layer == "DGCNN":
self.conv = DGCNN_seg(I + 3, E,self.args.n_layers,self.args.k)
if args.search:
self.conv2 = DGCNN_seg(I + 3, E,self.args.n_layers,self.args.k)
elif args.embedding_layer == "PointNet++":
self.conv = PointNet2_seg(args, I, E)
if args.search:
self.conv2 = PointNet2_seg(args, I, E)
if args.site:
# Post-processing, without batch norm:
self.net_out = nn.Sequential(
nn.Linear(E, H),
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(H, H),
nn.LeakyReLU(negative_slope=0.2),
nn.Linear(H, 1),
)
def features(self, P, i=1):
"""Estimates geometric and chemical features from a protein surface or a cloud of atoms."""
if (
not self.args.use_mesh and "xyz" not in P
): # Compute the pseudo-surface directly from the atoms
# (Note that we use the fact that dicts are "passed by reference" here)
P["xyz"], P["normals"], P["batch"] = atoms_to_points_normals(
P["atoms"],
P["batch_atoms"],
atomtypes=P["atomtypes"],
resolution=self.args.resolution,
sup_sampling=self.args.sup_sampling,
)
# Estimate the curvatures using the triangles or the estimated normals:
P_curvatures = curvatures(
P["xyz"],
triangles=P["triangles"] if self.args.use_mesh else None,
normals=None if self.args.use_mesh else P["normals"],
scales=self.curvature_scales,
batch=P["batch"],
)
# Compute chemical features on-the-fly:
chemfeats = self.atomnet(
P["xyz"], P["atom_xyz"], P["atomtypes"], P["batch"], P["batch_atoms"]
)
if self.args.no_chem:
chemfeats = 0.0 * chemfeats
if self.args.no_geom:
P_curvatures = 0.0 * P_curvatures
# Concatenate our features:
return torch.cat([P_curvatures, chemfeats], dim=1).contiguous()
def embed(self, P):
"""Embeds all points of a protein in a high-dimensional vector space."""
features = self.dropout(self.features(P))
P["input_features"] = features
#torch.cuda.synchronize(device=features.device)
#torch.cuda.reset_max_memory_allocated(device=P["atoms"].device)
begin = time.time()
# Ours:
if self.args.embedding_layer == "dMaSIF":
self.conv.load_mesh(
P["xyz"],
triangles=P["triangles"] if self.args.use_mesh else None,
normals=None if self.args.use_mesh else P["normals"],
weights=self.orientation_scores(features),
batch=P["batch"],
)
P["embedding_1"] = self.conv(features)
if self.args.search:
self.conv2.load_mesh(
P["xyz"],
triangles=P["triangles"] if self.args.use_mesh else None,
normals=None if self.args.use_mesh else P["normals"],
weights=self.orientation_scores2(features),
batch=P["batch"],
)
P["embedding_2"] = self.conv2(features)
# First baseline:
elif self.args.embedding_layer == "DGCNN":
features = torch.cat([features, P["xyz"]], dim=-1).contiguous()
P["embedding_1"] = self.conv(P["xyz"], features, P["batch"])
if self.args.search:
P["embedding_2"] = self.conv2(
P["xyz"], features, P["batch"]
)
# Second baseline
elif self.args.embedding_layer == "PointNet++":
P["embedding_1"] = self.conv(P["xyz"], features, P["batch"])
if self.args.search:
P["embedding_2"] = self.conv2(P["xyz"], features, P["batch"])
#torch.cuda.synchronize(device=features.device)
end = time.time()
#memory_usage = torch.cuda.max_memory_allocated(device=P["atoms"].device)
memory_usage = 0
conv_time = end - begin
return conv_time, memory_usage
def preprocess_surface(self, P):
P["xyz"], P["normals"], P["batch"] = atoms_to_points_normals(
P["atoms"],
P["batch_atoms"],
atomtypes=P["atomtypes"],
resolution=self.args.resolution,
sup_sampling=self.args.sup_sampling,
distance=self.args.distance,
)
if P['mesh_labels'] is not None:
project_iface_labels(P)
def forward(self, P1, P2=None):
# Compute embeddings of the point clouds:
if P2 is not None:
P1P2 = combine_pair(P1, P2)
else:
P1P2 = P1
conv_time, memory_usage = self.embed(P1P2)
# Monitor the approximate rank of our representations:
R_values = {}
R_values["input"] = soft_dimension(P1P2["input_features"])
R_values["conv"] = soft_dimension(P1P2["embedding_1"])
if self.args.site:
P1P2["iface_preds"] = self.net_out(P1P2["embedding_1"])
if P2 is not None:
P1, P2 = split_pair(P1P2)
else:
P1 = P1P2
return {
"P1": P1,
"P2": P2,
"R_values": R_values,
"conv_time": conv_time,
"memory_usage": memory_usage,
}
|
from time import time
import jwt
from .config import ServiceAccount
class JSONWebTokenHandler:
"""Handles bearer tokens from a service account."""
def __init__(self, service_account: ServiceAccount, audience: str):
self._service_account = service_account
self._audience = audience
self._issued_time = time()
def __str__(self):
return self.token
@property
def _headers(self) -> dict:
return {"alg": "RS256", "typ": "JWT", "kid": self._service_account.private_key_id}
@property
def _payload(self) -> dict:
"""Returns payload to be encoded"""
if time() > self._issued_time + 1200:
self._issued_time = time()
payload = {
"aud": self._audience,
"iss": self._service_account.client_email,
"sub": self._service_account.client_email,
"iat": self._issued_time,
"exp": self._issued_time + 1200,
}
return payload
@property
def token(self) -> str:
"""Returns an encoded token."""
return jwt.encode(
payload=self._payload,
key=self._service_account.private_key,
algorithm="RS256",
headers=self._headers,
)
__all__ = ["JSONWebTokenHandler"]
|
import json
from rest_framework.views import status
from django.urls import reverse
# local imports
from .base_test import TestBase
class TagCreation(TestBase):
"""Test if a tag list is created"""
def test_tag_addition(self):
"""Test tags addition to article"""
self.verify_user()
response = self.client.post(
self.article_url,
data=json.dumps(self.valid_taglist_data),
content_type='application/json'
)
length_of_taglist = len(response.data["tag_list"])
self.assertEqual(length_of_taglist, 2)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_tag_update(self):
"""Test tags update"""
self.verify_user()
response = self.client.post(
self.article_url,
data=json.dumps(self.valid_taglist_data),
content_type='application/json'
)
slug = response.data['slug']
response = self.client.put(
reverse(
'articles:detail_article',
kwargs={'slug': slug},
),
data=json.dumps(self.valid_taglist_update_data),
content_type='application/json'
)
length_of_taglist = len(response.data["tag_list"])
self.assertEqual(length_of_taglist, 4)
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
import sys
import time
from urllib.request import urlopen
from .error import OmeError
from .util import remove, get_terminal_width, is_terminal
def format_size(n):
if n < 1024:
return '%d B' % n
elif n < 1024**2:
return '%.1f KB' % (float(n) / 1024)
elif n < 1024**3:
return '%.1f MB' % (float(n) / 1024**2)
    elif n < 1024**4:
        return '%.1f GB' % (float(n) / 1024**3)
    else:
        return '%.1f TB' % (float(n) / 1024**4)
def format_bar(value, total, max_width):
width = value * max_width // total
return '=' * width + ' ' * (max_width - width)
class Progress(object):
def __init__(self, length, file=sys.stdout):
self.length = length
self.file = file
self.is_terminal = is_terminal(file)
self.start_time = time.time()
self.transferred = 0
self.last_time = self.start_time
self.last_transferred = 0
self.rate = None
self.last_line = ''
def update(self, transferred):
if self.is_terminal:
self.transferred += transferred
now = time.time()
if now - self.last_time > 1.0:
self.rate = (self.transferred - self.last_transferred) / (now - self.last_time)
self.last_time = now
self.last_transferred = self.transferred
rate = format_size(self.rate) + '/s' if self.rate is not None else ''
if self.length:
bar = format_bar(self.transferred, self.length, int(get_terminal_width() / 3))
line = '\r\x1B[K[{0}] {1}% ({2} of {3}) @ {4}'.format(
bar, self.transferred * 100 // self.length,
format_size(self.transferred), format_size(self.length), rate)
else:
line = '\r\x1B[K=> {0} @ {1}\x1B[K'.format(format_size(self.transferred), rate)
if line != self.last_line:
self.file.write(line)
self.file.flush()
self.last_line = line
def finish(self):
if self.is_terminal:
self.file.write('\r\x1B[K')
def download(url, path):
print('ome: downloading', url)
try:
with open(path, 'wb') as output:
with urlopen(url) as input:
progress = Progress(input.length)
try:
while True:
buf = input.read(1024)
if not buf:
break
progress.update(len(buf))
output.write(buf)
finally:
progress.finish()
except KeyboardInterrupt:
remove(path)
raise
except Exception as e:
remove(path)
raise OmeError('ome: download failed: {}'.format(e))
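# Illustrative usage (hypothetical URL and destination path):
#   download('https://example.com/archive.tar.gz', '/tmp/archive.tar.gz')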
|
def product(*args):
"""This method returns the product of items passed as arguments
Returns product of the arguments passed. If args length is zero then ti will return 0
"""
if len(args) == 0:
return 0
result = 1
for arg in args:
result *= arg
return result
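# Example: product(2, 3, 4) returns 24, and product() returns 0.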
def convert_currency(amount, **kwargs):
    """Converts an amount from one currency to another.
    Expects keyword arguments: source, destination, and rate.
    """
    source = kwargs['source']
    destination = kwargs['destination']
    rate = kwargs['rate']
    target = amount * rate
    return target
def is_leap(year):
"""This function finds if the year passed is leap or not
Arguments:
year (int): year
Return: True if leap false other wise
"""
if year % 4 == 0:
if year % 100 == 0:
if year % 400 == 0:
return True
else:
return False
return True
else:
return False
if __name__ == '__main__':
    print(convert_currency(source='usd', destination='inr', amount=135, rate=74.5))
|