max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
templates/kvm.py | kakwa/kouignamann-core | 0 | 12772251 | <reponame>kakwa/kouignamann-core
<%
## Python header: helpers for generating per-VM disk letters and MAC addresses.
import uuid
import os
import random

# Image directory and installer ISO path, overridable via the environment.
imgdir = os.getenv("IMGDIR", "/var/lib/kvm")
isopath = os.getenv("ISOPATH", "/var/lib/kvm/isos/CentOS-7-custom-install.iso")

slot=1

global diskLetter
diskLetter='a'

# increment disk letter (a -> b -> c ...), one letter per declared disk
def nextDiskLetter():
    global diskLetter
    diskLetter = chr(ord(diskLetter) + 1)

# init mac calculation
# Fixed OUI (52:54:00 is the conventional QEMU/KVM prefix)
# random NIC specific first 2 bytes (but common to all interfaces)
# random last byte, incremented for each interface
# this methods limits the number of interfaces to 128
def randomMacInit():
    global lastMac
    global baseMac
    lastMac=random.randint(0x00, 0xef)
    baseMac = [ 0x52, 0x54, 0x00,
                random.randint(0x00, 0x7f),
                random.randint(0x00, 0xff) ]

# get a semi-random mac address; each call increments the last byte
def getMac():
    global lastMac
    global baseMac
    mac = list(baseMac)
    mac.append(lastMac)
    lastMac = lastMac + 1
    return ':'.join(map(lambda x: "%02x" % x, mac))

randomMacInit()
%>\
<domain type='kvm'>
  <name>${host['hostname']}</name>
  <% uuid=uuid.uuid1() %>\
  <uuid>${uuid}</uuid>
  ## RAM is declared in MiB by the host definition, libvirt wants KiB
  <% memory=str(host['hardware']['ram'] * 1024) %>\
  <memory unit='KiB'>${memory}</memory>
  <currentMemory unit='KiB'>${memory}</currentMemory>
  <% cpus=str(host['hardware']['cpus']) %>\
  <vcpu placement='static'>${cpus}</vcpu>
  <os>
    <type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type>
    <boot dev='hd'/>
    <boot dev='cdrom'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/libexec/qemu-kvm</emulator>
## disks declaration: one qcow2 image per volume group, sda, sdb, ...
    <%counter=0
      id=str(counter)
    %>\
    %for vg in host['partitioning']['volume-groups']:
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' cache='none'/>
      <source file='${imgdir}/${host['hostname']}_${id}.img'/>
      <target dev='sd${diskLetter}' bus='scsi'/>
      <address type='drive' controller='0' bus='${id}' target='0' unit='0'/>
    </disk>
    <%counter= counter + 1
      id=str(counter)
      nextDiskLetter()
    %>\
    %endfor
    ## NOTE(review): device is declared type='block' but the source uses
    ## file= with an ISO path -- confirm whether type='file' was intended.
    <disk type='block' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <source file='${isopath}'/>
      <target dev='hd${diskLetter}' bus='ide'/>
      <readonly/>
      <address type='drive' controller='0' bus='${id}' target='0' unit='0'/>
    </disk>
    <controller type='scsi' index='0' model='virtio-scsi'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
    </controller>
    <controller type='usb' index='0'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
    </controller>
    <controller type='pci' index='0' model='pci-root'/>
    <controller type='ide' index='0'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
    </controller>
    <controller type='virtio-serial' index='0'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
    </controller>
## Interface declaration: one <interface> per configured NIC, PCI slots 01, 02, ...
    <% counter=1 %>
    %for interface in host['network']['interfaces']:
    <% mac=getMac()
       slot="%02x" % counter
       counter = counter + 1%>\
    <interface type='network'>
      <mac address='${mac}'/>
      <source network='${interface['type']}'/>
      <model type='e1000'/>
      <address type='pci' domain='0x0000' bus='0x01' slot='0x${slot}' function='0x0'/>
    </interface>
    %endfor
    <serial type='pty'>
      <target port='0'/>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>
    <channel type='spicevmc'>
      <target type='virtio' name='com.redhat.spice.0'/>
      <address type='virtio-serial' controller='0' bus='0' port='1'/>
    </channel>
    <input type='mouse' bus='ps2'/>
    <graphics type='spice' autoport='yes'/>
    <video>
      <model type='qxl' ram='65536' vram='65536' heads='1'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
    </video>
    <memballoon model='virtio'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
    </memballoon>
  </devices>
</domain>
| 2.4375 | 2 |
src/datalakebundle/table/write/DataWriter.py | bricksflow/datalake-bundle | 0 | 12772252 | import string
import random
from logging import Logger
from pyspark.sql.session import SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StructType
from datalakebundle.table.upsert.UpsertQueryCreator import UpsertQueryCreator
from datalakebundle.delta.DeltaStorage import DeltaStorage
class DataWriter:
    """Writes Spark DataFrames to Delta tables via append, overwrite or upsert."""

    def __init__(
        self,
        logger: Logger,
        spark: SparkSession,
        delta_storage: DeltaStorage,
        upsert_query_creator: UpsertQueryCreator,
    ):
        self.__logger = logger
        self.__spark = spark
        self.__delta_storage = delta_storage
        self.__upsert_query_creator = upsert_query_creator

    def append(self, df: DataFrame, full_table_name: str, schema: StructType, options: dict):
        """Append df's rows to an existing table.

        insertInto() requires dataframe columns order to match schema columns order,
        hence the explicit select() on the schema's field names.
        """
        df.select([field.name for field in schema.fields]).write.options(**options).insertInto(full_table_name, overwrite=False)

    def overwrite(self, df: DataFrame, full_table_name: str, partition_by: list, options: dict):
        """Replace the table's data with df, preserving the partitioning scheme."""
        self.__delta_storage.overwrite_data(df, full_table_name, partition_by, options)

    def upsert(self, df: DataFrame, full_table_name: str, schema: StructType, primary_key: list):
        """MERGE df into the table, matching rows on primary_key.

        df is exposed as a randomly-named temp view so the generated MERGE
        statement can reference it; the view is always dropped afterwards.
        """
        temp_source_table = (
            f"upsert_{full_table_name.replace('.', '__')}_{''.join(random.choice(string.ascii_lowercase) for _ in range(6))}"
        )

        df.createOrReplaceTempView(temp_source_table)

        upsert_sql_statement = self.__upsert_query_creator.create(full_table_name, schema, primary_key, temp_source_table)

        # The original `except BaseException: raise` was a no-op; try/finally
        # alone guarantees the temp view is dropped even when the MERGE fails.
        try:
            self.__spark.sql(upsert_sql_statement)
        finally:
            self.__spark.catalog.dropTempView(temp_source_table)
| 2.546875 | 3 |
End2End_train.py | Tlili-ahmed/2BiVQA | 14 | 12772253 | """
Author :
<NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
from keras import backend as K
from tqdm.keras import TqdmCallback
from scipy.stats import spearmanr
from tensorflow.keras import Input
from tensorflow.keras import optimizers
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.models import Model
from statistics import mean
from sklearn.utils import shuffle
from tensorflow import keras
from tensorflow.keras.optimizers import Adam
import pandas as pd
import datetime
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau ,Callback,TensorBoard
from keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras import applications
import PIL
from keras.activations import softmax,sigmoid
import h5py
from PIL import Image
from keras.layers import Layer
from scipy.stats import spearmanr,pearsonr
import sklearn
import tensorflow as tf
from tensorflow.keras.layers import MaxPooling2D ,Dense,Concatenate ,Dropout ,Input,concatenate,Conv2D,Reshape,GlobalMaxPooling2D,Flatten,GlobalAveragePooling2D,AveragePooling2D,Lambda,MaxPooling2D,TimeDistributed, Bidirectional, LSTM
import argparse
import random
from tqdm import tqdm
tf.keras.backend.clear_session()
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
#os.environ['CUDA_VISIBLE_DEVICES']=""
def data_generator(data, batch_size=16):
    """Infinite generator yielding (X, y) batches loaded from .npy file pairs.

    :param data: list of [x_path, y_path] pairs; x arrays must broadcast to
                 (30, 25, 2560) and y arrays to (1,)
    :param batch_size: maximum number of samples per yielded batch
    """
    num_samples = len(data)
    random.shuffle(data)  # shuffled once; every epoch replays the same order
    while True:
        for offset in range(0, num_samples, batch_size):
            # Get the samples you'll use in this batch.
            batch_samples = data[offset:offset + batch_size]
            # Bug fix: the last slice is shorter than batch_size when
            # num_samples is not a multiple of it; sizing the arrays by the
            # actual slice length avoids the IndexError the original raised.
            n = len(batch_samples)
            X_train = np.zeros((n, 30, 25, 2560))
            y_train = np.zeros((n, 1))
            for i in range(n):
                X_train[i, :, :, :] = np.load(batch_samples[i][0])
                y_train[i, :] = np.load(batch_samples[i][1])
            yield X_train, y_train
def logistic_func(X, bayta1, bayta2, bayta3, bayta4):
    """4-parameter logistic function.

    Maps X onto the range (bayta2, bayta1) with inflection point bayta3 and
    slope controlled by |bayta4|.
    """
    exponent = np.negative(np.divide(X - bayta3, np.abs(bayta4)))
    denominator = 1 + np.exp(exponent)
    return bayta2 + np.divide(bayta1 - bayta2, denominator)
'''
def data_generator_1(data,batch_size=4):
num_samples = len(data)
while True:
for offset in range(0, num_samples, batch_size):
# Get the samples you'll use in this batch
batch_samples = data[offset:offset+batch_size]
X_train = np.zeros((batch_size, 30,25,2560))
y_train = np.zeros((batch_size,1))
for i in range(batch_size):
X_train[i,:,:,:] = np.load(batch_samples[i][0])
y_train[i,:] = np.load(batch_samples[i][1])
yield X_train
def data_generator_2(data,batch_size=1):
num_samples = len(data)
while True:
for offset in range(0, num_samples, batch_size):
# Get the samples you'll use in this batch
batch_samples = data[offset:offset+batch_size]
X_train = np.zeros((batch_size, 30,25,2560))
y_train = np.zeros((batch_size,1))
for i in range(batch_size):
X_train[i,:,:,:] = np.load(batch_samples[i][0])
y_train[i,:] = np.load(batch_samples[i][1])
yield y_train
'''
def build_model(batch_shape, model_final):
    """Stack the per-frame spatial model with a temporal BiLSTM regressor.

    :param batch_shape: per-sample input shape fed to TimeDistributed
    :param model_final: frozen/unfrozen per-frame feature extractor
    :returns: compiled keras model predicting a single quality score
    """
    # Both BiLSTM layers share the same configuration.
    lstm_kwargs = dict(
        return_sequences=True,
        kernel_initializer='random_normal',
        recurrent_initializer='random_normal',
        dropout=0.4,
        recurrent_dropout=0,
    )
    model = models.Sequential([
        TimeDistributed(model_final, input_shape=batch_shape),
        Bidirectional(LSTM(64, **lstm_kwargs)),
        Bidirectional(LSTM(64, **lstm_kwargs)),
        Flatten(),
        Dense(256, activation='relu',
              kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.001)),
        layers.Dropout(rate=0.5),
        layers.Dense(1),
        layers.Activation('linear'),
    ])
    model.compile(optimizer=optimizers.Adam(), loss='mse', metrics=['mae'])
    model.summary()
    return model
def data_prepare():
    """Pair each feature file in ./features_X with its label file in ./features_y.

    Label files are assumed to share the feature file's name. Returns a sorted
    list of [x_path, y_path] pairs.
    """
    names = os.listdir('features_X')
    pairs = [['./features_X/' + name, './features_y/' + name] for name in names]
    pairs.sort()
    return pairs
if __name__ == '__main__':
    # Command-line interface: number of frames, pretrained spatial model, batch size.
    parser = argparse.ArgumentParser("End2End_train")
    parser.add_argument('-nf',
                        '--num_frames',
                        default=30,
                        type=int,
                        help='Number of cropped frames per video.'
                        )
    parser.add_argument('-m',
                        '--pretrained_model',
                        default='/models/res-bi-sp_koniq.h5',
                        type=str,
                        help='path to pretrained spatial pooling module.'
                        )
    parser.add_argument('-b',
                        '--batch_size',
                        default=16,
                        type=int,
                        help='batch_size.'
                        )

    # Checkpoints are written under ./models
    if not os.path.exists('./models'):
        os.makedirs('./models')

    args = parser.parse_args()

    # Keep only the best weights (val_loss), halve LR on plateau, stop early.
    md = ModelCheckpoint(filepath='./models/trained_model.h5',monitor='val_loss', mode='min',save_weights_only=True,save_best_only=True,verbose=1)
    rd = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=20,min_lr=1e-7, verbose=2, mode='min')
    ear = EarlyStopping(monitor='val_loss',mode ='min', patience=80, verbose=2,restore_best_weights=False)
    callbacks_k = [md,rd,TqdmCallback(verbose=2),ear]

    li = data_prepare()
    li.sort()

    num_patch = 25
    nb = args.num_frames
    batch_size = args.batch_size
    sp_pretrained = args.pretrained_model

    # 80/20 train/validation split over the sorted file list.
    sep = int(len(li)/5)
    train_l = li[0:sep*4]
    test_l = li[sep*4:]

    train_gen = data_generator(train_l,batch_size= batch_size)
    val_gen = data_generator(test_l,batch_size= batch_size)

    # NOTE(review): `In` is never used below — looks like dead code.
    In = Input((nb,num_patch,2048))

    # Load the pretrained spatial model and fine-tune all of its layers,
    # using the output of its third-to-last layer as the per-frame features.
    model = load_model(sp_pretrained)
    for layer in model.layers:
        layer.trainable = True
    model_final = Model(inputs=model.input,outputs=model.layers[-3].output )

    # NOTE(review): feature dim is 2048 here, but data_generator allocates
    # (30, 25, 2560) batches — confirm which dimension is correct.
    model = build_model((nb,num_patch,2048), model_final)

    history = model.fit_generator(train_gen,steps_per_epoch = int(len(train_l)/ batch_size),
                                  epochs=200,validation_data=val_gen,validation_steps =
                                  int(len(test_l)/batch_size) ,verbose=0,callbacks=callbacks_k)
| 2.0625 | 2 |
alembic/versions/80ecdb88cee2_add_thumbnails_column.py | nemanjamart/graphics_service | 1 | 12772254 | <reponame>nemanjamart/graphics_service
"""Add thumbnails column
Revision ID: <KEY>
Revises: b9308d306405
Create Date: 2021-02-05 16:32:50.041223
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add a nullable array-of-strings ``thumbnails`` column."""
    thumbnails = sa.Column('thumbnails', postgresql.ARRAY(sa.String()), nullable=True)
    op.add_column('graphics', thumbnails)
def downgrade():
    """Revert the migration: drop the ``thumbnails`` column from ``graphics``."""
    op.drop_column('graphics', 'thumbnails')
| 1.4375 | 1 |
platypush/backend/pushbullet/__init__.py | shineit/platypush | 1 | 12772255 | import logging
import json
import requests
import time
import websocket
from platypush.config import Config
from platypush.message import Message
from platypush.message.event.pushbullet import PushbulletEvent
from .. import Backend
class PushbulletBackend(Backend):
    """Backend that listens for pushes on the Pushbullet websocket stream.

    Pushes whose body is valid JSON are dispatched through on_message();
    every push additionally raises a PushbulletEvent, which is useful for
    reacting to mirrored mobile notifications.
    """

    def __init__(self, token, device, **kwargs):
        """
        :param token: Pushbullet API access token
        :param device: Pushbullet device nickname used to send/receive messages
        """
        super().__init__(**kwargs)

        self.token = token
        self.device_name = device
        self.pb_device_id = self.get_device_id()

        # Last message seen per type, used to skip duplicate deliveries
        # (Pushbullet sometimes sends the same push twice in quick succession).
        self._last_received_msg = {
            'request': {'body': None, 'time': None},
            'response': {'body': None, 'time': None},
            'event': {'body': None, 'time': None},
        }

    def _get_latest_push(self):
        """Return the most recent active push from the last 5 seconds, or {}."""
        t = int(time.time()) - 5
        try:
            response = requests.get(
                u'https://api.pushbullet.com/v2/pushes',
                headers={'Access-Token': self.token},
                params={
                    'modified_after': str(t),
                    'active': 'true',
                    'limit': 1,
                }
            )

            response = response.json()
        except Exception as e:
            logging.exception(e)
            raise e

        if 'pushes' in response and response['pushes']:
            return response['pushes'][0]
        else:
            return {}

    def _should_skip_last_received_msg(self, msg):
        """Return True when msg duplicates the last message of its type.

        A message is a duplicate if it equals the previous body and arrived
        within two seconds of it. The bookkeeping dict is updated either way.
        """
        if not isinstance(msg, dict):
            return True  # We received something weird

        is_duplicate = False
        last_msg = self._last_received_msg[msg['type']]

        if last_msg:
            msg = Message.parse(msg)
            if str(msg) == str(last_msg['body']) \
                    and time.time() - last_msg['time'] <= 2:
                # Duplicate message sent on the Pushbullet socket within
                # two seconds, ignore it
                logging.debug('Ignoring duplicate message received on the socket')
                is_duplicate = True

        self._last_received_msg[msg['type']] = {
            'body': msg, 'time': time.time()
        }

        return is_duplicate

    def on_push(self):
        """Build the websocket on_message handler."""
        def _f(ws, data):
            try:
                # Parse the push.
                try:
                    # Bug fix: the original read `else push` here, which is an
                    # undefined name and raised NameError whenever the payload
                    # arrived already parsed (i.e. not a str).
                    data = json.loads(data) if isinstance(data, str) else data
                except Exception as e:
                    logging.exception(e)
                    return

                # If it's a push, get it. A 'tickle' only signals that a new
                # push exists, so it has to be fetched over HTTP.
                if data['type'] == 'tickle' and data['subtype'] == 'push':
                    push = self._get_latest_push()
                elif data['type'] == 'push':
                    push = data['push']
                else:
                    return  # Not a push notification

                # Post an event, useful to react on mobile notifications if
                # you enabled notification mirroring on your PushBullet app
                event = PushbulletEvent(**push)
                self.on_message(event)

                if 'body' not in push:
                    return

                logging.debug('Received push: {}'.format(push))

                body = push['body']
                try:
                    body = json.loads(body)
                except ValueError:
                    return  # Some other non-JSON push

                if not self._should_skip_last_received_msg(body):
                    self.on_message(body)
            except Exception as e:
                logging.exception(e)
                return

        return _f

    def on_error(self):
        """Build the websocket on_error handler: log and restart the socket."""
        def _f(ws, e):
            logging.exception(e)
            logging.info('Restarting PushBullet backend')
            ws.close()
            self._init_socket()

        return _f

    def _init_socket(self):
        """(Re)create the websocket connected to the Pushbullet stream."""
        self.ws = websocket.WebSocketApp(
            'wss://stream.pushbullet.com/websocket/' + self.token,
            on_message=self.on_push(),
            on_error=self.on_error())

    def get_device_id(self):
        """Resolve the configured device nickname to its Pushbullet iden.

        :raises RuntimeError: when no device has the configured nickname
        """
        response = requests.get(
            u'https://api.pushbullet.com/v2/devices',
            headers={'Access-Token': self.token},
        ).json()

        devices = [dev for dev in response['devices'] if 'nickname' in dev
                   and dev['nickname'] == self.device_name]

        if not devices:
            raise RuntimeError('No such Pushbullet device: {}'
                               .format(self.device_name))

        return devices[0]['iden']

    def send_message(self, msg):
        """Send msg as a Pushbullet note to the configured device."""
        requests.post(
            u'https://api.pushbullet.com/v2/pushes',
            headers={'Access-Token': self.token},
            json={
                'type': 'note',
                'device_iden': self.pb_device_id,
                'body': str(msg)
            }
        ).json()

    def on_stop(self):
        """Close the websocket when the backend is stopped."""
        self.ws.close()

    def run(self):
        """Main loop: open the socket and block on the websocket event loop."""
        super().run()

        self._init_socket()
        logging.info('Initialized Pushbullet backend - device_id: {}'
                     .format(self.device_name))

        self.ws.run_forever()
# vim:sw=4:ts=4:et:
| 2.453125 | 2 |
pkg/package.py | bruce30262/idapkg | 125 | 12772256 | <reponame>bruce30262/idapkg
"""
Package-related classes and methods are in pkg.package module. All constructing arguments are accessible via property.
"""
import ctypes
import glob
import json
import os
import random
import runpy
import shutil
import sys
import traceback
import zipfile
import ida_kernwin
import ida_loader
import ida_diskio
from .config import g
from .env import ea as current_ea, os as current_os
from .internal_api import invalidate_proccache, get_extlangs, idausr_remove, idausr_add
from .logger import getLogger
from .vendor.semantic_version import Version, Spec
from .virtualenv_utils import FixInterpreter
__all__ = ["LocalPackage", "InstallablePackage"]
log = getLogger(__name__)
def rename(old, new):
    # On Windows, call MoveFileExA directly so the native error code is
    # surfaced when the move fails (flags=0 keeps default semantics).
    # NOTE(review): MoveFileExA is the ANSI variant and expects byte strings;
    # passing a Python 3 str through ctypes may need explicit encoding —
    # confirm on a Windows host.
    if sys.platform == 'win32':
        if not ctypes.windll.kernel32.MoveFileExA(str(old), str(new), 0):
            raise WindowsError(ctypes.windll.kernel32.GetLastError())
    else:
        # POSIX rename is atomic and replaces an existing destination.
        return os.rename(old, new)
def _get_native_suffix():
    """Return the native shared-library filename suffix for the host OS."""
    suffixes = {'win': '.dll', 'linux': '.so', 'mac': '.dylib'}
    if current_os not in suffixes:
        raise Exception("unknown os: %r" % current_os)
    return suffixes[current_os]
class LocalPackage(object):
    """A package extracted under g['path']['packages'] on the local machine."""

    def __init__(self, id, path, version):
        # id/version normalized to str so comparisons and logging are uniform.
        self.id = str(id)
        self.version = str(version)
        self.path = os.path.normpath(path)

    def remove(self):
        """
        Removes a package.
        """
        # Drop the package directory from the IDAUSR search path first.
        idausr_remove(self.path)

        with FixInterpreter():
            for script in self.info().get('uninstallers', []):
                script = os.path.join(self.path, script)
                try:
                    runpy.run_path(script)
                except Exception:
                    # XXX: How can I rollback this?
                    traceback.print_exc()
                    log.warn(
                        "Uninstallation script %r exited with exception!", script)

        if not LocalPackage._remove_package_dir(self.path):
            log.error(
                "Package directory is in use and will be removed after restart.")
            # If not modified, the only case this fails is, custom ld.so or windows.
            # Latter case is common.
            new_path = self.path.rstrip('/\\') + '-removed'
            if os.path.exists(new_path):
                new_path += '-%x' % random.getrandbits(64)
            rename(self.path, new_path)
            # XXX: is it good to mutate this object?
            self.path = new_path
        log.info("Done!")

    def install(self, remove_on_fail=False):
        """
        Run python scripts specified by :code:`installers` field in `info.json`.

        :returns: None
        """
        # Installers are executed with the package directory as cwd; the
        # original cwd is always restored.
        orig_cwd = os.getcwd()
        try:
            os.chdir(self.path)
            info = self.info()
            scripts = info.get('installers', [])
            if not isinstance(scripts, list):
                raise Exception(
                    '%r: Corrupted package: installers key is not list' % self.id)
            with FixInterpreter():
                for script in scripts:
                    log.info('Executing installer path %r...', script)
                    script = os.path.join(self.path, script)
                    runpy.run_path(script)
        except Exception:
            log.info('Installer failed!')
            if remove_on_fail:
                self.remove()
            raise
        finally:
            os.chdir(orig_cwd)

    def load(self, force=False):
        """
        Actually does :code:`ida_loaders.load_plugin(paths)`, and updates IDAUSR variable.
        """
        if not force and self.path in ida_diskio.get_ida_subdirs(''):
            # Already loaded, just update sys.path for python imports
            if self.path not in sys.path:
                sys.path.append(self.path)
            return

        # XXX: find a more efficient way to ensure dependencies
        errors = []
        for dependency in self.info().get('dependencies', {}).keys():
            dep = LocalPackage.by_name(dependency)
            if not dep:
                errors.append('Dependency not found: %r' % dependency)
                continue
            dep.load()
        if errors:
            for error in errors:
                log.error(error)
            return

        def handler():
            # Load plugins immediately
            # processors / loaders will be loaded on demand
            if self.path not in sys.path:
                sys.path.append(self.path)
            # Update IDAUSR variable
            idausr_add(self.path)
            # Immediately load compatible plugins
            self._find_loadable_modules('plugins', ida_loader.load_plugin)
            # Find loadable processor modules, and if exists, invalidate cached process list (proccache).
            invalidates = []
            self._find_loadable_modules('procs', invalidates.append)
            if invalidates:
                invalidate_proccache()

        # Run in main thread
        ida_kernwin.execute_sync(handler, ida_kernwin.MFF_FAST)

    def populate_env(self):
        """
        A passive version of load; it only populates IDAUSR variable.
        It's called at :code:`idapythonrc.py`.
        """
        errors = []
        for dependency in self.info().get('dependencies', {}).keys():
            dep = LocalPackage.by_name(dependency)
            if not dep:
                errors.append('Dependency not found: %r' % dependency)
                continue
            dep.populate_env()
        if errors:
            for error in errors:
                log.error(error)
            return

        idausr_add(self.path)
        if self.path not in sys.path:
            sys.path.append(self.path)

    def plugins(self):
        """Return the paths of loadable plugin modules in this package."""
        return self._collect_modules('plugins')

    def loaders(self):
        """Return the paths of loadable loader modules in this package."""
        return self._collect_modules('loaders')

    def procs(self):
        """Return the paths of loadable processor modules in this package."""
        return self._collect_modules('procs')

    def _collect_modules(self, category):
        # Collect instead of load: reuse the discovery logic with a list sink.
        result = []
        self._find_loadable_modules(category, result.append)
        return result

    def _find_loadable_modules(self, subdir, callback):
        # Load modules in external languages (.py, .idc, ...)
        for suffix in ['.' + x.fileext for x in get_extlangs()]:
            expr = os.path.join(self.path, subdir, '*' + suffix)
            for path in glob.glob(expr):
                callback(str(path))

        # Load native modules; a '64' filename suffix marks 64-bit binaries,
        # which must match the bitness of the running IDA kernel.
        for suffix in (_get_native_suffix(),):
            expr = os.path.join(self.path, subdir, '*' + suffix)
            for path in glob.glob(expr):
                is64 = path[:-len(suffix)][-2:] == '64'
                if is64 == (current_ea == 64):
                    callback(str(path))

    def info(self):
        """
        Loads :code:`info.json` and returns a parsed JSON object.

        :rtype: dict
        """
        with open(os.path.join(self.path, 'info.json'), 'rb') as _file:
            return json.load(_file)

    @staticmethod
    def by_name(name, prefix=None):
        """
        Returns a package with specified `name`.

        :rtype: LocalPackage
        """
        if prefix is None:
            prefix = g['path']['packages']

        path = os.path.join(prefix, name)

        # check if the folder exists
        if not os.path.isdir(path):
            return None

        # filter removed package (marked by remove() when deletion failed)
        removed = os.path.join(path, '.removed')
        if os.path.isfile(removed):
            LocalPackage._remove_package_dir(path)
            return None

        info_json = os.path.join(path, 'info.json')
        if not os.path.isfile(info_json):
            log.warn('Warning: info.json is not found at %r', path)
            return None

        with open(info_json, 'rb') as _file:
            try:
                info = json.load(_file)
            except Exception:
                traceback.print_exc()
                log.warn('Warning: info.json is not valid at %r', path)
                return None

        result = LocalPackage(
            id=info['_id'], path=path, version=info['version'])
        return result

    @staticmethod
    def all(disabled=False):
        """
        List all packages installed at :code:`g['path']['packages']`.

        :rtype: list(LocalPackage)
        """
        prefix = g['path']['packages']

        res = os.listdir(prefix)
        res = (x for x in res if os.path.isdir(os.path.join(prefix, x)))
        res = (LocalPackage.by_name(x) for x in res)
        res = (x for x in res if x)
        # disabled=False yields active packages, disabled=True the ignored ones.
        res = [x for x in res if (x.id in g['ignored_packages']) == disabled]

        return res

    @staticmethod
    def _remove_package_dir(path):
        # Returns True on full removal; on partial failure the directory is
        # marked with a '.removed' file so by_name() retries deletion later.
        errors = []

        def onerror(_listdir, _path, exc_info):
            log.error("%s: %s", _path, str(exc_info[1]))
            errors.append(exc_info[1])

        shutil.rmtree(path, onerror=onerror)

        if errors:
            # Mark for later removal
            open(os.path.join(path, '.removed'), 'wb').close()

        return not errors

    def __repr__(self):
        return '<LocalPackage id=%r path=%r version=%r>' % \
            (self.id, self.path, self.version)
class InstallablePackage(object):
    """A package published on a remote repository, not yet installed locally."""

    def __init__(self, id, name, version, description, author, repo):
        # id and version are coerced to str for uniform comparison/printing.
        self.id = str(id)
        self.version = str(version)
        self.name = name
        self.description = description
        self.author = author
        self.repo = repo

    def install(self, upgrade=False):
        """
        Just calls :code:`InstallablePackage.install_from_repo(self.repo, self.id, upgrade)`.
        """
        install_from_repo(self.repo, self.id, allow_upgrade=upgrade)

    def __repr__(self):
        return '<InstallablePackage id=%r version=%r repo=%r>' % (
            self.id, self.version, self.repo)
def install_from_repo(repo, name, version_spec='*', allow_upgrade=False, _visited=None):
    """
    This method downloads a package satisfying spec.

    .. note ::
        The function waits until all of dependencies are installed.
        Run it as separate thread if possible.
    """
    # _visited maps package name -> (version, restart_required); it doubles as
    # a cycle guard and as the summary printed by the top-level call.
    top_level = _visited is None
    _visited = _visited or {}

    if name in _visited:
        log.warn("Cyclic dependency found when installing %r <-> %r",
                 name, _visited)
        return

    prev = LocalPackage.by_name(name)
    _version_spec = Spec(version_spec)
    satisfies_local = prev and Version(prev.version) in _version_spec

    # Only hit the network when an upgrade was requested or the local copy
    # does not satisfy the version spec.
    if allow_upgrade or not satisfies_local:
        log.debug("Fetching releases for %r from %r...", name, repo)
        releases = repo.releases(name)
        if not releases:
            error = "Release not found on remote repository: %r on %r (error: %r)" % (
                name, repo, releases['error'])
            raise Exception(error)
        releases = [release for release in releases
                    if Version(release['version']) in _version_spec]
        if not releases:
            error = "Release satisfying the condition %r %r not found on remote repository %r" % (
                name, version_spec, repo)
            raise Exception(error)
        # Skip the download when the newest matching release is already installed.
        downloading = None if (
            prev and releases[-1]['version'] == prev.version) else releases[-1]['version']
    else:
        downloading = None

    if downloading:
        log.info('Collecting %s...', name)
        data = repo.download(name, downloading)
        f = zipfile.ZipFile(data, 'r')

        # Detect a single top-level folder wrapping the archive contents.
        # No /: topmost files
        # One /: topmost folders
        topmost_files = [path for path in f.namelist() if path.count('/') == 0]
        # From ZipInfo.is_dir() in Python 3.x
        topmost_folders = [path for path in f.namelist() if path.endswith('/')]
        common_prefix = topmost_folders[0] if len(topmost_files) == 0 and len(topmost_folders) == 1 else ""

        info = json.load(f.open(common_prefix + 'info.json'))
        packages_path = g['path']['packages']
        install_path = os.path.join(packages_path, info["_id"])

        # this ensures os.path.exists(install_path) == False
        # TODO: should we unload a already-loaded plugin?
        if prev:
            prev.remove()

        assert not os.path.exists(install_path)

        # XXX: edge case?
        removed = os.path.join(install_path, '.removed')
        if os.path.isfile(removed):
            os.unlink(removed)

        log.info('Extracting into %r...', install_path)
        if common_prefix:
            # Archive wrapped in a folder: extract, then rename the folder.
            f.extractall(packages_path)
            os.rename(os.path.join(packages_path, common_prefix), install_path)
        else:
            f.extractall(install_path)

        # Initiate LocalPackage object
        pkg = LocalPackage(info['_id'], install_path, info['version'])
    else:
        pkg = prev
        log.info("Requirement already satisfied: %s%s",
                 name, '' if version_spec == '*' else version_spec)

    restart_required = pkg.info().get('restart_required', False)
    _visited[name] = (pkg.version, restart_required)

    # First, install dependencies
    # TODO: add version check
    for dep_name, dep_version_spec in pkg.info().get('dependencies', {}).items():
        install_from_repo(repo, dep_name, dep_version_spec, allow_upgrade, _visited)

    # Then, install this package.
    if downloading:
        pkg.install()
        if not restart_required:
            pkg.load()

    if top_level:
        log.info("Successfully installed %s",
                 ' '.join('%s-%s' % (key, value[0]) for key, value in _visited.items()))
        delayed = [(key, value) for key, value in _visited.items() if value[1]]
        if delayed:
            log.info(
                "Plugins in the following packages will be loaded after restarting IDA.")
            log.info(
                "  %s", " ".join('%s-%s' % (key, value[0]) for key, value in delayed))

    return pkg
| 2 | 2 |
setup.py | ofbennett/GreenGrapher | 0 | 12772257 | <filename>setup.py
from setuptools import setup, find_packages
# Distribution metadata for the greengrapher package.
setup(
    name = 'greengrapher',
    version = '1.0',
    # Ship every package except the test packages.
    packages = find_packages(exclude = ['*test']),
    scripts = ['scripts/greengraph.py'],
    install_requires = ['numpy','matplotlib','geopy','requests','nose']
)
| 1.382813 | 1 |
tests/integration/test_groups.py | mobidevke/py-fineract | 7 | 12772258 | import random
from fineract.objects.group import Group
number = random.randint(0, 10000)
def test_create_group(fineract):
    """Creating a group through the API should yield a Group instance."""
    created = Group.create(fineract.request_handler, 'Test ' + str(number), 1)
    assert isinstance(created, Group)
def test_get_group_by_name(fineract):
    """Looking up the previously created group by name should return it."""
    found = Group.get_group_by_name(fineract.request_handler, 'Test ' + str(number))
    assert isinstance(found, Group)
def test_add_member_to_group(fineract):
    """Adding client #1 to the group should report success."""
    member = fineract.get_client(1)
    target = Group.get_group_by_name(fineract.request_handler, 'Test ' + str(number))
    assert target.add_members([member.id])
def test_remove_member_from_group(fineract):
    """Removing client #1 from the group should report success."""
    member = fineract.get_client(1)
    target = Group.get_group_by_name(fineract.request_handler, 'Test ' + str(number))
    assert target.remove_members([member.id])
| 2.265625 | 2 |
src/tree.py | kinow/rosalind-exercises | 0 | 12772259 | import networkx as nx
if __name__ == '__main__':
n = 872
data = '''
633 397
336 423
583 800
457 20
707 797
61 1
494 672
382 140
282 62
782 188
463 478
571 456
531 8
119 294
863 190
49 205
224 70
88 118
80 139
854 273
66 74
234 155
789 120
225 353
446 767
346 163
796 779
66 450
130 38
228 793
262 263
41 204
21 421
630 120
210 424
15 10
698 778
404 663
489 287
331 214
517 655
355 176
391 416
19 44
518 598
194 161
45 49
683 156
177 274
9 23
311 837
25 605
206 265
21 28
468 690
485 694
211 338
211 749
227 405
485 29
376 271
624 661
49 825
81 19
486 624
90 57
261 597
440 170
470 564
317 482
798 467
766 563
341 499
179 226
110 238
52 467
343 258
237 5
81 524
777 398
669 727
680 36
99 701
713 227
115 146
414 824
20 71
835 634
122 707
67 192
15 66
95 36
548 247
277 520
146 173
94 8
28 334
389 190
525 154
587 650
666 384
208 813
799 119
119 159
221 352
73 399
25 344
870 409
692 438
557 178
303 715
347 40
696 434
267 615
229 668
593 732
413 386
560 496
11 4
16 10
201 461
45 39
42 40
644 238
30 4
483 119
107 82
675 738
667 450
103 846
596 56
365 433
40 8
46 101
552 97
519 103
123 206
731 454
82 39
97 253
250 118
578 226
46 5
278 601
335 24
314 193
82 822
632 490
383 806
359 488
490 297
735 564
783 322
491 266
370 628
581 708
388 207
620 481
174 58
261 130
377 69
116 113
171 293
530 329
787 661
607 225
125 73
12 34
50 432
141 102
526 449
475 187
46 182
31 11
5 43
76 34
420 447
706 106
486 86
361 395
244 273
87 312
9 6
434 56
149 428
32 216
500 300
28 36
231 256
11 436
85 402
355 741
563 170
161 127
484 554
32 1
61 196
498 551
587 189
847 817
160 665
179 547
110 248
518 788
826 201
84 19
646 103
152 838
368 87
48 35
745 189
315 700
412 763
195 386
528 364
652 553
382 649
2 59
861 716
186 3
144 332
738 751
181 390
177 178
75 133
457 627
445 622
336 315
243 251
91 604
776 638
217 246
534 430
480 626
78 51
38 679
208 243
99 32
21 292
804 232
15 91
37 11
641 856
267 183
523 23
321 277
634 671
6 20
19 77
358 16
795 461
494 213
155 101
277 442
130 357
351 521
624 638
391 2
208 493
618 26
53 80
513 412
409 248
398 140
786 829
103 190
247 309
324 191
5 83
227 171
199 775
427 538
5 754
51 126
871 769
218 11
221 363
685 145
589 116
556 814
774 621
177 69
22 33
414 242
165 278
207 206
314 831
85 53
308 121
7 5
503 334
67 418
106 487
816 315
573 678
398 658
103 101
555 651
571 606
99 329
604 811
231 284
92 210
102 20
451 473
19 35
682 230
24 635
199 318
267 300
172 25
101 463
704 52
43 185
266 223
67 211
583 743
583 753
108 195
643 333
471 152
168 603
104 68
60 279
370 460
110 55
541 25
613 93
317 13
1 4
201 163
22 498
466 435
362 241
297 724
134 299
437 228
819 30
167 38
683 750
236 609
695 164
507 307
281 191
53 476
176 45
8 722
328 240
97 288
112 117
170 65
545 20
76 111
18 577
322 518
60 34
586 471
582 523
72 100
282 365
73 809
22 17
349 311
18 11
19 9
97 4
103 275
214 802
504 509
372 371
59 68
653 391
13 131
159 285
636 568
272 133
380 11
853 779
274 756
345 114
128 191
93 4
456 74
836 48
581 163
675 657
616 104
62 549
90 858
304 186
589 760
25 189
254 73
233 48
67 132
779 512
280 211
31 307
157 435
271 69
699 225
67 231
364 595
579 559
72 53
237 387
25 8
643 726
4 10
703 135
225 733
468 687
773 106
20 148
41 32
152 348
312 392
225 92
302 536
72 149
64 65
351 315
859 28
843 63
739 525
160 720
58 21
280 555
436 747
764 82
849 27
616 752
684 348
89 55
299 354
113 101
688 152
220 184
510 41
79 361
845 211
379 282
608 271
78 313
117 341
14 209
105 818
160 112
20 128
46 120
217 302
53 42
87 83
76 431
280 640
42 350
54 213
59 166
16 17
643 711
88 156
686 420
25 92
407 506
170 283
619 215
158 459
496 851
664 459
100 698
286 242
260 25
269 61
583 180
677 651
462 234
584 109
118 657
187 161
702 56
784 690
183 35
55 301
19 277
33 257
301 656
66 119
742 733
865 383
537 110
479 673
400 143
53 122
410 104
137 57
574 585
573 292
474 481
127 108
479 371
68 529
169 147
496 810
253 502
15 326
13 229
114 147
842 493
240 45
600 569
722 833
49 112
203 71
366 148
212 182
115 99
716 675
337 593
162 114
47 36
729 125
6 1
8 3
805 391
200 100
404 172
55 46
840 388
429 299
515 416
114 79
867 400
77 202
270 306
446 392
258 91
23 69
106 383
516 175
419 248
26 303
33 145
387 532
35 496
56 79
235 323
449 310
81 648
197 127
365 477
128 150
464 144
144 164
697 480
841 549
1 2
495 176
240 785
175 15
82 157
27 181
348 381
236 43
231 375
848 523
114 371
98 31
820 349
235 415
370 113
570 394
610 386
602 97
736 40
798 852
588 502
447 568
122 761
319 196
441 160
625 629
719 625
91 565
128 198
235 199
866 77
13 1
337 87
401 147
468 373
294 333
420 246
647 378
691 237
492 306
143 128
61 815
131 553
152 68
235 385
101 221
270 765
110 567
359 327
61 642
221 356
21 397
184 217
393 725
807 212
144 322
12 105
109 497
339 97
470 133
193 51
279 295
290 100
823 672
96 106
440 769
165 164
101 142
721 730
794 159
371 378
270 709
17 56
311 360
755 22
153 834
277 289
9 438
313 540
136 4
325 34
132 369
469 30
228 145
537 757
512 80
27 16
25 827
199 166
595 641
641 710
271 327
43 242
214 296
123 29
291 458
214 594
517 508
153 12
12 14
64 427
145 443
21 569
159 590
292 316
714 828
839 757
781 268
103 184
2 3
26 625
841 844
4 340
358 408
296 762
102 393
746 301
8 52
160 803
424 480
654 20
634 17
392 453
20 86
11 12
763 808
311 744
436 770
87 342
203 780
129 5
615 721
718 425
73 66
786 637
37 676
208 168
232 95
674 446
109 64
427 451
104 138
734 435
343 430
746 772
270 48
850 828
351 556
455 332
574 461
792 398
109 310
219 140
50 46
238 373
2 5
173 180
546 368
12 21
35 54
511 106
575 288
154 26
759 650
543 198
194 411
435 689
22 135
522 124
535 510
612 165
313 599
34 57
444 101
144 29
208 864
435 505
468 728
230 63
158 384
25 168
270 396
832 451
55 712
43 67
639 637
56 550
58 255
337 611
545 562
659 643
448 4
477 504
127 311
75 51
134 403
156 262
406 261
521 542
90 222
454 356
860 601
668 714
50 62
10 24
455 621
21 158
566 202
412 267
539 35
370 771
38 13
214 91
287 240
364 183
474 431
140 108
717 569
223 32
413 527
160 791
63 23
662 173
815 857
716 801
452 342
862 99
705 498
758 330
374 77
501 467
40 241
150 188
93 394
623 406
132 422
645 243
168 171
670 768
270 868
72 425
65 121
88 83
252 183
257 790
681 583
19 70
669 273
35 108
87 821
39 239
378 576
151 78
370 417
748 367
24 29
660 43
131 330
142 264
23 124
26 5
533 229
350 869
172 291
87 163
134 48
817 586
439 399
559 631
125 245
637 431
297 36
383 812
692 693
211 514
244 210
'''
# NOTE(review): legacy Python 2 snippet (a `print` statement follows below).
# `data` (a newline-separated list of "u v" edge pairs) and `n` (the total
# node count) are defined earlier in the file, outside this excerpt — confirm.
G=nx.Graph()
# Build an undirected graph from every non-empty line of the edge list.
for line in filter(None, data.split('\n')):
    values = line.split(' ')
    if len(values) == 2:
        G.add_edge(values[0], values[1])
nodes = G.number_of_nodes()
# Edges needed to join the existing connected components: (#components - 1).
# NOTE(review): len() over nx.connected_components() only works on old
# networkx releases where it returned a list; it is a generator in 2.x+.
number_of_edges = len(nx.connected_components(G)) -1
# Nodes never mentioned in `data` are isolated components; each needs one
# additional edge to become connected.
number_of_edges += (n - nodes)
print number_of_edges | 2.03125 | 2 |
hammer2cbt.py | datguywitha3ds/CBT-FX | 2 | 12772260 | #!/usr/bin/env python3
import argparse
# Game Boy note-name constants (FX_<note><octave>) as understood by CBT-FX,
# indexed by semitone number and covering octaves 0-5.
note_name = [
    "FX_C_0", "FX_Cs0", "FX_D_0", "FX_Ds0", "FX_E_0", "FX_F_0", "FX_Fs0", "FX_G_0", "FX_Gs0", "FX_A_0", "FX_As0", "FX_B_0",
    "FX_C_1", "FX_Cs1", "FX_D_1", "FX_Ds1", "FX_E_1", "FX_F_1", "FX_Fs1", "FX_G_1", "FX_Gs1", "FX_A_1", "FX_As1", "FX_B_1",
    "FX_C_2", "FX_Cs2", "FX_D_2", "FX_Ds2", "FX_E_2", "FX_F_2", "FX_Fs2", "FX_G_2", "FX_Gs2", "FX_A_2", "FX_As2", "FX_B_2",
    "FX_C_3", "FX_Cs3", "FX_D_3", "FX_Ds3", "FX_E_3", "FX_F_3", "FX_Fs3", "FX_G_3", "FX_Gs3", "FX_A_3", "FX_As3", "FX_B_3",
    "FX_C_4", "FX_Cs4", "FX_D_4", "FX_Ds4", "FX_E_4", "FX_F_4", "FX_Fs4", "FX_G_4", "FX_Gs4", "FX_A_4", "FX_As4", "FX_B_4",
    "FX_C_5", "FX_Cs5", "FX_D_5", "FX_Ds5", "FX_E_5", "FX_F_5", "FX_Fs5", "FX_G_5", "FX_Gs5", "FX_A_5", "FX_As5", "FX_B_5"]

# An FX Hammer effect holds at most 32 rows (frames); one parallel array per
# per-frame field, filled while parsing the .sav file below.
buffer_array_length = [0] * 32
buffer_array_ch2pan = [0] * 32
buffer_array_ch2vol = [0] * 32
buffer_array_ch2duty = [0] * 32
buffer_array_ch2note = [0] * 32
buffer_array_ch4pan = [0] * 32
buffer_array_ch4vol = [0] * 32
buffer_array_ch4freq = [0] * 32

# Human-readable labels indexed by the raw "channels used" byte read from
# the .sav file; only the four combinations below are assigned.
channels_used_string = [""] * 52
channels_used_string[0] = "No channels (...Empty?)"
channels_used_string[3] = "Noise channel"
channels_used_string[48] = "Duty channel 2"
channels_used_string[51] = "Duty channel 2 & Noise channel"

# Per-effect metadata, filled while parsing and read by update_c_header().
buffer_length = 0
buffer_channels_used = 0
buffer_priority = 0
def update_c_header(file, leng, b):
    """Return the C banner comment written at the top of the generated .c/.h.

    `file` is the effect name, `leng` its frame count and `b` the ROM bank.
    Reads `buffer_priority` and `buffer_channels_used` from module globals,
    so it must only be called after the .sav effect has been parsed.
    """
    return """/*
""" + file + """
Sound Effect File.
Info:
Length : """ + str(leng) + """
Bank : """ + str(b) + """
Priority : """ + str(buffer_priority) + """
Channels used : """ + channels_used_string[buffer_channels_used] + """
This file was generated by hammer2cbt
*/
"""
def clamp(n, smallest, largest):
    """Limit n to the inclusive range [smallest, largest]."""
    if n > largest:
        n = largest
    return n if n > smallest else smallest
# ---- command line ------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help = "Input FX Hammer .sav file", type = str)
parser.add_argument("-o", "--output", help = "Output folder for .c/.h files", type = str)
parser.add_argument("-b", "--bank", help = "Optional bank number", type = int)
parser.add_argument("-dp", "--dpath", help = "Optional path to CBT-FX (F.E '-dp include/cbtfx.h', default is 'cbtfx.h')", type = str)
parser.add_argument("-na", "--name", help = "Optional effect name (Default is 'SFX_' Followed by the effect number)", type = str)
parser.add_argument("-nu", "--number", help = "Effect number to export", type = int)
parser.add_argument("-lr", "--invert", help = "Invert FX Hammer pan values (Editor is inverted)")
args = parser.parse_args()

input_sav = args.input
output_folder = ""
if args.output:
    output_folder = args.output + "/"
# NOTE(review): truthiness checks below make an explicit zero ("-nu 0",
# "-b 0") indistinguishable from "not given"; harmless here since 0 is also
# the fallback value for both.
FXHammer_sfx = 0
if args.number:
    FXHammer_sfx = args.number
# Default effect name: "SFX_" plus zero-padded hex effect number.
buffer_name = "SFX_" + (("{0:X}").format(FXHammer_sfx)).zfill(2)
if args.name:
    buffer_name = args.name
buffer_bank = 0
if args.bank:
    buffer_bank = args.bank
default_path = "cbtfx.h"
if args.dpath:
    default_path = args.dpath

# ---- parse the FX Hammer save ------------------------------------------
FXHammer_file = open(input_sav, "rb")
# Effect rows start at 0x400; each effect occupies 256 bytes (32 rows).
FXHammer_file.seek(0x400 + (FXHammer_sfx * 256))
buffer_length = 0
for x in range(32):
    # A zero length byte terminates the effect early.
    buffer_array_length[x] = int.from_bytes(FXHammer_file.read(1), "big")
    if buffer_array_length[x] == 0:
        buffer_length += 1 # This is to avoid adding 1 to the length in the driver and save some space
        break
    buffer_array_ch2pan[x] = int.from_bytes(FXHammer_file.read(1), "big") >> 1
    buffer_array_ch2vol[x] = int.from_bytes(FXHammer_file.read(1), "big") >> 4
    buffer_array_ch2duty[x] = (int.from_bytes(FXHammer_file.read(1), "big") >> 6)
    # The note byte is offset by 0x40 and stored in half steps of the
    # note_name table index.
    buffer_array_ch2note[x] = (int.from_bytes(FXHammer_file.read(1), "big") - 0x40) / 2
    buffer_array_ch4pan[x] = int.from_bytes(FXHammer_file.read(1), "big") >> 3
    buffer_array_ch4vol[x] = int.from_bytes(FXHammer_file.read(1), "big") >> 4
    buffer_array_ch4freq[x] = int.from_bytes(FXHammer_file.read(1), "big")
    if args.invert:
        # Swap the two pan nibbles (the FX Hammer editor shows L/R mirrored).
        buffer_array_ch2pan[x] =((buffer_array_ch2pan[x] & 0x0F) << 4 | (buffer_array_ch2pan[x] & 0xF0) >> 4)
        buffer_array_ch4pan[x] =((buffer_array_ch4pan[x] & 0x0F) << 4 | (buffer_array_ch4pan[x] & 0xF0) >> 4)
    buffer_length += 1
# Per-effect metadata tables: channel-usage bytes at 0x300, priority at 0x200.
FXHammer_file.seek(0x300 + FXHammer_sfx)
buffer_channels_used = int.from_bytes(FXHammer_file.read(1), "big")
FXHammer_file.seek(0x200 + FXHammer_sfx)
buffer_priority = int.from_bytes(FXHammer_file.read(1), "big")
FXHammer_file.close()
# ---- emit the .c file ----------------------------------------------------
C_file_out = open(output_folder + buffer_name + ".c", "w")
if buffer_bank > 0:
    C_file_out.write("#pragma bank " + str(buffer_bank) + "\n")
C_file_out.write(update_c_header(buffer_name, buffer_length, buffer_bank))
# Convenience #defines mirroring the effect metadata, plus a play macro.
C_file_out.write("""#define """ + buffer_name + """_Length """ + str(buffer_length) + """\n#define """ + buffer_name + """_Bank """ + str(buffer_bank) + """\n#define """ + buffer_name + """_CH_Used """ + str(buffer_channels_used << 2 | buffer_channels_used) + """\n#define """ + buffer_name + """_Priority """ + str(buffer_priority))
C_file_out.write("\n#define CBTFX_PLAY_" + buffer_name + " CBTFX_init(&" + buffer_name + "[0][0], " + str(buffer_length) + ", " + str(buffer_priority) + ", " + str(buffer_channels_used << 2 | buffer_channels_used) + ")")
C_file_out.write('\n#include "' + default_path + '"\n')
if buffer_bank > 0:
    # SDCC banked-build symbol pinning the data to the requested bank.
    C_file_out.write("\nconst void __at(" + str(buffer_bank) +") __bank_" + buffer_name +";")
C_file_out.write("\nconst unsigned char " + buffer_name +"[" + str(buffer_length) +"][CBTFX_LENGTH] = {\n")
for x in range(0, 32):
    if buffer_array_length[x] == 0:
        break
    C_file_out.write(" CBTFX_FRAME(" + str(buffer_array_length[x]) + ", " + str(buffer_array_ch2pan[x]) + ", " + str(buffer_array_ch2vol[x]) + ", " + str(buffer_array_ch2duty[x]) + ", " + str(note_name[int(buffer_array_ch2note[x])]) + ", " + str(buffer_array_ch4pan[x]) + ", " + str(buffer_array_ch4vol[x])+ ", " + str(buffer_array_ch4freq[x]) +")")
    # BUGFIX: the previous lookahead used buffer_array_length[clamp(x + 1, 0, 32)];
    # the clamp upper bound of 32 indexes past the 32-entry array (valid
    # indices 0..31), raising IndexError whenever an effect used all 32 rows.
    # An explicit bounds check on the lookahead index avoids that.
    if x + 1 < 32 and buffer_array_length[x + 1] != 0:
        C_file_out.write(",\n")
    else:
        break
C_file_out.write("\n};")
C_file_out.close()
# ---- emit the matching .h file ------------------------------------------
H_file_out = open(output_folder + buffer_name + ".h", "w")
H_file_out.write(update_c_header(buffer_name, buffer_length, buffer_bank))
# Classic include guard named after the effect.
H_file_out.write("#ifndef __" + buffer_name + "_h_INCLUDE\n")
H_file_out.write("#define __" + buffer_name + "_h_INCLUDE\n")
# Same metadata #defines and play macro as the .c file, plus extern decls.
H_file_out.write("""#define """ + buffer_name + """_Length """ + str(buffer_length) + """\n#define """ + buffer_name + """_Bank """ + str(buffer_bank) + """\n#define """ + buffer_name + """_CH_Used """ + str(buffer_channels_used << 2 | buffer_channels_used) + """\n#define """ + buffer_name + """_Priority """ + str(buffer_priority))
H_file_out.write("\n#define CBTFX_PLAY_" + buffer_name + " CBTFX_init(&" + buffer_name + "[0][0], " + str(buffer_length) + ", " + str(buffer_priority) + ", " + str(buffer_channels_used << 2 | buffer_channels_used) + ")")
if buffer_bank > 0:
    H_file_out.write("\nextern const void __bank_" + buffer_name +";")
H_file_out.write("\nextern const unsigned char " + buffer_name +"[" + str(buffer_length) +"][CBTFX_LENGTH];\n")
H_file_out.write("#endif")
H_file_out.close()
| 1.671875 | 2 |
ml_service/app/auxiliary/celery_tools.py | AsAsgard/trading_pr | 2 | 12772261 | #!/usr/bin/env python
# coding: utf-8
import smtplib
import datetime
from dateutil.parser import parse
from flask_mail import Mail, Message
from app.auxiliary.query_tools import calc_time
from app.logger import Logger
def sendEmail(email, msg_body):
    """Send `msg_body` to `email` via the Flask application's mailer.

    Best effort: the listed SMTP delivery failures are deliberately
    swallowed so a mail outage cannot fail the surrounding Celery task.
    """
    # Imported lazily to avoid a circular import with the Flask app module.
    from app.fl_app import application
    mail = Mail(application)
    with application.app_context():
        msg = Message('Response from Trading Project', recipients=[email])
        msg.body = msg_body
        try:
            mail.send(msg)
        except (smtplib.SMTPHeloError, smtplib.SMTPRecipientsRefused,
                smtplib.SMTPSenderRefused, smtplib.SMTPDataError):
            pass
def celeryLogFailAndEmail(task_id, start_time, email, ex_name):
    """E-mail the user that the task failed and log the failure.

    `start_time` is passed to calc_time() to log the elapsed duration;
    `ex_name` is the name of the exception that caused the failure.
    """
    message = f"Your request was failed. To know why - you can make a status request with your task ID: {task_id}"
    sendEmail(email, message)
    Logger.info(f"Response: Celery task failed. task_id: <{task_id}>; exc_name: <{ex_name}>; "
                f"time: <{calc_time(start_time)} ms>")
def celeryLogSuccessAndEmail(task_id, start_time, email, result):
    """E-mail the task's `result` to the user and log the success."""
    message = f"Your request successed! The result is:\n" \
              f"{result}"
    sendEmail(email, message)
    Logger.info(f"Response: Query successed. query_id: <{task_id}>; "
                f"time: <{calc_time(start_time)} ms>")
def getDateAndTimeByKey(parameters, dt_key, task_id):
    """Extract optional "date<dt_key>" / "time<dt_key>" values from `parameters`.

    Returns a (date_or_None, time_or_None) tuple. Unparsable values are
    logged (with `task_id` for correlation) and treated as absent rather
    than failing the task.
    """
    dateValue = None
    timeValue = None
    if parameters.get("".join(["date", dt_key])):
        try:
            dateValue = parse(parameters.get("".join(["date", dt_key]))).date()
        except ValueError:
            Logger.warn(f"Bad date format. Continue running task without it. task_id: {task_id} \n"
                        f"key=<{''.join(['date', dt_key])}>; value=<{parameters.get(''.join(['date', dt_key]))}>")
    if parameters.get("".join(["time", dt_key])):
        try:
            # "1970-01-01" is a dummy date so dateutil can parse a bare time.
            timeValue = parse(" ".join(["1970-01-01", parameters.get("".join(["time", dt_key]))])).time()
        except ValueError:
            Logger.warn(f"Bad time format. Continue running task without it. task_id: {task_id} \n"
                        f"key=<{''.join(['time', dt_key])}>; value=<{parameters.get(''.join(['time', dt_key]))}>")
    Logger.debug(dateValue)
    Logger.debug(timeValue)
    return (dateValue, timeValue)
def addWhereToExpression(expr, tickers, dateFrom, timeFrom, dateTo, timeTo):
    """Append a WHERE clause filtering by tickers and a date/time window.

    Returns the extended SQL string, or None when the requested window is
    inconsistent (To before From, or a mixed date/time combination that
    cannot form a range).

    SECURITY NOTE(review): ticker values are interpolated directly into the
    SQL text (f"ticker='{...}'") — if `tickers` can come from user input
    this is an SQL-injection vector; parameterized queries should be used.

    NOTE(review): when `tickers` is empty but a date/time window is given,
    the clause starts with "WHERE ( AND (...)" — callers presumably always
    pass tickers; confirm against the call sites.
    """
    expr = " ".join([expr, "WHERE "])
    expr = "".join([expr, "("])
    if tickers:
        # OR together one equality test per requested ticker.
        expr = "".join([expr, "("])
        for i in range(len(tickers)):
            if i != 0:
                expr = " ".join([expr, "OR", f"ticker='{tickers[i]}'"])
            else:
                expr = "".join([expr, f"ticker='{tickers[i]}'"])
        expr = "".join([expr, ")"])
    if dateFrom or dateTo or timeFrom or timeTo:
        expr = " ".join([expr, "AND", "("])
        # needOR / needAND track whether the next fragment must be joined
        # with a connective to the previously emitted fragment.
        needOR = False
        needAND = False
        # A bare end-time with only a start-date (or vice versa) does not
        # describe a well-formed window.
        if (dateFrom and not dateTo and timeTo) or \
           (dateTo and not dateFrom and timeFrom):
            return None
        # Missing times default to the whole day's bounds.
        if dateFrom and not timeFrom:
            timeFrom = datetime.time(0, 0, 0)
        if dateTo and not timeTo:
            timeTo = datetime.time(23, 59, 59)
        Logger.debug(dateFrom)
        Logger.debug(dateTo)
        Logger.debug(timeFrom)
        Logger.debug(timeTo)
        # Reject an inverted window.
        if dateFrom and dateTo:
            if dateFrom > dateTo or (dateFrom == dateTo and timeFrom > timeTo):
                return None
        # Full days strictly between the two endpoints.
        if not dateFrom or not dateTo or dateTo != dateFrom:
            if dateFrom or dateTo:
                expr = "".join([expr, "("])
            if dateFrom:
                expr = "".join([expr, f"date>'{dateFrom.strftime('%Y-%m-%d')}'"])
                needAND = True
            if dateTo:
                if needAND:
                    expr = " ".join([expr, "AND"])
                expr = " ".join([expr, f"date<'{dateTo.strftime('%Y-%m-%d')}'"])
            if dateFrom or dateTo:
                expr = "".join([expr, ")"])
                needOR = True
                needAND = False
        # Left edge: the (possibly partial) opening day.
        if dateFrom or timeFrom:
            if needOR:
                expr = " ".join([expr, "OR"])
                needOR = False
            expr = " ".join([expr, "("])
            if dateFrom:
                expr = "".join([expr, f"date='{dateFrom.strftime('%Y-%m-%d')}'"])
                needAND = True
            if timeFrom:
                if needAND:
                    expr = " ".join([expr, "AND"])
                    needAND = False
                expr = " ".join([expr, f"time>='{timeFrom.strftime('%H:%M:%S')}'"])
                needAND = True
            # Single-day window: close the range with the end time too.
            if dateFrom and dateTo and dateTo == dateFrom:
                if needAND:
                    expr = " ".join([expr, "AND"])
                    needAND = False
                expr = " ".join([expr, f"time<='{timeTo.strftime('%H:%M:%S')}'"])
                needAND = True
        if dateFrom or timeFrom:
            expr = "".join([expr, ")"])
            needOR = True
            needAND = False
        # Right edge: the (possibly partial) closing day, unless it is the
        # same day as the opening one (already handled above).
        if not dateFrom or not dateTo or dateTo != dateFrom:
            if dateTo or timeTo:
                if needOR:
                    expr = " ".join([expr, "OR"])
                    needOR = False
                expr = " ".join([expr, "("])
            if dateTo:
                expr = "".join([expr, f"date='{dateTo.strftime('%Y-%m-%d')}'"])
                needAND = True
            if timeTo:
                if needAND:
                    expr = " ".join([expr, "AND"])
                expr = " ".join([expr, f"time<='{timeTo.strftime('%H:%M:%S')}'"])
            if dateTo or timeTo:
                expr = "".join([expr, ")"])
                needOR = True
                needAND = False
        expr = " ".join([expr, ")"])
    expr = "".join([expr, ")"])
    return expr
| 2.59375 | 3 |
sdk/python/pulumi_equinix_metal/organization.py | pulumi/pulumi-equinix-metal | 1 | 12772262 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['OrganizationArgs', 'Organization']
@pulumi.input_type
class OrganizationArgs:
    # NOTE: this class is emitted by the Pulumi Terraform Bridge (tfgen) —
    # see the file header; keep edits in sync with the generator.
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 logo: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 twitter: Optional[pulumi.Input[str]] = None,
                 website: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Organization resource.
        :param pulumi.Input[str] description: Description string
        :param pulumi.Input[str] logo: Logo URL
        :param pulumi.Input[str] name: The name of the Organization
        :param pulumi.Input[str] twitter: Twitter handle
        :param pulumi.Input[str] website: Website link
        """
        # Only explicitly provided arguments are stored; unset (None) inputs
        # are omitted so the provider applies its own defaults.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if logo is not None:
            pulumi.set(__self__, "logo", logo)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if twitter is not None:
            pulumi.set(__self__, "twitter", twitter)
        if website is not None:
            pulumi.set(__self__, "website", website)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description string
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def logo(self) -> Optional[pulumi.Input[str]]:
        """
        Logo URL
        """
        return pulumi.get(self, "logo")

    @logo.setter
    def logo(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "logo", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Organization
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def twitter(self) -> Optional[pulumi.Input[str]]:
        """
        Twitter handle
        """
        return pulumi.get(self, "twitter")

    @twitter.setter
    def twitter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "twitter", value)

    @property
    @pulumi.getter
    def website(self) -> Optional[pulumi.Input[str]]:
        """
        Website link
        """
        return pulumi.get(self, "website")

    @website.setter
    def website(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "website", value)
@pulumi.input_type
class _OrganizationState:
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen); unlike
    # OrganizationArgs this also carries the provider-reported output
    # timestamps `created` and `updated`.
    def __init__(__self__, *,
                 created: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 logo: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 twitter: Optional[pulumi.Input[str]] = None,
                 updated: Optional[pulumi.Input[str]] = None,
                 website: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Organization resources.
        :param pulumi.Input[str] description: Description string
        :param pulumi.Input[str] logo: Logo URL
        :param pulumi.Input[str] name: The name of the Organization
        :param pulumi.Input[str] twitter: Twitter handle
        :param pulumi.Input[str] website: Website link
        """
        # Only explicitly provided values are stored; None means "unset".
        if created is not None:
            pulumi.set(__self__, "created", created)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if logo is not None:
            pulumi.set(__self__, "logo", logo)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if twitter is not None:
            pulumi.set(__self__, "twitter", twitter)
        if updated is not None:
            pulumi.set(__self__, "updated", updated)
        if website is not None:
            pulumi.set(__self__, "website", website)

    # `created` / `updated` are server-side timestamps reported by the API.
    @property
    @pulumi.getter
    def created(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "created")

    @created.setter
    def created(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description string
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def logo(self) -> Optional[pulumi.Input[str]]:
        """
        Logo URL
        """
        return pulumi.get(self, "logo")

    @logo.setter
    def logo(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "logo", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Organization
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def twitter(self) -> Optional[pulumi.Input[str]]:
        """
        Twitter handle
        """
        return pulumi.get(self, "twitter")

    @twitter.setter
    def twitter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "twitter", value)

    @property
    @pulumi.getter
    def updated(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "updated")

    @updated.setter
    def updated(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "updated", value)

    @property
    @pulumi.getter
    def website(self) -> Optional[pulumi.Input[str]]:
        """
        Website link
        """
        return pulumi.get(self, "website")

    @website.setter
    def website(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "website", value)
class Organization(pulumi.CustomResource):
    """Manages an Equinix Metal organization (generated by tfgen)."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 logo: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 twitter: Optional[pulumi.Input[str]] = None,
                 website: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a resource to manage organization resource in Equinix Metal.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_equinix_metal as equinix_metal

        # Create a new Project
        tf_organization1 = equinix_metal.Organization("tfOrganization1", description="quux")
        ```

        ## Import

        This resource can be imported using an existing organization ID

        ```sh
         $ pulumi import equinix-metal:index/organization:Organization metal_organization {existing_organization_id}
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: Description string
        :param pulumi.Input[str] logo: Logo URL
        :param pulumi.Input[str] name: The name of the Organization
        :param pulumi.Input[str] twitter: Twitter handle
        :param pulumi.Input[str] website: Website link
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[OrganizationArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a resource to manage organization resource in Equinix Metal.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_equinix_metal as equinix_metal

        # Create a new Project
        tf_organization1 = equinix_metal.Organization("tfOrganization1", description="quux")
        ```

        ## Import

        This resource can be imported using an existing organization ID

        ```sh
         $ pulumi import equinix-metal:index/organization:Organization metal_organization {existing_organization_id}
        ```

        :param str resource_name: The name of the resource.
        :param OrganizationArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatcher for the two overloads above: accepts either an
        # OrganizationArgs object or the individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(OrganizationArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       logo: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       twitter: Optional[pulumi.Input[str]] = None,
                       website: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared implementation backing both __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ must not be pre-supplied.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = OrganizationArgs.__new__(OrganizationArgs)

            __props__.__dict__["description"] = description
            __props__.__dict__["logo"] = logo
            __props__.__dict__["name"] = name
            __props__.__dict__["twitter"] = twitter
            __props__.__dict__["website"] = website
            # Output-only timestamps are filled in by the provider.
            __props__.__dict__["created"] = None
            __props__.__dict__["updated"] = None
        super(Organization, __self__).__init__(
            'equinix-metal:index/organization:Organization',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            created: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            logo: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            twitter: Optional[pulumi.Input[str]] = None,
            updated: Optional[pulumi.Input[str]] = None,
            website: Optional[pulumi.Input[str]] = None) -> 'Organization':
        """
        Get an existing Organization resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: Description string
        :param pulumi.Input[str] logo: Logo URL
        :param pulumi.Input[str] name: The name of the Organization
        :param pulumi.Input[str] twitter: Twitter handle
        :param pulumi.Input[str] website: Website link
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _OrganizationState.__new__(_OrganizationState)

        __props__.__dict__["created"] = created
        __props__.__dict__["description"] = description
        __props__.__dict__["logo"] = logo
        __props__.__dict__["name"] = name
        __props__.__dict__["twitter"] = twitter
        __props__.__dict__["updated"] = updated
        __props__.__dict__["website"] = website
        return Organization(resource_name, opts=opts, __props__=__props__)

    # `created` / `updated` are read-only timestamps reported by the API.
    @property
    @pulumi.getter
    def created(self) -> pulumi.Output[str]:
        return pulumi.get(self, "created")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Description string
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def logo(self) -> pulumi.Output[Optional[str]]:
        """
        Logo URL
        """
        return pulumi.get(self, "logo")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the Organization
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def twitter(self) -> pulumi.Output[Optional[str]]:
        """
        Twitter handle
        """
        return pulumi.get(self, "twitter")

    @property
    @pulumi.getter
    def updated(self) -> pulumi.Output[str]:
        return pulumi.get(self, "updated")

    @property
    @pulumi.getter
    def website(self) -> pulumi.Output[Optional[str]]:
        """
        Website link
        """
        return pulumi.get(self, "website")
| 2.109375 | 2 |
test/test_polar_decoding.py | NVlabs/sionna | 163 | 12772263 | #
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Make `sionna` importable from a source checkout when it is not installed.
try:
    import sionna
except ImportError as e:
    import sys
    sys.path.append("../")

import tensorflow as tf
# Restrict TensorFlow to a single GPU and enable memory growth so the test
# process does not grab all device memory up front.
gpus = tf.config.list_physical_devices('GPU')
print('Number of GPUs available :', len(gpus))
if gpus:
    gpu_num = 0 # Number of the GPU to be used
    try:
        tf.config.set_visible_devices(gpus[gpu_num], 'GPU')
        print('Only GPU number', gpu_num, 'used.')
        tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
    except RuntimeError as e:
        # Visible devices must be set before GPUs are initialized.
        print(e)
import unittest
import pytest # for pytest filterwarnings
import numpy as np
from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder
from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder
from sionna.fec.polar.decoding import Polar5GDecoder
from sionna.fec.crc import CRCEncoder
from sionna.fec.utils import GaussianPriorSource
from sionna.utils import BinarySource
from sionna.fec.polar.utils import generate_5g_ranking
class TestPolarDecodingSC(unittest.TestCase):
def test_invalid_inputs(self):
"""Test against invalid values of n and frozen_pos."""
# frozen vec to long
n = 32
frozen_pos = np.arange(n+1)
with self.assertRaises(AssertionError):
PolarSCDecoder(frozen_pos, n)
# n not a pow of 2
# frozen vec to long
n = 32
k = 12
frozen_pos,_ = generate_5g_ranking(k, n)
with self.assertRaises(AssertionError):
PolarSCDecoder(frozen_pos, n+1)
# test valid shapes
# (k, n)
param_valid = [[0, 32], [10, 32], [32, 32], [100, 256],
[123, 1024], [1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
PolarSCDecoder(frozen_pos, p[1])
# no complex-valued input allowed
with self.assertRaises(ValueError):
frozen_pos,_ = generate_5g_ranking(32, 64)
PolarSCDecoder(frozen_pos, 64, output_dtype=tf.complex64)
def test_output_dim(self):
"""Test that output dims are correct (=n) and output equals all-zero
codeword."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
dec = PolarSCDecoder(frozen_pos, p[1])
c = -10. * np.ones([bs, p[1]]) # all-zero with BPSK (no noise);logits
u = dec(c).numpy()
self.assertTrue(u.shape[-1]==p[0])
# also check that all-zero input yields all-zero output
u_hat = np.zeros([bs, p[0]])
self.assertTrue(np.array_equal(u, u_hat))
def test_numerical_stab(self):
"""Test for numerical stability (no nan or infty as output)."""
bs = 10
# (k,n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256]]
source = GaussianPriorSource()
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
dec = PolarSCDecoder(frozen_pos, p[1])
# case 1: extremely large inputs
c = source([[bs, p[1]], 0.0001])
# llrs
u1 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u1)))
#no inftfy
self.assertFalse(np.any(np.isinf(u1)))
self.assertFalse(np.any(np.isneginf(u1)))
# case 2: zero llr input
c = tf.zeros([bs, p[1]])
# llrs
u2 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u2)))
#no inftfy
self.assertFalse(np.any(np.isinf(u2)))
self.assertFalse(np.any(np.isneginf(u2)))
def test_identity(self):
"""test that info bits can be recovered if no noise is added."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for p in param_valid:
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
enc = PolarEncoder(frozen_pos, p[1])
dec = PolarSCDecoder(frozen_pos, p[1])
u = source([bs, p[0]])
c = enc(u)
llr_ch = 20.*(2.*c-1) # demod BPSK witout noise
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(), u_hat.numpy()))
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 100
n = 128
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = PolarSCDecoder(frozen_pos, n)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs, n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1, n])
model(b2)
model.summary()
def test_multi_dimensional(self):
"""Test against arbitrary shapes.
"""
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarSCDecoder(frozen_pos, n)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
# encode 2D Tensor
c = dec(b).numpy()
# encode 4D Tensor
c_res = dec(b_res).numpy()
# and reshape to 2D shape
c_res = tf.reshape(c_res, [100, k])
# both version should yield same result
self.assertTrue(np.array_equal(c, c_res))
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarSCDecoder(frozen_pos, n)
b = source([1,15,n])
b_rep = tf.tile(b, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(b_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_tf_fun(self):
"""Test that graph mode works and xla is supported."""
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
bs = 10
k = 100
n = 128
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
dec = PolarSCDecoder(frozen_pos, n)
u = source([bs, n])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
# run same test for XLA (jit_compile=True)
u = source([bs, n])
x = run_graph_xla(u).numpy()
x = run_graph_xla(u).numpy()
u = source([bs+1, n])
x = run_graph_xla(u).numpy()
def test_ref_implementation(self):
"""Test against pre-calculated results from internal implementation.
"""
ref_path = '../test/codes/polar/'
filename = ["P_128_37", "P_128_110", "P_256_128"]
for f in filename:
A = np.load(ref_path + f + "_Avec.npy")
llr_ch = np.load(ref_path + f + "_Lch.npy")
u_hat = np.load(ref_path + f + "_uhat.npy")
frozen_pos = np.array(np.where(A==0)[0])
info_pos = np.array(np.where(A==1)[0])
n = len(frozen_pos) + len(info_pos)
k = len(info_pos)
dec = PolarSCDecoder(frozen_pos, n)
l_in = -1. * llr_ch # logits
u_hat_tf = dec(l_in).numpy()
# the output should be equal to the reference
self.assertTrue(np.array_equal(u_hat_tf, u_hat))
def test_dtype_flexible(self):
    """Test that output_dtype can be flexible.

    All combinations of supported float input/output dtypes are checked;
    complex-valued inputs must raise a TypeError.
    """
    batch_size, k, n = 100, 30, 64
    source = GaussianPriorSource()
    frozen_pos, _ = generate_5g_ranking(k, n)
    float_dtypes = (tf.float16, tf.float32, tf.float64)
    for dtype_in in float_dtypes:
        for dtype_out in float_dtypes:
            llr = tf.cast(source([[batch_size, n], 0.5]), dtype_in)
            decoder = PolarSCDecoder(frozen_pos, n, output_dtype=dtype_out)
            # output dtype must follow the requested output_dtype
            self.assertTrue(decoder(llr).dtype == dtype_out)
    # complex-valued inputs must be rejected
    llr = source([[batch_size, n], 0.5])
    decoder = PolarSCDecoder(frozen_pos, n, output_dtype=tf.float32)
    with self.assertRaises(TypeError):
        decoder(tf.complex(llr, tf.zeros_like(llr)))
class TestPolarDecodingSCL(unittest.TestCase):
    """Test the successive cancellation list (SCL) polar decoder."""

    # Filter warnings related to large resource allocation
    @pytest.mark.filterwarnings("ignore: Required ressource allocation")
    def test_invalid_inputs(self):
        """Test against invalid values of n and frozen_pos."""
        # frozen vec too long
        n = 32
        frozen_pos = np.arange(n+1)
        with self.assertRaises(AssertionError):
            PolarSCLDecoder(frozen_pos, n)
        # n not a pow of 2
        # frozen vec too long
        n = 32
        k = 12
        frozen_pos,_ = generate_5g_ranking(k, n)
        with self.assertRaises(AssertionError):
            PolarSCLDecoder(frozen_pos, n+1)
        # also test valid shapes
        # (k, n)
        param_valid = [[0, 32], [10, 32], [32, 32], [100, 256],
                       [123, 1024], [1024, 1024]]
        for p in param_valid:
            frozen_pos, _ = generate_5g_ranking(p[0],p[1])
            PolarSCLDecoder(frozen_pos, p[1])
        # no complex-valued input allowed
        with self.assertRaises(ValueError):
            frozen_pos,_ = generate_5g_ranking(32, 64)
            PolarSCLDecoder(frozen_pos, 64, output_dtype=tf.complex64)

    # Filter warnings related to large resource allocation
    @pytest.mark.filterwarnings("ignore: Required ressource allocation")
    def test_output_dim(self):
        """Test that output dims are correct (=n) and output is the all-zero
        codeword."""
        bs = 10
        # (k, n)
        param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
                       [1024, 1024]]
        # use_hybrid, use_fast_scl, cpu_only, use_scatter
        for p in param_valid:
            frozen_pos, _ = generate_5g_ranking(p[0], p[1])
            for use_fast_scl in [False, True]:
                for cpu_only in [False, True]:
                    for use_scatter in [False, True]:
                        dec = PolarSCLDecoder(frozen_pos,
                                              p[1],
                                              use_fast_scl=use_fast_scl,
                                              cpu_only=cpu_only,
                                              use_scatter=use_scatter)
                        # all-zero with BPSK (no noise);logits
                        c = -10. * np.ones([bs, p[1]])
                        u = dec(c).numpy()
                        # check shape
                        self.assertTrue(u.shape[-1]==p[0])
                        # also check that all-zero input yields all-zero
                        u_hat = np.zeros([bs, p[0]])
                        self.assertTrue(np.array_equal(u, u_hat))
        # also test different list sizes
        n = 32
        k = 16
        frozen_pos, _ = generate_5g_ranking(k, n)
        list_sizes = [1, 2, 8, 32]
        for list_size in list_sizes:
            for use_fast_scl in [False, True]:
                for cpu_only in [False, True]:
                    for use_scatter in [False, True]:
                        dec = PolarSCLDecoder(frozen_pos,
                                              n,
                                              list_size=list_size,
                                              use_fast_scl=use_fast_scl,
                                              cpu_only=cpu_only,
                                              use_scatter=use_scatter)
                        # all-zero with BPSK (no noise);logits
                        c = -10. * np.ones([bs, n])
                        u = dec(c).numpy()
                        self.assertTrue(u.shape[-1]==k)
                        # also check that all-zero input yields all-zero
                        u_hat = np.zeros([bs, k])
                        self.assertTrue(np.array_equal(u, u_hat))

    # Filter warnings related to large resource allocation
    @pytest.mark.filterwarnings("ignore: Required ressource allocation")
    def test_numerical_stab(self):
        """Test for numerical stability (no nan or infty as output)"""
        bs = 10
        # (k, n)
        param_valid = [[1, 32], [10, 32], [32, 32], [100, 256]]
        source = GaussianPriorSource()
        for p in param_valid:
            frozen_pos, _ = generate_5g_ranking(p[0], p[1])
            for use_fast_scl in [False, True]:
                for cpu_only in [False, True]:
                    for use_scatter in [False, True]:
                        dec = PolarSCLDecoder(frozen_pos,
                                              p[1],
                                              use_fast_scl=use_fast_scl,
                                              cpu_only=cpu_only,
                                              use_scatter=use_scatter)
                        # case 1: extremely large inputs
                        c = source([[bs, p[1]], 0.0001])
                        # llrs
                        u1 = dec(c).numpy()
                        # no nan
                        self.assertFalse(np.any(np.isnan(u1)))
                        # no infinity
                        self.assertFalse(np.any(np.isinf(u1)))
                        self.assertFalse(np.any(np.isneginf(u1)))
                        # case 2: zero input
                        c = tf.zeros([bs, p[1]])
                        # llrs
                        u2 = dec(c).numpy()
                        # no nan
                        self.assertFalse(np.any(np.isnan(u2)))
                        # no infinity
                        self.assertFalse(np.any(np.isinf(u2)))
                        self.assertFalse(np.any(np.isneginf(u2)))

    # Filter warnings related to large resource allocation
    @pytest.mark.filterwarnings("ignore: Required ressource allocation")
    def test_identity(self):
        """Test that info bits can be recovered if no noise is added."""
        bs = 10
        # (k,n)
        param_valid = [[1, 32], [10, 32], [32, 32], [100, 256]]
        source = BinarySource()
        # use_hybrid, use_fast_scl, cpu_only, use_scatter
        for p in param_valid:
            frozen_pos, _ = generate_5g_ranking(p[0], p[1])
            enc = PolarEncoder(frozen_pos, p[1])
            u = source([bs, p[0]])
            c = enc(u)
            llr_ch = 200.*(2.*c-1) # demod BPSK without noise
            for use_fast_scl in [False, True]:
                for cpu_only in [False, True]:
                    for use_scatter in [False, True]:
                        dec = PolarSCLDecoder(frozen_pos,
                                              p[1],
                                              use_fast_scl=use_fast_scl,
                                              cpu_only=cpu_only,
                                              use_scatter=use_scatter)
                        u_hat = dec(llr_ch)
                        self.assertTrue(np.array_equal(u.numpy(),
                                                       u_hat.numpy()))
        # also test different list sizes (with CRC-aided decoding)
        n = 32
        k = 16
        crc_degree = "CRC11"
        frozen_pos, _ = generate_5g_ranking(k, n)
        enc = PolarEncoder(frozen_pos, n)
        enc_crc = CRCEncoder(crc_degree)
        u = source([bs, k-enc_crc.crc_length])
        u_crc = enc_crc(u)
        c = enc(u_crc)
        llr_ch = 200.*(2.*c-1) # demod BPSK without noise
        list_sizes = [1, 2, 8, 32]
        for list_size in list_sizes:
            for use_fast_scl in [False, True]:
                for cpu_only in [False, True]:
                    for use_scatter in [False, True]:
                        dec = PolarSCLDecoder(frozen_pos,
                                              n,
                                              list_size=list_size,
                                              use_fast_scl=use_fast_scl,
                                              cpu_only=cpu_only,
                                              use_scatter=use_scatter,
                                              crc_degree=crc_degree)
                        u_hat = dec(llr_ch)
                        self.assertTrue(np.array_equal(u_crc.numpy(),
                                                       u_hat.numpy()))

    def test_keras(self):
        """Test that Keras model can be compiled (supports dynamic shapes)."""
        bs = 10
        k = 16
        n = 32
        for use_fast_scl in [False, True]:
            for cpu_only in [False, True]:
                for use_scatter in [False, True]:
                    source = BinarySource()
                    frozen_pos, _ = generate_5g_ranking(k, n)
                    inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
                    x = PolarSCLDecoder(frozen_pos,
                                        n,
                                        use_fast_scl=use_fast_scl,
                                        cpu_only=cpu_only,
                                        use_scatter=use_scatter)(inputs)
                    model = tf.keras.Model(inputs=inputs, outputs=x)
                    b = source([bs,n])
                    model(b)
                    # call twice to see that bs can change
                    b2 = source([bs+1,n])
                    model(b2)
                    model.summary()

    # Filter warnings related to large resource allocation
    @pytest.mark.filterwarnings("ignore: Required ressource allocation")
    def test_multi_dimensional(self):
        """Test against multi-dimensional input shapes.
        As reshaping is done before calling the actual decoder, no exhaustive
        testing against all decoder options is required.
        """
        k = 120
        n = 256
        frozen_pos, _ = generate_5g_ranking(k, n)
        source = BinarySource()
        dec = PolarSCLDecoder(frozen_pos, n)
        b = source([100, n])
        b_res = tf.reshape(b, [4, 5, 5, n])
        # encode 2D Tensor
        c = dec(b).numpy()
        # encode 4D Tensor
        c_res = dec(b_res).numpy()
        # and reshape to 2D shape
        c_res = tf.reshape(c_res, [100, k])
        # both version should yield same result
        self.assertTrue(np.array_equal(c, c_res))

    def test_batch(self):
        """Test that all samples in batch yield same output (for same input).
        """
        bs = 100
        k = 78
        n = 128
        frozen_pos, _ = generate_5g_ranking(k, n)
        source = BinarySource()
        for use_fast_scl in [False, True]:
            for cpu_only in [False, True]:
                for use_scatter in [False, True]:
                    dec = PolarSCLDecoder(frozen_pos,
                                          n,
                                          use_fast_scl=use_fast_scl,
                                          cpu_only=cpu_only,
                                          use_scatter=use_scatter)
                    b = source([1,15,n])
                    b_rep = tf.tile(b, [bs, 1, 1])
                    # and run tf version (to be tested)
                    c = dec(b_rep).numpy()
                    for i in range(bs):
                        self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))

    def test_tf_fun(self):
        """Test that graph mode works and XLA is supported."""
        bs = 10
        k = 16
        n = 32
        source = BinarySource()
        frozen_pos, _ = generate_5g_ranking(k, n)
        crc_degrees = [None, "CRC11"]
        for crc_degree in crc_degrees:
            for use_fast_scl in [False, True]:
                for cpu_only in [False, True]:
                    for use_scatter in [False, True]:
                        # NOTE: `dec` is resolved lazily at call time, so it
                        # may be assigned after the @tf.function wrappers.
                        @tf.function
                        def run_graph(u):
                            return dec(u)
                        @tf.function(jit_compile=True)
                        def run_graph_xla(u):
                            return dec(u)
                        dec = PolarSCLDecoder(frozen_pos,
                                              n,
                                              use_fast_scl=use_fast_scl,
                                              cpu_only=cpu_only,
                                              use_scatter=use_scatter,
                                              crc_degree=crc_degree)
                        # test that for arbitrary input only binary values are
                        # returned
                        u = source([bs, n])
                        x = run_graph(u).numpy()
                        # execute the graph twice
                        x = run_graph(u).numpy()
                        # and change batch_size
                        u = source([bs+1, n])
                        x = run_graph(u).numpy()
                        if not cpu_only: # cpu only does not support XLA
                            # run same test for XLA (jit_compile=True)
                            u = source([bs, n])
                            x = run_graph_xla(u).numpy()
                            x = run_graph_xla(u).numpy()
                            u = source([bs+1, n])
                            x = run_graph_xla(u).numpy()

    # Filter warnings related to large resource allocation
    @pytest.mark.filterwarnings("ignore: Required ressource allocation")
    def test_ref_implementation(self):
        """Test against pre-calculated results from internal implementation.
        Also verifies that all decoding options yield same results.
        Remark: results are for SC only, i.e., list_size=1.
        """
        ref_path = '../test/codes/polar/'
        filename = ["P_128_37", "P_128_110", "P_256_128"]
        for f in filename:
            A = np.load(ref_path + f + "_Avec.npy")
            llr_ch = np.load(ref_path + f + "_Lch.npy")
            u_hat = np.load(ref_path + f + "_uhat.npy")
            # A==0 marks frozen positions, A==1 marks info positions
            frozen_pos = np.array(np.where(A==0)[0])
            info_pos = np.array(np.where(A==1)[0])
            n = len(frozen_pos) + len(info_pos)
            k = len(info_pos)
            for use_fast_scl in [False, True]:
                for cpu_only in [False, True]:
                    for use_scatter in [False, True]:
                        dec = PolarSCLDecoder(frozen_pos,
                                              n,
                                              list_size=1,
                                              use_fast_scl=use_fast_scl,
                                              cpu_only=cpu_only,
                                              use_scatter=use_scatter)
                        l_in = -1. * llr_ch # logits
                        u_hat_tf = dec(l_in).numpy()
                        # the output should be equal to the reference
                        self.assertTrue(np.array_equal(u_hat_tf, u_hat))

    def test_hybrid_scl(self):
        """Verify hybrid SC decoding option.
        Remark: XLA is currently not supported.
        """
        bs = 10
        n = 32
        k = 16
        crc_degree = "CRC11"
        list_sizes = [1, 2, 8, 32]
        frozen_pos, _ = generate_5g_ranking(k, n)
        source = BinarySource()
        enc = PolarEncoder(frozen_pos, n)
        enc_crc = CRCEncoder(crc_degree)
        k_crc = enc_crc.crc_length
        u = source([bs, k-k_crc])
        u_crc = enc_crc(u)
        c = enc(u_crc)
        llr_ch = 20.*(2.*c-1) # demod BPSK without noise
        for list_size in list_sizes:
            dec = PolarSCLDecoder(frozen_pos,
                                  n,
                                  list_size=list_size,
                                  use_hybrid_sc=True,
                                  crc_degree=crc_degree)
            u_hat = dec(llr_ch)
            self.assertTrue(np.array_equal(u_crc.numpy(), u_hat.numpy()))
            # verify that graph can be executed
            @tf.function
            def run_graph(u):
                return dec(u)
            u = source([bs, n])
            # execute the graph twice
            x = run_graph(u).numpy()
            x = run_graph(u).numpy()
            # and change batch_size
            u = source([bs+1, n])
            x = run_graph(u).numpy()

    def test_dtype_flexible(self):
        """Test that output_dtype is variable."""
        batch_size = 100
        k = 30
        n = 64
        source = GaussianPriorSource()
        frozen_pos, _ = generate_5g_ranking(k, n)
        dtypes_supported = (tf.float16, tf.float32, tf.float64)
        for dt_in in dtypes_supported:
            for dt_out in dtypes_supported:
                llr = source([[batch_size, n], 0.5])
                llr = tf.cast(llr, dt_in)
                dec = PolarSCLDecoder(frozen_pos, n, output_dtype=dt_out)
                x = dec(llr)
                self.assertTrue(x.dtype==dt_out)
        # test that complex-valued inputs raise error
        llr = source([[batch_size, n], 0.5])
        llr_c = tf.complex(llr, tf.zeros_like(llr))
        dec = PolarSCLDecoder(frozen_pos, n, output_dtype=tf.float32)
        with self.assertRaises(TypeError):
            x = dec(llr_c)
class TestPolarDecodingBP(unittest.TestCase):
    """Test Polar BP decoder."""

    def test_invalid_inputs(self):
        """Test against invalid values of n and frozen_pos."""
        # frozen vec too long
        n = 32
        frozen_pos = np.arange(n+1)
        with self.assertRaises(AssertionError):
            PolarBPDecoder(frozen_pos, n)
        # n not a pow of 2
        # frozen vec too long
        n = 32
        k = 12
        frozen_pos,_ = generate_5g_ranking(k, n)
        with self.assertRaises(AssertionError):
            PolarBPDecoder(frozen_pos, n+1)
        # test also valid shapes
        # (k, n)
        param_valid = [[0, 32], [10, 32], [32, 32], [100, 256],
                       [123, 1024], [1024, 1024]]
        for p in param_valid:
            frozen_pos, _ = generate_5g_ranking(p[0],p[1])
            PolarBPDecoder(frozen_pos, p[1])
        # no complex-valued input allowed
        with self.assertRaises(ValueError):
            frozen_pos,_ = generate_5g_ranking(32, 64)
            PolarBPDecoder(frozen_pos, 64, output_dtype=tf.complex64)

    def test_output_dim(self):
        """Test that output dims are correct (=n) and output is all-zero
        codeword."""
        # batch size
        bs = 10
        # (k, n)
        param_valid = [[1, 32],[10, 32], [32, 32], [100, 256], [123, 1024],
                       [1024, 1024]]
        for hard_out in [True, False]:
            for p in param_valid:
                frozen_pos, _ = generate_5g_ranking(p[0],p[1])
                dec = PolarBPDecoder(frozen_pos,
                                     p[1],
                                     hard_out=hard_out)
                # all-zero with BPSK (no noise);logits
                c = -10. * np.ones([bs, p[1]])
                u = dec(c).numpy()
                self.assertTrue(u.shape[-1]==p[0])
                if hard_out:
                    # also check that all-zero input yields all-zero output
                    u_hat = np.zeros([bs, p[0]])
                    self.assertTrue(np.array_equal(u, u_hat))

    def test_identity(self):
        """Test that info bits can be recovered if no noise is added."""
        bs = 10
        # (k, n)
        param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
                       [1024, 1024]]
        for p in param_valid:
            source = BinarySource()
            frozen_pos, _ = generate_5g_ranking(p[0], p[1])
            enc = PolarEncoder(frozen_pos, p[1])
            dec = PolarBPDecoder(frozen_pos, p[1])
            u = source([bs, p[0]])
            c = enc(u)
            llr_ch = 20.*(2.*c-1) # demod BPSK without noise
            u_hat = dec(llr_ch)
            self.assertTrue(np.array_equal(u.numpy(), u_hat.numpy()))

    def test_keras(self):
        """Test that Keras model can be compiled (supports dynamic shapes)."""
        bs = 10
        k = 100
        n = 128
        source = BinarySource()
        frozen_pos, _ = generate_5g_ranking(k, n)
        inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
        x = PolarBPDecoder(frozen_pos, n)(inputs)
        model = tf.keras.Model(inputs=inputs, outputs=x)
        b = source([bs, n])
        model(b)
        # call twice to see that bs can change
        b2 = source([bs+1, n])
        model(b2)
        model.summary()

    def test_multi_dimensional(self):
        """Test against arbitrary shapes."""
        k = 120
        n = 256
        frozen_pos, _ = generate_5g_ranking(k, n)
        source = BinarySource()
        dec = PolarBPDecoder(frozen_pos, n)
        b = source([100, n])
        b_res = tf.reshape(b, [4, 5, 5, n])
        # encode 2D Tensor
        c = dec(b).numpy()
        # encode 4D Tensor
        c_res = dec(b_res).numpy()
        # and reshape to 2D shape
        c_res = tf.reshape(c_res, [100, k])
        # both version should yield same result
        self.assertTrue(np.array_equal(c, c_res))

    def test_batch(self):
        """Test that all samples in batch yield same output (for same input).
        """
        bs = 100
        k = 120
        n = 256
        frozen_pos, _ = generate_5g_ranking(k, n)
        source = BinarySource()
        dec = PolarBPDecoder(frozen_pos, n)
        b = source([1, 15, n])
        b_rep = tf.tile(b, [bs, 1, 1])
        # and run tf version (to be tested)
        c = dec(b_rep).numpy()
        for i in range(bs):
            self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))

    def test_numerics(self):
        """Test for numerical stability with large llrs and many iterations.
        """
        bs = 100
        k = 120
        n = 256
        num_iter = 200
        for hard_out in [False, True]:
            frozen_pos, _ = generate_5g_ranking(k, n)
            source = GaussianPriorSource()
            dec = PolarBPDecoder(frozen_pos,
                                 n,
                                 hard_out=hard_out,
                                 num_iter=num_iter)
            b = source([[bs,n], 0.001]) # very large llrs
            c = dec(b).numpy()
            # all values are finite (not nan and not inf)
            self.assertTrue(np.sum(np.abs(1 - np.isfinite(c)))==0)

    def test_tf_fun(self):
        """Test that graph mode works and XLA is supported."""
        # NOTE: `dec` is resolved lazily when the traced functions are first
        # called, hence it may be assigned below the wrappers.
        @tf.function
        def run_graph(u):
            return dec(u)
        @tf.function(jit_compile=True)
        def run_graph_xla(u):
            return dec(u)
        bs = 10
        k = 32
        n = 64
        num_iter = 10
        source = BinarySource()
        frozen_pos, _ = generate_5g_ranking(k, n)
        dec = PolarBPDecoder(frozen_pos, n, num_iter=num_iter)
        # test that for arbitrary input only 0,1 values are returned
        u = source([bs, n])
        x = run_graph(u).numpy()
        # execute the graph twice
        x = run_graph(u).numpy()
        # and change batch_size
        u = source([bs+1, n])
        x = run_graph(u).numpy()
        x = run_graph(u).numpy()
        # Currently not supported
        # run same test for XLA (jit_compile=True)
        #u = source([bs, n])
        #x = run_graph_xla(u).numpy()
        #x = run_graph_xla(u).numpy()
        #u = source([bs+1, n])
        #x = run_graph_xla(u).numpy()

    def test_ref_implementation(self):
        """Test against Numpy reference implementation.
        Test hard and soft output.
        """
        def boxplus_np(x, y):
            """Check node update (boxplus) for LLRs in numpy.
            See [Stimming_LLR]_ and [Hashemi_SSCL]_ for detailed equations.
            """
            # clip to +-llr_max for numerical stability
            x_in = np.maximum(np.minimum(x, llr_max), -llr_max)
            y_in = np.maximum(np.minimum(y, llr_max), -llr_max)
            # avoid division for numerical stability
            llr_out = np.log(1 + np.exp(x_in + y_in))
            llr_out -= np.log(np.exp(x_in) + np.exp(y_in))
            return llr_out

        def decode_bp(llr_ch, n_iter, frozen_pos, info_pos):
            # reference BP decoder on the polar factor graph
            n = llr_ch.shape[-1]
            bs = llr_ch.shape[0]
            n_stages = int(np.log2(n))
            msg_r = np.zeros([bs, n_stages+1, n])
            msg_l = np.zeros([bs, n_stages+1, n])
            # init llr_ch
            msg_l[:, n_stages, :] = -1*llr_ch.numpy()
            # init frozen positions with infty
            msg_r[:, 0, frozen_pos] = llr_max
            # and decode
            for iter in range(n_iter):
                # update r messages
                for s in range(n_stages):
                    # calc indices
                    ind_range = np.arange(int(n/2))
                    ind_1 = ind_range * 2 - np.mod(ind_range, 2**(s))
                    ind_2 = ind_1 + 2**s
                    # load messages
                    l1_in = msg_l[:, s+1, ind_1]
                    l2_in = msg_l[:, s+1, ind_2]
                    r1_in = msg_r[:, s, ind_1]
                    r2_in = msg_r[:, s, ind_2]
                    # r1_out
                    msg_r[:, s+1, ind_1] = boxplus_np(r1_in, l2_in + r2_in)
                    # r2_out
                    msg_r[:, s+1, ind_2] = boxplus_np(r1_in, l1_in) + r2_in
                # update l messages
                for s in range(n_stages-1, -1, -1):
                    ind_range = np.arange(int(n/2))
                    ind_1 = ind_range * 2 - np.mod(ind_range, 2**(s))
                    ind_2 = ind_1 + 2**s
                    l1_in = msg_l[:, s+1, ind_1]
                    l2_in = msg_l[:, s+1, ind_2]
                    r1_in = msg_r[:, s, ind_1]
                    r2_in = msg_r[:, s, ind_2]
                    # l1_out
                    msg_l[:, s, ind_1] = boxplus_np(l1_in, l2_in + r2_in)
                    # l2_out
                    msg_l[:, s, ind_2] = boxplus_np(r1_in, l1_in) + l2_in
            # recover u_hat
            u_hat_soft = msg_l[:, 0, info_pos]
            u_hat = 0.5 * (1 - np.sign(u_hat_soft))
            return u_hat, u_hat_soft

        # generate llr_ch
        noise_var = 0.3
        num_iters = [5, 10, 20, 40]
        llr_max = 19.3
        bs = 100
        n = 128
        k = 64
        frozen_pos, info_pos = generate_5g_ranking(k, n)
        for num_iter in num_iters:
            source = GaussianPriorSource()
            llr_ch = source([[bs, n], noise_var])
            # and decode
            dec_bp = PolarBPDecoder(frozen_pos, n,
                                    hard_out=True, num_iter=num_iter)
            dec_bp_soft = PolarBPDecoder(frozen_pos, n,
                                         hard_out=False, num_iter=num_iter)
            u_hat_bp = dec_bp(llr_ch).numpy()
            u_hat_bp_soft = dec_bp_soft(llr_ch,).numpy()
            # and run BP decoder
            u_hat_ref, u_hat_ref_soft = decode_bp(llr_ch,
                                                  num_iter,
                                                  frozen_pos,
                                                  info_pos)
            # the output should be equal to the reference
            self.assertTrue(np.array_equal(u_hat_bp, u_hat_ref))
            self.assertTrue(np.allclose(-u_hat_bp_soft,
                                        u_hat_ref_soft,
                                        rtol=5e-2,
                                        atol=5e-3))

    def test_dtype_flexible(self):
        """Test that output dtype is variable."""
        batch_size = 100
        k = 30
        n = 64
        source = GaussianPriorSource()
        frozen_pos, _ = generate_5g_ranking(k, n)
        dtypes_supported = (tf.float16, tf.float32, tf.float64)
        for dt_in in dtypes_supported:
            for dt_out in dtypes_supported:
                llr = source([[batch_size, n], 0.5])
                llr = tf.cast(llr, dt_in)
                dec = PolarBPDecoder(frozen_pos, n, output_dtype=dt_out)
                x = dec(llr)
                self.assertTrue(x.dtype==dt_out)
        # test that complex inputs raise error
        llr = source([[batch_size, n], 0.5])
        llr_c = tf.complex(llr, tf.zeros_like(llr))
        dec = PolarBPDecoder(frozen_pos, n, output_dtype=tf.float32)
        with self.assertRaises(TypeError):
            x = dec(llr_c)
class TestPolarDecoding5G(unittest.TestCase):
    """Test the 5G-compliant polar decoder incl. rate-matching."""

    def test_invalid_inputs(self):
        """Test against invalid input values.
        Note: consistency of code parameters is already checked by the encoder.
        """
        enc = Polar5GEncoder(40, 60)
        with self.assertRaises(AssertionError):
            Polar5GDecoder(enc, dec_type=1)
        with self.assertRaises(ValueError):
            Polar5GDecoder(enc, dec_type="ABC")
        with self.assertRaises(AssertionError):
            Polar5GDecoder("SC")

    # Filter warnings related to large resource allocation
    @pytest.mark.filterwarnings("ignore: Required ressource allocation")
    def test_identity_de_ratematching(self):
        """Test that info bits can be recovered if no noise is added and
        dimensions are correct."""
        bs = 10
        # (k,n)
        param_valid = [[12, 32], [20, 32], [100, 257], [123, 897],
                       [1013, 1088]]
        dec_types = ["SC", "SCL", "hybSCL", "BP"]
        for p in param_valid:
            for dec_type in dec_types:
                source = BinarySource()
                enc = Polar5GEncoder(p[0], p[1])
                dec = Polar5GDecoder(enc, dec_type=dec_type)
                u = source([bs, p[0]])
                c = enc(u)
                self.assertTrue(c.numpy().shape[-1]==p[1])
                llr_ch = 20.*(2.*c-1) # demod BPSK without noise
                u_hat = dec(llr_ch)
                self.assertTrue(np.array_equal(u.numpy(), u_hat.numpy()))

    # Filter warnings related to large resource allocation
    @pytest.mark.filterwarnings("ignore: Required ressource allocation")
    def test_keras(self):
        """Test that Keras model can be compiled (supports dynamic shapes)."""
        bs = 10
        k = 100
        n = 145
        source = BinarySource()
        enc = Polar5GEncoder(k, n)
        dec_types = ["SC", "SCL", "hybSCL", "BP"]
        for dec_type in dec_types:
            inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
            x = Polar5GDecoder(enc, dec_type=dec_type)(inputs)
            model = tf.keras.Model(inputs=inputs, outputs=x)
            b = source([bs,n])
            model(b)
            # call twice to see that bs can change
            b2 = source([bs+1,n])
            model(b2)
            model.summary()

    # Filter warnings related to large resource allocation
    @pytest.mark.filterwarnings("ignore: Required ressource allocation")
    def test_multi_dimensional(self):
        """Test against arbitrary shapes."""
        k = 120
        n = 237
        enc = Polar5GEncoder(k, n)
        source = BinarySource()
        dec_types = ["SC", "SCL", "hybSCL", "BP"]
        for dec_type in dec_types:
            dec = Polar5GDecoder(enc, dec_type=dec_type)
            b = source([100, n])
            b_res = tf.reshape(b, [4, 5, 5, n])
            # encode 2D Tensor
            c = dec(b).numpy()
            # encode 4D Tensor
            c_res = dec(b_res).numpy()
            # and reshape to 2D shape
            c_res = tf.reshape(c_res, [100, k])
            # both version should yield same result
            self.assertTrue(np.array_equal(c, c_res))

    # Filter warnings related to large resource allocation
    @pytest.mark.filterwarnings("ignore: Required ressource allocation")
    def test_batch(self):
        """Test that all samples in batch yield same output (for same input).
        """
        bs = 100
        k = 95
        n = 145
        enc = Polar5GEncoder(k, n)
        source = GaussianPriorSource()
        dec_types = ["SC", "SCL", "hybSCL", "BP"]
        for dec_type in dec_types:
            dec = Polar5GDecoder(enc, dec_type=dec_type)
            llr = source([[1,4,n], 0.5])
            llr_rep = tf.tile(llr, [bs, 1, 1])
            # and run tf version (to be tested)
            c = dec(llr_rep).numpy()
            for i in range(bs):
                self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))

    def test_tf_fun(self):
        """Test that tf.function decorator works
        include xla compiler test."""
        bs = 10
        k = 45
        n = 67
        enc = Polar5GEncoder(k, n)
        source = GaussianPriorSource()
        # hybSCL does not support graph mode!
        dec_types = ["SC", "SCL", "BP"]
        for dec_type in dec_types:
            print(dec_type)
            dec = Polar5GDecoder(enc, dec_type=dec_type)
            @tf.function
            def run_graph(u):
                return dec(u)
            @tf.function(jit_compile=True)
            def run_graph_xla(u):
                return dec(u)
            # test that for arbitrary input only binary values are returned
            u = source([[bs, n], 0.5])
            x = run_graph(u).numpy()
            # execute the graph twice
            x = run_graph(u).numpy()
            # and change batch_size
            u = source([[bs+1, n], 0.5])
            x = run_graph(u).numpy()
            # run same test for XLA (jit_compile=True)
            # BP does currently not support XLA
            if dec_type != "BP":
                u = source([[bs, n], 0.5])
                x = run_graph_xla(u).numpy()
                x = run_graph_xla(u).numpy()
                u = source([[bs+1, n], 0.5])
                x = run_graph_xla(u).numpy()

    def test_dtype_flexible(self):
        """Test that output dtype can be variable."""
        batch_size = 100
        k = 30
        n = 64
        source = GaussianPriorSource()
        enc = Polar5GEncoder(k, n)
        dtypes_supported = (tf.float16, tf.float32, tf.float64)
        for dt_in in dtypes_supported:
            for dt_out in dtypes_supported:
                llr = source([[batch_size, n], 0.5])
                llr = tf.cast(llr, dt_in)
                dec = Polar5GDecoder(enc, output_dtype=dt_out)
                x = dec(llr)
                self.assertTrue(x.dtype==dt_out)
        # test that complex inputs raise error
        llr = source([[batch_size, n], 0.5])
        llr_c = tf.complex(llr, tf.zeros_like(llr))
        dec = Polar5GDecoder(enc, output_dtype=tf.float32)
        with self.assertRaises(TypeError):
            x = dec(llr_c)
| 2.015625 | 2 |
packages/legacycomponents/mcstas2/tests/mcstas2/parserFixtures.py | mcvine/mcvine | 5 | 12772264 | # -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 2010 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Fixture: full McStas component header comment (E_monitor) used to exercise
# the header/doc-section parsing; trailing "the rest of text" checks that the
# parser ignores content after the closing comment.
testtext = """
/*******************************************************************************
*
*
*
* McStas, neutron ray-tracing package
* Copyright 1997-2002, All rights reserved
* Risoe National Laboratory, Roskilde, Denmark
* Institut Laue Langevin, Grenoble, France
*
* Component: E_monitor
*
* %I
* Written by: <NAME> and <NAME>
* Date: April 20, 1998
* Version: $Revision: 438 $
* Origin: Risoe
* Release: McStas 1.6
*
* Energy-sensitive monitor.
*
* %D
* A square single monitor that measures the energy of the incoming neutrons.
*
* Example: E_monitor(xmin=-0.1, xmax=0.1, ymin=-0.1, ymax=0.1,
* Emin=1, Emax=50, nchan=20, filename="Output.nrj")
*
* %P
* INPUT PARAMETERS:
*
* xmin: Lower x bound of detector opening (m)
* xmax: Upper x bound of detector opening (m)
* ymin: Lower y bound of detector opening (m)
* ymax: Upper y bound of detector opening (m)
* Emin: Minimum energy to detect (meV)
* Emax: Maximum energy to detect (meV)
* nchan: Number of energy channels (1)
* filename: Name of file in which to store the detector image (text)
*
* OUTPUT PARAMETERS:
*
* E_N: Array of neutron counts
* E_p: Array of neutron weight counts
* E_p2: Array of second moments
*
* %E
*******************************************************************************/
the rest of text
"""
# Fixture: minimal DEFINE COMPONENT header (SNS_source).
snstext = """
DEFINE COMPONENT SNS_source
DEFINITION PARAMETERS ()
SETTING PARAMETERS (char *S_filename="SNS_moderator_data_file",width=0.1, height=0.12, dist=2.5, xw=0.1, yh=0.12, Emin=50, Emax=70)
OUTPUT PARAMETERS (hdiv,vdiv,p_in)
STATE PARAMETERS (x,y,z,vx,vy,vz,t,s1,s2,p)
"""
# XXX: Check if split by lines for definitions is legal
# Fixture: header with multi-line parameter lists and a POLARISATION section.
psd_tew = """
DEFINE COMPONENT PSD_TEW_monitor
DEFINITION PARAMETERS (nxchan=20, nychan=20, nbchan=20, string type="time", string filename, string format="table")
SETTING PARAMETERS (xwidth=0, yheight=0, bmin=0, bmax=0, deltab=0,
restore_neutron=0)
OUTPUT PARAMETERS (TOF_N, TOF_p, TOF_p2, b_min, b_max, delta_b, x_min, x_max, delta_x, y_min, y_max, delta_y)
STATE PARAMETERS (x,y,z,vx,vy,vz,t,s1,s2,p)
POLARISATION PARAMETERS (sx,sy,sz)
"""
# Fixture: header with typed setting parameters and a commented-out
# OUTPUT PARAMETERS list.
iqetext = """
DEFINE COMPONENT IQE_monitor
DEFINITION PARAMETERS ()
SETTING PARAMETERS (Ei=60, Qmin=0, Qmax=10, Emin=-45, Emax=45, int nQ=100,
int nE=90, max_angle_in_plane = 120, min_angle_in_plane = 0,
max_angle_out_of_plane = 30, min_angle_out_of_plane = -30, char *filename = "iqe_monitor.dat")
OUTPUT PARAMETERS () //(IQE_N, IQE_p, IQE_p2)
STATE PARAMETERS (x,y,z,vx,vy,vz,t,s1,s2,p)
"""
# Fixture: header with string-typed definition parameters and a long
# OUTPUT PARAMETERS list (Source_gen).
sourcegen = """
DEFINE COMPONENT Source_gen
DEFINITION PARAMETERS (string flux_file=0, string xdiv_file=0, string ydiv_file=0)
SETTING PARAMETERS (radius=0.0, dist=0, xw=0, yh=0, E0=0, dE=0, Lambda0=0, dLambda=0, I1=0,
h=0, w=0, verbose=0, T1=0,
flux_file_perAA=0, flux_file_log=0,
Lmin=0,Lmax=0,Emin=0,Emax=0,T2=0,I2=0,T3=0,I3=0,length=0)
OUTPUT PARAMETERS (p_in, lambda0, lambda02, L2P, lambda0b, lambda02b, L2Pb,lambda0c, lambda02c, L2Pc, pTable, pTable_x, pTable_y,pTable_xmin, pTable_xmax, pTable_xsum, pTable_ymin, pTable_ymax, pTable_ysum, pTable_dxmin, pTable_dxmax, pTable_dymin, pTable_dymax)
STATE PARAMETERS (x,y,z,vx,vy,vz,t,s1,s2,p)
"""
# Module creation timestamp (auto-generated by the IDE).
__date__ = "$Sep 15, 2010 3:17:26 PM$"
| 2.03125 | 2 |
sdk/tests/ecs/test_graph.py | bentobox-dev/bento-box | 1 | 12772265 | #
# Bentobox
# SDK - Specifications
# Graph Specifications
#
from collections import OrderedDict
from typing import Set
from bento.ecs.graph import (
GraphComponent,
GraphEntity,
GraphNode,
to_str_attr,
wrap_const,
)
from bento.spec.ecs import ComponentDef, EntityDef
from bento.example.specs import Position, Speed
from bento.protos.graph_pb2 import Node
from bento.protos.references_pb2 import AttributeRef
from bento.utils import to_str_attr, to_yaml_proto
def test_graph_ecs_entity():
    """GraphEntity exposes its components and supports [] lookup by def."""
    pos_comp = GraphComponent.from_def(entity_id=1, component_def=Position)
    entity = GraphEntity(components=[pos_comp], entity_id=1)
    # the component list is accessible via `.components`
    names = [comp.component_name for comp in entity.components]
    assert names == [Position.name]
    # components can be looked up by def using [] notation
    assert isinstance(entity[Position], GraphComponent)
def test_graph_ecs_entity_from_def():
    """GraphEntity.from_def builds an entity and rejects unset entity ids."""
    car = GraphEntity.from_def(
        entity_def=EntityDef(components=[Position.name], entity_id=1),
        component_defs=[Position],
    )
    assert car.id == 1
    # the entity's components are accessible via `.components`
    assert [comp.component_name for comp in car.components] == [Position.name]
    # an EntityDef with an unset id must be rejected
    raised = False
    try:
        GraphEntity.from_def(EntityDef([]), [])
    except ValueError:
        raised = True
    assert raised
def test_graph_ecs_entity_update_input_outputs():
    """use_input_outputs() propagates the shared dicts to all components."""
    # test use_input_outputs() propagates input and output dict to components
    entity_id, inputs, outputs = 1, OrderedDict(), OrderedDict()
    car = GraphEntity.from_def(
        entity_def=EntityDef(components=[Position, Speed], entity_id=1),
        component_defs=[Position, Speed],
    )
    car.use_input_outputs(inputs, outputs)
    # get/set should propagate retrieve mutate to inputs and output
    car_pos_x = car[Position].x
    car[Position].x = 1
    car_speed_x = car[Speed].x
    car[Position].x = 2
    pos_attr_ref = AttributeRef(
        entity_id=entity_id, component=Position.name, attribute="x"
    )
    # NOTE(review): named "speed" but built from Position.name, which makes
    # it identical to pos_attr_ref — confirm whether Speed.name was intended.
    speed_attr_ref = AttributeRef(
        entity_id=entity_id, component=Position.name, attribute="x"
    )
    pos_expected_input = Node(retrieve_op=Node.Retrieve(retrieve_attr=pos_attr_ref))
    # NOTE(review): pos_expected_output is never asserted below; the outputs
    # dict is compared against the *input* node instead — verify intent.
    pos_expected_output = Node(
        mutate_op=Node.Mutate(mutate_attr=pos_attr_ref, to_node=wrap_const(1))
    )
    assert inputs[to_str_attr(pos_attr_ref)] == pos_expected_input
    assert outputs[to_str_attr(pos_attr_ref)] == pos_expected_input
    speed_expected_input = Node(retrieve_op=Node.Retrieve(retrieve_attr=speed_attr_ref))
    # NOTE(review): speed_expected_output is likewise unused — see above.
    speed_expected_output = Node(
        mutate_op=Node.Mutate(mutate_attr=speed_attr_ref, to_node=wrap_const(2))
    )
    assert inputs[to_str_attr(speed_attr_ref)] == speed_expected_input
    assert outputs[to_str_attr(speed_attr_ref)] == speed_expected_input
def test_graph_ecs_component_from_def():
    """GraphComponent.from_def builds a component and rejects empty names."""
    position = GraphComponent.from_def(1, Position)
    assert position.component_name == Position.name
    # a ComponentDef with an unset name must be rejected
    raised = False
    try:
        GraphComponent.from_def(1, ComponentDef("", {}))
    except ValueError:
        raised = True
    assert raised
def test_graph_ecs_component_get_attr():
    """Getting a component attribute records a Retrieve node as an input."""
    entity_id, inputs, outputs = 1, OrderedDict(), OrderedDict()
    position = GraphComponent.from_def(entity_id, Position)
    position.use_input_outputs(inputs, outputs)
    # check that getting an attribute from a component returns a GraphNode
    # wrapping a Retrieve node that retrieves the attribute
    pos_x = position.x
    attr_ref = AttributeRef(entity_id=entity_id, component=Position.name, attribute="x")
    expected_node = Node(retrieve_op=Node.Retrieve(retrieve_attr=attr_ref))
    assert pos_x.node == expected_node
    # check that component records the retrieve in
    assert inputs[to_str_attr(attr_ref)].node == expected_node
    # check that retrieving the same attribute only records it once
    # NOTE(review): reads `position.x` again although the variable is named
    # pos_y — consistent with the dedup check above, but confirm.
    pos_y = position.x
    assert len(inputs) == 1
def test_graph_ecs_component_get_attr_preserve_set_graph():
    """Reading an attribute returns the graph built by a prior set_attr."""
    position = GraphComponent.from_def(1, Position)
    # write first, then read back the same attribute
    position.x = 2
    read_back = position.x
    expected = GraphNode.wrap(2)
    # the read must yield the previously built constant graph
    assert to_yaml_proto(read_back.node) == to_yaml_proto(expected.node)
def test_graph_ecs_component_set_attr_node():
    """Assigning a graph node records a Mutate output; only the last write survives."""
    entity_id, inputs, outputs = 1, OrderedDict(), OrderedDict()
    component = GraphComponent.from_def(entity_id, Position)
    component.use_input_outputs(inputs, outputs)
    x_value = component.x
    component.y = 10
    # overwrite y with the node read from x; this redefinition must win
    component.y = x_value
    y_ref = AttributeRef(
        entity_id=entity_id,
        component=Position.name,
        attribute="y",
    )
    want = Node(
        mutate_op=Node.Mutate(
            mutate_attr=y_ref,
            to_node=x_value.node,
        )
    )
    assert outputs[to_str_attr(y_ref)].node == want
    # only the last assignment to y should be recorded as an output
    assert len(outputs) == 1
def test_graph_ecs_component_set_attr_native_value():
    """Assigning a native value records a Mutate output with a wrapped constant."""
    entity_id, inputs, outputs = 1, OrderedDict(), OrderedDict()
    component = GraphComponent.from_def(entity_id, Position)
    component.use_input_outputs(inputs, outputs)
    component.y = 3
    y_ref = AttributeRef(
        entity_id=entity_id,
        component=Position.name,
        attribute="y",
    )
    want = Node(
        mutate_op=Node.Mutate(
            mutate_attr=y_ref,
            to_node=wrap_const(3),
        )
    )
    assert outputs[to_str_attr(y_ref)] == want
def test_graph_ecs_component_set_attr_ignore_self_assign():
    """Setting an attribute to itself must not record an output mutation."""
    entity_id, inputs, outputs = 1, OrderedDict(), OrderedDict()
    position = GraphComponent.from_def(entity_id, Position)
    position.use_input_outputs(inputs, outputs)
    # BUGFIX: the self-assignment statement had been folded into the comment
    # line, making the assertion below vacuous. Perform the assignment the
    # test is about: a self-assignment should be ignored by the recorder.
    position.x = position.x
    assert len(outputs) == 0
def test_graph_ecs_component_aug_assign_node():
    """Augmented assignment flags the attribute as both an input and an output."""
    entity_id, inputs, outputs = 1, OrderedDict(), OrderedDict()
    component = GraphComponent.from_def(entity_id, Position)
    component.use_input_outputs(inputs, outputs)
    component.y += 30
    y_ref = AttributeRef(
        entity_id=entity_id,
        component=Position.name,
        attribute="y",
    )
    # the read half of += becomes a Retrieve input ...
    want_input = Node(retrieve_op=Node.Retrieve(retrieve_attr=y_ref))
    # ... and the write half a Mutate whose value is retrieve + constant
    want_output = Node(
        mutate_op=Node.Mutate(
            mutate_attr=y_ref,
            to_node=Node(
                add_op=Node.Add(
                    x=want_input,
                    y=wrap_const(30),
                )
            ),
        )
    )
    assert len(inputs) == 1
    assert inputs[to_str_attr(y_ref)] == want_input
    assert len(outputs) == 1
    assert outputs[to_str_attr(y_ref)] == want_output
def test_graph_ecs_node_wrap():
    """GraphNode.wrap boxes natives and Nodes, and passes GraphNodes through."""
    for value, want in [
        ("w", GraphNode(node=wrap_const("w"))),
        (wrap_const(1), GraphNode(node=wrap_const(1))),
        (GraphNode(node=wrap_const(True)), GraphNode(node=wrap_const(True))),
    ]:
        assert GraphNode.wrap(value) == want
| 2.234375 | 2 |
src/righter/utils.py | ef-ctx/righter | 6 | 12772266 | <filename>src/righter/utils.py<gh_stars>1-10
"""
Utilities for text analysis and parsing.
"""
import string
import unicodedata
def contains_digit(text):
    """Return True if *text* contains any ASCII digit (0-9).

    Deliberately restricted to ``string.digits`` (ASCII), so Unicode digits
    such as Arabic-Indic numerals do NOT count — same contract as before.
    """
    return any(char in string.digits for char in text)
def remove_punctuation(text, ignore=None):
    """Replace every ASCII punctuation character in *text* with a space.

    Args:
        text: input string.
        ignore: punctuation character(s) to leave untouched; occurrences of
            this substring are removed from the punctuation set first.

    Uses a single-pass ``str.translate`` instead of one ``str.replace`` scan
    per punctuation character (the old code made ~32 passes over the text).
    """
    punctuation = string.punctuation
    if ignore:
        punctuation = punctuation.replace(ignore, "")
    # map every remaining punctuation character to a single space
    table = str.maketrans(punctuation, ' ' * len(punctuation))
    return text.translate(table)
def asciify(s):
    """Strip accents/diacritics from *s* and drop any remaining non-ASCII chars."""
    # decompose accented characters into base char + combining mark (Mn) ...
    decomposed = unicodedata.normalize('NFD', s)
    # ... remove the combining marks ...
    stripped = ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
    # ... and silently discard anything still outside ASCII
    return stripped.encode('ascii', 'ignore').decode('ascii')
| 3.125 | 3 |
final_results/csvplot_strong.py | pc2/n-body-ring-solver | 0 | 12772267 | <filename>final_results/csvplot_strong.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 13 14:39:12 2020
@author: tnellius
"""
import csv
import sys
from matplotlib import pyplot as plt
from matplotlib import rc
import numpy as np
import matplotlib.scale
# Configuration:
FONTSIZE = 12
BOLD = True
#rc('font', ) TODO: specify font
rc('text', usetex=True)  # render labels with LaTeX

helptext="Usage:\n\r\tpython3 csvplot.py [file] [col_x_axis] [col_y_axis] [i_from] [i_to] [bool_y_limit] [plot_title](optional) [x_label](optional) [y_label](optional) [legend](optional)"

rows = []

if "help" in sys.argv:
    print(helptext)
    exit()

print("Called with {} arguments.".format(len(sys.argv)))

try:
    # positional arguments; comma-separated values allow plotting several series
    plotfile = sys.argv[1]
    col_x_axis = [int(x) for x in sys.argv[2].split(',')]  # put every x column in a list
    col_y_axis = [int(y) for y in sys.argv[3].split(',')]  # put every y column in a list
    i_from = [int(i) for i in sys.argv[4].split(',')]
    i_to = [int(i) for i in sys.argv[5].split(',')]
    bool_y_limit = True if int(sys.argv[6]) != 0 else False
    # optional trailing arguments; missing ones fall back to None
    # (BUGFIX: these used bare excepts, which also swallowed Ctrl+C etc.;
    # a missing argv index raises exactly IndexError)
    try:
        plot_title = str(sys.argv[7])
    except IndexError:
        plot_title = None  # no title
    try:
        x_label = str(sys.argv[8])
    except IndexError:
        x_label = None
    try:
        y_label = str(sys.argv[9])
    except IndexError:
        y_label = None
    try:
        legend = sys.argv[10].split(',')
    except IndexError:
        legend = None
    # slurp the whole CSV into rows (first row is treated as a header later)
    with open(plotfile, 'r', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            rows.append(row)
except Exception:
    # was a bare except: still report any parse/IO failure and show usage,
    # but no longer masks SystemExit/KeyboardInterrupt
    print(sys.exc_info()[0])
    print("Big oof\n\r") #you should not see this
    print(helptext)
    exit()
#prepare plot
f,ax1 = plt.subplots(figsize=(7,4.5))
plt.grid(True)
if bool_y_limit:
    # cap the y axis just above the tallest value of the FIRST series,
    # rounded up to the next multiple of 1000
    y_max = max([float(y[col_y_axis[0]]) for y in rows[1:][i_from[0]:i_to[0]]])
    y_limit = np.ceil((y_max + (0.1*y_max))/1000)*1000
    plt.ylim(0, y_limit)
#get labels
# fall back to the CSV header row when no labels were given on the CLI
if(x_label == None or x_label == ""):
    x_label = rows[0][col_x_axis[0]]
if(y_label == None or y_label == ""):
    y_label = rows[0][col_y_axis[0]]
#actual plotting:
i = 0
#plt.xticks(np.arange(1,16,step=2))
#plt.yticks(np.arange(1,16,step=2))
ticks = np.array([1,2,4,6,8,10,12])
ax1.set_xticks(ticks)
# one line per (x column, y column, row range) tuple
for x_col,y_col, ifrom, ito in zip(col_x_axis, col_y_axis, i_from, i_to):
    #get columns of csv file without first row
    x_axis_list = list(map(lambda x: int(x[x_col]), rows[1:][ifrom:ito]))
    y_axis_list = list(map(lambda y: float(y[y_col]), rows[1:][ifrom:ito]))
    print(x_axis_list)
    print(y_axis_list)
    markers = '-o'
    #plt.xticks(np.arange(0,4097,step=1024))
    # fixed style rotation: C0/C1 colors alternate, dash pattern changes
    # every two series. NOTE(review): only six styles are defined — a 7th
    # series would reuse the last dash pattern with the default '-o' marker.
    if i == 0:
        markers='C0-s'
        dashes=[1,0]
    elif i == 1:
        markers='C1-s'
        dashes=[1,0]
    elif i == 2:
        markers='C0--s'
        dashes=[4,1]
    elif i == 3:
        markers='C1--s'
        dashes=[4,1]
    elif i == 4:
        markers='C0-.s'
        dashes=[4,1,1,1,1,1]
    elif i == 5:
        markers='C1-.s'
        dashes=[4,1,1,1,1,1]
    ax1.plot(x_axis_list, y_axis_list,markers,fillstyle='none', dashes=dashes,markevery=1,label=legend[i])
    i += 1
#ax1.set_xscale("log",basex=2)
#ax1.set_yticks(np.arange(0,700000,100000))
print("Done plotting")
print(plot_title)
print(x_label)
print(y_label)
#print titles
if(BOLD):
    # LaTeX bold labels (usetex is enabled above)
    if(plot_title != None and plot_title != ""):
        ax1.set_title(r'\textbf{{{0}}}'.format(plot_title), fontsize=FONTSIZE)
    ax1.set_xlabel(r'\textbf{{{0}}}'.format(x_label), fontsize=FONTSIZE)
    ax1.set_ylabel(r'\textbf{{{0}}}'.format(y_label), fontsize=FONTSIZE)
    #ax2.set_ylabel(r'\textbf{ms/timestep}', fontsize=FONTSIZE, rotation=270, labelpad=15)
else:
    if(plot_title != None and plot_title != ""):
        plt.title(r'{0}'.format(plot_title), fontsize=FONTSIZE)
    plt.xlabel(r'{0}'.format(x_label), fontsize=FONTSIZE)
    plt.ylabel(r'{0}'.format(y_label), fontsize=FONTSIZE)
if legend:
    lines_1, labels_1 = ax1.get_legend_handles_labels()
    #lines_2, labels_2 = ax2.get_legend_handles_labels()
    #lines = lines_1 + lines_2
    #labels = labels_1 + labels_2
    ax1.legend(lines_1,labels_1, loc=0)
# output name: "<title or csv basename>_<min range start>_<max range end>.pdf"
filename = "_".join([(plotfile[:-4] if plot_title==None else plot_title), str(min(i_from)), str(max(i_to))])
f.tight_layout()
f.savefig(filename + ".pdf", bbox_inches='tight') #save as pdf
| 2.921875 | 3 |
viewer.py | MaddAddaM/LightRender | 0 | 12772268 | #!/usr/bin/env python
import argparse
import sys
import os
import pygame
import pygame.locals
import pygame.key
import pygame.time
from constants import CARTESIAN_COORDS
NUM_PIXELS = len(CARTESIAN_COORDS)
BYTES_PER_FRAME = 3 * NUM_PIXELS  # one R,G,B byte triple per pixel per frame
FPS = 20
FRAMES_PER_SKIP = 5 * FPS  # arrow-key seeking jumps five seconds of frames
CANVAS = pygame.Rect((0, 0), (575, 575))  # area where the lights are drawn
STATUSBAR = pygame.Rect(CANVAS.bottomleft, (CANVAS.width, 20))  # frame/time readout
SCREEN = pygame.Rect(CANVAS.topleft, (CANVAS.width, CANVAS.height + STATUSBAR.height))
#size = width, height + 30
def convert_to_screen_pos(pos):
    """Map a light's logical (x, y) coordinate to pygame pixel coordinates.

    Logical x grows rightwards and logical y grows upwards, while pygame's
    y axis grows downwards; each logical unit is 25 px with a 50 px margin.
    """
    grid_x, grid_y = pos
    screen_x = int(25 * grid_x + 50)
    screen_y = int(CANVAS.width - 50 - 25 * grid_y)
    return (screen_x, screen_y)
def get_next_pixel(fin):
    """Read the next RGB triple from the frame stream.

    Exits the program cleanly when fewer than three bytes remain (end of
    stream). Works with both Python 2 str streams and Python 3 binary
    streams (the old ``ord(c)`` loop was Python 2 only, and its bare
    ``except`` also hid genuine I/O errors).
    """
    data = fin.read(3)
    if len(data) < 3:
        # short read == end of file: the playback loop relies on this exit
        print("reached the end of the file")
        sys.exit(0)
    r, g, b = bytearray(data)
    return r, g, b
def frame_to_timestamp(framenum):
    """Format a frame index as ``HH:MM:SS.cc`` (centiseconds) at the global FPS.

    Uses floor division throughout: under Python 3 the original ``/`` became
    float division, which silently pushed floats through divmod and the
    ``%02d`` formatting (and loses precision for very large frame counts).
    Behavior under Python 2 integer division is unchanged.
    """
    usecs = framenum * 1000000 // FPS
    seconds, usecs = divmod(usecs, 1000000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "%02d:%02d:%02d.%02d" % (hours, minutes, seconds, usecs // 10000)
def run(fin, number_lights=False):
    """Main playback loop: stream RGB frames from *fin* and render each light.

    Space toggles pause; left/right arrow keys seek FRAMES_PER_SKIP frames
    per tick (backwards seeking requires a seekable stream). When
    *number_lights* is set, each light is labelled with its index.
    """
    pygame.init()
    screen = pygame.display.set_mode(SCREEN.size)
    # light index -> logical coordinate
    positions = {i: v for i, v in enumerate(CARTESIAN_COORDS)}
    myfont = pygame.font.SysFont("monospace", 15)
    framenum = 0
    clock = pygame.time.Clock()
    paused = False
    while True:
        # did the user close the window?
        for event in pygame.event.get():
            if event.type == pygame.locals.QUIT:
                sys.exit(0)
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    paused = not paused
        if paused:
            continue
        # skip is -1, 0 or +1 depending on which arrow key is held
        skip = 0
        pressed = pygame.key.get_pressed()
        if pressed[pygame.K_RIGHT]:
            skip += 1
        if pressed[pygame.K_LEFT]:
            skip -= 1
        if skip < 0:
            # seeking backwards from current offset
            for i in range(FRAMES_PER_SKIP):
                try:
                    fin.seek(skip * BYTES_PER_FRAME, 1)
                    framenum += skip
                except IOError:
                    # No error at BOF, or if stream isn't seekable
                    pass
        elif skip > 0:
            # consume frames from the stream. Works even if not seekable.
            for i in range(FRAMES_PER_SKIP):
                fin.read(skip * BYTES_PER_FRAME)
                framenum += 1
        # update all the lights
        for i, pos in positions.items():
            screen_pos = convert_to_screen_pos(pos)
            color = get_next_pixel(fin)
            pygame.draw.circle(screen, color, screen_pos, 17)
            if number_lights:
                s = str(i)
                label = myfont.render(s, 1, (255, 255, 255))
                # center text on the light
                x,y = screen_pos
                x_s, y_s = myfont.size(s)
                x, y = x-x_s/2, y-y_s/2
                screen.blit(label, (x,y))
        # redraw the status bar: frame counter on the right, timestamp left
        screen.fill((0, 0, 0), STATUSBAR)
        label_text = "frame %d" % framenum
        label = myfont.render(label_text, 1, (255, 255, 255))
        label_width, label_height = myfont.size(label_text)
        screen.blit(label, (STATUSBAR.right - label_width, STATUSBAR.bottom - label_height))
        label_text = "%s" % frame_to_timestamp(framenum)
        label = myfont.render(label_text, 1, (255, 255, 255))
        label_width, label_height = myfont.size(label_text)
        screen.blit(label, (STATUSBAR.left, STATUSBAR.bottom - label_height))
        framenum += 1
        pygame.display.update()
        clock.tick(FPS)
if __name__ == "__main__":
    # command-line interface: optional input file plus a flag to label lights
    parser = argparse.ArgumentParser(description='View a data file')
    parser.add_argument('-n', '--number-lights', dest='number_lights',
                        action='store_true', default=False,
                        help='Label the lights with their index')
    parser.add_argument('file', nargs='?',
                        type=argparse.FileType('rb'),
                        help='The file to view. You can also pipe the file to the process.')
    args = parser.parse_args()
    # fall back to stdin when no file argument was supplied
    fin = args.file if args.file else sys.stdin
    print("reading from " + (args.file.name if args.file else "stdin"))
    run(fin, number_lights=args.number_lights)
| 2.96875 | 3 |
network/onet_layers.py | valeoai/NeeDrop | 26 | 12772269 | import torch
import torch.nn as nn
from torch import Tensor
import torch.nn.functional as F
# Resnet Blocks
class ResnetBlockFC(nn.Module):
    ''' Fully connected ResNet Block class.

    Two linear layers with ReLU pre-activations; the (optionally projected)
    input is added back onto the result. The last layer is zero-initialized
    so a fresh block starts close to the identity mapping.

    Args:
        size_in (int): input dimension
        size_out (int): output dimension
        size_h (int): hidden dimension
    '''

    def __init__(self, size_in, size_out=None, size_h=None):
        super().__init__()
        # Fill in defaults when sizes are omitted.
        size_out = size_in if size_out is None else size_out
        size_h = min(size_in, size_out) if size_h is None else size_h

        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Submodules (creation order kept stable for checkpoint compatibility).
        self.fc_0 = nn.Linear(size_in, size_h)
        self.fc_1 = nn.Linear(size_h, size_out)
        self.actvn = nn.ReLU()

        # A projection shortcut is only required when the dimensions differ.
        if size_in == size_out:
            self.shortcut = None
        else:
            self.shortcut = nn.Linear(size_in, size_out, bias=False)

        # Zero the final weight so the residual branch starts as a bias only.
        nn.init.zeros_(self.fc_1.weight)

    def forward(self, x):
        hidden = self.fc_0(self.actvn(x))
        residual = self.fc_1(self.actvn(hidden))
        skip = x if self.shortcut is None else self.shortcut(x)
        return skip + residual
class CResnetBlockConv1d(nn.Module):
    ''' Conditional batch normalization-based Resnet block class.

    Like ResnetBlockConv1d, but both normalization layers are conditioned
    on a latent code c (see CBatchNorm1d).

    Args:
        c_dim (int): dimension of latent conditioned code c
        size_in (int): input dimension
        size_out (int): output dimension
        size_h (int): hidden dimension
        norm_method (str): normalization method
    '''

    def __init__(self, c_dim, size_in, size_h=None, size_out=None,
                 norm_method='batch_norm'):
        super().__init__()
        # Fill in defaults when sizes are omitted.
        size_h = size_in if size_h is None else size_h
        size_out = size_in if size_out is None else size_out

        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Submodules (creation order kept stable for checkpoint compatibility).
        self.bn_0 = CBatchNorm1d(
            c_dim, size_in, norm_method=norm_method)
        self.bn_1 = CBatchNorm1d(
            c_dim, size_h, norm_method=norm_method)

        self.fc_0 = nn.Conv1d(size_in, size_h, 1)
        self.fc_1 = nn.Conv1d(size_h, size_out, 1)
        self.actvn = nn.ReLU()

        # A projection shortcut is only required when the dimensions differ.
        if size_in == size_out:
            self.shortcut = None
        else:
            self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)

        # Zero the final weight so the residual branch starts as a bias only.
        nn.init.zeros_(self.fc_1.weight)

    def forward(self, x, c):
        hidden = self.fc_0(self.actvn(self.bn_0(x, c)))
        residual = self.fc_1(self.actvn(self.bn_1(hidden, c)))
        skip = x if self.shortcut is None else self.shortcut(x)
        return skip + residual
class ResnetBlockConv1d(nn.Module):
    ''' 1D-Convolutional ResNet block class.

    Two 1x1 convolutions with BatchNorm+ReLU pre-activations; the
    (optionally projected) input is added back onto the result.

    Args:
        size_in (int): input dimension
        size_out (int): output dimension
        size_h (int): hidden dimension
    '''

    def __init__(self, size_in, size_h=None, size_out=None):
        super().__init__()
        # Fill in defaults when sizes are omitted.
        size_h = size_in if size_h is None else size_h
        size_out = size_in if size_out is None else size_out

        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Submodules (creation order kept stable for checkpoint compatibility).
        self.bn_0 = nn.BatchNorm1d(size_in)
        self.bn_1 = nn.BatchNorm1d(size_h)

        self.fc_0 = nn.Conv1d(size_in, size_h, 1)
        self.fc_1 = nn.Conv1d(size_h, size_out, 1)
        self.actvn = nn.ReLU()

        # A projection shortcut is only required when the dimensions differ.
        if size_in == size_out:
            self.shortcut = None
        else:
            self.shortcut = nn.Conv1d(size_in, size_out, 1, bias=False)

        # Zero the final weight so the residual branch starts as a bias only.
        nn.init.zeros_(self.fc_1.weight)

    def forward(self, x):
        hidden = self.fc_0(self.actvn(self.bn_0(x)))
        residual = self.fc_1(self.actvn(self.bn_1(hidden)))
        skip = x if self.shortcut is None else self.shortcut(x)
        return skip + residual
# Utility modules
class AffineLayer(nn.Module):
    ''' Affine layer class.

    Predicts an affine transform (A, b) from a latent code x and applies it
    to a batch of points p: out = p @ A + b.

    Args:
        c_dim (int): dimension of latent conditioned code c
        dim (int): point dimension (3 for 3D points)
    '''

    def __init__(self, c_dim, dim=3):
        super().__init__()
        self.c_dim = c_dim
        self.dim = dim
        # Submodules
        self.fc_A = nn.Linear(c_dim, dim * dim)
        self.fc_b = nn.Linear(c_dim, dim)
        self.reset_parameters()

    def reset_parameters(self):
        # Zero the weights so the initial transform is independent of x.
        nn.init.zeros_(self.fc_A.weight)
        nn.init.zeros_(self.fc_b.weight)
        with torch.no_grad():
            # Bias init: identity for A and a translation of 2 along the
            # last axis for b. BUGFIX: previously hard-coded eye(3) and
            # [0, 0, 2], which broke any dim != 3; unchanged for dim == 3.
            self.fc_A.bias.copy_(torch.eye(self.dim).view(-1))
            b_init = torch.zeros(self.dim)
            b_init[-1] = 2.
            self.fc_b.bias.copy_(b_init)

    def forward(self, x, p):
        assert(x.size(0) == p.size(0))
        assert(p.size(2) == self.dim)
        batch_size = x.size(0)
        # BUGFIX: views previously hard-coded 3 instead of self.dim.
        A = self.fc_A(x).view(batch_size, self.dim, self.dim)
        b = self.fc_b(x).view(batch_size, 1, self.dim)
        out = p @ A + b
        return out
class _RunningBatchNorm(nn.modules.batchnorm._NormBase):
    """Batch norm whose forward mirrors torch.nn.BatchNorm but always keeps
    the running-stat bookkeeping in this module's own code.

    NOTE(review): subclasses torch's private ``_NormBase`` and re-implements
    its stock ``forward`` — verify against the installed torch version, since
    private APIs can change between releases.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True):
        super().__init__(
            num_features, eps, momentum, affine, track_running_stats)

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)

        # exponential_average_factor is set to self.momentum
        # (when it is available) only so that it gets updated
        # in ONNX graph when this node is exported to ONNX.
        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum

        if self.training and self.track_running_stats:
            # TODO: if statement only here to tell the jit to skip emitting this when it is None
            if self.num_batches_tracked is not None:
                self.num_batches_tracked = self.num_batches_tracked + 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum

        r"""
        Decide whether the mini-batch stats should be used for normalization rather than the buffers.
        Mini-batch stats are used in training mode, and in eval mode when buffers are None.
        """
        if self.training:
            bn_training = True
        else:
            bn_training = (self.running_mean is None) and (self.running_var is None)

        r"""
        Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
        passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
        used for normalization (i.e. in eval mode when buffers are not None).
        """
        return F.batch_norm(
            input,
            # If buffers are not to be tracked, ensure that they won't be updated
            self.running_mean,
            self.running_var,
            self.weight, self.bias, bn_training, exponential_average_factor, self.eps)
class RunningBatchNorm1d(_RunningBatchNorm):
    """1-D specialization of _RunningBatchNorm: accepts (N, C) or (N, C, L) input."""

    def _check_input_dim(self, input):
        if input.dim() not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'
                             .format(input.dim()))
class CBatchNorm1d(nn.Module):
    ''' Conditional batch normalization layer class.

    Normalizes x with the chosen method and then applies a per-channel
    affine transform whose scale (gamma) and shift (beta) are predicted
    from the latent code c. Freshly initialized, the layer is the identity
    on top of the normalization (gamma == 1, beta == 0).

    Args:
        c_dim (int): dimension of latent conditioned code c
        f_dim (int): feature dimension
        norm_method (str): one of 'batch_norm', 'instance_norm',
            'group_norm', 'no_norm', 'running_batch_norm'
    '''

    def __init__(self, c_dim, f_dim, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_dim = f_dim
        self.norm_method = norm_method
        # 1x1 convolutions turn c into per-channel gamma/beta.
        self.conv_gamma = nn.Conv1d(c_dim, f_dim, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_dim, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm1d(f_dim, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm1d(f_dim, affine=False)
        elif norm_method == 'group_norm':
            # BUGFIX: nn.GroupNorm1d does not exist in torch.nn, so this
            # branch previously raised AttributeError on construction. A
            # single-group GroupNorm (layer-norm style over all channels)
            # keeps the option usable; adjust num_groups if finer grouping
            # is desired.
            self.bn = nn.GroupNorm(1, f_dim, affine=False)
        elif norm_method == 'no_norm':
            self.bn = nn.Identity()
        elif norm_method == 'running_batch_norm':
            self.bn = RunningBatchNorm1d(f_dim, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters()

    def reset_parameters(self):
        # Start as identity regardless of c: gamma == 1, beta == 0.
        nn.init.zeros_(self.conv_gamma.weight)
        nn.init.zeros_(self.conv_beta.weight)
        nn.init.ones_(self.conv_gamma.bias)
        nn.init.zeros_(self.conv_beta.bias)

    def forward(self, x, c):
        assert(x.size(0) == c.size(0))
        assert(c.size(1) == self.c_dim)

        # c is assumed to be of size batch_size x c_dim x T
        if len(c.size()) == 2:
            c = c.unsqueeze(2)

        # Affine mapping predicted from the latent code
        gamma = self.conv_gamma(c)
        beta = self.conv_beta(c)

        # Normalize, then modulate
        net = self.bn(x)
        out = gamma * net + beta

        return out
| 2.53125 | 3 |
tests/tickets/test_views.py | allisson/django-pagseguro2-example | 13 | 12772270 | import pytest
import responses
import status
from django.urls import reverse
from apps.tickets.models import Purchase
# every test in this module needs database access
pytestmark = pytest.mark.django_db
def test_event_list(admin_client, event):
    """Event list renders with the cart in context and includes our event."""
    resp = admin_client.get(reverse('tickets:event_list'))
    assert resp.status_code == status.HTTP_200_OK
    assert 'cart' in resp.context
    assert event in resp.context['events']
def test_event_detail(admin_client, event):
    """Event detail renders and exposes the requested event."""
    resp = admin_client.get(reverse('tickets:event_detail', args=[event.id]))
    assert resp.status_code == status.HTTP_200_OK
    assert 'cart' in resp.context
    assert resp.context['event'] == event
def test_cart_detail(admin_client):
    """Cart page renders with the cart in context."""
    resp = admin_client.get(reverse('tickets:cart_detail'))
    assert resp.status_code == status.HTTP_200_OK
    assert 'cart' in resp.context
def test_cart_clear(admin_client, admin_user, cart_item):
    """POSTing to cart_clear empties the cart."""
    cart = cart_item.cart
    assert cart.cart_items.count() == 1
    resp = admin_client.post(reverse('tickets:cart_clear'), follow=True)
    assert resp.status_code == status.HTTP_200_OK
    assert cart.cart_items.count() == 0
def test_cart_add_item(admin_client, cart, ticket):
    """POSTing a ticket/quantity pair adds one item to the cart."""
    assert cart.cart_items.count() == 0
    payload = {'ticket': ticket.id, 'quantity': 1}
    resp = admin_client.post(reverse('tickets:cart_add_item'), payload, follow=True)
    assert resp.status_code == status.HTTP_200_OK
    assert cart.cart_items.count() == 1
@responses.activate
def test_purchase_create(admin_client, cart_item, pagseguro_checkout_response):
    """Checkout hits the (mocked) PagSeguro API and creates a pending purchase."""
    responses.add(
        responses.POST,
        'https://ws.sandbox.pagseguro.uol.com.br/v2/checkout',
        body=pagseguro_checkout_response,
        status=200
    )
    resp = admin_client.post(reverse('tickets:purchase_create'), follow=True)
    assert resp.status_code == status.HTTP_200_OK
    purchase = Purchase.objects.filter(user=cart_item.cart.user).first()
    assert purchase.status == 'pending'
    assert purchase.pagseguro_redirect_url
def test_purchase_list(admin_client, purchase):
    """Purchase list includes the user's purchase."""
    resp = admin_client.get(reverse('tickets:purchase_list'))
    assert resp.status_code == status.HTTP_200_OK
    assert purchase in resp.context['purchases']
def test_purchase_detail(admin_client, purchase):
    """Purchase detail exposes the requested purchase."""
    resp = admin_client.get(reverse('tickets:purchase_detail', args=[purchase.id]))
    assert resp.status_code == status.HTTP_200_OK
    assert resp.context['purchase'] == purchase
| 2.15625 | 2 |
core/system.py | mackay/ble_detector | 0 | 12772271 | <reponame>mackay/ble_detector
from bunch import Bunch
from core.models import SystemOption
class SystemBase(object):
    """Thin persistence layer over SystemOption key/value rows, plus helpers
    for reading and testing the global run mode."""

    MODE_KEY = "mode"
    MODES = Bunch({
        "OFF": "off",
        "RUN": "run",
        "TRAINING": "training",
        "TESTING": "testing",
        "DEMO": "demo"
    })

    FILTER_KEY = "filter-data"
    TRAINING_KEY = "training-data"

    def __init__(self):
        pass

    def set_option(self, key, value):
        """Create or update the option row for *key* and return it."""
        try:
            system_option = SystemOption.get(SystemOption.key == key)
        except SystemOption.DoesNotExist:
            # BUGFIX: was a bare except, which also hid genuine DB errors;
            # peewee's Model.get raises <Model>.DoesNotExist when no row matches
            system_option = SystemOption()

        system_option.value = value
        system_option.key = key
        system_option.save()

        return system_option

    def get_options(self):
        """Return all stored options as a plain {key: value} dict."""
        return { option.key: option.value for option in SystemOption.select() }

    def get_option(self, key, default=None):
        """Return the stored value for *key*, or *default* when absent."""
        try:
            return SystemOption.get(SystemOption.key == key).value
        except SystemOption.DoesNotExist:
            # BUGFIX: was a bare except — a real DB failure now propagates
            return default

    def is_mode(self, mode):
        """True when the persisted mode equals *mode* (missing mode reads as OFF)."""
        return self.get_option(SystemBase.MODE_KEY, default=SystemBase.MODES.OFF) == mode

    def is_mode_off(self):
        return self.is_mode(SystemBase.MODES.OFF)

    def is_mode_training(self):
        return self.is_mode(SystemBase.MODES.TRAINING)
| 2.3125 | 2 |
fix-clip-text-vit-32-float32---scratch.py | josephrocca/onnx-typecast | 0 | 12772272 | <gh_stars>0
# I'm not sure why this script works, but it fixes this problem: https://github.com/microsoft/onnxruntime/issues/9760#issue-1053052192
# without causing this int64 error: https://github.com/microsoft/onnxruntime/issues/9760#issuecomment-968967562
# I just commented out all the actual conversion stuff (except for the model input/output conversion, but that should
# actually be doing anything here anyway) as a way to begin debugging, but then upon testing the output model it actually
# worked fine. So I'm guessing it has something to do with the way onnx adjusts the model when it "re-packages" it. I really
# have no idea though...
# To use this, just open a notebook/Colab and paste these lines:
# !git clone https://github.com/josephrocca/onnx-typecast
# %cd onnx-typecast
# !pip install -r requirements.txt
# !python3 ./fix-clip-text-vit-32-float32---scratch.py ./clip-text-vit-32-float32.onnx ./clip-text-vit-32-float32-int32.onnx
# Where the `clip-text-vit-32-float32.onnx` file is the file that you get from:
# 1. Exporting CLIP PyTorch model to ONNX using this: https://colab.research.google.com/github/josephrocca/openai-clip-js/blob/main/Export_CLIP_to_ONNX_tflite_tfjs_tf_saved_model.ipynb
# 2. Then putting the output (clip-text-vit-32.onnx) through this: https://colab.research.google.com/github/josephrocca/openai-clip-js/blob/main/ONNX_float16_to_float32.ipynb
import onnx
from onnx import helper as h
from onnx import checker as ch
from onnx import TensorProto, GraphProto, AttributeProto
from onnx import numpy_helper as nph
import numpy as np
from collections import OrderedDict
from logger import log
import typer
def make_param_dictionary(initializer):
    """Index the graph's initializer tensors by name, preserving their order."""
    return OrderedDict((tensor.name, tensor) for tensor in initializer)
def convert_params_to_int32(params_dict):
    """Return the initializer tensors from *params_dict* as a list, unchanged.

    The actual INT64 -> INT32 cast is intentionally left commented out: per
    the module header, the re-packaged model works without converting the
    weights, and converting them reintroduced int64 type mismatches.
    """
    converted_params = []
    for param in params_dict:
        data = params_dict[param]
        # if data.data_type == TensorProto.INT64:
        #     data_cvt = nph.to_array(data).astype(np.int32)
        #     data = nph.from_array(data_cvt, data.name)
        converted_params += [data]
    return converted_params
def convert_constant_nodes_to_int32(nodes):
    """
    convert_constant_nodes_to_int32 Returns the graph nodes unchanged.

    The INT64 -> INT32 rewrite of Constant nodes is deliberately disabled
    (kept below as comments): per the trailing comment, it converted Constant
    inputs of nodes such as Div_27 whose other operand (the Gather_25 output)
    stays INT64, causing a runtime type mismatch.

    Args:
        nodes (list): list of nodes

    Returns:
        list: the same nodes, in order.
    """
    new_nodes = []
    for node in nodes:
        # if (
        #     node.op_type == "Constant"
        #     and node.attribute[0].t.data_type == TensorProto.INT64
        # ):
        #     data = nph.to_array(node.attribute[0].t).astype(np.int32)
        #     new_t = nph.from_array(data)
        #     new_node = h.make_node(
        #         "Constant",
        #         inputs=[],
        #         outputs=node.output,
        #         name=node.name,
        #         value=new_t,
        #     )
        #     new_nodes += [new_node]
        # else:
        #     new_nodes += [node]
        # I replaced the above code with the line below because the above code does conversions that we don't want. E.g it converts the Constant input the Div_27 here: https://github.com/microsoft/onnxruntime/issues/9760#issuecomment-968967562
        # and that was the cause of the bug in that comment, because the Gather_25 outputs int64 (and I'm not sure how to change that).
        new_nodes += [node]
    return new_nodes
def convert_model_to_int32(model_path: str, out_path: str):
    """Re-package an ONNX model, converting its INT64 graph inputs/outputs
    to INT32, and save the result.

    Despite the historical name, initializers and Constant nodes are passed
    through unchanged (the conversion helpers are intentionally disabled —
    see the module header). The fix appears to come from rebuilding the
    graph/model via onnx.helper.

    Args:
        model_path (str): path to original ONNX model.
        out_path (str): path to save converted model.
    """
    log.info("ONNX INT64 --> INT32 Converter")
    log.info(f"Loading Model: {model_path}")
    # * load model.
    model = onnx.load_model(model_path)
    ch.check_model(model)
    # * get model opset version.
    opset_version = model.opset_import[0].version
    graph = model.graph
    # * The initializer holds all non-constant weights.
    init = graph.initializer
    # * collect model params in a dictionary.
    params_dict = make_param_dictionary(init)
    log.info("Converting INT64 model params to INT32...")
    # * convert all INT64 aprams to INT32 (currently a pass-through).
    converted_params = convert_params_to_int32(params_dict)
    log.info("Converting constant INT64 nodes to INT32...")
    new_nodes = convert_constant_nodes_to_int32(graph.node)
    # convert input and output to INT32 (only the first input/output is touched):
    input_type = graph.input[0].type.tensor_type.elem_type
    output_type = graph.output[0].type.tensor_type.elem_type
    if input_type == TensorProto.INT64:
        graph.input[0].type.tensor_type.elem_type = TensorProto.INT32
    if output_type == TensorProto.INT64:
        graph.output[0].type.tensor_type.elem_type = TensorProto.INT32
    # convert node attributes to INT32:
    # (disabled debugging/conversion code retained on purpose — see module header)
    # for node in new_nodes:
    #     if node.name == "Gather_25":
    #         print(node)
    #         print(node.attribute[0].t.data_type)
    #         print("\n\n")
    #     if node.name == "Div_27":
    #         print(node)
    #         print("\n\n")
    #     if node.name == "Constant_26":
    #         print(node)
    #         print(TensorProto.INT64)
    #         print(TensorProto.INT32)
    #         print("\n\n")
    #     if node.op_type == "Constant":
    #         print(node)
    #         print("\n\n")
    #     for index, attribute in enumerate(node.attribute):
    #         if attribute.name == "to" and attribute.i == TensorProto.INT64: # for op_type=="Cast"
    #             attribute.i = TensorProto.INT32
    #         if hasattr(attribute, "type"):
    #             if attribute.type == AttributeProto.TENSOR:
    #                 if attribute.t.data_type == TensorProto.INT64:
    #                     attribute.t.CopyFrom( nph.from_array( nph.to_array(attribute.t).astype(np.int32) ) )
    graph_name = f"{graph.name}-int32"
    log.info("Creating new graph...")
    # * create a new graph with converted params and new nodes.
    graph_int32 = h.make_graph(
        new_nodes,
        graph_name,
        graph.input,
        graph.output,
        initializer=converted_params,
    )
    log.info("Creating new int32 model...")
    model_int32 = h.make_model(graph_int32, producer_name="onnx-typecast")
    model_int32.opset_import[0].version = opset_version
    ch.check_model(model_int32)
    log.info(f"Saving converted model as: {out_path}")
    onnx.save_model(model_int32, out_path)
    log.info(f"Done Done London. 🎉")
    return
if __name__ == "__main__":
    # expose convert_model_to_int32 as a CLI; typer maps its arguments to options
    typer.run(convert_model_to_int32)
| 2.21875 | 2 |
Phase Diagram Visualization/phase_diagram.py | THE-RAF/Mathematics | 0 | 12772273 | from scipy.integrate import odeint
from scipy.optimize import fsolve
import numpy as np
import itertools
import matplotlib.pyplot as plt
from colorlines import colorline
from matplotlib import style
class PhaseDiagram:
    """Phase-diagram helper for a 2-variable autonomous system dx/dt = system(x, t).

    Finds steady states by multi-start root finding and draws trajectories
    onto a matplotlib axis.
    """

    def __init__(self, system):
        self.system = system
        self.fig, self.ax = plt.subplots(1, 1)

    def steady_states(self, search_space, discretization=5):
        """Root-find system(x, 0) = 0 from a grid of initial guesses.

        Args:
            search_space: per-axis [lo, hi] bounds for the guess grid.
            discretization: number of grid points per axis.
        Returns:
            list of distinct steady-state vectors (deduplicated at atol=1e-2).
        """
        linspaces = [np.linspace(axis[0], axis[1], discretization) for axis in search_space]
        guesses = list(itertools.product(*linspaces))
        ss_system = lambda x: self.system(x, 0)

        results = []
        for guess in guesses:
            calc_result, _, convergence_success, info = fsolve(ss_system, guess, full_output=True)
            if convergence_success:
                if len(results) == 0:
                    results.append(calc_result)
                else:
                    # keep only roots not already found (coarse tolerance)
                    new_guess = True
                    for result in results:
                        if all(np.isclose(calc_result, result, atol=1e-2)):
                            new_guess = False
                    if new_guess:
                        results.append(calc_result)
            else:
                print('convergence failure')

        return results

    def plot_trajectory(self, x0, time_sequence, ax, fade=0.1, linewidth=1):
        """Integrate one trajectory from x0 and draw it as a fading colorline."""
        # BUGFIX: previously integrated the module-level global `f` instead of
        # the system this diagram was constructed with.
        r = odeint(self.system, x0, time_sequence)
        colorline(x=r[:,0], y=r[:,1], ax=ax, cmap='bone_r', fade=fade, linewidth=linewidth)
        # plt.plot(r[:,0], r[:,1])

    def random_paths(self, n, time_sequence, x_rand_interval, y_rand_interval, fade=0.1, linewidth=1):
        """Draw *n* trajectories from uniformly random starting points and show the figure."""
        self.fig.subplots_adjust(
            top=0.981,
            bottom=0.043,
            left=0.029,
            right=0.981,
            hspace=0.2,
            wspace=0.2
        )

        for _ in range(n):
            x_random = np.random.uniform(x_rand_interval[0], x_rand_interval[1])
            y_random = np.random.uniform(y_rand_interval[0], y_rand_interval[1])
            self.plot_trajectory([x_random, y_random], time_sequence=time_sequence, ax=self.ax, fade=fade, linewidth=linewidth)

        plt.show()
def f(x, t):
    """Vector field of the example system (predator-prey style):
    dx0/dt = x0 * (1 - x1), dx1/dt = x1 * (x0 - 1)."""
    prey, predator = x[0], x[1]
    return np.array([prey * (1.0 - predator), predator * (prey - 1.0)])
# --- example usage: locate steady states and draw random trajectories -------
PD = PhaseDiagram(f)
steady_states = PD.steady_states(search_space=[[-10,40],[-10,40]])
print(steady_states)
time_sequence=np.linspace(0.1,2.5,1000)
PD.random_paths(n=150, time_sequence=time_sequence, x_rand_interval=[-.4, 1.5], y_rand_interval=[0, 2], fade=1.0)
# PD.fig.savefig('PD1.png', dpi=300)
| 2.71875 | 3 |
tests/openbb_terminal/stocks/due_diligence/test_csimarket_model.py | tehcoderer/GamestonkTerminal | 255 | 12772274 | <gh_stars>100-1000
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.stocks.due_diligence import csimarket_model
@pytest.mark.vcr
def test_get_suppliers(recorder):
    """Supplier lookup for a known ticker matches the recorded snapshot."""
    recorder.capture(csimarket_model.get_suppliers(ticker="TSLA"))
@pytest.mark.vcr
def test_get_suppliers_invalid(recorder):
    """Supplier lookup for an invalid ticker matches the recorded snapshot."""
    recorder.capture(csimarket_model.get_suppliers(ticker="INVALID_TICKER"))
@pytest.mark.vcr
def test_get_customers(recorder):
    """Customer lookup for a known ticker matches the recorded snapshot."""
    recorder.capture(csimarket_model.get_customers(ticker="TSLA"))
@pytest.mark.vcr
def test_get_customers_invalid(recorder):
    """Customer lookup for an invalid ticker matches the recorded snapshot."""
    recorder.capture(csimarket_model.get_customers(ticker="INVALID_TICKER"))
| 1.992188 | 2 |
read_and_write.py | zyhhh123/Deep-learning | 0 | 12772275 | <filename>read_and_write.py
# gdal2numpy and numpy2g
import sys
import numpy as np
from osgeo import gdal,gdal_array
def gdal2np(path):
# driver = gdal.GetDriverByName('HFA')
# driver.Register()
ds =gdal.Open(path,gdal.GA_ReadOnly)
if ds is None:
print('Could not open '+path)
sys.exit(1)
# H*W*C
W = ds.RasterXSize
H = ds.RasterYSize
C = ds.RasterCount
# get datatype
datatype = ds.GetRasterBand(1).DataType
# store to numpy
data = np.zeros((H,W,C),dtype = gdal_array.GDALTypeCodeToNumericTypeCode(datatype))
for i in range(C):
band = ds.GetRasterBand(i+1)
data[...,i] = band.ReadAsArray(0,0,W,H)
return data
def np2gdal(data,path):
driver = gdal.GetDriverByName('GTiff')
H,W,C = data.shape
ds = driver.Create(path,W,H,C,gdal.GDT_Float32)
for i in range(C):
ds.GetRasterBand(i+1).WriteArray(data[...,i])
import os
if __name__ =='__main__':
    # Smoke test: read every raster under a hard-coded (Windows-specific)
    # directory and print its (H, W, C) shape.
    path = 'D:\\hello\\OCOD\\images\\Onera Satellite Change Detection dataset - Images\\rennes\\imgs_2_rect'
    file = os.listdir(path)
    for i in file:
        path1 = os.path.join(path,i)
        read = gdal2np(path1)
        print(read.shape)
| 2.734375 | 3 |
tests/commands/test_mapping_sheet.py | mariob0y/ocdskit | 0 | 12772276 | import pytest
from ocdskit.cli.__main__ import main
from tests import assert_command, assert_command_error, path
def test_command(capsys, monkeypatch):
    """Basic mapping-sheet run with --infer-required matches the fixture CSV."""
    assert_command(capsys, monkeypatch, main,
                   ['mapping-sheet', '--infer-required', path('release-schema.json')],
                   'mapping-sheet.csv')
def test_command_no_deprecated(capsys, monkeypatch):
    """--no-deprecated drops deprecated fields from the sheet."""
    assert_command(capsys, monkeypatch, main,
                   ['mapping-sheet', '--infer-required', '--no-deprecated', path('release-schema.json')],
                   'mapping-sheet_no-deprecated.csv')
def test_command_order_by(capsys, monkeypatch):
    """--order-by path sorts rows by the 'path' column."""
    assert_command(capsys, monkeypatch, main,
                   ['mapping-sheet', '--infer-required', '--order-by', 'path', path('release-schema.json')],
                   'mapping-sheet_order-by.csv')
@pytest.mark.vcr()
def test_command_extension(capsys, monkeypatch):
    """--extension merges a remote extension (HTTP replayed via VCR)."""
    url = 'https://github.com/open-contracting-extensions/ocds_lots_extension/archive/v1.1.4.zip'
    assert_command(capsys, monkeypatch, main,
                   ['mapping-sheet', '--infer-required', path('release-schema.json'), '--extension', url],
                   'mapping-sheet_extension.csv')
def test_command_extension_field(capsys, monkeypatch):
    """--extension-field adds a column naming each field's source extension."""
    assert_command(capsys, monkeypatch, main,
                   ['mapping-sheet', '--infer-required', '--extension-field', 'extension',
                    path('release-schema.json')],
                   'mapping-sheet_extension-field.csv')
@pytest.mark.vcr()
def test_command_extension_and_extension_field(capsys, monkeypatch):
    """Combining --extension with --extension-field attributes merged fields."""
    url = 'https://github.com/open-contracting-extensions/ocds_lots_extension/archive/v1.1.4.zip'
    assert_command(capsys, monkeypatch, main,
                   ['mapping-sheet', '--infer-required', '--extension-field', 'extension',
                    path('release-schema.json'), '--extension', url],
                   'mapping-sheet_extension_extension-field.csv')
@pytest.mark.vcr()
def test_command_extension_and_extension_field_and_language(capsys, monkeypatch):
    """--language es pulls the Spanish translation of the extension."""
    url = 'https://extensions.open-contracting.org/es/extensions/lots/master/'
    assert_command(capsys, monkeypatch, main,
                   ['mapping-sheet', '--infer-required', '--extension-field', 'extension',
                    path('release-schema.json'), '--extension', url, '--language', 'es'],
                   'mapping-sheet_extension_extension-field_language.csv')
@pytest.mark.vcr()
def test_command_extension_and_extension_field_location(capsys, monkeypatch):
    """Same as above but with the location extension as the merge source."""
    url = 'https://github.com/open-contracting-extensions/ocds_location_extension/archive/v1.1.4.zip'
    assert_command(capsys, monkeypatch, main,
                   ['mapping-sheet', '--infer-required', '--extension-field', 'extension',
                    path('release-schema.json'), '--extension', url],
                   'mapping-sheet_extension_extension-field_location.csv')
def test_command_oc4ids(capsys, monkeypatch):
    """The command also handles an OC4IDS project schema."""
    assert_command(capsys, monkeypatch, main,
                   ['mapping-sheet', path('project-schema.json')],
                   'mapping-sheet_oc4ids.csv')
def test_command_bods(capsys, monkeypatch):
    """The command also handles a BODS statement schema."""
    assert_command(capsys, monkeypatch, main,
                   ['mapping-sheet', '--order-by', 'path', path('bods/person-statement.json')],
                   'mapping-sheet_bods.csv')
def test_command_sedl(capsys, monkeypatch):
    """The command also handles a SEDL schema."""
    assert_command(capsys, monkeypatch, main,
                   ['mapping-sheet', path('sedl-schema.json')],
                   'mapping-sheet_sedl.csv')
def test_command_order_by_nonexistent(capsys, monkeypatch, caplog):
    """Ordering by a column that does not exist fails with a CRITICAL log."""
    assert_command_error(capsys, monkeypatch, main, ['mapping-sheet', '--order-by',
                                                     'nonexistent', path('release-schema.json')])
    assert len(caplog.records) == 1
    assert caplog.records[0].levelname == 'CRITICAL'
    assert caplog.records[0].message == "the column 'nonexistent' doesn't exist – did you make a typo?"
| 2.203125 | 2 |
figure2a_learningcurves_perlab.py | shenshan/paper-behavior | 0 | 12772277 | <filename>figure2a_learningcurves_perlab.py
"""
Learning curves for all labs
@author: <NAME>
15 January 2020
"""
import pandas as pd
import numpy as np
import os
import seaborn as sns
import matplotlib.pyplot as plt
from paper_behavior_functions import (query_subjects, seaborn_style, figpath,
group_colors, institution_map, seaborn_style)
from ibl_pipeline.analyses import behavior as behavioral_analyses
# INITIALIZE A FEW THINGS
seaborn_style()
# NOTE(review): these assignments rebind the imported names `figpath` and
# `institution_map` to their return values, so the functions cannot be
# called again later in this script -- intentional single-use pattern.
figpath = figpath()
pal = group_colors()
institution_map, col_names = institution_map()
# drop the last column name (the aggregate "all labs" entry, presumably --
# TODO confirm against institution_map()).
col_names = col_names[:-1]
# ================================= #
# GET DATA FROM TRAINED ANIMALS
# ================================= #
# DataJoint query: daily behavioral summaries joined with eligible subjects.
use_subjects = query_subjects()
b = (behavioral_analyses.BehavioralSummaryByDate * use_subjects)
behav = b.fetch(order_by='institution_short, subject_nickname, training_day',
                format='frame').reset_index()
behav['institution_code'] = behav.institution_short.map(institution_map)
# how many mice are there for each lab?
N = behav.groupby(['institution_code'])['subject_nickname'].nunique().to_dict()
behav['n_mice'] = behav.institution_code.map(N)
# panel titles like "Lab1: 12 mice"
behav['institution_name'] = behav.institution_code + \
    ': ' + behav.n_mice.apply(str) + ' mice'
# make sure each mouse starts at 0
for index, group in behav.groupby(['lab_name', 'subject_nickname']):
    # BUG FIX: the original chained indexing
    # behav['training_day'][mask] = ... assigns through an intermediate
    # object (pandas SettingWithCopyWarning) and can silently fail to write;
    # .loc writes to the frame directly and aligns by the group's index.
    behav.loc[group.index, 'training_day'] = (
        group['training_day'] - group['training_day'].min())
# create another column only after the mouse is trained: mask (NaN) the easy
# performance for sessions before the animal's 'date_trained'
trained_groups = []
for index, group in behav.groupby(['institution_code', 'subject_nickname']):
    group['performance_easy_trained'] = group.performance_easy
    group.loc[group['session_date'] < pd.to_datetime(group['date_trained']),
              'performance_easy_trained'] = np.nan
    trained_groups.append(group)
# DataFrame.append was deprecated and removed in pandas 2.0, and rebuilt the
# frame on every iteration (quadratic); concatenate the groups once instead.
behav = pd.concat(trained_groups) if trained_groups else pd.DataFrame([])
# express performance as percentages
behav['performance_easy'] = behav.performance_easy * 100
behav['performance_easy_trained'] = behav.performance_easy_trained * 100
# ================================= #
# LEARNING CURVES
# ================================= #
# plot one curve for each animal, one panel per lab
fig = sns.FacetGrid(behav,
                    col="institution_code", col_wrap=7, col_order=col_names,
                    sharex=True, sharey=True, aspect=0.7, hue="subject_uuid", xlim=[-1, 41.5])
# gray: full learning curve; dark blue: only sessions after training criterion
fig.map(sns.lineplot, "training_day",
        "performance_easy", color='gray', alpha=0.3)
fig.map(sns.lineplot, "training_day",
        "performance_easy_trained", color='darkblue', alpha=0.3)
fig.set_titles("{col_name}")
# NOTE(review): assumes behav.institution_name.unique() is ordered the same
# way as the FacetGrid columns (col_order) -- confirm before relying on the
# title/colour pairing.
for axidx, ax in enumerate(fig.axes.flat):
    ax.set_title(behav.institution_name.unique()[
                 axidx], color=pal[axidx], fontweight='bold')
# overlay the example mouse
sns.lineplot(ax=fig.axes[0], x='training_day', y='performance_easy', color='black',
             data=behav[behav['subject_nickname'].str.contains('KS014')], legend=False)
fig.set_axis_labels('Training day', 'Performance (%) on easy trials')
fig.despine(trim=True)
fig.savefig(os.path.join(figpath, "figure2a_learningcurves.pdf"))
fig.savefig(os.path.join(figpath, "figure2a_learningcurves.png"), dpi=300)
# Plot all labs
# one curve per lab (seaborn aggregates across mice; CI bands disabled)
fig, ax1 = plt.subplots(1, 1, figsize=(4, 4))
sns.lineplot(x='training_day', y='performance_easy', hue='institution_code', palette=pal,
             ax=ax1, legend=False, data=behav, ci=None)
ax1.set_title('All labs', color='k', fontweight='bold')
ax1.set(xlabel='Training day',
        ylabel='Performance (%) on easy trials', xlim=[-1, 41.5])
seaborn_style()
plt.tight_layout(pad=2)
fig.savefig(os.path.join(figpath, "figure2b_learningcurves_all_labs.pdf"))
fig.savefig(os.path.join(
    figpath, "figure2b_learningcurves_all_labs.png"), dpi=300)
# ================================= #
# print some stats
# ================================= #
# NOTE(review): behav_summary_std is computed but never used below --
# either print it alongside the mean or remove it.
behav_summary_std = behav.groupby(['training_day'])[
    'performance_easy'].std().reset_index()
behav_summary = behav.groupby(['training_day'])[
    'performance_easy'].mean().reset_index()
# first training day on which the mean easy-trial performance exceeds 80%
print('number of days to reach 80% accuracy on easy trials: ')
print(behav_summary.loc[behav_summary.performance_easy >
                        80, 'training_day'].min())
| 2.828125 | 3 |
parser/team04/Interpreter/Expression/bitwise.py | webdev188/tytus | 35 | 12772278 | from Interpreter.Expression.expression import Expression
class BitWise(Expression):
    """Base node for the bitwise operators of the expression tree."""

    def __init__(self, left, right=None):
        # right is None for unary operators (BitwiseNot)
        self.left = left
        self.right = right

    def isNumeric(self, value):
        """A value may participate in bitwise evaluation if it is int or float."""
        return isinstance(value, (int, float))

    def getGraph(self, graph, idParent):
        """Add this node and its operand subtrees to the AST graph."""
        node_id = str(id(self))
        graph.node(node_id, label=self.__class__.__name__)
        graph.edge(idParent, node_id)
        self.left.getGraph(graph, node_id)
        if self.right:
            self.right.getGraph(graph, node_id)
class BitwiseAnd(BitWise):
    """Bitwise AND: evaluates left & right when both operands are numeric."""

    def __init__(self, left, right):
        BitWise.__init__(self, left, right)

    def getValue(self, env):
        lhs = self.left.getValue(env)
        rhs = self.right.getValue(env)
        if self.isNumeric(lhs) and self.isNumeric(rhs):
            return lhs & rhs
        # implicitly returns None for non-numeric operands
class BitwiseOr(BitWise):
    """Bitwise OR: evaluates left | right when both operands are numeric."""

    def __init__(self, left, right):
        BitWise.__init__(self, left, right)

    def getValue(self, env):
        lhs = self.left.getValue(env)
        rhs = self.right.getValue(env)
        if self.isNumeric(lhs) and self.isNumeric(rhs):
            return lhs | rhs
        # implicitly returns None for non-numeric operands
class BitwiseNot(BitWise):
    """Bitwise NOT: evaluates ~operand for a single numeric operand."""

    def __init__(self, left):
        BitWise.__init__(self, left)

    def getValue(self, env):
        operand = self.left.getValue(env)
        if self.isNumeric(operand):
            return ~ operand
        # implicitly returns None for non-numeric operands
class BitwiseXOR(BitWise):
    """Bitwise XOR: evaluates left ^ right when both operands are numeric."""

    def __init__(self, left, right):
        BitWise.__init__(self, left, right)

    def getValue(self, env):
        lhs = self.left.getValue(env)
        rhs = self.right.getValue(env)
        if self.isNumeric(lhs) and self.isNumeric(rhs):
            return lhs ^ rhs
        # implicitly returns None for non-numeric operands
class BitwiseRightShift(BitWise):
    """Right shift: evaluates left >> right when both operands are numeric."""

    def __init__(self, left, right):
        BitWise.__init__(self, left, right)

    def getValue(self, env):
        lhs = self.left.getValue(env)
        rhs = self.right.getValue(env)
        if self.isNumeric(lhs) and self.isNumeric(rhs):
            return lhs >> rhs
        # implicitly returns None for non-numeric operands
class BitwiseLeftShift(BitWise):
    """Left shift: evaluates left << right when both operands are numeric."""

    def __init__(self, left, right):
        BitWise.__init__(self, left, right)

    def getValue(self, env):
        lhs = self.left.getValue(env)
        rhs = self.right.getValue(env)
        if self.isNumeric(lhs) and self.isNumeric(rhs):
            return lhs << rhs
        # implicitly returns None for non-numeric operands
| 3.109375 | 3 |
proxy_server.py | AcidCannon/CMPUT404.W2021.LAB2 | 0 | 12772279 | <reponame>AcidCannon/CMPUT404.W2021.LAB2
#!/usr/bin/env python3
import socket
# CONSTANTS
INBOUND_HOST = "" # Listen for all possible hosts
INBOUND_PORT = 8001
INBOUND_BUFFER_SIZE = 1024
# Upstream server every proxied request is forwarded to.
OUTBOUND_HOST = "www.google.com"
OUTBOUND_PORT = 80
OUTBOUND_BUFFER_SIZE = 1024
def main():
    """Minimal blocking TCP proxy: relay each inbound connection's request
    to OUTBOUND_HOST and stream the full response back to the client.

    Runs forever, handling one connection at a time.
    """
    # Create a socket object
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # Set socket options, here, reuse the same bind port
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind socket to address
        s.bind((INBOUND_HOST, INBOUND_PORT))
        # Set to listening mode (backlog of 2 pending connections)
        s.listen(2)
        # Continuously listen for connections
        while True:
            conn, addr = s.accept()
            print("Connected by:", str(addr[0]) + ":" + str(addr[1]))
            # Accepted connection
            with conn:
                # Fetch data from client
                # NOTE(review): a single recv() assumes the entire client
                # request fits in INBOUND_BUFFER_SIZE bytes; larger requests
                # are truncated -- confirm acceptable.
                client_data = conn.recv(INBOUND_BUFFER_SIZE)
                # Then send the client data to the target server
                # Create a socket object
                print("Received From:", str(addr[0]) + ":" + str(addr[1]), "Content:", client_data)
                with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as proxy_s:
                    # Connect to the target server
                    proxy_s.connect((socket.gethostbyname(OUTBOUND_HOST), OUTBOUND_PORT))
                    print("Connecting to:", socket.gethostbyname(OUTBOUND_HOST) + ":" + str(OUTBOUND_PORT))
                    proxy_s.sendall(client_data)
                    proxy_data = b""
                    # Read until the upstream closes the connection.
                    # NOTE(review): assumes the server closes after
                    # responding (HTTP/1.0 style); a keep-alive server
                    # would block here indefinitely.
                    while True:
                        fetched_data = proxy_s.recv(OUTBOUND_BUFFER_SIZE)
                        if not fetched_data:
                            break
                        proxy_data += fetched_data
                    peer_addr = proxy_s.getpeername()
                    print("Response from:", str(peer_addr[0]) + ":" + str(peer_addr[1]), "Content:", proxy_data)
                    conn.sendall(proxy_data)
if __name__ == "__main__":
    main()
| 3.125 | 3 |
snippets - machine learning sklearn/preprocessing-encoding-categorical-features.py | bjfisica/MachineLearning | 52 | 12772280 | <reponame>bjfisica/MachineLearning<filename>snippets - machine learning sklearn/preprocessing-encoding-categorical-features.py
from sklearn.preprocessing import LabelEncoder
# Integer-encode categorical target labels.
# NOTE(review): snippet assumes `y` (an array-like of class labels) is
# already defined in the surrounding scope.
encoder = LabelEncoder()
y = encoder.fit_transform(y)
monte/runtime/equalizer.py | washort/monte | 0 | 12772281 | <gh_stars>0
import warnings
from monte.runtime.base import MonteObject, toQuote
from monte.runtime.data import (null, true, false, bwrap, Integer, Float,
String, Character, Bool)
from monte.runtime.guards.base import deepFrozenGuard, selflessGuard
from monte.runtime.tables import ConstList, ConstMap
def isIn(val, container):
    """Identity-based membership: is *val* one of the items, compared by `is`?"""
    for item in container:
        if item is val:
            return True
    return False
def _findSofar(left, right, sofar):
    """Return (as a Monte bool) whether this (left, right) pair was already
    visited; the key is the pair of ids in ascending order."""
    key = (id(left), id(right))
    if key[1] < key[0]:
        key = (key[1], key[0])
    return bwrap(key in sofar)
def _pushSofar(left, right, sofar):
lid, rid = id(left), id(right)
if rid < lid:
lid, rid = rid, lid
sofar[(lid, rid)] = (left, right)
def _same(left, right, sofar, dbg=False):
    """Core sameness test.

    Returns Monte `true`/`false`, or `null` when the comparison is not yet
    settled.  *sofar* maps id-pairs already being compared, breaking cycles
    in recursive structures (coinductive equality).
    """
    from monte.runtime.ref import _resolution
    left = _resolution(left)
    right = _resolution(right)
    # Equality by identity. Relatively rare but still useful.
    if left is right:
        return true
    if left is None:
        return bwrap(right is None)
    if sofar and _findSofar(left, right, sofar):
        return true
    t = type(left)
    if t is ConstList:
        if type(right) is not ConstList:
            return false
        if len(left.l) != len(right.l):
            return false
        # Mark the pair before recursing so cyclic lists terminate.
        _pushSofar(left, right, sofar)
        for l, r in zip(left, right):
            result = _same(l, r, sofar, dbg)
            if result is null:
                return null
            if result is false:
                return false
            # BUG FIX: removed a leftover `import pdb; pdb.set_trace()`
            # debugger trap that fired for any non-canonical truthy result;
            # it returned true afterwards, so control flow is unchanged.
        return true
    if t in DOES_OWN_HASHING:
        # NOTE(review): this returns whatever these types' __eq__ yields;
        # confirm Integer/Float/etc. produce a Monte Bool here, and note
        # that the Integer/Float/Bool/String/Character branches below are
        # unreachable for types listed in DOES_OWN_HASHING.
        return left == right
    if (isIn(selflessGuard, getattr(left, '_m_auditorStamps', ())) and
        isIn(selflessGuard, getattr(right, '_m_auditorStamps', ()))):
        # Selfless objects compare by their uncalled (portrayal) form.
        _pushSofar(left, right, sofar)
        return _same(left._uncall(), right._uncall(), sofar)
    # Equality of primitives.
    if type(left) != type(right):
        return false
    if left in (null, true, false):
        return bwrap(left is right)
    if t in (Integer, Float):
        return bwrap(left.n == right.n)
    elif t is Bool:
        return bwrap(left._b == right._b)
    elif t is String: # Other Twines have uncall methods.
        return bwrap(left.s == right.s)
    elif t is Character:
        return bwrap(left._c == right._c)
    warnings.warn("Asked to equalize unknown type %r" % t,
                  RuntimeWarning)
    return false
class Equalizer(MonteObject):
    """The Monte `__equalizer` object: public entry points for sameness."""
    _m_fqn = "__equalizer"
    _m_auditorStamps = (deepFrozenGuard,)

    def debugSameEver(self, left, right):
        # Debug helper: deliberately drops into pdb before comparing.
        import pdb; pdb.set_trace()
        return self.sameEver(left, right, True)

    def sameEver(self, left, right, dbg=False):
        """Settled sameness; raises RuntimeError when undecidable (unsettled)."""
        result = _same(left, right, {}, dbg)
        if result is null:
            raise RuntimeError("Not sufficiently settled: %s == %s" % (
                toQuote(left), toQuote(right)))
        return result

    def sameYet(self, left, right):
        """Sameness as far as currently resolvable; unsettled maps to false."""
        result = _same(left, right, {})
        # NOTE(review): _same signals "unsettled" by returning the Monte
        # `null` object, never Python None, so this branch looks dead --
        # likely intended as `result is null`; confirm before changing.
        if result is None:
            return false
        else:
            return result
# Maximum structural depth explored when computing sameness hashes.
HASH_DEPTH = 10
def samenessHash(obj, hashDepth=HASH_DEPTH, path=None, fringe=None):
    """Hash *obj* consistently with sameness comparison.

    Recurses up to *hashDepth* levels; unsettled references are appended to
    *fringe* (as FringeNode) when a fringe list is given, otherwise a
    RuntimeError is raised.  Returns -1 for depth-exhausted or unsettled
    nodes.
    """
    from monte.runtime.ref import _isSelfless, _isResolved, _resolution
    if hashDepth <= 0:
        # Out of depth budget: only check settledness, don't recurse further.
        if samenessFringe(obj, path, fringe):
            # obj is settled.
            return -1
        elif fringe is None:
            raise RuntimeError("Must be settled")
        else:
            #obj isn't settled.
            return -1
    obj = _resolution(obj)
    if obj is null:
        return 0
    if type(obj) is ConstList:
        # Combine length with each element's hash, tagged by its index.
        result = len(obj.l)
        for i, o in enumerate(obj.l):
            if fringe is None:
                fr = None
            else:
                fr = FringePath(i, path)
            result ^= i ^ samenessHash(o, hashDepth - 1, fr, fringe)
        return result
    if type(obj) in DOES_OWN_HASHING:
        return hash(obj)
    if _isSelfless(obj):
        # Selfless objects hash via their uncalled (portrayal) form.
        return samenessHash(obj._uncall(), hashDepth, path, fringe)
    if _isResolved(obj):
        # Resolved non-selfless objects hash by identity.
        return id(obj)
    elif fringe is None:
        raise RuntimeError("Must be settled")
    else:
        # obj is unresolved
        fringe.append(FringeNode(obj, path))
        return -1
def sameYetHash(obj, fringe):
    """Hash *obj* for same-yet comparison, folding in the fringe.

    samenessHash must run first: it populates *fringe* with any unsettled
    references it encounters.
    """
    acc = samenessHash(obj, HASH_DEPTH, None, fringe)
    for node in fringe:
        acc ^= node.hash()
    return acc
def samenessFringe(original, path, fringe, sofar=None):
    """Return True if *original* is settled.

    When *fringe* is a list, unsettled references are appended to it (as
    FringeNode tagged with *path*); *sofar* is the set of object ids already
    visited, used to terminate on cyclic structures.
    """
    from monte.runtime.ref import _isResolved, _isSelfless, _resolution, _isDeepFrozen
    if sofar is None:
        sofar = set()
    obj = _resolution(original)
    if obj is None:
        return True
    if id(original) in sofar:
        return True
    if _isDeepFrozen(original):
        return True
    if id(obj) in sofar:
        return True
    if type(obj) is ConstList:
        sofar.add(id(original))
        result = True
        for i, o in enumerate(obj.l):
            if fringe is None:
                fr = None
            else:
                fr = FringePath(i, path)
            result &= samenessFringe(o, fr, fringe, sofar)
            if (not result) and fringe is None:
                # Unresolved promise found.
                return False
        return result
    if type(obj) in DOES_OWN_HASHING:
        return True
    if _isSelfless(obj):
        sofar.add(id(original))
        # BUG FIX: the recursive call previously passed (obj, sofar, path,
        # fringe), scrambling path/fringe/sofar relative to the signature
        # (original, path, fringe, sofar) and crashing on sofar.add()
        # receiving the fringe list.
        return samenessFringe(obj._uncall(), path, fringe, sofar)
    if _isResolved(obj):
        return True
    else:
        if fringe is not None:
            fringe.append(FringeNode(obj, path))
        return False
class FringePath(object):
    """Singly-linked chain of positions locating a node within a structure
    (innermost position at the head)."""

    def __init__(self, position, next):
        self.position = position
        self.next = next

    def __eq__(self, right):
        """Two paths are equal when their position sequences match exactly."""
        left = self
        while left is not None:
            if right is None or left.position != right.position:
                return False
            left = left.next
            right = right.next
        return right is None

    def hash(self):
        """XOR every position along the chain, each shifted by a rotating
        4-bit offset (mod 32)."""
        p = self
        h = 0
        shift = 0
        while p is not None:
            # BUG FIX: previously XORed `self.position` on every iteration
            # instead of the current node's `p.position`, so positions past
            # the head never influenced the hash (still consistent with
            # __eq__, but a far weaker hash).
            h ^= p.position << shift
            shift = (shift + 4) % 32
            p = p.next
        return h
class FringeNode(object):
    """Pairs an unsettled object's identity with the path where it was found."""

    def __init__(self, obj, path):
        self.identity = id(obj)
        self.path = path

    def __eq__(self, other):
        # Same semantics as comparing (identity, path) tuples: identities
        # must match, then paths are compared (short-circuiting).
        return self.identity == other.identity and self.path == other.path

    def __hash__(self):
        return self.identity ^ self.path.hash()
class TraversalKey(object):
    """Hashable, settled snapshot of an object, usable as a map key.

    Captures the object's resolution, its same-yet hash, and the fringe of
    unsettled references at snapshot time; two keys are equal when their
    snapshots are same-yet and their fringes match pairwise.
    """
    _m_auditorStamps = (deepFrozenGuard, selflessGuard)

    def __init__(self, wrapped):
        from monte.runtime.ref import _resolution
        self.wrapped = _resolution(wrapped)
        fringeBuild = []
        # BUG FIX: previously read `self.fringeBuild`, an attribute that was
        # never assigned (AttributeError on every construction); the local
        # list is the intended argument, which sameYetHash fills in.
        self.snapHash = sameYetHash(self.wrapped, fringeBuild)
        self.fringe = fringeBuild

    def __eq__(self, other):
        if not isinstance(other, TraversalKey):
            return False
        # Cheap hash comparison first, then the structural comparison.
        if self.snapHash != other.snapHash:
            return False
        if not equalizer.sameYet(self.wrapped, other.wrapped) is true:
            return False
        if len(other.fringe) != len(self.fringe):
            return False
        return all(s == o for s, o in zip(self.fringe, other.fringe))

    def _printOn(self, out):
        out.raw_print(u'<a traversal key>')

    def __hash__(self):
        return self.snapHash
# Types whose own __hash__/__eq__ already agree with Monte sameness.
DOES_OWN_HASHING = (Integer, Float, String, Character, Bool, TraversalKey) #FarRef, DisconnectedRef, ...
| 2.03125 | 2 |
WebMirror/management/rss_parser_funcs/feed_parse_extractSharramycatsTranslations.py | fake-name/ReadableWebProxy | 193 | 12772282 | def extractSharramycatsTranslations(item):
"""
'Sharramycats Translations'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
tagmap = [
('11 Ways to Forget Your Ex-Boyfriend', '11 Ways to Forget Your Ex-Boyfriend', 'translated'),
('The Monster Inside Of My Bed', 'The Monster Inside Of My Bed', 'translated'),
('The Peculiars\' Tale', 'The Peculiars\' Tale', 'translated'),
('ARG', '<NAME>.', 'translated'),
('Legend of Gemini', 'Legend of Gemini', 'translated'),
('Kaliskis', 'Kaliskis', 'translated'),
('She Died', 'She Died', 'translated'),
('Ice Goddess', 'Ice Goddess', 'translated'),
('The Friendly Wedding', 'The Friendly Wedding', 'translated'),
('Forlorn Madness', 'Forlorn Madness', 'translated'),
('Hidden Inside The Academy', 'Hidden Inside The Academy', 'translated'),
('The Señorita', 'The Señorita', 'translated'),
('School Of Myths', 'School of Myths', 'translated'),
('The Guys Inside of My Bed', 'The Guys Inside of My Bed', 'translated'),
('The Guy Inside Of My Bed', 'The Guys Inside of My Bed', 'translated'),
('Titan Academy Of Special Abilities', 'Titan Academy Of Special Abilities', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 2.546875 | 3 |
contacts/processors.py | phildini/logtacts | 56 | 12772283 | <filename>contacts/processors.py
def book(request):
    """Template context processor exposing the request's current book.

    Yields {'book': <current book>} when the middleware attached one to the
    request, otherwise {'book': None}.
    """
    return {'book': getattr(request, 'current_book', None)}
database_setup.py | melissapott/CharacterSheets | 0 | 12772284 | from sqlalchemy import Column, ForeignKey, Integer, String, Enum
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
import psycopg2
Base = declarative_base()
# define database tables
class Person(Base):
    """ORM model for the `person` table."""
    __tablename__ = 'person'
    id = Column(Integer, primary_key = True)
    fname = Column(String(250), nullable = False)
    lname = Column(String(250), nullable = False)
    email = Column(String(250))
    status = Column(String(10))
class Race(Base):
    """ORM model for the `race` table."""
    __tablename__ = 'race'
    id = Column(Integer, primary_key = True)
    name = Column(String(100), nullable = False)
class Character(Base):
    """ORM model for the `character` table; references a person and a race."""
    __tablename__ = 'character'
    id = Column(Integer, primary_key = True)
    person_id = Column(Integer, ForeignKey('person.id'))
    name = Column(String(250), nullable = False)
    race_id = Column(Integer, ForeignKey('race.id'))
    concept = Column(String(250), nullable = True)
    person = relationship(Person)
    race = relationship(Race)
class Faction(Base):
    """ORM model for the `faction` table; each faction belongs to a race."""
    __tablename__ = 'faction'
    id = Column(Integer, primary_key = True)
    race_id = Column(Integer, ForeignKey('race.id'))
    name = Column(String(100), nullable = False)
    race = relationship(Race)
class Ability(Base):
    """ORM model for the `ability` table (free-form type + name)."""
    __tablename__ = 'ability'
    id = Column(Integer, primary_key = True)
    type = Column(String(100), nullable = False)
    name = Column(String(100), nullable = False)
class Attribute(Base):
    """ORM model for the `attribute` table."""
    __tablename__ = 'attribute'
    id = Column(Integer, primary_key = True)
    name = Column(String(100), nullable = False)
# Connect to the local PostgreSQL database and create all tables.
# NOTE(review): credentials are hard-coded in the URL; move them to an
# environment variable or configuration file before sharing/deploying.
engine = create_engine('postgresql://charsheet:4ab62xxc@localhost/charsheet')
Base.metadata.create_all(engine)
| 2.703125 | 3 |
components/collector/tests/source_collectors/api_source_collectors/test_cobertura_jenkins_plugin.py | Gamer1120/quality-time | 1 | 12772285 | <filename>components/collector/tests/source_collectors/api_source_collectors/test_cobertura_jenkins_plugin.py
"""Unit tests for the Cobertura Jenkins plugin source."""
from .jenkins_plugin_test_case import JenkinsPluginTestCase, JenkinsPluginTestsMixin
class CoberturaJenkinsPluginTest(JenkinsPluginTestCase, JenkinsPluginTestsMixin):
    """Unit tests for the Cobertura Jenkins plugin metrics."""
    source_type = "cobertura_jenkins_plugin"
    async def test_uncovered_lines(self):
        """Test that the number of uncovered lines and the total number of lines are returned."""
        metric = dict(type="uncovered_lines", sources=self.sources, addition="sum")
        # Fixture: 13 of 15 lines covered -> 2 uncovered out of 15 total.
        response = await self.collect(
            metric,
            get_request_json_return_value=dict(
                results=dict(elements=[dict(denominator=15, numerator=13, name="Lines")])))
        self.assert_measurement(response, value="2", total="15")
    async def test_uncovered_branches(self):
        """Test that the number of uncovered branches and the total number of branches are returned."""
        metric = dict(type="uncovered_branches", sources=self.sources, addition="sum")
        # Fixture: all 15 conditionals covered -> 0 uncovered out of 15 total.
        response = await self.collect(
            metric,
            get_request_json_return_value=dict(
                results=dict(elements=[dict(denominator=15, numerator=15, name="Conditionals")])))
        self.assert_measurement(response, value="0", total="15")
| 2.21875 | 2 |
source/Graphs.py | tomaslaz/Grand_Cannonical_Potential | 0 | 12772286 | <gh_stars>0
"""
@author <NAME>, 2018
A module for plotting the results
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import Constants
import Utilities
from Utilities import log
def _setup_temperature_legend(ax):
  """
  Attaches a shared, semi-transparent temperature legend above the axes.
  """
  legend = ax.legend(
    loc=3, bbox_to_anchor=(0.,1.02,1.,0.102), title="Temperature (K)",
    ncol=5, fontsize=Constants.fig_legend_fontsize, mode="expand",
    borderaxespad=0.)
  legend.get_frame().set_alpha(0.5)
  legend.get_title().set_fontsize('%d' % (Constants.fig_label_fontsize))
def avg_values(temperatures, chem_pot_range, avg_array, prop_name, _accuracy, options):
  """
  Plots average values with respect with mu and temperature.

  One line per temperature; *prop_name* is used both in the y-axis label
  and in the output filename (Constants.avg_plot_filename template).
  """
  fig = plt.figure(figsize=(Constants.fig_size_x, Constants.fig_size_y))
  ax = fig.add_subplot(1,1,1)
  temp_len = len(temperatures)
  # for each temperature:
  for t_index in range(temp_len):
    temperature = temperatures[t_index]
    colour = Utilities.get_temperature_colour(temperature)
    ax.plot(chem_pot_range, avg_array[t_index, :], color=colour, linewidth=1.5, label="%.2d" % (temperature))
  plt.grid()
  ax.set_xlabel(r'$\mu$', fontsize = Constants.fig_label_fontsize)
  ax.set_ylabel(r'$<%s>$' % (prop_name), fontsize=Constants.fig_label_fontsize)
  # place the legend
  _setup_temperature_legend(ax)
  fig.savefig(Constants.avg_plot_filename % (prop_name), dpi=Constants.fig_dpi, bbox_inches='tight')
  # clearing the figure settings
  plt.clf()
  plt.close()
def c_g_omega_over_mu(temperatures, chem_pot_range, omega_arr, _accuracy, options):
  """
  Plots omega with respect with mu and temperature (one line per T).
  """
  fig = plt.figure(figsize=(Constants.fig_size_x, Constants.fig_size_y))
  ax = fig.add_subplot(1,1,1)
  temp_len = len(temperatures)
  # for each temperature:
  for t_index in range(temp_len):
    temperature = temperatures[t_index]
    colour = Utilities.get_temperature_colour(temperature)
    ax.plot(chem_pot_range, omega_arr[t_index, :], color=colour, linewidth=1.5, label="%.2d" % (temperature))
  plt.grid()
  ax.set_xlabel(r'$\mu$', fontsize = Constants.fig_label_fontsize)
  ax.set_ylabel(r'$\Omega$', fontsize=Constants.fig_label_fontsize)
  # place the legend
  _setup_temperature_legend(ax)
  fig.savefig(Constants.omega_mu_plot_filename, dpi=Constants.fig_dpi, bbox_inches='tight')
  # clearing the figure settings
  plt.clf()
  plt.close()
def c_omega(chem_pot_multi, names, temperatures, omega_arr, _accuracy, options):
  """
  Plots omega against the m values (chem_pot_multi), one line per
  temperature; *names* label the x-axis ticks.
  """
  fig = plt.figure(figsize=(Constants.fig_size_x, Constants.fig_size_y))
  ax = fig.add_subplot(1,1,1)
  temp_len = len(temperatures)
  # for each temperature:
  for t_index in range(temp_len):
    temperature = temperatures[t_index]
    colour = Utilities.get_temperature_colour(temperature)
    ax.plot(chem_pot_multi, omega_arr[t_index, :], color=colour, linewidth=1.5, label="%.2d" % (temperature))
  plt.grid()
  ax.set_xlabel(r'$m$', fontsize = Constants.fig_label_fontsize)
  ax.set_ylabel(r'$\gamma^{c}_{m}$', fontsize=Constants.fig_label_fontsize)
  # x axis ticklabels
  plt.xticks(chem_pot_multi, names, rotation=30, fontsize = Constants.fig_ticklabel_fontsize)
  # place the legend
  _setup_temperature_legend(ax)
  fig.savefig(Constants.omega_plot_filename, dpi=Constants.fig_dpi, bbox_inches='tight')
  # clearing the figure settings
  plt.clf()
  plt.close()
def wm_contour(temperatures, names, chem_pot_range, chem_pot_multi, Wm_array, _accuracy, options):
  """
  Plots distribution functions in terms of m and mu with respect to the
  temperature.  One filled-contour figure is written per temperature,
  named 'wm_<T>.png'.
  """
  cmap = plt.cm.OrRd
  # filled-contour levels spanning probabilities 0..1 in steps of 0.05
  levels = np.arange(0.0, 1.05, 0.05)
  temp_len = len(temperatures)
  X, Y = np.meshgrid(chem_pot_multi, chem_pot_range)
  # for each temperature:
  for t_i in range(temp_len):
    temperature = temperatures[t_i]
    file_name = 'wm_%.2d.png' % (temperature)
    log(__name__, "Plotting distribution functions @ %.2d K (%s)" % (temperature, file_name), options.verbose, indent=3)
    fig = plt.figure(figsize=(Constants.fig_size_x, Constants.fig_size_y))
    Z = Wm_array[t_i, :, :]
    contour_filled = plt.contourf(Y, X, Z, levels, cmap=cmap, vmax=1.1, vmin=0.0, alpha=0.95)
    cbar = plt.colorbar(contour_filled, ticks=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
    cbar.ax.tick_params(labelsize=16)
    cbar.ax.set_ylabel(r'$w_{m}$', fontsize=Constants.fig_label_fontsize)
    for tick in cbar.ax.yaxis.get_major_ticks():
      tick.label.set_fontsize(16)
    # x axis ticklabels
    plt.yticks(chem_pot_multi, names, fontsize = Constants.fig_legend_fontsize) # rotation=30,
    plt.xlabel(r'$\mu$', fontsize = Constants.fig_label_fontsize)
    plt.ylabel(r'$m$', fontsize = Constants.fig_label_fontsize)
    plt.title("T = %d K" % (temperature), fontsize = Constants.fig_legend_fontsize)
    ax = plt.gca()
    for tick in ax.xaxis.get_major_ticks():
      tick.label.set_fontsize(16)
    for tick in ax.yaxis.get_major_ticks():
      tick.label.set_fontsize(16)
    plt.grid()
    # BUG FIX: file_name already ends in '.png'; the previous
    # fig.savefig(file_name + ".png", ...) produced files named
    # 'wm_XX.png.png' while the log message reported 'wm_XX.png'.
    # (Also removed the unused chem_pot_len local.)
    fig.savefig(file_name, dpi=Constants.fig_dpi, bbox_inches='tight')
    # clearing the figure settings
    plt.clf()
    plt.close()
  return
argparsex/__init__.py | J-A-M-E-5/argparsex | 0 | 12772287 | <reponame>J-A-M-E-5/argparsex
import argparse
SUPPRESS = argparse.SUPPRESS
class SentinelValue:
    """Unique placeholder used to detect options the user did not supply."""
    pass
class ArgumentParser(argparse.ArgumentParser):
_supplied_defaults = None
_defaults = None
def parse_supplied_args(self, args=None, namespace=None):
sentinel = SentinelValue()
real_defaults = {}
dict_ = self.__dict__['_option_string_actions']
for key, val in dict_.items():
if val.default != SUPPRESS and val.default != sentinel:
real_defaults[val.dest] = val.default
val.default = sentinel
supplied_vals = {}
supplied_default_vals = {}
default_vals = {}
for key, val in vars(super().parse_args(args, namespace)).items():
if val != sentinel:
if key not in real_defaults or val != real_defaults[key]:
supplied_vals[key] = val
else:
supplied_default_vals[key] = val
else:
default_vals[key] = real_defaults[key]
self._supplied_defaults = argparse.Namespace(**supplied_default_vals)
self._defaults = argparse.Namespace(**default_vals)
return argparse.Namespace(**supplied_vals)
def parse_supplied_default_args(self, args=None, namespace=None):
if self._supplied_defaults is None:
self.parse_args(args, namespace)
return self._supplied_defaults
def parse_default_args(self, args=None, namespace=None):
if self._defaults is None:
self.parse_args(args, namespace)
return self._defaults
| 2.625 | 3 |
features/tests/test_urls.py | KDD-OpenSource/fexum | 6 | 12772288 | <gh_stars>1-10
from django.test import TestCase
from django.urls import reverse
class TestExperimentListUrl(TestCase):
    """experiment-list reverses to /api/experiments."""
    def test_experiment_list_url(self):
        # Renamed from `test_sexperiment_list_url` (typo); discovery by the
        # `test_` prefix is unaffected.
        url = reverse('experiment-list')
        self.assertEqual(url, '/api/experiments')
class TestExperimentDetailUrl(TestCase):
    """experiment-detail reverses to /api/experiments/<uuid>."""
    def test_experiment_detail_url(self):
        url = reverse('experiment-detail', args=['391ec5ac-f741-45c9-855a-7615c89ce129'])
        self.assertEqual(url, '/api/experiments/391ec5ac-f741-45c9-855a-7615c89ce129')
class TestExperimentTargetsDetailUrl(TestCase):
    """experiment-targets-detail reverses to /api/experiments/<uuid>/target."""
    def test_experiment_targets_detail_url(self):
        url = reverse('experiment-targets-detail', args=['391ec5ac-f741-45c9-855a-7615c89ce129'])
        self.assertEqual(url, '/api/experiments/391ec5ac-f741-45c9-855a-7615c89ce129/target')
class TestDatasetListView(TestCase):
    """dataset-list reverses to /api/datasets."""
    def test_dataset_list_url(self):
        url = reverse('dataset-list')
        self.assertEqual(url, '/api/datasets')
class TestDatasetUploadUrl(TestCase):
    """dataset-upload reverses to /api/datasets/upload."""
    def test_target_upload_url(self):
        url = reverse('dataset-upload')
        self.assertEqual(url, '/api/datasets/upload')
class TestDatasetFeaturesListUrl(TestCase):
    """dataset-features-list reverses to /api/datasets/<uuid>/features."""
    def test_feature_list_url(self):
        url = reverse('dataset-features-list', args=['391ec5ac-f741-45c9-855a-7615c89ce129'])
        self.assertEqual(url, '/api/datasets/391ec5ac-f741-45c9-855a-7615c89ce129/features')
class TestFeatureSamplesUrl(TestCase):
    """feature-samples reverses to /api/features/<uuid>/samples."""
    def test_feature_samples_url(self):
        url = reverse('feature-samples', args=['391ec5ac-f741-45c9-855a-7615c89ce129'])
        self.assertEqual(url, '/api/features/391ec5ac-f741-45c9-855a-7615c89ce129/samples')
class TestFeatureHistogramUrl(TestCase):
    """feature-histogram reverses to /api/features/<uuid>/histogram."""
    def test_feature_histogram_url(self):
        url = reverse('feature-histogram', args=['391ec5ac-f741-45c9-855a-7615c89ce129'])
        self.assertEqual(url, '/api/features/391ec5ac-f741-45c9-855a-7615c89ce129/histogram')
class TestFeatureSlicesUrl(TestCase):
    """target-feature-slices reverses to /api/targets/<uuid>/slices."""
    def test_feature_slices_url(self):
        url = reverse('target-feature-slices', args=['391ec5ac-f741-45c9-855a-7615c89ce129'])
        self.assertEqual(url, '/api/targets/391ec5ac-f741-45c9-855a-7615c89ce129/slices')
class TestTargetFeatureRelevancyResultsUrl(TestCase):
    """target-feature-relevancy_results reverses to /api/targets/<uuid>/relevancy_results."""
    def test_target_feature_relevancy_results_url(self):
        url = reverse('target-feature-relevancy_results',
                      args=['391ec5ac-f741-45c9-855a-7615c89ce129'])
        self.assertEqual(url, '/api/targets/391ec5ac-f741-45c9-855a-7615c89ce129/relevancy_results')
class TestDatasetRedundancyResultsUrl(TestCase):
    """feature-redundancy_results reverses to /api/targets/<uuid>/redundancy_results."""
    def test_dataset_redundancy_results(self):
        url = reverse('feature-redundancy_results', args=['391ec5ac-f741-45c9-855a-7615c89ce129'])
        self.assertEqual(url,
                         '/api/targets/391ec5ac-f741-45c9-855a-7615c89ce129/redundancy_results')
class TestFeatureSpectrogramUrl(TestCase):
    """``feature-spectrogram`` reverses to the feature's spectrogram URL."""

    def test_feature_spectrogram_url(self):
        feature_id = '391ec5ac-f741-45c9-855a-7615c89ce128'
        self.assertEqual(
            reverse('feature-spectrogram', args=[feature_id]),
            '/api/features/391ec5ac-f741-45c9-855a-7615c89ce128/spectrogram')
class TestFixedFeatureSetHicsUrl(TestCase):
    """``fixed-feature-set-hics`` reverses to the target's HiCS URL."""

    def test_fixed_feature_set_hics(self):
        target_id = '391ec5ac-f741-45c9-855a-7615c89ce128'
        self.assertEqual(
            reverse('fixed-feature-set-hics', args=[target_id]),
            '/api/targets/391ec5ac-f741-45c9-855a-7615c89ce128/hics')
class TestRetrieveCalculations(TestCase):
    """``calculation-list`` reverses to ``/api/calculations``."""

    def test_retrieve_calculations(self):
        self.assertEqual(reverse('calculation-list'), '/api/calculations')
class TestCurrentExperimentView(TestCase):
    """``current-experiment-detail`` reverses to ``/api/experiments/current``."""

    def test_retrieve_current_experiment(self):
        self.assertEqual(reverse('current-experiment-detail'),
                         '/api/experiments/current')
class TestSetCurrentExperimentView(TestCase):
    """``set-current-experiment`` reverses to the experiment-scoped URL."""

    def test_set_current_experiment(self):
        experiment_id = '391ec5ac-f741-45c9-855a-7615c89ce128'
        self.assertEqual(
            reverse('set-current-experiment', args=[experiment_id]),
            '/api/experiments/current/391ec5ac-f741-45c9-855a-7615c89ce128')
| 2.4375 | 2 |
temboo/core/Library/Kiva/LendingActions/__init__.py | jordanemedlock/psychtruths | 7 | 12772289 | <reponame>jordanemedlock/psychtruths
from temboo.Library.Kiva.LendingActions.GetRecentLending import GetRecentLending, GetRecentLendingInputSet, GetRecentLendingResultSet, GetRecentLendingChoreographyExecution
| 1.007813 | 1 |
ex3-multi-class-classification/1_logistic_regression.py | themech/Machine-Learning-Coursera-Tensorflow | 5 | 12772290 | <filename>ex3-multi-class-classification/1_logistic_regression.py
import argparse
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy import io, misc
from sklearn import metrics
import tensorflow as tf
# Parse the command line arguments (or use default values).
parser = argparse.ArgumentParser(description='Recognizing hand-written number '
                                 'using multiclass logistic regression.')
parser.add_argument('-lr', '--learning_rate', type=float,
                    help='learning rate for the algorithm (default: 0.1)',
                    default=0.1)
parser.add_argument('-r', '--regularization', type=float,
                    help='theta regularization value (default: 0.1)',
                    default=0.1)
parser.add_argument('-e', '--epochs', type=int,
                    help='number of epochs (default: 400)', default=400)
parser.add_argument('-o', '--optimizer', type=str,
                    help='tensorflow optimizer class (default: AdamOptimizer)',
                    default='AdamOptimizer')
# other optimizers to try out: GradientDescentOptimizer, AdadeltaOptimizer,
# AdagradOptimizer, AdamOptimizer, FtrlOptimizer, RMSPropOptimizer
parser.add_argument('--verbose', dest='verbose', action='store_true',
                    help='increase output verbosity')
parser.add_argument('--silent', dest='verbose', action='store_false')
parser.set_defaults(verbose=True)
args = parser.parse_args()
# Resolve the optimizer class by name from the tf.train namespace.
optimizer_class = getattr(tf.train, args.optimizer)
# Load the hand-written digits data (MATLAB .mat file; each row of X is
# presumably a flattened 20x20 grayscale image -- see plot_100_images).
filename = 'data/ex3data1.mat'
data = io.loadmat(filename)
X_data, Y_data = data['X'], data['y']
# y==10 is digit 0, convert it to 0 then to make the code below simpler
Y_data[Y_data == 10] = 0
numSamples = X_data.shape[0]
if args.verbose:
    print('Shape of the X_data', X_data.shape)
    print('Shape of the Y data', Y_data.shape)
def plot_100_images(X):
    """Plot 100 randomly picked digits from *X* in a 10x10 grid.

    Each row of *X* is a flattened 20x20 grayscale image (stored
    column-major, hence the transpose below).
    """
    width, height = 20, 20
    nrows, ncols = 10, 10
    indices_to_display = np.random.choice(range(X.shape[0]), nrows * ncols)
    big_picture = np.zeros((height * nrows, width * ncols))
    irow, icol = 0, 0
    for idx in indices_to_display:
        if icol == ncols:
            irow += 1
            icol = 0
        iimg = X[idx].reshape(width, height).T  # transpose the data set
        big_picture[irow * height:irow * height + iimg.shape[0],
                    icol * width:icol * width + iimg.shape[1]] = iimg
        icol += 1
    plt.figure(figsize=(6, 6))
    # scipy.misc.toimage was deprecated and removed in SciPy 1.2; matplotlib
    # normalizes a float array to its min/max by default, which matches the
    # scaling toimage applied, so the array can be shown directly.
    plt.imshow(big_picture, cmap=matplotlib.cm.Greys_r)
    plt.show()
if args.verbose:
    # Plot some of the loaded digits.
    plot_100_images(X_data)

# For each row, add a constant (1) at the beginning, needed for logistic
# regression (bias/intercept term).
X_data = np.insert(X_data, 0, 1, axis=1)
def logistic_regression(X_data, Y_data, optimizer_class, reg, learning_rate,
                        epochs, verbose=True):
    """
    Trains and returns a classifier that recognizes one digit (although the
    code below is fairly general).
    :param X_data: Our digit data (learning set)
    :param Y_data: Digit label, 1 for row containing the digit we're trying to
                   learn to recognize, 0 for others
    :param optimizer_class: class that will be used to create optimizer object.
    :param reg: regularization parameter
    :param learning_rate: learning rate parameter
    :param epochs: number of epochs
    :param verbose: whether to print out some debugging information
    :return: Trained classifier that can be used to classify digits.
    """
    numFeatures = X_data.shape[1]
    numSamples = X_data.shape[0]
    X = tf.placeholder(tf.float32, shape=[None, numFeatures])
    Y = tf.placeholder(tf.float32, shape=[None, 1])
    W = tf.Variable(tf.zeros([numFeatures, 1]))
    pred = tf.nn.sigmoid(tf.matmul(X, W))
    # Cross-entropy loss; predictions are clipped away from 0/1 so that
    # log() never sees zero (numerical stability).
    cost = -tf.reduce_sum(Y * tf.log(tf.clip_by_value(pred, 1e-9, 1)) +
                          (1 - Y) * tf.log(tf.clip_by_value(1 - pred, 1e-9, 1))
                          ) / numSamples
    regularized_W = tf.slice(W, [1, 0], [-1, -1])  # don't regularize W[0]
    # NOTE(review): regularizer is scaled by reg/numFeatures rather than the
    # more common reg/numSamples -- confirm this is intended.
    regularizer = tf.reduce_sum(tf.square(regularized_W)) * reg / numFeatures
    correct_predict = tf.equal(tf.cast(tf.greater(pred, 0.5), tf.float32), Y)
    accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))
    optimizer = optimizer_class(learning_rate).minimize(cost + regularizer)
    init = tf.global_variables_initializer()
    # Create a tensorflow session (kept open: the returned lambda uses it).
    sess = tf.Session()
    sess.run(init)
    for epoch in range(epochs):
        _, cost_value, reg_cost, accuracy_value = sess.run(
            [optimizer, cost, regularizer, accuracy],
            feed_dict={X: X_data, Y: Y_data})
        # Display logs per epoch step
        if verbose and (epoch + 1) % 50 == 0:
            print('Epoch: {:04d} cost={:.9f} reg={:.9f} accuracy={}'.format(
                epoch+1, cost_value, reg_cost, accuracy_value))
    classifier = tf.greater(pred, 0.5)
    return lambda X_data: sess.run([pred, classifier], feed_dict={X: X_data})
# One-vs-all training: one binary classifier per digit 0..9.
classifiers = []  # This will hold our 10 classifiers, one for each digit.
for k in range(10):
    # prepare the labels for the current digit.
    Yk_data = (Y_data == k).astype(int)
    print("Training classifier for digit {:d}...".format(k))
    tk = logistic_regression(X_data, Yk_data, optimizer_class,
                             args.regularization, args.learning_rate,
                             args.epochs, args.verbose)
    classifiers.append(tk)

# Now we're using each of the classifiers to estimate how much a given row
# reassembles the digit it tried to learn.
predictions = []
for t in classifiers:
    # Classifier returns 2 values, a score and a boolean (if the score is
    # above 0.5). We need just the score as we treat it as a confidence level.
    pred, _ = t(X_data)
    predictions.append(pred)

# For each row, merge the predictions from all the classifiers. Pick the
# classifier with the highest confidence level.
prob_matrix = np.concatenate(predictions, axis=1)
y_pred = np.argmax(prob_matrix, axis=1)
if args.verbose:
    print("y_pred:", y_pred)
    print("Y_data:", Y_data)

# Print the final report
print("Optimizer {}, epochs {:d}, learning_rate {:0.2f}, regularization param "
      "{:0.2f}".format(args.optimizer, args.epochs, args.learning_rate,
                       args.regularization))
print(metrics.classification_report(Y_data, y_pred))
| 2.859375 | 3 |
datastructures/trees/binary/lca_with_pre_processing_seg_tree.py | hariharanragothaman/pymaster | 10 | 12772291 | class RangeQuery:
def __init__(self, data, func=min):
self.func = func
self._data = _data = [list(data)]
i, n = 1, len(_data[0])
while 2 * i <= n:
prev = _data[-1]
_data.append([func(prev[j], prev[j + i]) for j in range(n - 2 * i + 1)])
i <<= 1
def query(self, begin, end):
depth = (end - begin).bit_length() - 1
return self.func(
self._data[depth][begin], self._data[depth][end - (1 << depth)]
)
class LCA:
def __init__(self, root, graph):
"""Assumes the graph is zero-indexed"""
self.first = [-1] * len(graph)
self.path = [-1] * len(graph)
parents = [-1] * len(graph)
h = -1
dfs = [root]
# This is just routine-standard DFS traversal
while dfs:
print("The dfs is:", dfs)
node = dfs.pop()
print("The node popped is:", node)
self.path[h] = parents[node]
self.first[node] = h = h + 1
for nei in graph[node]:
if self.first[nei] == -1:
parents[nei] = node
dfs.append(nei)
print("The parents array is:", parents)
print("The first array:", self.first)
print("The path is:", self.path)
print("****************************************************************")
heights = [self.first[node] for node in self.path]
print("The heights are:", heights)
# Instantiating the rangeQuery class with heights
self.rmq = RangeQuery(heights)
def __call__(self, left, right):
if left == right:
return left
# The first array is storing the heights
left = self.first[left]
right = self.first[right]
# If left is greater than right
if left > right:
left, right = right, left
return self.path[self.rmq.query(left, right)]
if __name__ == "__main__":
g = {0: [1], 1: [2, 3, 4], 2: [5, 6], 3: [1], 4: [1, 7], 5: [2], 6: [2], 7: [4]}
print("The graph is:", g)
lca = LCA(1, g)
result = lca(5, 6)
print("The lowest common ancestor is:", result)
| 3.296875 | 3 |
tervis/api/submit_event.py | robopsi/sentry-health | 3 | 12772292 | import json
from tervis.environment import CurrentEnvironment
from tervis.auth import Auth
from tervis.producer import Producer
from tervis.exceptions import ApiError, PayloadTooLarge, ClientReadFailed, \
ClientBlacklisted
from tervis.web import Endpoint, ApiResponse, get_remote_addr
from tervis.filter import Filter
from libtervis.event import normalize_event
from libtervis.exceptions import ValidationError
class SubmitEventEndpoint(Endpoint):
    """POST endpoint ingesting newline-delimited JSON events for a project."""

    url_path = '/events/{project_id}'

    # Collaborators resolved per request by the framework's descriptors.
    env = CurrentEnvironment()
    auth = Auth()
    producer = Producer()
    filter = Filter()

    async def get_allowed_origins(self):
        # CORS origins are delegated to the filter configuration.
        return await self.filter.get_allowed_origins()

    async def accept_event(self):
        """Read and validate one JSON event line; ``None`` signals end of body."""
        max_json_packet = self.env.get_config(
            'apiserver.limits.max_json_packet')
        line = await self.op.req.content.readline()
        if not line:
            return
        try:
            line = line.decode('utf-8')
            # NOTE(review): the size limit is enforced after the full line
            # was already read from the request -- confirm intended.
            if len(line) > max_json_packet:
                raise PayloadTooLarge('JSON event above maximum size')
            return normalize_event(json.loads(line))
        except IOError as e:
            raise ClientReadFailed(str(e))
        except ValidationError as e:
            raise ApiError(e.message)

    async def post(self):
        """Consume the body event-by-event, producing each onto the queue.

        Blacklisted client IPs are rejected up front.  Per-event errors
        are collected and reported in the response instead of aborting
        the whole batch.
        """
        remote_addr = get_remote_addr(self.env, self.op.req)
        if remote_addr is not None \
                and await self.filter.ip_is_blacklisted(remote_addr):
            raise ClientBlacklisted('The ip address of the client is '
                                    'blacklisted for event submission')
        errors = []
        events = 0
        while True:
            try:
                event = await self.accept_event()
                if event is None:
                    break
                await self.producer.produce_event(
                    self.auth.project_id, event, self.auth.timestamp)
                events += 1
            except ApiError as e:
                errors.append(e.to_json())
        return ApiResponse({
            'errors': errors,
            'events': events,
        })
| 1.875 | 2 |
test_app/apps.py | fcfangcc/django-editor-md | 1 | 12772293 | from django.apps import AppConfig
class EditormdConfig(AppConfig):
    """Django AppConfig for the ``test_app`` application."""
    name = 'test_app'
| 1.15625 | 1 |
paddlevideo/modeling/backbones/encoder.py | 0shelter0/pingpong_action_temport_generation_based_Paddle | 1 | 12772294 | <reponame>0shelter0/pingpong_action_temport_generation_based_Paddle
from dataclasses import replace
from tkinter.tix import Tree
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as optim
import matplotlib.pyplot as plt
import math
from paddle import ParamAttr
## 6. MultiHeadAttention k=1/math.sqrt(fan) bias~U(-k, k)
def init_params(name=None, in_channels=1, kernel_size=1):
    """Build a ParamAttr with uniform init U(-k, k) where k = 1/sqrt(fan_in).

    fan_in = in_channels * kernel_size, mirroring PyTorch's default
    Conv1d initialisation.
    """
    fan_in = in_channels * kernel_size * 1
    k = 1. / math.sqrt(fan_in)
    param_attr = ParamAttr(name=name,
                           initializer=paddle.nn.initializer.Uniform(low=-k,
                                                                     high=k))
    return param_attr
class MultiHeadAttention(nn.Layer):
    """Transformer encoder block: multi-head self-attention followed by a
    position-wise feed-forward network, each with residual + LayerNorm
    (post-norm), as in Vaswani et al. (2017).
    """

    def __init__(self, d_k, d_v, d_model, d_ff, n_heads=4, dropout=0.1):
        super(MultiHeadAttention, self).__init__()
        # read config
        self.d_k = d_k
        self.d_v = d_v
        self.d_model = d_model
        self.d_ff = d_ff
        self.n_heads = n_heads
        # Linear projections use Kaiming init (Paddle's nn.Linear default
        # would otherwise initialise weights to a constant); conv biases use
        # k = 1/sqrt(fan_in) ~ U(-k, k) to match PyTorch defaults.
        self.W_Q = nn.Linear(d_model, d_k * n_heads,
                             weight_attr=nn.initializer.KaimingUniform(),
                             bias_attr=False)
        self.W_K = nn.Linear(d_model, d_k * n_heads,
                             weight_attr=nn.initializer.KaimingUniform(),
                             bias_attr=False)
        self.W_V = nn.Linear(d_model, d_v * n_heads,
                             weight_attr=nn.initializer.KaimingUniform(),
                             bias_attr=False)
        # optional output projection back to d_model
        self.linear = nn.Linear(n_heads * d_v, d_model,
                                weight_attr=nn.initializer.KaimingUniform(),
                                bias_attr=None)
        self.layer_norm = nn.LayerNorm(normalized_shape=d_model)
        self.dropout1 = nn.Dropout(p=dropout)
        # FFN implemented as two 1x1 convolutions over the sequence axis.
        self.conv1 = nn.Conv1D(in_channels=d_model, out_channels=d_ff, kernel_size=1,
                               weight_attr=init_params(in_channels=d_model, kernel_size=1),
                               bias_attr=init_params(in_channels=d_model, kernel_size=1)
                               )
        self.FFN_Relu = nn.ReLU()
        self.FFN_dropout = nn.Dropout(p=dropout)
        # BUG FIX: conv2's fan-in is d_ff (its in_channels), not d_model;
        # the original initialised the weight with the wrong fan.
        self.conv2 = nn.Conv1D(in_channels=d_ff, out_channels=d_model, kernel_size=1,
                               weight_attr=init_params(in_channels=d_ff, kernel_size=1),
                               bias_attr=init_params(in_channels=d_ff, kernel_size=1)
                               )
        self.dropout2 = nn.Dropout(p=dropout)
        self.layer_norm_ffn = nn.LayerNorm(normalized_shape=d_model)

    def forward(self, x):  # x: [batch_size x len_q x d_model]
        residual, batch_size = x, paddle.shape(x)[0]
        # (B, S, D) -proj-> (B, S, D) -split-> (B, S, H, W) -trans-> (B, H, S, W)
        Q = paddle.transpose(paddle.reshape(self.W_Q(x),shape=[batch_size, -1, self.n_heads, self.d_k]),perm=[0,2,1,3])
        K = paddle.transpose(paddle.reshape(self.W_K(x),shape=[batch_size, -1, self.n_heads, self.d_k]),perm=[0,2,1,3])
        V = paddle.transpose(paddle.reshape(self.W_V(x),shape=[batch_size, -1, self.n_heads, self.d_k]),perm=[0,2,1,3])
        ## context: [batch_size x n_heads x len_q x d_v], attn: [batch_size x n_heads x len_q x len_k]
        scores = paddle.matmul(Q, paddle.transpose(K,perm=[0,1,3,2])) / np.sqrt(self.d_k)
        softmax = nn.Softmax(axis=-1)
        attn = softmax(scores)
        context = paddle.matmul(attn, V)
        context = paddle.reshape(paddle.transpose(context, perm=[0,2,1,3]),shape=[batch_size, -1, self.n_heads * self.d_v])
        # context: [batch_size x len_q x n_heads * d_v]
        output = self.linear(context)  # optional
        output = self.layer_norm(self.dropout1(output) + residual)  # self.dropout1 is optional
        # output: [batch_size x len_q x d_model]
        residual = output  # output : [batch_size, len_q, d_model]
        # FFN: convs operate channels-first, hence the transposes around them.
        output = self.FFN_Relu(self.conv1(paddle.transpose(output,perm=[0,2,1])))
        output = self.FFN_dropout(output)  # optional
        output = paddle.transpose(self.conv2(output),perm=[0,2,1])
        output = self.layer_norm_ffn(output + residual)  # self.dropout2 is optional
        # output : [batch_size, len_q, d_model]
        return output
## 3. PositionalEncoding implementation
class PositionalEncoding(nn.Layer):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    NOTE(review): the table ``pe`` only depends on ``seq_len``/``d_model``
    yet is rebuilt on every forward pass; it could be precomputed once in
    ``__init__`` (see the commented-out register_buffer) -- confirm.
    """

    def __init__(self, d_model, seq_len=100, dropout=0.1):
        super(PositionalEncoding, self).__init__()
        self.d_model = d_model
        self.seq_len = seq_len
        self.dropout = nn.Dropout(p=dropout)
        # self.register_buffer('pe', self.pe)

    def forward(self, x):
        """
        x: [batch_size, d_model, seq_len]
        Returns x transposed to [batch_size, seq_len, d_model] with the
        positional encoding added.
        """
        # Move channels last so positions are added per time step.
        x = paddle.transpose(x, perm=[0,2,1])
        pe = paddle.zeros(shape=[self.seq_len, self.d_model])
        #position (max_len, 1)
        position = paddle.unsqueeze(paddle.arange(start=0,end=self.seq_len, dtype='float32'),axis=1)
        div_term = paddle.exp(paddle.arange(start=0,end=self.d_model,step=2,dtype='float32') * (-math.log(10000.0) / self.d_model))
        # Even feature indices get sine, odd indices cosine.
        pe[:, 0::2] = paddle.sin(paddle.multiply(position, div_term))
        pe[:, 1::2] = paddle.cos(paddle.multiply(position, div_term))
        #pe:[max_len*d_model]
        pe = paddle.unsqueeze(pe,axis=0)
        x = x + pe # [batch_size, seq_len, d_model]
        return x # self.dropout(x)
class Encoder(nn.Layer): # d_k=d_v
    """Stack of *n_layers* MultiHeadAttention blocks with sinusoidal
    positional encoding; input and output are channels-first
    [batch_size, d_model, seq_len].
    """

    def __init__(self, d_model, seq_len, n_layers, d_k, d_v, d_ff, n_heads=4, dropout=0.1):
        super(Encoder, self).__init__()
        self.pos_emb = PositionalEncoding(d_model, seq_len, dropout)
        # d_k, d_v, d_model, d_ff, n_heads=4
        self.layers = nn.LayerList([MultiHeadAttention(d_k, d_v, d_model, d_ff, n_heads) for _ in range(n_layers)])

    def forward(self, x):
        '''
        x: [batch_size, d_model, seq_len]
        '''
        # pos_emb transposes to [batch, seq_len, d_model] for the blocks;
        # transpose back to channels-first before returning.
        x = self.pos_emb(x)
        for layer in self.layers:
            x = layer(x)
        x = paddle.transpose(x,perm=[0,2,1])
        return x
| 2.625 | 3 |
sd3/tools/jap_tbl.py | rofferom/seiken_densetsu_3 | 5 | 12772295 | import os
import errno
import threading
import queue
from collections import namedtuple
import jinja2
from PIL import Image
import tesserocr
import sd3.gfx
import sd3.text_table
_Char = namedtuple("_Char", ["idx", "char", "img_path"])
_WorkDesc = namedtuple("_WorkDesc", ["idx", "tile"])
_WorkRes = namedtuple("_WorkDesc", ["idx", "char"])
_HTML_RESIZE_FACTOR = 2
_OCR_RESIZE_FACTOR = 5
_FIRST_CHAR_IDX = 0x20
_JPN_CHAR_START = 0x5F
_JPN_TESSEROCR_ID = "jpn"
_ENG_TESSEROCR_ID = "eng"
def _tile_to_char(tile, lang):
    """OCR a single font tile into text using the tesseract *lang* model.

    The tile is upscaled first because tesseract performs poorly on tiny
    glyphs; psm=10 treats the image as a single character.
    """
    char_img = tile.to_img()
    new_dim = (char_img.width * _OCR_RESIZE_FACTOR,
               char_img.height * _OCR_RESIZE_FACTOR)
    char_img = char_img.resize(new_dim, Image.LANCZOS)
    return tesserocr.image_to_text(char_img, lang=lang, psm=10)
class _Worker:
    """Thread body: OCRs queued tiles until a ``None`` sentinel is received."""

    def __init__(self, work_queue, result_queue):
        self.work_queue = work_queue
        self.result_queue = result_queue

    def __call__(self):
        while True:
            work_desc = self.work_queue.get()
            if work_desc is None:
                # Sentinel: no more work, terminate this thread.
                break
            # Pick the OCR language model from the glyph index.
            if work_desc.idx >= _JPN_CHAR_START:
                char = _tile_to_char(work_desc.tile, _JPN_TESSEROCR_ID)
            else:
                char = _tile_to_char(work_desc.tile, _ENG_TESSEROCR_ID)
            # Keep only the first recognised character; "???" marks failures.
            if char:
                char = char[0]
            else:
                char = "???"
            print("%04X=%s" % (work_desc.idx, char))
            self.result_queue.put(_WorkRes(work_desc.idx, char))
def generate(rom, output_path):
    """OCR the in-ROM font into an "IDX=char" table written to *output_path*.

    Tile decoding is fanned out to one worker thread per CPU this process
    may run on; results are gathered after all workers terminate.
    """
    # Prepare communication tools
    work_queue = queue.Queue()
    result_queue = queue.Queue()

    # Create and start workers
    worker_list = []
    thread_count = len(os.sched_getaffinity(0))
    for _ in range(thread_count):
        worker = _Worker(work_queue, result_queue)
        t = threading.Thread(target=worker)
        t.start()
        worker_list.append(t)

    # Dispatch tiles to decode
    font_reader = sd3.gfx.FontReader(rom)
    for idx, tile in font_reader.read_char_gen():
        work_queue.put(_WorkDesc(idx, tile))

    # Add a None sentinel for each worker so they all terminate
    for _ in worker_list:
        work_queue.put(None)

    for worker in worker_list:
        worker.join()

    print("Workers stopped")

    # Gather results
    decoded_dict = {}
    while not result_queue.empty():
        work_res = result_queue.get(block=False)
        idx = work_res.idx + _FIRST_CHAR_IDX
        decoded_dict[idx] = work_res.char

    # Flush results to the table file; the context manager guarantees the
    # handle is closed even if a write fails (the original leaked it).
    with open(output_path, "w") as out:
        for idx in sorted(decoded_dict):
            out.write("%04X=%s\n" % (idx, decoded_dict[idx]))
def _load_jina_template(name):
    """Load a Jinja2 template stored next to this module."""
    template_dir = os.path.dirname(os.path.abspath(__file__))
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
                             trim_blocks=True, lstrip_blocks=True)
    return env.get_template(name)
def generate_html(rom, tbl_path, out_folder):
    """Render the ROM font as per-glyph PNGs plus an index.html table.

    *tbl_path* maps glyph indices to decoded characters; all output files
    are written into *out_folder* (created if missing).
    """
    char_list = []

    tbl = sd3.text_table.Table()
    tbl.load(tbl_path)

    # Create output folder; exist_ok replaces the original try/except
    # errno.EEXIST dance.
    os.makedirs(out_folder, exist_ok=True)

    # Read font
    font_reader = sd3.gfx.FontReader(rom)
    for idx, tile in font_reader.read_char_gen():
        # Write file
        img_name = "char_%04X.png" % idx
        img_path = os.path.join(out_folder, img_name)

        # Get and resize tile
        img = tile.to_img()
        new_dim = (img.width * _HTML_RESIZE_FACTOR,
                   img.height * _HTML_RESIZE_FACTOR)
        img = img.resize(new_dim, Image.LANCZOS)
        img.save(img_path)

        idx += _FIRST_CHAR_IDX
        char_list.append(_Char(idx, tbl.decode_char(idx), img_name))

    # Generate html
    template = _load_jina_template("jap_html_table.template")
    rendered = template.render(char_list=char_list)

    # Write output file
    output_index = os.path.join(out_folder, "index.html")
    with open(output_index, 'w') as out:
        out.write(rendered)
| 2.390625 | 2 |
tests/planar_tests/test_contour_self_intersects.py | lycantropos/bentley_ottmann | 13 | 12772296 | from itertools import (chain,
combinations)
import pytest
from ground.base import (Context,
Relation)
from ground.hints import Contour
from hypothesis import given
from bentley_ottmann.planar import contour_self_intersects
from tests.utils import (contour_to_edges,
pop_left_vertex,
reverse_contour,
reverse_contour_coordinates)
from . import strategies
@given(strategies.contours)
def test_basic(contour: Contour) -> None:
    # The predicate must always yield a plain bool.
    assert isinstance(contour_self_intersects(contour), bool)
@given(strategies.triangular_contours)
def test_base_case(context: Context, contour: Contour) -> None:
    # A triangle self-intersects iff its vertices are collinear, i.e. the
    # middle vertex lies on the segment joining the two outer ones.
    result = contour_self_intersects(contour)

    lowest, middle, highest = sorted(contour.vertices)

    assert result is context.segment_contains_point(
        context.segment_cls(lowest, highest), middle)
@given(strategies.non_triangular_contours)
def test_step(context: Context, contour: Contour) -> None:
    # Inductive step: removing the leftmost vertex, the full contour
    # self-intersects iff the reduced contour did (and that intersection
    # survives the removal) or one of the two reconnecting edges crosses
    # or overlaps the remaining edges.
    first_vertex, rest_contour = pop_left_vertex(contour)
    rest_vertices = rest_contour.vertices

    result = contour_self_intersects(rest_contour)
    next_result = contour_self_intersects(contour)

    first_edge = context.segment_cls(first_vertex, rest_vertices[0])
    last_edge = context.segment_cls(rest_vertices[-1], first_vertex)
    rest_edges = contour_to_edges(rest_contour)
    # Relations that count as overlapping (shared sub-segment, not a point).
    overlap_relations = (Relation.COMPONENT, Relation.COMPOSITE,
                         Relation.EQUAL, Relation.OVERLAP)
    assert (next_result
            is (result
                and len(rest_vertices) > 2
                and (any(context.segments_relation(rest_edges[index],
                                                   rest_edges[other_index])
                         is not Relation.DISJOINT
                         for index in range(len(rest_edges) - 1)
                         for other_index
                         in chain(range(index - 1),
                                  range(index + 2, len(rest_edges) - 1)))
                     or any(context.segments_relation(edge, other_edge)
                            in overlap_relations
                            for edge, other_edge
                            in combinations(rest_edges[:-1], 2)))
            or any(context.segments_relation(first_edge, edge)
                   is not Relation.DISJOINT
                   for edge in rest_edges[1:-1])
            or any(context.segments_relation(last_edge, edge)
                   is not Relation.DISJOINT
                   for edge in rest_edges[:-2])
            or len(rest_vertices) > 1
            and (context.segments_relation(first_edge, rest_edges[0])
                 in overlap_relations
                 or context.segments_relation(first_edge, last_edge)
                 in overlap_relations
                 or context.segments_relation(last_edge, rest_edges[0])
                 in overlap_relations)))
@given(strategies.contours)
def test_reversed(contour: Contour) -> None:
    # Reversing vertex order must not change the predicate.
    assert (contour_self_intersects(contour)
            is contour_self_intersects(reverse_contour(contour)))
@given(strategies.contours)
def test_reversed_coordinates(contour: Contour) -> None:
    # Swapping x/y coordinates must not change the predicate.
    assert (contour_self_intersects(contour)
            is contour_self_intersects(reverse_contour_coordinates(contour)))
@given(strategies.degenerate_contours)
def test_degenerate_contour(contour: Contour) -> None:
    # Degenerate contours (too few vertices) must be rejected loudly.
    with pytest.raises(ValueError):
        contour_self_intersects(contour)
| 2.34375 | 2 |
GUI/models.py | muhammadtarek98/Graduation-project | 0 | 12772297 | import torch
import torch.nn as nn
from torchvision import models
import numpy as np
from torch.autograd import Variable
import os
class Model:
    """Wrapper around a trained ``MRI_alex`` network for one knee pathology.

    *key* selects the checkpoint: ``'abnormal'``, ``'acl'``, anything else
    loads the meniscus model.  Inference runs on CUDA.
    """

    def __init__(self, key='abnormal'):
        self.INPUT_DIM = 224
        self.MAX_PIXEL_VAL = 255
        # Dataset-wide intensity statistics used for standardisation.
        self.MEAN = 58.09
        self.STDDEV = 49.73
        self.model_ab = MRI_alex(False)
        if key == 'abnormal':
            self.model_ab.load_state_dict(torch.load(r"models/abnormal.pt", map_location='cpu'))
        elif key == 'acl':
            self.model_ab.load_state_dict(torch.load(r"models/acl.pt", map_location='cpu'))
        else:
            self.model_ab.load_state_dict(torch.load(r"models/men.pt", map_location='cpu'))
        self.model_ab.cuda()

    def preprocess(self, series):
        """Center-crop slices to INPUT_DIM, min-max scale to [0, MAX_PIXEL_VAL],
        standardise, and replicate the grayscale data into 3 channels.
        """
        pad = int((series.shape[2] - self.INPUT_DIM) / 2)
        series = series[:, pad:-pad, pad:-pad]
        series = (series - np.min(series)) / (np.max(series) - np.min(series)) * self.MAX_PIXEL_VAL
        series = (series - self.MEAN) / self.STDDEV
        series = np.stack((series,) * 3, axis=1)
        return torch.FloatTensor(series)

    def study(self, axial_path, sagit_path, coron_path):
        """Load the three .npy planes of one exam and preprocess each."""
        return {"axial": self.preprocess(np.load(axial_path)),
                "sagit": self.preprocess(np.load(sagit_path)),
                "coron": self.preprocess(np.load(coron_path))}

    def predict(self, model, tensors, abnormality_prior=None):
        """Forward one preprocessed exam through *model*; return the sigmoid
        probability, optionally multiplied by *abnormality_prior*.
        """
        vol_axial = Variable(tensors["axial"].cuda())
        vol_sagit = Variable(tensors["sagit"].cuda())
        vol_coron = Variable(tensors["coron"].cuda())
        logit = model.forward(vol_axial, vol_sagit, vol_coron)
        pred = torch.sigmoid(logit)
        pred_npy = pred.data.cpu().numpy()[0][0]
        if abnormality_prior:
            pred_npy = pred_npy * abnormality_prior
        return pred_npy

    def get_prediction(self, axial_path, sagit_path, coron_path):
        """Run the full pipeline on one exam and return the probability.

        BUG FIX: the original referenced undefined module globals, passed
        them to ``study`` in the wrong order, and discarded the result.
        """
        return self.predict(self.model_ab,
                            self.study(axial_path, sagit_path, coron_path))
class MRI_alex(nn.Module):
    """MRNet-style classifier: one AlexNet feature extractor per MRI plane
    (axial/sagittal/coronal), pooled over slices, with a single linear head
    on the concatenated 3x256 features.
    """

    def __init__(self, training=True):
        super().__init__()
        # training=True downloads ImageNet-pretrained weights; pass False
        # when a fine-tuned state dict is loaded afterwards.
        self.axial_net = models.alexnet(pretrained=training)
        self.sagit_net = models.alexnet(pretrained=training)
        self.coron_net = models.alexnet(pretrained=training)
        self.gap_axial = nn.AdaptiveAvgPool2d(1)
        self.gap_sagit = nn.AdaptiveAvgPool2d(1)
        self.gap_coron = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(3*256, 1)
        return

    def forward(self,vol_axial, vol_sagit, vol_coron):
        """Each input is [1, slices, 3, H, W]; returns a single logit."""
        # Drop the leading batch dim so slices act as the batch for AlexNet.
        vol_axial = torch.squeeze(vol_axial, dim=0)
        vol_sagit = torch.squeeze(vol_sagit, dim=0)
        vol_coron = torch.squeeze(vol_coron, dim=0)
        vol_axial = self.axial_net.features(vol_axial)
        vol_sagit = self.sagit_net.features(vol_sagit)
        vol_coron = self.coron_net.features(vol_coron)
        # Global-average-pool each slice to a 256-vector, then max over slices.
        vol_axial = self.gap_axial(vol_axial).view(vol_axial.size(0), -1)
        x = torch.max(vol_axial, 0, keepdim=True)[0]
        vol_sagit = self.gap_sagit(vol_sagit).view(vol_sagit.size(0), -1)
        y = torch.max(vol_sagit, 0, keepdim=True)[0]
        vol_coron = self.gap_coron(vol_coron).view(vol_coron.size(0), -1)
        z = torch.max(vol_coron, 0, keepdim=True)[0]
        w = torch.cat((x, y, z), 1)
        out = self.classifier(w)
        return out
| 2.390625 | 2 |
main.py | 17jrb17/01-Introduction | 0 | 12772298 | #!/usr/bin/env python3
import utils
utils.check_version((3,7))
utils.clear()
print('Hello, my name is <NAME>')
print('My favorite game is Bioshock Infinite')
print('My only concern is getting back into the groove of coding for this class')
print('I just want to learn more about what goes into creating the things I love and more about the industry in general.')
print('stackoverflow number: user:12003507')
print('github url: https://github.com/17jrb17') | 2.015625 | 2 |
tests/groups/family/__init__.py | aiidateam/aiida-pseudo | 3 | 12772299 | # -*- coding: utf-8 -*-
"""Tests for the :mod:`aiida_pseudo.groups.family` module."""
| 1.179688 | 1 |
contact/migrations/0004_auto_20181010_1631.py | djangulo/integralpsychology.life | 0 | 12772300 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-10 16:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: merge the split contact name fields into ``name``,
    drop the captcha ``hide_label`` flag and add a required ``subject`` to
    contact messages.
    """

    dependencies = [
        ('contact', '0003_contactcaptchaformfield_hide_label'),
    ]

    operations = [
        migrations.RenameField(
            model_name='contact',
            old_name='first_names',
            new_name='name',
        ),
        migrations.RemoveField(
            model_name='contact',
            name='last_names',
        ),
        migrations.RemoveField(
            model_name='contactcaptchaformfield',
            name='hide_label',
        ),
        migrations.AddField(
            model_name='contactmessage',
            name='subject',
            field=models.CharField(default='', max_length=254, verbose_name='Subject'),
            preserve_default=False,
        ),
    ]
| 1.640625 | 2 |
crawl_good_softwares/crawl_good_softwares/spiders/firehorse_scrapy_software_spider.py | skihyy/GT-Spring-2017-CS-6262 | 2 | 12772301 | # -*- coding: utf-8 -*-
import scrapy
import urllib
import os
from scrapy.http import Request
from scrapy.selector import Selector
from crawl_good_softwares.items import CrawlGoodSoftwaresItem
class TestSpiderSpider(scrapy.Spider):
    """Crawls filehorse.com category listings and yields final download URLs."""

    name = "firehorse_scrapy_software_spider"
    # Category landing pages; paginated variants are appended in __init__.
    start_urls = ['http://www.filehorse.com/popular/',
                  'http://www.filehorse.com/latest/',
                  'http://www.filehorse.com/software-benchmarking/',
                  'http://www.filehorse.com/software-compression-and-backup/']
    current_page = 1
    max_page = 6
    headers = {
        'Connection': 'keep - alive',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36'
    }

    def __init__(self, *a, **kw):
        # Append pages 2..max_page for every category, in the same
        # category order as the original four hard-coded link variables.
        categories = ('popular', 'latest', 'software-benchmarking',
                      'software-compression-and-backup')
        while self.current_page < self.max_page:
            self.current_page = self.current_page + 1
            for category in categories:
                self.start_urls.append(
                    'http://www.filehorse.com/%s/page-%d'
                    % (category, self.current_page))
        # BUG FIX: the original called super(scrapy.Spider, self).__init__,
        # which skips scrapy.Spider.__init__ entirely; delegate properly.
        super(TestSpiderSpider, self).__init__(*a, **kw)

    def parse(self, response):
        """Follow category download buttons until the final URL is found."""
        selector = Selector(response)
        software_links = selector.xpath('//div[@class="cat_dl_btn"]/a/@href').extract()

        if 0 < len(software_links):
            # Listing page: recurse into each software's detail page.
            for link in software_links:
                yield Request(link, callback=self.parse, headers=self.headers)
        else:
            final_download_url = selector.xpath('//a[@id="download_url"]/@href').extract()
            if 0 == len(final_download_url):
                # Not on the download page yet; retry its /download/ variant.
                yield Request(response.url + 'download/', callback=self.parse, headers=self.headers)
            else:
                item = CrawlGoodSoftwaresItem()
                item['link'] = final_download_url[0]
                yield item
| 2.734375 | 3 |
backend/src/dataloaders/milestone.py | spiritutumduo/spiritumDuo | 1 | 12772302 | <reponame>spiritutumduo/spiritumDuo<gh_stars>1-10
from typing import List, Dict, Optional
from aiodataloader import DataLoader
from models import Milestone
from typing import Union
class MilestoneByDecisionPointLoader(DataLoader):
    """
    This is class for loading milestones using IDs from
    DecisionPoint and caching the result in the request context

    Attributes:
        loader_name (str): unique name of loader to cache data under
    """
    loader_name = "_milestone_by_decision_point_loader"
    _db = None

    def __init__(self, db):
        super().__init__()
        self._db = db

    @classmethod
    def _get_loader_from_context(
        cls,
        context
    ) -> "MilestoneByDecisionPointLoader":
        """Return the per-request loader instance, creating it on first use."""
        if cls.loader_name not in context:
            context[cls.loader_name] = cls(db=context['db'])
        return context[cls.loader_name]

    async def fetch(self, keys) -> Dict[int, Milestone]:
        """Fetch milestones for the given decision-point IDs in one query.

        NOTE(review): if several milestones share a decision_point_id,
        only the last row fetched is kept -- confirm this is intended.
        """
        async with self._db.acquire(reuse=False) as conn:
            query = Milestone.query.where(
                Milestone.decision_point_id.in_(keys)
            )
            result = await conn.all(query)
            returnData = {}
            for patient_milestone in result:
                returnData[patient_milestone.decision_point_id] = patient_milestone
            return returnData

    async def batch_load_fn(self, keys):
        # DataLoader contract: results must align one-to-one with *keys*,
        # with None for keys that matched no row.
        fetchDict = await self.fetch([int(i) for i in keys])
        sortedData = []
        for key in keys:
            sortedData.append(fetchDict.get(int(key)))
        return sortedData

    @classmethod
    async def load_from_id(cls, context=None, id=None) -> Optional[Milestone]:
        """
        Load a single entry from its DecisionPoint ID

        Parameters:
            context (dict): request context
            id (int): ID to find
        Returns:
            Milestone/None
        """
        if not id:
            return None
        return await cls._get_loader_from_context(context).load(id)

    @classmethod
    async def load_many_from_id(
        cls,
        context=None,
        ids=None
    ) -> Optional[List[Milestone]]:
        """
        Loads multiple entries from their DecisionPoint IDs

        Parameters:
            context (dict): request context
            id (List[int]): IDs to find
        Returns:
            List[Milestone]/None
        """
        if not ids:
            return None
        return await cls._get_loader_from_context(context).load_many(ids)

    @classmethod
    def prime_with_context(
        cls,
        context=None,
        id=None,
        value=None
    ) -> "MilestoneByDecisionPointLoader":
        """Pre-populate the per-request cache with a known *id* -> *value*."""
        return cls._get_loader_from_context(context).prime(id, value)
class MilestoneByOnPathway:
    """
    This is class for loading milestones and
    caching the result in the request context

    Attributes:
        None
    """
    @staticmethod
    async def load_many_from_id(
        context=None,
        id=None,
        notOnDecisionPoint=None
    ) -> Union[List[Milestone], None]:
        """
        Load multiple entries from their OnPathway record ID

        Parameters:
            context (dict): request context
            id (List[int]): IDs to find
            notOnDecisionPoint (bool): this is a filter that will return
                milestones based on whether they have a DecisionPoint ID
                set
        Returns:
            List[Milestone]/None
        """
        if not context or not id:
            return None
        _gino = context['db']
        async with _gino.acquire(reuse=False) as conn:
            query = Milestone.query.where(Milestone.on_pathway_id == id)
            if notOnDecisionPoint:
                query = query.where(Milestone.decision_point_id.is_(None))
            milestones = await conn.all(query)

        # Warm the sibling loader's per-request cache with the fetched rows.
        # NOTE(review): entries are primed under milestone.id although that
        # loader batch-loads by decision_point_id -- confirm the intended key.
        if MilestoneByDecisionPointLoader.loader_name not in context:
            context[MilestoneByDecisionPointLoader.loader_name] = \
                MilestoneByDecisionPointLoader(db=context['db'])
        for milestone in milestones:
            context[MilestoneByDecisionPointLoader.loader_name].prime(
                milestone.id,
                milestone
            )
        return milestones
| 2.578125 | 3 |
playerpiano/terminal_target.py | wearpants/playerpiano | 19 | 12772303 | import sys
def write(s):
    """Write *s* to stdout, translating bare LF line endings to CRLF."""
    crlf_text = '\r\n'.join(s.split('\n'))
    sys.stdout.write(crlf_text)
    sys.stdout.flush()
def make_target(options):
    """Return the stdout writer callable; *options* is accepted but unused."""
    return write
def free_target():
    """Release resources for this target (nothing to do for stdout)."""
    return None
| 2.859375 | 3 |
torch_glow/tests/nodes/copy_test.py | YaronBenAtar/glow | 2,838 | 12772304 | <reponame>YaronBenAtar/glow<gh_stars>1000+
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class CopyModel(torch.nn.Module):
    """Module that copies a fixed random tensor into its input via copy_()."""

    def __init__(self, shape):
        super().__init__()
        # Source tensor whose values are copied (with broadcasting) into the
        # forward() input.
        self.other = torch.randn(shape)

    def forward(self, a):
        copied = a.copy_(self.other)
        return a + copied
class TestCopy(utils.TorchGlowTestCase):
    """Glow fusion tests for the in-place aten::copy_ operator.

    NOTE(review): relies on the project's ``tests.utils`` harness
    (``deterministic_expand`` / ``compare_tracing_methods``), whose
    definitions are not visible here.
    """

    @utils.deterministic_expand(
        [
            lambda: ("1x1 => 1x3", [1, 1], [1, 3]),
            lambda: ("1x3x5 => 1x3x5", [1, 3, 5], [1, 3, 5]),
            lambda: ("1x3 => 4x4x3", [1, 3], [4, 4, 3]),
        ]
    )
    def test_copy_(self, _, other_shape, tensor_shape):
        """Test of the PyTorch copy_ method on Glow."""
        utils.compare_tracing_methods(
            CopyModel(other_shape),
            torch.randn(tensor_shape),
            fusible_ops={"aten::copy_"},
        )

    @utils.deterministic_expand(
        [
            lambda: ("1x1x1 => 1x3", [1, 1, 1], [1, 3]),
            lambda: ("1x4 => 4x4x3", [1, 4], [4, 4, 3]),
            lambda: ("4x4x3 => 1x3", [4, 4, 3], [1, 3]),
        ]
    )
    def test_copy_broadcast_failure(self, _, other_shape, tensor_shape):
        """Shapes that cannot broadcast into the target must raise."""
        with self.assertRaises(RuntimeError):
            utils.compare_tracing_methods(
                CopyModel(other_shape),
                torch.randn(tensor_shape),
                fusible_ops={"aten::copy_"},
            )
| 1.90625 | 2 |
e2edet/criterion/__init__.py | eladb3/BoxeR | 18 | 12772305 | <filename>e2edet/criterion/__init__.py
import e2edet.criterion.metrics
import e2edet.criterion.losses
from e2edet.criterion.metrics import build_metric
from e2edet.criterion.losses import build_loss

# Factory functions re-exported as the package's public API.
__all__ = ["build_metric", "build_loss"]
| 1.085938 | 1 |
pyCMR/tests/test_cmr_integration.py | jasonduley/cmr | 12 | 12772306 | '''
Copyright 2017, United States Government, as represented by the Administrator of the National Aeronautics and Space Administration. All rights reserved.
The pyCMR platform is licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
'''
import os
import unittest
import xml.etree.ElementTree as ET
from ..pyCMR import CMR, Collection, Granule
class TestCMRIntegration(unittest.TestCase):
    """Order-sensitive integration tests against a live CMR instance.

    The individual steps are deliberately NOT named ``test_*`` so that
    unittest discovery does not run them independently; ``test_monolith``
    executes them in the required ingest -> update -> delete order.
    """

    @classmethod
    def setUpClass(cls):
        # Shared CMR client and fixture paths/names for the whole class.
        configFilePath = "pyCMRConfig.cfg"
        cls.cmr = CMR(configFilePath)
        cls._test_collection_path = os.path.abspath(os.curdir) + "/pyCMR/tests/fixtures/test-collection.xml" #os.path.join(os.curdir, 'tests', 'fixtures', 'test-collection.xml')
        cls._test_granule_path = os.path.abspath(os.curdir) + "/pyCMR/tests/fixtures/test-granule.xml" #os.path.join(os.curdir, 'tests', 'fixtures', 'test-granule.xml')
        cls._test_collection_name = 'PYCMR TEST COLLECTION'
        cls._test_granule_name = 'PYCMR_TEST_GRANULE.hd5'

    def collection_search(self):
        results = self.cmr.searchCollection()
        # Make sure that the XML response was actually parsed
        self.assertTrue(isinstance(results[0], Collection))
        self.assertTrue('concept-id' in results[0].keys())
        self.assertTrue('Collection' in results[0].keys())

    def granule_search(self):
        results = self.cmr.searchGranule()
        self.assertTrue(isinstance(results[0], Granule))
        self.assertTrue('concept-id' in results[0].keys())
        self.assertTrue('Granule' in results[0].keys())

    def collection_ingest(self):
        result = self.cmr.ingestCollection(self._test_collection_path)
        # If ingest wasn't successful, the above would've thrown a 4XX error
        # But just to be sure, let's check that there was a result in the returned XML
        # Otherwise, the top-level tag would be `<errors>`
        parsed = ET.XML(result)
        self.assertTrue(parsed.tag == 'result')

    def granule_ingest(self):
        result = self.cmr.ingestGranule(self._test_granule_path)
        parsed = ET.XML(result)
        self.assertTrue(parsed.tag == 'result')

    def collection_update(self):
        result = self.cmr.updateCollection(self._test_collection_path)
        parsed = ET.XML(result)
        self.assertTrue(parsed.tag == 'result')

    def granule_update(self):
        result = self.cmr.updateGranule(self._test_granule_path)
        parsed = ET.XML(result)
        self.assertTrue(parsed.tag == 'result')

    def granule_delete(self):
        result = self.cmr.deleteGranule(self._test_granule_name)
        # Confirm that a tombstone was returned
        parsed = ET.XML(result)
        self.assertTrue(parsed.tag == 'result')

    def collection_delete(self):
        result = self.cmr.deleteCollection(self._test_collection_name)
        parsed = ET.XML(result)
        self.assertTrue(parsed.tag == 'result')

    def test_monolith(self):
        '''
        Since these are order-sensitive integration tests,
        wrap them in a monolithic test, so that they run in the proper order
        and stop after a single failure (without having to specify `failfast`)
        https://stackoverflow.com/questions/5387299/python-unittest-testcase-execution-order
        '''
        for test_name in [
                'collection_search',
                'granule_search',
                'collection_ingest',
                'granule_ingest',
                'collection_update',
                'granule_update',
                'granule_delete',
                'collection_delete'
        ]:
            test = getattr(self, test_name)
            test()

    def test_search_limit(self):
        ''' Make sure that the correct number of items are returned by searches '''
        results = self.cmr.searchCollection(limit=3)
        self.assertTrue(len(results) == 3)
        results = self.cmr.searchGranule(limit=91)
        self.assertTrue(len(results) == 91)
| 1.679688 | 2 |
reding/settings.py | BuongiornoMIP/Reding | 3 | 12772307 | import os
REDIS_CONFIG = {
'host': os.getenv('REDING_REDIS_HOST', 'localhost'),
'port': int(os.getenv('REDING_REDIS_PORT', 6379)),
'db': int(os.getenv('REDING_REDIS_DB', 0)),
}
DAEMON_CONFIG = {
'host': os.getenv('REDING_DAEMON_HOST', '0.0.0.0'),
'port': int(os.getenv('REDING_DAEMON_PORT', 5000)),
}
KEY_CONFIG = {
'prefix': 'rating',
'subject': 'user',
'object': 'app',
'subjects': 'users',
'objects': 'apps'
}
PAGINATION_DEFAULT_OFFSET = 0
PAGINATION_DEFAULT_SIZE = 10
__all__ = (
'REDIS_CONFIG',
'DAEMON_CONFIG',
'KEY_CONFIG',
)
| 1.75 | 2 |
test_fit.py | QianLabUSC/cognitively-enhanced-decision-framework | 0 | 12772308 | from scipy import optimize
import matplotlib.pyplot as plt
import numpy as np
# Sample observations for the (currently commented-out) piecewise fit demo.
# NOTE(review): `x` is rebound to a plain list further down in this script.
x = np.array([1, 1.1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ,11, 12, 13, 14, 15], dtype=float)
y = np.array([5, 3, 7, 9, 11, 13, 15, 28.92, 42.81, 56.7, 70.59,
              84.47, 98.36, 112.25, 126.14, 140.03])
def piecewise_linear(x, x0, y0, k1, k2):
    """Two-segment linear function hinged at (x0, y0).

    Slope is k1 for x < x0 and k2 for x >= x0; both segments pass
    through the hinge point (x0, y0).
    """
    left = lambda x: k1 * x + y0 - k1 * x0
    right = lambda x: k2 * x + y0 - k2 * x0
    return np.piecewise(x, [x < x0, x >= x0], [left, right])
def gauss(mean, scale, x=np.linspace(1,22,22), sigma=4):
    """Unnormalised Gaussian bump of height *scale* centred at *mean*."""
    exponent = -np.square(x - mean) / (2 * sigma ** 2)
    return scale * np.exp(exponent)
# # 用已有的 (x, y) 去拟合 piecewise_linear 分段函数
# p , e = optimize.curve_fit(piecewise_linear, x, y)
# xd = np.linspace(0, 15, 100)
# plt.plot(x, y, "o")
# plt.plot(xd, piecewise_linear(xd, *p))
# plt.savefig('123.png')
# Accumulate Gaussian information bumps at a few sample locations and plot.
xi = np.linspace(1,22,22)
information_matrix = np.zeros((22))
# NOTE(review): `x` here shadows/rebinds the data array defined at the top
# of the script — confirm that is intentional.
x = [1, 13]
for i in range(len(x)):
    information_matrix += gauss(x[i],1)
# plt.plot(xi, information_matrix)
plt.plot(xi, information_matrix)
plt.show()
tests/tasks/sodaspark/test_sodaspark_tasks.py | suryatmodulus/prefect | 8,633 | 12772309 | from unittest.mock import MagicMock
import pytest
from pyspark.sql import SparkSession
from prefect.tasks.sodaspark import SodaSparkScan
class TestSodaSparkScan:
    """Unit tests for the SodaSparkScan Prefect task.

    NOTE(review): several tests need a local Spark session (PySpark + a JVM);
    the commented-out skip markers suggest they are sometimes disabled.
    """

    def test_construction_provide_scan_and_df(self):
        # The constructor should store both the scan definition and the df.
        expected_scan_def = "/foo/bar.yaml"
        expected_df = SparkSession.builder.getOrCreate().createDataFrame(
            [{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
        )
        soda_spark_scan_task = SodaSparkScan(scan_def=expected_scan_def, df=expected_df)
        assert soda_spark_scan_task.scan_def == expected_scan_def
        assert soda_spark_scan_task.df == expected_df

    def test_construction_no_scan_and_df(self):
        # Both attributes default to None when omitted.
        soda_spark_scan_task = SodaSparkScan()
        assert soda_spark_scan_task.scan_def is None
        assert soda_spark_scan_task.df is None

    # @pytest.mark.skip(reason="Requires PySpark and Java to be installed")
    def test_run_no_scan(self):
        # Running without a scan definition must raise a ValueError.
        df = SparkSession.builder.getOrCreate().createDataFrame(
            [{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
        )
        soda_spark_scan_task = SodaSparkScan(df=df)
        with pytest.raises(ValueError) as exc:
            soda_spark_scan_task.run()
        assert "scan_def cannot be None" in str(exc)

    def test_run_no_df(self):
        # Running without a dataframe must raise a ValueError.
        soda_spark_scan_task = SodaSparkScan(scan_def="/foo/bar.yaml")
        with pytest.raises(ValueError) as exc:
            soda_spark_scan_task.run()
        assert "df cannot be None" in str(exc)

    # @pytest.mark.skip(reason="Requires PySpark and Java to be installed")
    def test_run_invalid_scan(self, monkeypatch):
        # A scan definition that isn't valid YAML should surface an error.
        scan_def = "invalid scan definition"
        df = SparkSession.builder.getOrCreate().createDataFrame(
            [{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
        )
        soda_spark_scan_task = SodaSparkScan(scan_def=scan_def, df=df)
        with pytest.raises(AttributeError):
            soda_spark_scan_task.run()

    def test_run_invalid_df(self, monkeypatch):
        # A non-dataframe df should surface an error when the scan runs.
        scan_def = """
        table_name: demodata
        metrics:
        - row_count
        - max
        - min_length
        tests:
        - row_count > 0
        """
        df = "not a valid df"
        soda_spark_scan_task = SodaSparkScan(scan_def=scan_def, df=df)
        with pytest.raises(AttributeError):
            soda_spark_scan_task.run()

    # @pytest.mark.skip(reason="Requires PySpark and Java to be installed")
    def test_run_valid_scan_and_df_with_measurements(self):
        # A passing scan returns a result object carrying measurements.
        scan_def = """
        table_name: demodata
        metrics:
        - row_count
        - max
        - min_length
        tests:
        - row_count > 0
        """
        df = SparkSession.builder.getOrCreate().createDataFrame(
            [{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
        )
        soda_spark_scan_task = SodaSparkScan(scan_def=scan_def, df=df)
        res = soda_spark_scan_task.run()
        assert hasattr(res, "measurements")

    # @pytest.mark.skip(reason="Requires PySpark and Java to be installed")
    def test_run_valid_scan_and_df_with_errors(self):
        # A failing test expression produces a result object carrying errors.
        scan_def = """
        table_name: demodata
        metrics:
        - row_count
        - max
        - min_length
        tests:
        - row_count == 0
        """
        df = SparkSession.builder.getOrCreate().createDataFrame(
            [{"id": 123, "value": "foo"}, {"id": 456, "value": "bar"}]
        )
        soda_spark_scan_task = SodaSparkScan(scan_def=scan_def, df=df)
        res = soda_spark_scan_task.run()
        assert hasattr(res, "errors")
| 2.21875 | 2 |
Cura/Cura/cura/Settings/CuraStackBuilder.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 0 | 12772310 | <gh_stars>0
# Copyright (c) 2019 <NAME>.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Optional
from UM.ConfigurationErrorMessage import ConfigurationErrorMessage
from UM.Logger import Logger
from UM.Settings.Interfaces import DefinitionContainerInterface
from UM.Settings.InstanceContainer import InstanceContainer
from cura.Machines.ContainerTree import ContainerTree
from cura.Machines.MachineNode import MachineNode
from .GlobalStack import GlobalStack
from .ExtruderStack import ExtruderStack
## Contains helper functions to create new machines.
class CuraStackBuilder:

    ##  Create a new instance of a machine.
    #
    #   \param name The name of the new machine.
    #   \param definition_id The ID of the machine definition to use.
    #
    #   \return The new global stack or None if an error occurred.
    @classmethod
    def createMachine(cls, name: str, definition_id: str) -> Optional[GlobalStack]:
        from cura.CuraApplication import CuraApplication
        application = CuraApplication.getInstance()
        registry = application.getContainerRegistry()
        container_tree = ContainerTree.getInstance()

        definitions = registry.findDefinitionContainers(id = definition_id)
        if not definitions:
            # Unknown machine definition: surface a configuration error and bail out.
            ConfigurationErrorMessage.getInstance().addFaultyContainers(definition_id)
            Logger.log("w", "Definition {definition} was not found!", definition = definition_id)
            return None

        machine_definition = definitions[0]
        machine_node = container_tree.machines[machine_definition.getId()]

        generated_name = registry.createUniqueName("machine", "", name, machine_definition.getName())
        # Make sure the new name does not collide with any definition or (quality) profile
        # createUniqueName() only looks at other stacks, but not at definitions or quality profiles
        # Note that we don't go for uniqueName() immediately because that function matches with ignore_case set to true
        if registry.findContainersMetadata(id = generated_name):
            generated_name = registry.uniqueName(generated_name)

        new_global_stack = cls.createGlobalStack(
            new_stack_id = generated_name,
            definition = machine_definition,
            variant_container = application.empty_variant_container,
            material_container = application.empty_material_container,
            quality_container = machine_node.preferredGlobalQuality().container,
        )
        new_global_stack.setName(generated_name)

        # Create ExtruderStacks
        extruder_dict = machine_definition.getMetaDataEntry("machine_extruder_trains")
        for position in extruder_dict:
            cls.createExtruderStackWithDefaultSetup(new_global_stack, position)

        for new_extruder in new_global_stack.extruders.values(): # Only register the extruders if we're sure that all of them are correct.
            registry.addContainer(new_extruder)

        # Register the global stack after the extruder stacks are created. This prevents the registry from adding another
        # extruder stack because the global stack didn't have one yet (which is enforced since Cura 3.1).
        registry.addContainer(new_global_stack)

        return new_global_stack

    ##  Create a default Extruder Stack
    #
    #   \param global_stack The global stack this extruder refers to.
    #   \param extruder_position The position of the current extruder.
    @classmethod
    def createExtruderStackWithDefaultSetup(cls, global_stack: "GlobalStack", extruder_position: int) -> None:
        from cura.CuraApplication import CuraApplication
        application = CuraApplication.getInstance()
        registry = application.getContainerRegistry()

        # Get the extruder definition.
        extruder_definition_dict = global_stack.getMetaDataEntry("machine_extruder_trains")
        extruder_definition_id = extruder_definition_dict[str(extruder_position)]
        try:
            extruder_definition = registry.findDefinitionContainers(id = extruder_definition_id)[0]
        except IndexError:
            # It still needs to break, but we want to know what extruder ID made it break.
            msg = "Unable to find extruder definition with the id [%s]" % extruder_definition_id
            Logger.logException("e", msg)
            raise IndexError(msg)

        # Find out what filament diameter we need.
        approximate_diameter = round(extruder_definition.getProperty("material_diameter", "value")) # Can't be modified by definition changes since we are just initialising the stack here.

        # Find the preferred containers.
        machine_node = ContainerTree.getInstance().machines[global_stack.definition.getId()]
        extruder_variant_node = machine_node.variants.get(machine_node.preferred_variant_name)
        if not extruder_variant_node:
            # Preferred nozzle missing from this machine's variants: fall back
            # to the first available one.
            Logger.log("w", "Could not find preferred nozzle {nozzle_name}. Falling back to {fallback}.".format(nozzle_name = machine_node.preferred_variant_name, fallback = next(iter(machine_node.variants))))
            extruder_variant_node = next(iter(machine_node.variants.values()))
        extruder_variant_container = extruder_variant_node.container
        material_node = extruder_variant_node.preferredMaterial(approximate_diameter)
        material_container = material_node.container
        quality_node = material_node.preferredQuality()

        new_extruder_id = registry.uniqueName(extruder_definition_id)
        new_extruder = cls.createExtruderStack(
            new_extruder_id,
            extruder_definition = extruder_definition,
            machine_definition_id = global_stack.definition.getId(),
            position = extruder_position,
            variant_container = extruder_variant_container,
            material_container = material_container,
            quality_container = quality_node.container
        )
        new_extruder.setNextStack(global_stack)

        registry.addContainer(new_extruder)

    ##  Create a new Extruder stack
    #
    #   \param new_stack_id The ID of the new stack.
    #   \param extruder_definition The definition to base the new stack on.
    #   \param machine_definition_id The ID of the machine definition to use for the user container.
    #   \param position The position the extruder occupies in the machine.
    #   \param variant_container The variant selected for the current extruder.
    #   \param material_container The material selected for the current extruder.
    #   \param quality_container The quality selected for the current extruder.
    #
    #   \return A new Extruder stack instance with the specified parameters.
    @classmethod
    def createExtruderStack(cls, new_stack_id: str, extruder_definition: DefinitionContainerInterface,
                            machine_definition_id: str,
                            position: int,
                            variant_container: "InstanceContainer",
                            material_container: "InstanceContainer",
                            quality_container: "InstanceContainer") -> ExtruderStack:
        from cura.CuraApplication import CuraApplication
        application = CuraApplication.getInstance()
        registry = application.getContainerRegistry()

        stack = ExtruderStack(new_stack_id)
        stack.setName(extruder_definition.getName())
        stack.setDefinition(extruder_definition)
        stack.setMetaDataEntry("position", str(position))

        user_container = cls.createUserChangesContainer(new_stack_id + "_user", machine_definition_id, new_stack_id,
                                                        is_global_stack = False)

        stack.definitionChanges = cls.createDefinitionChangesContainer(stack, new_stack_id + "_settings")
        stack.variant = variant_container
        stack.material = material_container
        stack.quality = quality_container
        stack.intent = application.empty_intent_container
        stack.qualityChanges = application.empty_quality_changes_container
        stack.userChanges = user_container

        # Only add the created containers to the registry after we have set all the other
        # properties. This makes the create operation more transactional, since any problems
        # setting properties will not result in incomplete containers being added.
        registry.addContainer(user_container)

        return stack

    ##  Create a new Global stack
    #
    #   \param new_stack_id The ID of the new stack.
    #   \param definition The definition to base the new stack on.
    #   \param variant_container The variant selected for the current stack.
    #   \param material_container The material selected for the current stack.
    #   \param quality_container The quality selected for the current stack.
    #
    #   \return A new Global stack instance with the specified parameters.
    @classmethod
    def createGlobalStack(cls, new_stack_id: str, definition: DefinitionContainerInterface,
                          variant_container: "InstanceContainer",
                          material_container: "InstanceContainer",
                          quality_container: "InstanceContainer") -> GlobalStack:
        from cura.CuraApplication import CuraApplication
        application = CuraApplication.getInstance()
        registry = application.getContainerRegistry()

        stack = GlobalStack(new_stack_id)
        stack.setDefinition(definition)

        # Create user container
        user_container = cls.createUserChangesContainer(new_stack_id + "_user", definition.getId(), new_stack_id,
                                                        is_global_stack = True)

        stack.definitionChanges = cls.createDefinitionChangesContainer(stack, new_stack_id + "_settings")
        stack.variant = variant_container
        stack.material = material_container
        stack.quality = quality_container
        stack.intent = application.empty_intent_container
        stack.qualityChanges = application.empty_quality_changes_container
        stack.userChanges = user_container

        registry.addContainer(user_container)

        return stack

    ##  Create a fresh, empty "user changes" container for a stack.
    #
    #   \param container_name Requested name; made unique before use.
    #   \param definition_id The definition the container is associated with.
    #   \param stack_id The stack the container belongs to.
    #   \param is_global_stack Whether the owning stack is a global (machine)
    #          stack or an extruder stack; this decides the metadata key used.
    @classmethod
    def createUserChangesContainer(cls, container_name: str, definition_id: str, stack_id: str,
                                   is_global_stack: bool) -> "InstanceContainer":
        from cura.CuraApplication import CuraApplication
        application = CuraApplication.getInstance()
        registry = application.getContainerRegistry()

        unique_container_name = registry.uniqueName(container_name)

        container = InstanceContainer(unique_container_name)
        container.setDefinition(definition_id)
        container.setMetaDataEntry("type", "user")
        container.setMetaDataEntry("setting_version", CuraApplication.SettingVersion)

        metadata_key_to_add = "machine" if is_global_stack else "extruder"
        container.setMetaDataEntry(metadata_key_to_add, stack_id)

        return container

    ##  Create and register a "definition changes" container for a stack.
    #
    #   NOTE(review): this also assigns the container onto
    #   ``container_stack.definitionChanges`` even though callers assign the
    #   return value themselves — presumably harmless redundancy; confirm.
    @classmethod
    def createDefinitionChangesContainer(cls, container_stack, container_name):
        from cura.CuraApplication import CuraApplication
        application = CuraApplication.getInstance()
        registry = application.getContainerRegistry()

        unique_container_name = registry.uniqueName(container_name)

        definition_changes_container = InstanceContainer(unique_container_name)
        definition_changes_container.setDefinition(container_stack.getBottom().getId())
        definition_changes_container.setMetaDataEntry("type", "definition_changes")
        definition_changes_container.setMetaDataEntry("setting_version", CuraApplication.SettingVersion)

        registry.addContainer(definition_changes_container)
        container_stack.definitionChanges = definition_changes_container

        return definition_changes_container
| 2.125 | 2 |
tests/test_gs_convert.py | rogeriosan4/pdf2txt | 0 | 12772311 | <reponame>rogeriosan4/pdf2txt<filename>tests/test_gs_convert.py
import gs_convert as gs

# Smoke test: convert a sample PDF to text and show the result.
# NOTE(review): assumes 'file.pdf' exists in the working directory.
x = gs.pdf2string("file.pdf")
print(x)
sssdevops/list_tools.py | doaa-altarawy/sssdevops | 2 | 12772312 | <filename>sssdevops/list_tools.py<gh_stars>1-10
"""
Main python file for the sssdevops example
"""
def mean(num_list):
    """
    Calculate the mean of a list of numbers

    Parameters
    ----------
    num_list: list of int or float

    Returns
    -------
    float of the mean of the list

    Examples
    --------
    >>> mean([1, 2, 3, 4, 5])
    3.0
    """
    total = sum(num_list)
    count = len(num_list)
    return total / count
| 2.875 | 3 |
sem03_for/sem_03_192.py | AnnetteNegovora/dap_2020_fall | 23 | 12772313 | # %%
"""
<NAME> любит французские багеты. Длина французского
багета равна 1 метру. За один заглот <NAME> заглатывает
кусок случайной длины равномерно распределенной на отрезке
[0; 1]. Для того, чтобы съесть весь багет удаву потребуется случайное
количество N заглотов.
Оцените P(N=2), P(N=3), E(N)
"""
# %%
import numpy as np
import pandas as pd
from random import uniform
# %%
# REPL-style sanity checks of the imported helpers (results discarded).
uniform(a=0, b=1)
list(range(7))
# %%
def eat_baget():
    """
    Udav Anatoly simulator.
    Returns the number of gulps needed to finish one baguette.
    """
    remaining = 1
    gulps = 0
    while remaining > 0:
        remaining -= uniform(a=0, b=1)
        gulps += 1
    return gulps
# %%
eat_baget()

# %%
# Monte Carlo estimates of E(N), P(N=2) and P(N=3) for the baguette problem.
n_exp = 1000
udaff_life = [eat_baget() for i in range(n_exp)]
udaff_life

EN_hat = np.mean(udaff_life)
EN_hat

PNeq2_hat = udaff_life.count(2) / n_exp
PNeq2_hat

PNeq3_hat = udaff_life.count(3) / n_exp
PNeq3_hat
# %%
"""
<NAME> подбрасывает кубик до первой шестёрки.
Обозначим: величина N — число бросков.
Событие A — при подбрасываниях выпадала только чётная грань.
Оцените P(N=2), P(N=3), E(N)
Оцените P(A), P(N=2|A), P(A|N=2), P(A OR N=2), P(A AND N=2)
"""
# %%
from random import randint
# %%
# REPL-style checks: randint usage and integer division/modulo.
randint(a=1, b=2)

# %%
7 // 2

# %%
7 % 2
def throw_until_six():
    """
    Roll a die until the first six appears.
    Counts the rolls and tracks whether only even faces came up.
    Returns: (number of rolls, True/False)
    """
    rolls = 0
    only_even = True
    while True:
        face = randint(1, 6)
        rolls += 1
        if face % 2 == 1:
            only_even = False
        if face == 6:
            break
    return (rolls, only_even)
# %%
throw_until_six()

# Simulate many runs and summarise (rolls, only-even flag) in a DataFrame.
n_exp = 1000
throw_list = [throw_until_six() for i in range(n_exp)]
throw_list

# %%
throw_df = pd.DataFrame(throw_list, columns=['n_throw', 'only_even'])
throw_df.describe()
# %%
"""
Накануне войны Жестокий Тиран очень большой страны издал
указ. Отныне за каждого новорождённого мальчика семья получает
денежную премию, но если в семье рождается вторая девочка, то
всю семью убивают. Бедные жители страны запуганы и остро
нуждаются в деньгах, поэтому в каждой семье дети будут появляться
до тех пор, пока не родится первая девочка.
а) Каким будет среднее число детей в семье?
б) Какой будет доля мальчиков в стране?
в) Какой будет средняя доля мальчиков в случайной семье?
г) Сколько в среднем мальчиков в случайно выбираемой семье?
""" | 3.359375 | 3 |
tests/conftest.py | Github-shipchain/transmission | 1 | 12772314 | import json
import pytest
from django.conf import settings as test_settings
from rest_framework import status
from rest_framework.request import ForcedAuthentication
from rest_framework.test import APIClient
from shipchain_common.utils import random_id
from shipchain_common.test_utils import get_jwt, mocked_rpc_response
from apps.authentication import passive_credentials_auth
from apps.shipments.models import Shipment, Device
def fake_get_raw_token(self, header):
    """Test stub: pull the raw token out of a 'JWT <token>' header string."""
    parts = header.split()
    return parts[1]
def fake_get_header(self, request):
    """Test stub: pretend every request carries a dummy JWT header."""
    header = b'JWT dummy'
    return header
# Monkeypatch DRF's ForcedAuthentication so force_authenticate() works with
# this project's JWT-based authentication during tests.
ForcedAuthentication.get_raw_token = fake_get_raw_token
ForcedAuthentication.get_header = fake_get_header

# Shared identifiers reused across the fixtures in this module.
USER_ID = random_id()
ORGANIZATION_ID = random_id()
VAULT_ID = random_id()
TRANSACTION_HASH = 'txHash'
DEVICE_ID = random_id()
@pytest.fixture(scope='session')
def token():
    # Session-wide JWT for a fixed user/organization pair.
    return get_jwt(username='<EMAIL>', sub=USER_ID, organization_id=ORGANIZATION_ID)
@pytest.fixture(scope='session')
def user(token):
    # Authenticated user object derived from the session token.
    return passive_credentials_auth(token)
@pytest.fixture(scope='session')
def api_client(user, token):
    # DRF test client pre-authenticated as the session user.
    client = APIClient()
    client.force_authenticate(user=user, token=token)
    return client
@pytest.fixture
def mocked_engine_rpc(mocker):
    # Stub every Engine RPC call made during shipment creation so tests never
    # talk to a real Engine/blockchain backend.
    mocker.patch('apps.shipments.rpc.Load110RPCClient.create_vault', return_value=(VAULT_ID, 's3://fake-vault-uri/'))
    mocker.patch('apps.shipments.rpc.Load110RPCClient.add_shipment_data', return_value={'hash': TRANSACTION_HASH})
    mocked_cst = mocker.patch('apps.shipments.rpc.Load110RPCClient.create_shipment_transaction',
                              return_value=('version', {}))
    mocked_cst.__qualname__ = 'ShipmentRPCClient.create_shipment_transaction'
    mocker.patch('apps.shipments.rpc.Load110RPCClient.sign_transaction', return_value=('version', {}))
    mocked_uvht = mocker.patch('apps.shipments.rpc.Load110RPCClient.set_vault_hash_tx', return_value={})
    mocked_uvht.__qualname__ = 'ShipmentRPCClient.set_vault_hash_tx'
    # Canned transaction receipt returned for send_transaction.
    mocker.patch('apps.shipments.rpc.Load110RPCClient.send_transaction', return_value={
        "blockHash": "0xccb595947a121e37df8bf689c3f88c6d9c7fb56070c9afda38551540f9e231f7",
        "blockNumber": 15,
        "contractAddress": None,
        "cumulativeGasUsed": 138090,
        "from": "0x13b1eebb31a1aa2ecaa2ad9e7455df2f717f2143",
        "gasUsed": 138090,
        "logs": [],
        "logsBloom": "0x0000000000",
        "status": True,
        "to": "0x25ff5dc79a7c4e34254ff0f4a19d69e491201dd3",
        "transactionHash": TRANSACTION_HASH,
        "transactionIndex": 0
    })
@pytest.fixture
def mocked_iot_api(mocker):
    # Stub AWS IoT shadow updates; returns the mock so tests can assert calls.
    return mocker.patch('apps.shipments.iot_client.DeviceAWSIoTClient.update_shadow', return_value=mocked_rpc_response(
        {'data': {'shipmentId': 'dunno yet', 'shipmentState': 'dunno yet'}}))
@pytest.fixture
def http_pretty():
    """Enable httpretty for the duration of a test.

    FIX: ``pytest.yield_fixture`` is deprecated (and removed in pytest >= 6.2);
    plain ``pytest.fixture`` has supported yield-style fixtures since pytest 3.0.
    """
    import httpretty
    httpretty.enable()
    yield httpretty
    httpretty.disable()
@pytest.fixture
def mocked_profiles(http_pretty):
    # Register fake Profiles-service endpoints for freshly generated wallet
    # and storage-credential IDs, and hand the IDs to the test.
    profiles_ids = {
        "shipper_wallet_id": random_id(),
        "carrier_wallet_id": random_id(),
        "storage_credentials_id": random_id()
    }
    http_pretty.register_uri(http_pretty.GET,
                             f"{test_settings.PROFILES_URL}/api/v1/wallet/{profiles_ids['shipper_wallet_id']}/",
                             body=json.dumps({'good': 'good'}), status=status.HTTP_200_OK)
    http_pretty.register_uri(http_pretty.GET,
                             f"{test_settings.PROFILES_URL}/api/v1/wallet/{profiles_ids['carrier_wallet_id']}/",
                             body=json.dumps({'good': 'good'}), status=status.HTTP_200_OK)
    http_pretty.register_uri(http_pretty.GET,
                             f"{test_settings.PROFILES_URL}/api/v1/storage_credentials/{profiles_ids['storage_credentials_id']}/",
                             body=json.dumps({'good': 'good'}), status=status.HTTP_200_OK)
    return profiles_ids
@pytest.fixture
def shipment(mocked_engine_rpc, mocked_iot_api):
    # A persisted Shipment owned by the session user, with all external
    # services (Engine RPC, AWS IoT) mocked out.
    return Shipment.objects.create(vault_id=VAULT_ID,
                                   carrier_wallet_id=random_id(),
                                   shipper_wallet_id=random_id(),
                                   storage_credentials_id=random_id(),
                                   owner_id=USER_ID)
@pytest.fixture
def shipment_with_device(shipment):
    # Attach a tracking Device to the base shipment fixture.
    shipment.device = Device.objects.create(id=DEVICE_ID)
    shipment.save()
    shipment.refresh_from_db(fields=('device',))
    return shipment
| 1.84375 | 2 |
test/05_environment/setup.py | mtreinish/cibuildwheel | 1 | 12772315 | from setuptools import setup, Extension
import sys, os
# explode if environment isn't correct, as set in CIBW_ENVIRONMENT
CIBW_TEST_VAR = os.environ.get('CIBW_TEST_VAR')
CIBW_TEST_VAR_2 = os.environ.get('CIBW_TEST_VAR_2')
PATH = os.environ.get('PATH')
if CIBW_TEST_VAR != 'a b c':
raise Exception('CIBW_TEST_VAR should equal "a b c". It was "%s"' % CIBW_TEST_VAR)
if CIBW_TEST_VAR_2 != '1':
raise Exception('CIBW_TEST_VAR_2 should equal "1". It was "%s"' % CIBW_TEST_VAR_2)
if '/opt/cibw_test_path' not in PATH:
raise Exception('PATH should contain "/opt/cibw_test_path". It was "%s"' % PATH)
if '$PATH' in PATH:
raise Exception('$PATH should be expanded in PATH. It was "%s"' % PATH)
setup(
name="spam",
ext_modules=[Extension('spam', sources=['spam.c'])],
version="0.1.0",
)
| 2.328125 | 2 |
what-is-a-coroutine/8-asyncio-aiohttp.py | jreese/pycon | 32 | 12772316 | # Copyright 2019 <NAME>
# Licensed under the MIT License
import asyncio
import time
from aiohttp import request
# Demo targets: a handful of public sites fetched concurrently below.
URLS = [
    "https://2019.northbaypython.org",
    "https://duckduckgo.com",
    "https://jreese.sh",
    "https://news.ycombinator.com",
    "https://python.org",
]
# Coroutines with aiohttp
async def fetch(url: str) -> str:
    """Fetch *url* and return its body decoded as UTF-8."""
    async with request("GET", url) as resp:
        return await resp.text("utf-8")
async def main():
    """Fetch every URL concurrently and print a preview of each body."""
    tasks = [fetch(u) for u in URLS]
    pages = await asyncio.gather(*tasks)
    for page in pages:
        print(f"{page[:20]!r}")
if __name__ == "__main__":
asyncio.run(main())
| 3.265625 | 3 |
examples/meeting_room/views/rest_mediators.py | billyrrr/onto | 1 | 12772317 | <reponame>billyrrr/onto
from flask import Response, jsonify
from examples.meeting_room.domain_models import Meeting
from examples.meeting_room.view_models import MeetingSession
from examples.meeting_room.view_models.meeting_session import MeetingSessionC
from onto.source.rest import RestViewModelSource
from onto.view import Mediator
# class RestMediator(Mediator):
#
# view_model_cls = None
# source = RestViewModelSource()
class ViewModelResponse(Response):
    # Marker subclass of flask.Response for view-model payloads; carries no
    # extra behaviour yet.
    pass
class MeetingSessionRest(Mediator):
    """Mediator exposing MeetingSession view models over a REST source.

    NOTE(review): ``rest.emit`` presumably pushes each snapshot to the client
    as the underlying Meeting document changes — confirm against the onto
    RestViewModelSource implementation.
    """
    # from onto import source, sink

    view_model_cls = MeetingSessionC

    rest = RestViewModelSource()

    @rest.route('/<doc_id>', methods=('GET',))
    def materialize_meeting_session(self, doc_id):
        # Look up the domain model, then subscribe a view model that emits a
        # fresh JSON snapshot on every change (once=False keeps listening).
        meeting = Meeting.get(doc_id=doc_id)

        def notify(obj):
            d = obj.to_snapshot().to_dict()
            content = jsonify(d)
            self.rest.emit(content)

        _ = MeetingSessionC.get(
            doc_id=meeting.doc_id,
            once=False,
            f_notify=notify
        )

    # @rest.route('/', methods=('GET',))
    # def list_meeting_ids(self):
    #     return [meeting.to_snapshot().to_dict() for meeting in Meeting.all()]

    @classmethod
    def start(cls, app):
        # Register the REST routes on the given Flask app.
        cls.rest.start(app)
| 2.265625 | 2 |
CodeGen/symbolTableItem.py | MMDBadCoder/Compiler-project | 0 | 12772318 | class SymbolTableItem:
def __init__(self, type, name, customId, value):
self.type = type
self.name = name
self.value = value
# if type == 'int' or type == 'bool':
# self.value = 0
# elif type == 'string':
# self.value = ' '
# else:
# self.value = None
self.id = 'id_{}'.format(customId)
def __str__(self):
return '{}, {}, {}, {}'.format(self.type, self.name, self.value, self.id)
| 3.03125 | 3 |
Desafios/Desafio 014 (Conversor de temp.).py | Kimberly07-Ernane/Pythondesafios | 0 | 12772319 | <gh_stars>0
# Challenge 14
# Read a temperature in degrees Celsius and
# convert it to degrees Fahrenheit.
temp=int(input('Digite a temperatura °C: '))
# Standard conversion: F = 9*C/5 + 32.
f=9*temp/5+32
print('A temperatura de {}°C corresponde a {}°F!'.format(temp,f))
| 4.15625 | 4 |
tbf/harness_generation.py | leostrakosch/testbasedfalsification | 10 | 12772320 | import tbf.utils as utils
class HarnessCreator(object):
    """Generates C harness source that drives a program under test.

    The harness supplies values for nondeterministic methods either from
    stdin (generic harness) or from a fixed test vector.
    """

    def _get_vector_read_method(self):
        """Return C source for ``parse_inp``, which parses one input line.

        The parser tries unsigned integer, then signed integer, then long
        double, and returns a 16-byte buffer holding the raw value bytes.
        """
        return b"""char * parse_inp(char * __inp_var) {
    unsigned int input_length = strlen(__inp_var)-1;
    /* Remove '\\n' at end of input */
    if (__inp_var[input_length] == '\\n') {
        __inp_var[input_length] = '\\0';
    }
    char * parseEnd;
    char * value_pointer = malloc(16);
    unsigned long long intVal = strtoull(__inp_var, &parseEnd, 0);
    if (*parseEnd != 0) {
        long long sintVal = strtoll(__inp_var, &parseEnd, 0);
        if (*parseEnd != 0) {
            long double floatVal = strtold(__inp_var, &parseEnd);
            if (*parseEnd != 0) {
                fprintf(stderr, "Can't parse input: '%s' (failing at '%s')\\n", __inp_var, parseEnd);
                abort();
            } else {
                memcpy(value_pointer, &floatVal, 16);
            }
        } else {
            memcpy(value_pointer, &sintVal, 8);
        }
    } else {
        memcpy(value_pointer, &intVal, 8);
    }
    return value_pointer;
}\n\n"""

    def __init__(self):
        # Reserved name for symbolic value representation; not referenced
        # elsewhere in this class.
        self.repr_type = b"__repr"

    def _get_preamble(self):
        """Return the harness preamble as bytes: external declarations,
        the assume method, and the ``parse_inp`` helper."""
        preamble = ''
        preamble += utils.EXTERNAL_DECLARATIONS
        preamble += "\n"
        preamble += utils.get_assume_method() + "\n"
        preamble = preamble.encode()
        preamble += self._get_vector_read_method()
        return preamble

    def _get_error_definition(self, method_name):
        """Return C source (bytes) for the error method: prints the error
        marker to stderr and exits with status 1."""
        definition = 'void {0}() {{\n'.format(method_name)
        definition += '    fprintf(stderr, \"{0}\\n\");\n'.format(
            utils.ERROR_STRING)
        definition += '    exit(1);\n}\n\n'
        return definition.encode()

    def _get_nondet_method_definitions(self, nondet_methods, test_vector):
        """Emit C definitions (bytes) for each nondeterministic method.

        With a *test_vector*, each call returns the next vector entry
        (tracked by a global ``access_counter``); otherwise the value is
        read from stdin at run time.
        """
        definitions = b''
        if test_vector is not None:
            definitions += b'unsigned int access_counter = 0;\n\n'

        for method in nondet_methods:
            definitions += utils.get_method_head(method['name'], method['type'],
                                                 method['params']).encode()
            definitions += b' {\n'
            if method['type'] != 'void':
                definitions += "    unsigned int inp_size = 3000;\n".encode()
                definitions += "    char * inp_var = malloc(inp_size);\n".encode(
                )
                if test_vector is None:  # Build generic harness
                    definitions += "    fgets(inp_var, inp_size, stdin);\n".encode(
                    )
                else:
                    # Fixed vector: dispatch on how many values were consumed.
                    definitions += "    switch(access_counter) {\n".encode()
                    for idx, item in enumerate(test_vector.vector):
                        if type(item['value']) is bytes:
                            value = item['value']
                        else:
                            value = item['value'].encode()
                        # yapf: disable
                        definitions += b''.join([
                            b'        case ', str(idx).encode(),
                            b': strcpy(inp_var, "', value, b'"); break;\n'
                        ])
                        # yapf: enable
                    # Reading past the vector end aborts the harness.
                    definitions += b"        default: abort();\n"
                    definitions += b"    }\n"
                    definitions += b"    access_counter++;\n"

                definitions += b''.join([
                    b'    return *((', method['type'].encode(),
                    b' *) parse_inp(inp_var));\n'
                ])
            definitions += b'}\n\n'
        return definitions

    def create_harness(self, nondet_methods, error_method, test_vector=None):
        """Assemble the complete harness source as bytes.

        Concatenates the preamble, the error-method definition (if any),
        and one definition per nondeterministic method.
        """
        harness = b''
        harness += self._get_preamble()
        if error_method:
            harness += self._get_error_definition(error_method)
        harness += self._get_nondet_method_definitions(nondet_methods,
                                                       test_vector)
        return harness
| 2.5625 | 3 |
declaraciones/declaracion/urls/informacion_personal.py | rafaelhn2021/proyecto | 0 | 12772321 | from django.urls import path, re_path
from declaracion.views import (DeclaracionFormView, DatosCurricularesView,
DatosEncargoActualView, ExperienciaLaboralView,
ConyugeDependientesView,
DatosCurricularesDelete,
ExperienciaLaboralDeleteView,
ConyugeDependientesDeleteView,DeclaracionFiscalFormView,
DeclaracionFiscalDelete,listaMunicipios,DomiciliosViews, ParejaView,ParejaDeleteView)
from django.views.generic import TemplateView
from django.conf.urls import url
# Regex fragment matching a UUID-style folio (8-4-4-4-12 hex groups).
# Byte-identical to the literal previously repeated in every route below.
_FOLIO = r'[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}'

# URL routes for the personal-information section of a declaration.
# All patterns, view classes and route names are unchanged; only the
# duplicated folio regex was factored into _FOLIO.
urlpatterns = [
    # Fiscal declaration: form and delete.
    re_path(r'^declaracion-fiscal/(?P<folio>' + _FOLIO + r')/$',
            DeclaracionFiscalFormView.as_view(),
            name='declaracion-fiscal'),
    re_path(r'^declaracion-fiscal/(?P<folio>' + _FOLIO + r')/borrar/(?P<pk>\d+)/$',
            DeclaracionFiscalDelete.as_view(),
            name='declaracion-fiscal-borrar'),
    # General information: new declaration (by type) or existing (by folio).
    re_path(r'^informacion-general/(?P<cat_tipos_declaracion>[0-9])/$', DeclaracionFormView.as_view(),
            name='informacion-general'),
    re_path(r'^informacion-general/(?P<folio>' + _FOLIO + r')/$',
            DeclaracionFormView.as_view(),
            name='informacion-general'),
    re_path(r'^direccion/(?P<folio>' + _FOLIO + r')/$',
            DomiciliosViews.as_view(),
            name='direccion'),
    # Curricular data: list, add, edit, delete.
    re_path(r'^datos-curriculares/(?P<folio>' + _FOLIO + r')/$',
            DatosCurricularesView.as_view(),
            name='datos-curriculares'),
    re_path(r'^datos-curriculares/(?P<folio>' + _FOLIO + r')/agregar/$',
            DatosCurricularesView.as_view(), {'agregar': True},
            name='datos-curriculares-agregar'),
    re_path(r'^datos-curriculares/(?P<folio>' + _FOLIO + r')/editar/(?P<pk>\d+)/$',
            DatosCurricularesView.as_view(),
            name='datos-curriculares-editar'),
    re_path(r'^datos-curriculares/(?P<folio>' + _FOLIO + r')/borrar/(?P<pk>\d+)/$',
            DatosCurricularesDelete.as_view(),
            name='datos-curriculares-borrar'),
    re_path(r'^datos-del-encargo-actual/(?P<folio>' + _FOLIO + r')/$',
            DatosEncargoActualView.as_view(),
            name='datos-del-encargo-actual'),
    # Work experience: list, add, edit, delete.
    re_path(r'^experiencia-laboral/(?P<folio>' + _FOLIO + r')/$',
            ExperienciaLaboralView.as_view(),
            name='experiencia-laboral'),
    re_path(r'^experiencia-laboral/(?P<folio>' + _FOLIO + r')/agregar/$',
            ExperienciaLaboralView.as_view(), {'agregar': True},
            name='experiencia-laboral-agregar'),
    re_path(r'^experiencia-laboral/(?P<folio>' + _FOLIO + r')/editar/(?P<pk>\d+)/$',
            ExperienciaLaboralView.as_view(),
            name='experiencia-laboral-editar'),
    re_path(r'^experiencia-laboral/(?P<folio>' + _FOLIO + r')/borrar/(?P<pk>\d+)/$',
            ExperienciaLaboralDeleteView.as_view(),
            name='experiencia-laboral-borrar'),
    # Spouse / economic dependents: list, add, edit, delete.
    re_path(r'^dependientes-economicos/(?P<folio>' + _FOLIO + r')/$',
            ConyugeDependientesView.as_view(),
            name='dependientes-economicos'),
    re_path(r'^dependientes-economicos/(?P<folio>' + _FOLIO + r')/agregar/$',
            ConyugeDependientesView.as_view(), {'agregar': True},
            name='dependientes-economicos-agregar'),
    re_path(r'^dependientes-economicos/(?P<folio>' + _FOLIO + r')/editar/(?P<pk>\d+)/$',
            ConyugeDependientesView.as_view(),
            name='dependientes-economicos-editar'),
    re_path(r'^dependientes-economicos/(?P<folio>' + _FOLIO + r')/borrar/(?P<pk>\d+)/$',
            ConyugeDependientesDeleteView.as_view(),
            name='dependientes-economicos-borrar'),
    re_path(r'^datos-pareja/(?P<folio>' + _FOLIO + r')/$',
            ParejaView.as_view(),
            name='datos-pareja'),
    # AJAX helper: municipality list for a selected state.
    url(r'^ajax/lista_municipios/$', listaMunicipios, name='lista_municipios'),
]
| 2.03125 | 2 |
lido_sdk/methods/typing.py | lidofinance/lido-validator.py | 6 | 12772322 | try:
# For python >= 3.8
from typing import TypedDict
except ImportError:
# For python 3.7
from typing_extensions import TypedDict
class Operator(TypedDict):
    """Typed shape of one node-operator record (field names mirror the source data)."""
    index: int  # operator index in the registry
    active: bool
    name: str
    rewardAddress: str
    stakingLimit: int
    stoppedValidators: int
    totalSigningKeys: int
    usedSigningKeys: int
class OperatorKey(TypedDict):
    """Typed shape of one signing key belonging to an operator."""
    index: int  # key index within the operator's key set
    operator_index: int  # index of the owning operator
    key: bytes
    depositSignature: bytes
    used: bool
| 2.125 | 2 |
heartbeat/views.py | zouyapeng/instance_monitor_server | 0 | 12772323 | #!/usr/bin/env python
# -*- coding: utf8 -*-
from django.shortcuts import render
from heartbeat.models import MonitorAgent, InstanceUUID
from trigger.models import Trigger
from heartbeat.serializers import MonitorAgentSerializer
from rest_framework import generics
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
import datetime
def get_config(agent):
    """Map each of *agent*'s instance UUIDs to its serialized trigger list."""
    return {
        instance.uuid: [
            trigger.format_dict()
            for trigger in Trigger.objects.filter(instance_uuid=instance.uuid)
        ]
        for instance in agent.instances_uuid.all()
    }
class MonitorAgentCreateView(generics.ListCreateAPIView):
    """Heartbeat/registration endpoint for monitor agents.

    POST handles three cases:
      * known agent id + reported uuids: heartbeat plus UUID reconciliation;
      * known agent id, no uuids: plain heartbeat (also refreshes update_time);
      * no id but a hostname: register (or look up) the agent by hostname.
    The response always carries the agent id plus the trigger config when
    the agent was flagged server-side for a config update.
    """
    queryset = MonitorAgent.objects.all()
    serializer_class = MonitorAgentSerializer

    def post(self, request, *args, **kwargs):
        agent_id = request.data.get('id', None)
        hostname = request.data.get('hostname', None)
        uuids = request.data.get('uuids', None)
        if agent_id and uuids is not None:
            # Heartbeat from a registered agent that also reports its
            # current set of instance UUIDs.
            config = None
            try:
                agent = MonitorAgent.objects.get(id=agent_id)
            except MonitorAgent.DoesNotExist:
                # Unknown id: answer with id=None so the agent re-registers.
                return Response({'id': None, 'config': config})
            if agent.update_status:
                # Config changed server-side; ship the fresh trigger config.
                config = get_config(agent)
                agent.update_status = False
            agent.status = True
            agent.save()
            # Reconcile stored instance UUIDs with the reported set:
            # drop vanished instances, add newly reported ones.
            instances_uuid = agent.instances_uuid.all()
            instances_uuid_list = [instance_uuid.uuid for instance_uuid in instances_uuid]
            for instance_uuid in instances_uuid:
                if instance_uuid.uuid not in uuids:
                    instance_uuid.delete()
            for uuid in uuids:
                if uuid not in instances_uuid_list:
                    instance_uuid = InstanceUUID(uuid=uuid, agent=agent)
                    instance_uuid.save()
            return Response({'id': agent_id, 'config': config})
        elif agent_id and uuids is None:
            # Plain heartbeat without an instance list; only this branch
            # refreshes update_time.
            config = None
            try:
                agent = MonitorAgent.objects.get(id=agent_id)
            except MonitorAgent.DoesNotExist:
                return Response({'id': None, 'config': config})
            if agent.update_status:
                config = get_config(agent)
                agent.update_status = False
            agent.status = True
            agent.update_time = datetime.datetime.now()
            agent.save()
            return Response({'id': agent_id, 'config': config})
        elif not agent_id and hostname:
            # First contact: find or create the agent by hostname.
            try:
                agent = MonitorAgent.objects.get(hostname=hostname)
            except MonitorAgent.DoesNotExist:
                agent = MonitorAgent(hostname=hostname, status=0)
                agent.save()
            if uuids is not None:
                # Same reconciliation as above, but membership is checked
                # via a DB lookup instead of a pre-built list.
                instances_uuid = agent.instances_uuid.all()
                for instance_uuid in instances_uuid:
                    if instance_uuid.uuid not in uuids:
                        instance_uuid.delete()
                for uuid in uuids:
                    try:
                        InstanceUUID.objects.get(uuid=uuid)
                    except InstanceUUID.DoesNotExist:
                        instance_uuid = InstanceUUID(uuid=uuid, agent=agent)
                        instance_uuid.save()
            # Registration always returns the full config.
            config = get_config(agent)
            agent.update_status = False
            agent.status = True
            agent.save()
            return Response({'id': agent.id, 'config': config})
        else:
            # Neither an agent id nor a hostname was supplied.
            return Response({'id': None, 'config': None})
class InstanceUUIDRetrieveView(generics.RetrieveAPIView):
    """Read-only endpoint returning a single InstanceUUID record."""
    queryset = InstanceUUID.objects.all()
| 1.882813 | 2 |
uploader/config.py | wingez/G-CPU | 0 | 12772324 | <reponame>wingez/G-CPU
from pathlib import Path
import json
import os
def loadconfig(name):
    """Load and return ``config/<name>.json`` from the project root.

    The project root is the parent of the directory containing this file.

    Args:
        name: base name of the config file, without the ``.json`` suffix.

    Returns:
        The parsed JSON content (typically a dict).

    Raises:
        ValueError: if the config file does not exist (message kept for
            backward compatibility with existing callers).
    """
    basefolder = Path(__file__).resolve().parent.parent
    path = basefolder / 'config' / (name + '.json')
    if not path.exists():
        raise ValueError('File not found. ' + str(path))
    # Context manager guarantees the file handle is closed; explicit
    # encoding avoids platform-dependent defaults.
    with path.open(encoding='utf-8') as jsonfile:
        return json.load(jsonfile)
| 2.46875 | 2 |
fairmodels/plotnine/geoms/geom_map.py | Locust2520/python-fairmodels | 0 | 12772325 | import pandas as pd
import numpy as np
from matplotlib.collections import PatchCollection, LineCollection
from descartes.patch import PolygonPatch
# geopandas is optional; record its availability so geom_map can raise a
# clear error at construction time instead of an ImportError here.
try:
    import geopandas  # noqa: F401
except ImportError:
    HAS_GEOPANDAS = False
else:
    HAS_GEOPANDAS = True
from ..doctools import document
from ..exceptions import PlotnineError
from ..utils import to_rgba, SIZE_FACTOR
from .geom import geom
from .geom_point import geom_point
@document
class geom_map(geom):
    """
    Draw map feature

    The map feature are drawn without any special projections.

    {usage}

    Parameters
    ----------
    {common_parameters}

    Notes
    -----
    This geom is best suited for plotting a shapefile read into
    geopandas dataframe. The dataframe should have a ``geometry``
    column.
    """
    DEFAULT_AES = {'alpha': 1, 'color': '#111111', 'fill': '#333333',
                   'linetype': 'solid', 'shape': 'o', 'size': 0.5,
                   'stroke': 0.5}
    DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
                      'na_rm': False}
    REQUIRED_AES = {'geometry'}
    legend_geom = 'polygon'

    def __init__(self, mapping=None, data=None, **kwargs):
        # Fail early with a helpful message if the optional dependency
        # is missing (HAS_GEOPANDAS is set at import time).
        if not HAS_GEOPANDAS:
            raise PlotnineError(
                "geom_map requires geopandas. "
                "Please install geopandas."
            )
        geom.__init__(self, mapping, data, **kwargs)
        # Almost all geodataframes loaded from shapefiles
        # have a geometry column.
        if 'geometry' not in self.mapping:
            self.mapping['geometry'] = 'geometry'

    def setup_data(self, data):
        """Drop null geometries and attach xmin/xmax/ymin/ymax bound columns."""
        if not len(data):
            return data

        # Remove any NULL geometries, and remember
        # All the non-Null shapes in a shapefile are required to be
        # of the same shape type.
        bool_idx = np.array([g is not None for g in data['geometry']])
        if not np.all(bool_idx):
            data = data.loc[bool_idx]

        # Add polygon limits. Scale training uses them
        try:
            bounds = data['geometry'].bounds
        except AttributeError:
            # The geometry is not a GeoSeries
            # Bounds calculation is extracted from
            # geopandas.base.GeoPandasBase.bounds
            bounds = pd.DataFrame(
                np.array([x.bounds for x in data['geometry']]),
                columns=['xmin', 'ymin', 'xmax', 'ymax'],
                index=data.index)
        else:
            # GeoSeries.bounds uses minx/maxx/miny/maxy column names.
            bounds.rename(
                columns={
                    'minx': 'xmin',
                    'maxx': 'xmax',
                    'miny': 'ymin',
                    'maxy': 'ymax'
                },
                inplace=True)

        data = pd.concat([data, bounds], axis=1)
        return data

    def draw_panel(self, data, panel_params, coord, ax, **params):
        """Draw the geometries onto *ax*, dispatching on the shapely geom type."""
        if not len(data):
            return data

        # NaN aesthetics are rendered as 'none' (invisible).
        data.loc[data['color'].isnull(), 'color'] = 'none'
        data.loc[data['fill'].isnull(), 'fill'] = 'none'
        data['fill'] = to_rgba(data['fill'], data['alpha'])

        # All non-null shapes share one type; inspect the first.
        geom_type = data.geometry.iloc[0].geom_type
        if geom_type in ('Polygon', 'MultiPolygon'):
            data['size'] *= SIZE_FACTOR
            patches = [PolygonPatch(g) for g in data['geometry']]
            coll = PatchCollection(
                patches,
                edgecolor=data['color'],
                facecolor=data['fill'],
                linestyle=data['linetype'],
                linewidth=data['size'],
                zorder=params['zorder'],
            )
            ax.add_collection(coll)
        elif geom_type == 'Point':
            # Extract point coordinates from shapely geom
            # and plot with geom_point
            arr = np.array([list(g.coords)[0] for g in data['geometry']])
            data['x'] = arr[:, 0]
            data['y'] = arr[:, 1]
            for _, gdata in data.groupby('group'):
                gdata.reset_index(inplace=True, drop=True)
                gdata.is_copy = None
                geom_point.draw_group(
                    gdata, panel_params, coord, ax, **params)
        elif geom_type == 'LineString':
            data['size'] *= SIZE_FACTOR
            data['color'] = to_rgba(data['color'], data['alpha'])
            segments = [list(g.coords) for g in data['geometry']]
            coll = LineCollection(
                segments,
                edgecolor=data['color'],
                linewidth=data['size'],
                linestyle=data['linetype'],
                zorder=params['zorder'])
            ax.add_collection(coll)
| 2.78125 | 3 |
src/datasets/spectrometer.py | IBM/optimizing-precision-in-imbalanced-datasets | 6 | 12772326 | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import random
import logging
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, StratifiedKFold
class DataLoader:
    """Loads the Spectrometer dataset and yields cross-validation folds."""

    def __init__(self, config, logger):
        # Initialize Dataloader Configuration
        logging.info('[DATALOADER]: Initializing Spectrometer Dataloader')
        self.config = config
        self.dl_cfg = self.config['dataloader']

        # Initialize PRNG Seed Values
        if self.dl_cfg.enable_seed:
            random.seed(self.dl_cfg.seed)
            np.random.seed(self.dl_cfg.seed)

        # Load Dataset
        logging.info('[DATALOADER]: Loading Dataset Files')
        logging.info('[DATALOADER]: > Loading File: ' + self.dl_cfg.data_path)

        # Preprocess Data
        # Records are separated by blank lines; parentheses are stripped
        # and the first two tokens of each record are dropped before the
        # remaining values are parsed as floats.
        raw_data = open(self.dl_cfg.data_path, 'r').read().split('\n\n')
        data = []
        for row in raw_data:
            if len(row) == 0: continue
            row = row.replace('(', '').replace(')', '').strip()
            row = row.replace('\n', ' ').split(' ')[2:]
            row = list(map(lambda x: float(x), row))
            if row[0] == 0: row[0] = 1.0  # Replace Encoding (label 0 -> 1)
            data.append(row)

        self.data = np.array(data)
        logging.info('[DATALOADER]: > Loaded: ' + self.dl_cfg.data_path + '\t' + \
                     'Data Shape: ' + str(self.data.shape))

    def get_data(self):
        """Yield (fold, X_train, y_train, X_test, y_test) per CV fold.

        Column 0 holds the class label (1-based in the file, shifted to
        0-based here); the remaining columns are features.

        NOTE(review): StratifiedKFold.split normally requires labels (y)
        as a second argument; here it is called with the data only --
        confirm the stratified path is actually exercised.
        """
        # Initialize Crossfold Validation
        if self.dl_cfg.crossval.stratified:
            kf = StratifiedKFold(n_splits=self.dl_cfg.crossval.folds,
                                 shuffle=self.dl_cfg.crossval.shuffle,
                                 random_state=self.dl_cfg.crossval.random_state)
        else:
            kf = KFold(n_splits=self.dl_cfg.crossval.folds,
                       shuffle=self.dl_cfg.crossval.shuffle,
                       random_state=self.dl_cfg.crossval.random_state)

        for fold, (train_index, test_index) in enumerate(kf.split(self.data)):
            # Initialize Data Features and Labels
            X_train = self.data[train_index, 1:]
            y_train = self.data[train_index, 0].astype(int) - 1
            X_test = self.data[test_index, 1:]
            y_test = self.data[test_index, 0].astype(int) - 1

            # Set Dataloader Attributes
            self.num_class = len(np.unique(y_train))
            yield fold, X_train, y_train, X_test, y_test
| 2 | 2 |
sb2web/pyscripts/sb2web_create.py | pepstack/java-training | 0 | 12772327 | <reponame>pepstack/java-training<gh_stars>0
#!/usr/bin/python
#-*- coding: UTF-8 -*-
#
# @file: sb2web_create.py
#
# @version:
# @create:
# @update: 2019-09-20
#
#######################################################################
from __future__ import print_function
import os, sys, stat, signal, shutil, inspect, commands, time, datetime
import yaml, codecs, json, base64
import optparse, ConfigParser
# http://docs.jinkan.org/docs/jinja2/
# http://docs.jinkan.org/docs/jinja2/templates.html
from jinja2 import Environment, PackageLoader, FileSystemLoader
#######################################################################
# application specific
APPFILE = os.path.realpath(sys.argv[0])  # absolute path of this script
APPHOME = os.path.dirname(APPFILE)  # directory holding the script
APPNAME,_ = os.path.splitext(os.path.basename(APPFILE))  # script base name
APPVER = "1.0.0"
APPHELP = "Create java springboot2 web project."
#######################################################################
# import your local modules
import utils.utility as util
import utils.evntlog as elog
from utils.error import try_except_log
#######################################################################
@try_except_log
def validate_options_config (options):
    """Normalize CLI options in place and raise on invalid values."""
    elog.info("validate options...")

    # Derive name/package defaults from the artifact and group ids.
    if not options.name:
        options.name = options.artifact.replace("-", "")
    if not options.packagename:
        options.packagename = ".".join([options.group, options.name])

    packaging = options.packaging.capitalize()
    if packaging not in ("Jar", "War"):
        raise Exception("packaging not Jar or War")
    options.packaging = packaging

    if options.java != "8":
        raise Exception("java version not 8")

    # Resolve the servlet context path: default "/", or the artifact name
    # when the "$name" placeholder was given.
    if not options.context_path:
        options.context_path = "/"
    elif options.context_path == "$name":
        options.context_path = options.name
@try_except_log
def start_run_project (artifactDir):
    """Launch the generated project with 'mvn spring-boot:run' (blocking).

    NOTE(review): uses os.chdir + os.system; artifactDir comes from CLI
    options, so consider subprocess with an argument list instead.
    """
    runcmd = "mvn spring-boot:run"
    elog.force(runcmd)
    os.chdir(artifactDir)
    os.system(runcmd)
    pass
########################################################################
@try_except_log
def read_file_content (pathfile):
    """Read *pathfile* as UTF-8 and return the whole content, encoded."""
    fd = util.open_file(pathfile, mode='r+b', encoding='utf-8')
    lines = fd.readlines()
    fd.close()
    return "".join(line.encode('utf-8') for line in lines)
@try_except_log
def write_out_file(fout, outcontent):
    """Write *outcontent* to *fout* as UTF-8 (callback for util.create_output_file)."""
    util.write_file_utf8(fout, outcontent.encode('utf-8'))
    pass
@try_except_log
def render_output_file (srcfile, dstfile, renderConfig, verbose = True):
    """Render the Jinja2 template *srcfile* and write the result to *dstfile*."""
    dstpath = os.path.dirname(dstfile)
    dstname = os.path.basename(dstfile)
    outputfile = os.path.join(dstpath, dstname)

    # Template lookup must be relative to the Jinja2 environment root.
    srcfileRelPath = os.path.relpath(srcfile, renderConfig.j2envRoot)
    j2tmpl = renderConfig.j2env.get_template(srcfileRelPath)
    outcontent = j2tmpl.render(**renderConfig)
    util.create_output_file(outputfile, write_out_file, outcontent, False)
    pass
@try_except_log
def copy_template (srcfile, dstfile, renderConfig, verbose):
    """Copy one file of the template tree; '.template' files are rendered first."""
    _, ext = os.path.splitext(os.path.basename(srcfile))
    if ext == ".template":
        # Needs template processing: render, dropping the '.template' suffix.
        dstname, _ = os.path.splitext(dstfile)
        if verbose:
            util.info2("render file: %s -> %s" % (srcfile, dstname))
        render_output_file(srcfile, dstname, renderConfig, verbose)
    else:
        # No template processing needed: copy the file as-is.
        if verbose:
            util.info("copy file: %s -> %s" % (srcfile, dstfile))
        shutil.copyfile(srcfile, dstfile)
    pass
########################################################################
@try_except_log
def create_sb2web_project (appConfig, options):
    """Generate a Spring Boot 2 web project from the bundled template tree."""
    artifactDir = os.path.join(appConfig.projectsDir, options.artifact)
    elog.info("starting create project: %s", artifactDir)

    # Refuse to overwrite an existing project unless --force was given.
    if util.dir_exists(artifactDir):
        util.warn("artifact already exists: %s" % artifactDir)
        if not options.force:
            util.warn("using '--force' to ovewrite it");
            sys.exit(0)
            pass
        try:
            shutil.rmtree(artifactDir)
        except:
            pass

    pairConfig = (
        util.DotDict(
        ),
        util.DotDict(
        )
    )

    # Load the template project's configuration file, if present.
    templateDict = {}
    templateYaml = os.path.join(appConfig.sb2webRoot, "j2template.yaml")
    if util.file_exists(templateYaml):
        fd = open(templateYaml)
        templateDict = yaml.load(fd.read())
        fd.close()

    # Variables made available to every Jinja2 template during rendering.
    renderConfig = util.DotDict(
        LICENSE_HEADER = read_file_content(os.path.join(APPHOME, 'license_header.txt'))
        ,j2envRoot = appConfig.sb2webRoot
        ,j2env = Environment(loader=FileSystemLoader(appConfig.sb2webRoot))
        ,templateDict = templateDict
        ,springbootVer = options.springboot
        ,groupId = options.group
        ,artifactId = options.artifact
        ,artifactName = options.name
        ,artifactVer = options.ver
        ,description = options.description
        ,packageName = options.packagename
        ,packaging = options.packaging
        ,javaVersion = options.java
        ,serverPort = options.port
        ,contextPath = options.context_path
    )

    # Copy the directory tree, rendering template files along the way.
    util.copydirtree(appConfig.sb2webRoot, artifactDir, pairConfig, True, copy_template, renderConfig)
    elog.info("success create project: %s", artifactDir)

    if options.run:
        elog.info("starting run: %s", artifactDir)
        start_run_project(artifactDir)
    pass
########################################################################
# Main entry: only handles logging setup and option validation, then
# delegates to create_sb2web_project.
#
def main(parser, appConfig):
    """Parse CLI options, configure logging, and build the project."""
    import utils.logger as logger

    (options, args) = parser.parse_args(args=None, values=None)

    loggerConfig = util.DotDict(
        logging_config = options.log_config
        ,file = APPNAME + '.log'
        ,name = options.logger
    )

    logger_dictConfig = logger.set_logger(loggerConfig, options.log_path, options.log_level)

    validate_options_config(options)

    # Echo the effective configuration for the user.
    util.print_options_attrs2(options, [
        ("springboot", "version"),
        ("group", "groupId"),
        ("artifact", "artifactId"),
        ("name", "name"),
        ("ver", "artifact.version"),
        ("description", "description"),
        ("packagename", "packageName"),
        ("packaging", "packaging"),
        ("java", "java.version"),
        ("port", "server.port"),
        ("context_path", "server.servlet.context-path"),
    ])

    create_sb2web_project(appConfig, options)
    pass
#######################################################################
# Usage:
# $ %prog
# or
# $ %prog --force
#
if __name__ == "__main__":
    parser, group, optparse, profile = util.init_parser_group(
        apphome = APPHOME,
        appname = APPNAME,
        appver = APPVER,
        apphelp = APPHELP,
        usage = "%prog [Options]",
        group_options = os.path.join(APPHOME, "options/sb2web_options.yaml")
    )
    print(profile)

    appRoot = os.path.dirname(APPHOME)

    # Application's local default configuration
    appConfig = util.DotDict(
        appRoot = appRoot
        ,sb2webRoot = os.path.join(APPHOME, "sb2web")
        ,projectsDir = os.path.join(appRoot, "projects")
    )

    # Run the main function
    main(parser, appConfig)
    sys.exit(0)
| 2.078125 | 2 |
pyscf/geomopt/berny_solver.py | robert-anderson/pyscf | 1 | 12772328 | <gh_stars>1-10
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Interface to geometry optimizer pyberny https://github.com/azag0/pyberny
'''
from __future__ import absolute_import
try:
from berny import Berny, geomlib, Logger
except ImportError:
msg = ('Geometry optimizer pyberny not found.\npyberny library '
'can be found on github https://github.com/azag0/pyberny.\n'
'You can install pyberny with "pip install pyberny"')
raise ImportError(msg)
import numpy
from pyscf import lib
from pyscf import __config__
INCLUDE_GHOST = getattr(__config__, 'geomopt_berny_solver_optimize_include_ghost', True)
ASSERT_CONV = getattr(__config__, 'geomopt_berny_solver_optimize_assert_convergence', True)
def to_berny_geom(mol, include_ghost=INCLUDE_GHOST):
    """Convert a pyscf Mole to a pyberny geometry (coordinates in Angstrom).

    Ghost atoms (nuclear charge 0) are either kept (mapped to symbol 'H'
    as a workaround) or excluded, depending on *include_ghost*.
    """
    atom_charges = mol.atom_charges()
    if include_ghost:
        # Symbol Ghost is not supported in current version of pyberny
        #species = [mol.atom_symbol(i) if z != 0 else 'Ghost'
        #           for i,z in enumerate(atom_charges)]
        species = [mol.atom_symbol(i) if z != 0 else 'H'
                   for i,z in enumerate(atom_charges)]
        coords = mol.atom_coords() * lib.param.BOHR
    else:
        atmlst = numpy.where(atom_charges != 0)[0]  # Exclude ghost atoms
        species = [mol.atom_symbol(i) for i in atmlst]
        coords = mol.atom_coords()[atmlst] * lib.param.BOHR

    # geomlib.Geometry is available in the new version of pyberny solver. (issue #212)
    if getattr(geomlib, 'Geometry', None):
        return geomlib.Geometry(species, coords)
    else:
        return geomlib.Molecule(species, coords)
def _geom_to_atom(mol, geom, include_ghost):
    """Return atomic coordinates (Bohr) for *mol* updated from a berny geometry."""
    position = numpy.array([entry[1] for entry in list(geom)])
    if include_ghost:
        return position / lib.param.BOHR
    # Only real (non-ghost) atoms were optimized; splice their new
    # positions back into the full coordinate array.
    atmlst = numpy.where(mol.atom_charges() != 0)[0]
    atom_coords = mol.atom_coords()
    atom_coords[atmlst] = position / lib.param.BOHR
    return atom_coords
def to_berny_log(pyscf_log):
    '''Adapter to allow pyberny to use pyscf.logger
    '''
    class _PyscfBackedLogger(Logger):
        def __call__(self, msg, level=0):
            # Forward only messages at or above the configured verbosity.
            if level >= -self.verbosity:
                pyscf_log.info('%d %s', self.n, msg)

    return _PyscfBackedLogger()
def as_berny_solver(method, assert_convergence=ASSERT_CONV,
                    include_ghost=INCLUDE_GHOST):
    '''Generate a solver to compute energy and gradients for the berny
    optimization function.
    '''
    # Accept either a ready gradients scanner or any method that can
    # produce one via nuc_grad_method().
    if isinstance(method, lib.GradScanner):
        g_scanner = method
    elif getattr(method, 'nuc_grad_method', None):
        g_scanner = method.nuc_grad_method().as_scanner()
    else:
        raise NotImplementedError('Nuclear gradients of %s not available' % method)

    if not include_ghost:
        # Restrict gradient evaluation to real (non-ghost) atoms.
        g_scanner.atmlst = numpy.where(method.mol.atom_charges() != 0)[0]

    def solver(mol):
        # Returns (energy, gradients) for the given geometry.
        energy, gradients = g_scanner(mol)
        if assert_convergence and not g_scanner.converged:
            raise RuntimeError('Nuclear gradients of %s not converged' % method)
        return energy, gradients
    return solver
def as_pyscf_method(mol, scan_function):
    '''Create a wrapper for the given scan_function, to make it work as a
    pyscf gradients scanner. The wrapper can be passed to :func:`optimize`.

    Args:
        scan_function : [mol] => (e_tot, grad)

    Examples::
    >>> mol = gto.M(atom='H; H 1 1.2', basis='ccpvdz')
    >>> scan_fn = scf.RHF(mol).nuc_grad_method().as_scanner()
    >>> m = as_pyscf_method(mol, scan_fn)
    >>> berny_solver.kernel(m)
    '''
    class OmniGrad(lib.GradScanner):
        # Adapts scan_function to the GradScanner calling convention.
        def __init__(self, g):
            self.base = g.base
        def __call__(self, mol):
            self.e_tot, grad = scan_function(mol)
            return self.e_tot, grad
        @property
        def converged(self):
            # scan_function carries no convergence info; assume converged.
            return True

    class Grad(object):
        def __init__(self, base):
            self.base = base
        def as_scanner(self):
            return OmniGrad(self)

    class OmniMethod(object):
        # Minimal object exposing the attributes optimize() expects.
        def __init__(self, mol):
            self.mol = mol
            self.verbose = mol.verbose
            self.stdout = mol.stdout
        def nuc_grad_method(self):
            return Grad(self)
    return OmniMethod(mol)
def optimize(method, assert_convergence=ASSERT_CONV,
             include_ghost=INCLUDE_GHOST, callback=None, **kwargs):
    '''Optimize the geometry with the given method.
    '''
    # Work on a copy so the caller's molecule is not mutated.
    mol = method.mol.copy()
    if 'log' in kwargs:
        log = lib.logger.new_logger(method, kwargs['log'])
    elif 'verbose' in kwargs:
        log = lib.logger.new_logger(method, kwargs['verbose'])
    else:
        log = lib.logger.new_logger(method)

    # temporary interface, taken from berny.py optimize function
    berny_log = to_berny_log(log)

    solver = as_berny_solver(method, assert_convergence, include_ghost)
    geom = to_berny_geom(mol, include_ghost)
    optimizer = Berny(geom, log=berny_log, **kwargs)

    e_last = 0
    for cycle, geom in enumerate(optimizer):
        if log.verbose >= lib.logger.NOTE:
            log.note('\nGeometry optimization cycle %d', cycle+1)
            _dump_mol_geometry(mol, geom, log)

        mol.set_geom_(_geom_to_atom(mol, geom, include_ghost), unit='Bohr')
        energy, gradients = solver(mol)
        log.note('cycle %d: E = %.12g dE = %g norm(grad) = %g', cycle+1,
                 energy, energy - e_last, numpy.linalg.norm(gradients))
        e_last = energy
        # Feed energy/gradients back; the optimizer yields the next geometry.
        optimizer.send((energy, gradients))
        if callable(callback):
            callback(locals())
    return mol
kernel = optimize  # conventional pyscf entry-point alias for optimize()

# The defaults were captured in the function signatures above; drop the
# module-level temporaries.
del(INCLUDE_GHOST, ASSERT_CONV)
def _dump_mol_geometry(mol, geom, log):
    """Print the updated geometry (Angstrom) and per-atom displacements."""
    atoms = list(geom)
    new_coords = numpy.array([x[1] for x in atoms])
    old_coords = mol.atom_coords() * lib.param.BOHR
    dx = new_coords - old_coords  # shift relative to the previous geometry
    log.stdout.write('Cartesian coordinates (Angstrom)\n')
    log.stdout.write(' Atom New coordinates dX dY dZ\n')
    for i in range(mol.natm):
        log.stdout.write('%4s %10.6f %10.6f %10.6f %9.6f %9.6f %9.6f\n' %
                         (mol.atom_symbol(i),
                          new_coords[i,0], new_coords[i,1], new_coords[i,2],
                          dx[i,0], dx[i,1], dx[i,2]))
if __name__ == '__main__':
    # Smoke tests: optimize the same molecule with HF, DFT, MP2 and CCSD.
    from pyscf import gto
    from pyscf import scf, dft, cc, mp
    mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
                basis='3-21g')

    mf = scf.RHF(mol)
    mol1 = optimize(mf)
    # Differences against reference energies should be near zero.
    print(mf.kernel() - -153.219208484874)
    print(scf.RHF(mol1).kernel() - -153.222680852335)

    mf = dft.RKS(mol)
    mf.xc = 'pbe,'
    mf.conv_tol = 1e-7
    mol1 = optimize(mf)

    mymp2 = mp.MP2(scf.RHF(mol))
    mol1 = optimize(mymp2)

    mycc = cc.CCSD(scf.RHF(mol))
    mol1 = optimize(mycc)
| 1.960938 | 2 |
image_aug.py | EnochMHforever/CCF-BDCI2019-Multi-person-Face-Recognition-Competition-Baseline-master | 1 | 12772329 | <filename>image_aug.py<gh_stars>1-10
import os
import pickle
import random
import cv2 as cv
import numpy as np
from PIL import Image
from imgaug import augmenters as iaa
from torchvision import transforms
from config import IMG_DIR
from config import pickle_file
# Data augmentation and normalization for training
# Just normalization for validation
# NOTE(review): the 'train' pipeline is currently empty -- images pass
# through Compose unchanged; confirm transforms were removed on purpose.
data_transforms = {
    'train': transforms.Compose([
    ]),
}
transformer = data_transforms['train']
# Define our sequence of augmentation steps that will be applied to every image.
seq = iaa.Sequential(
    [
        iaa.Fliplr(0.5),  # horizontally flip 50% of all images
        iaa.Sometimes(0.2,  # with 20% probability, convert to grayscale
                      iaa.Grayscale(alpha=1.0)
                      )
    ]
)
def image_aug(src):
    """Run the module-level `seq` augmentation pipeline on one image.

    The image is wrapped in a batch of size 1 because imgaug's
    `augment_images` operates on batches; the single result is returned.
    """
    batch = np.expand_dims(src, axis=0)
    return seq.augment_images(batch)[0]
if __name__ == "__main__":
with open(pickle_file, 'rb') as file:
data = pickle.load(file)
samples = data
sample = random.sample(samples, 1)[0]
filename = sample['img']
filename = os.path.join(IMG_DIR, filename)
print(filename)
img = cv.imread(filename) # BGR
cv.imwrite('origin.png', img)
img = img[..., ::-1] # RGB
img = Image.fromarray(img, 'RGB') # RGB
img = transformer(img)
img = image_aug(img) # RGB
img = img[..., ::-1] # BGR
cv.imwrite('out.png', img)
| 2.71875 | 3 |
functions/charge.py | pangq2/charge | 0 | 12772330 | <filename>functions/charge.py
from sys import *
import os
def read_ACF():
    """Read the Bader-analysis ACF.dat file.

    NOTE(review): stub -- no body is implemented yet, so calling this
    currently returns None. Intended contract from the original notes:

    Parameters
    ----------
    (intended) the index number of the atoms to select -- the signature
    takes no arguments yet; confirm before implementing.

    Returns
    -------
    (intended) the charge of the selected atoms.
    """
| 3.015625 | 3 |
object_detection/faster_rcnn_tracker.py | UDC-GAC/HybridTracker | 0 | 12772331 | #!/home/roberto/anaconda3/envs/tensorflow/bin/python
# Copyright 2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import sys
import multiprocessing
import time
import pandas as pd
import tensorflow as tf
import cv2
from utils import detector_utils as detector_utils
from utils import label_map_util
class FasterRCNN(multiprocessing.Process):
    """Worker process running Faster R-CNN detection on a video.

    Loads a frozen TensorFlow detection graph, runs it on every
    `jump`-th frame of `video_name`, keeps the highest-scoring box of
    class `player`, and sends (frame_index, box, score) tuples down both
    `input_pipe` and `kcf_pipe`.
    """
    def __init__(self, input_pipe, kcf_pipe, gpu_id, num_classes, jump, video_name, player, model_name):
        multiprocessing.Process.__init__(self)
        self.input_pipe = input_pipe      # pipe toward the consumer process
        self.kcf_pipe = kcf_pipe          # pipe toward the KCF tracker process
        self.gpu_id = gpu_id              # CUDA device index to pin this worker to
        self.num_classes = num_classes    # number of classes in the label map
        self.jump = jump                  # detect only every `jump`-th frame
        self.video_name = video_name      # video path relative to the cwd
        self.player = player              # class id of the target to follow
        self.model_name = model_name      # dir containing frozen_inference_graph.pb
    def run(self):
        # Process entry point: resolve paths, load the graph, then loop
        # over sampled frames until the video ends.
        cwd_path = os.getcwd()
        path_to_ckpt = os.path.join(cwd_path, self.model_name,'frozen_inference_graph.pb')
        path_to_labels = os.path.join(cwd_path,'training','labelmap.pbtxt')
        path_to_video = os.path.join(cwd_path,self.video_name)
        # Restrict TensorFlow to the assigned GPU.
        os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(self.gpu_id)
        label_map = label_map_util.load_labelmap(path_to_labels)
        categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=self.num_classes, use_display_name=True)
        category_index = label_map_util.create_category_index(categories)
        # Deserialize the frozen inference graph into a fresh tf.Graph.
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(path_to_ckpt, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        # Limit GPU memory so multiple workers can share one device.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.4
        sess = tf.Session(config=config, graph=detection_graph)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        print(detection_classes)  # NOTE(review): looks like a debug leftover -- consider removing
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        video = cv2.VideoCapture(path_to_video)
        num_iter = 0
        while(video.isOpened()):
            _, frame = video.read()
            if not (num_iter % self.jump):
                if frame is None:
                    break
                # Model expects a batch dimension: (1, H, W, 3).
                frame_expanded = np.expand_dims(frame, axis=0)
                (boxes, scores, classes, num) = sess.run(
                    [detection_boxes, detection_scores, detection_classes, num_detections],
                    feed_dict={image_tensor: frame_expanded})
                (box, score) = self.best_score_box(boxes, scores, classes)
                # Send info to both threads
                self.input_pipe.send((num_iter, box, score))
                self.kcf_pipe.send((num_iter, box, score))
            num_iter+=1
        return
    def best_score_box(self, boxes, scores, classes):
        # Return (box, score) of the highest-scoring detection whose
        # class equals self.player.
        pos_max = np.where(scores==np.amax(scores[np.where(classes==self.player)]))
        return boxes[pos_max], scores[pos_max]
freezing/model/__init__.py | freezingsaddles/freezing-model | 2 | 12772332 | <filename>freezing/model/__init__.py
import warnings
import sqlalchemy as sa
from alembic import command
from alembic.script import ScriptDirectory
from alembic.util import CommandError
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.sql.expression import Executable, ClauseElement
from freezing.model.exc import DatabaseVersionError
from freezing.model import meta, migrationsutil
from freezing.model.config import config as model_config
from freezing.model.autolog import log
from freezing.model.orm import (
Team,
Athlete,
RideError,
Ride,
RideGeo,
RideTrack,
RideEffort,
RidePhoto,
RideWeather,
)
# Make the list of managed tables explicit here. These tables will be automatically created by sqlalchemy
# in init_model if they do not exist *and* the database appears to be empty.
# (create_all/drop_all in init_model operate only on this explicit list.)
MANAGED_TABLES = [
    Team.__table__,
    Athlete.__table__,
    RideError.__table__,
    Ride.__table__,
    RideGeo.__table__,
    RideTrack.__table__,
    RideEffort.__table__,
    RidePhoto.__table__,
    RideWeather.__table__,
]
def init_model(sqlalchemy_url: str, drop: bool = False, check_version: bool = True):
    """
    Initializes the tables and classes of the model using configured engine.

    On an empty database the managed tables and helper views are created and
    the schema is stamped at the latest alembic revision; otherwise the
    installed revision is checked and (optionally) upgraded.

    :param sqlalchemy_url: The database URI.
    :param drop: Whether to drop the tables first.
    :param check_version: Whether to ensure that the database version is up-to-date.
    """
    engine = create_engine(
        sqlalchemy_url, pool_recycle=3600
    )  # pool_recycle is for mysql
    sm = sessionmaker(autoflush=True, autocommit=False, bind=engine)
    meta.engine = engine
    meta.scoped_session = scoped_session(sm)
    alembic_cfg = migrationsutil.create_config(sqlalchemy_url=sqlalchemy_url)
    alembic_script = ScriptDirectory.from_config(alembic_cfg)
    # Check to see whether the database has already been created or not.
    # Based on this, we know whether we need to upgrade the database or mark the database
    # as the latest version.
    inspector = Inspector.from_engine(engine)
    db_objects_created = len(inspector.get_table_names()) > 1
    fresh_db = False
    if not db_objects_created:
        log.info("Database appears uninitialized, creating database tables")
        meta.metadata.create_all(engine, tables=MANAGED_TABLES, checkfirst=True)
        create_supplemental_db_objects(engine)
        fresh_db = True
    elif drop:
        log.info("Dropping database tables and re-creating.")
        drop_supplemental_db_objects(engine)
        meta.metadata.drop_all(engine, tables=MANAGED_TABLES, checkfirst=True)
        meta.metadata.create_all(engine, tables=MANAGED_TABLES)
        fresh_db = True
    if fresh_db:
        # Brand-new schema: mark it as already at the latest revision.
        command.stamp(alembic_cfg, "head")
    else:
        if check_version:
            latest = alembic_script.get_current_head()
            installed = migrationsutil.get_database_version()
            try:
                alembic_script.get_revisions(installed)
            except CommandError:
                warnings.warn(
                    "Unknown db revision {} installed, ignoring db upgrade.".format(
                        installed
                    )
                )
            else:
                if latest != installed:
                    # Fixed: a stray `UserWarning` positional argument (left over
                    # from a warnings.warn call) was passed to log.info; with no
                    # %-placeholder in the message, logging's msg % args raised a
                    # TypeError at format time and the message was never emitted.
                    log.info(
                        "Installed database ({0}) does not match latest available ({1}). (UPGRADING)".format(
                            installed, latest
                        )
                    )
                    command.upgrade(alembic_cfg, "head")
                else:
                    log.info("Skipping database upgrade.")
class CreateView(Executable, ClauseElement):
    """Executable DDL element: CREATE VIEW <name> AS <select>.

    Compiled for MySQL by visit_create_view below.
    """
    def __init__(self, name, select):
        self.name = name      # view name, used verbatim in the emitted DDL
        self.select = select  # selectable whose compiled SQL becomes the view body
@compiles(CreateView, "mysql")
def visit_create_view(element, compiler, **kw):
    """Compile a CreateView element into MySQL DDL.

    NOTE(review): stock MySQL does not accept IF NOT EXISTS on CREATE VIEW
    (MariaDB does). Confirm the target server, or switch to
    CREATE OR REPLACE VIEW if plain MySQL must be supported.
    """
    return "CREATE VIEW IF NOT EXISTS %s AS %s" % (
        element.name,
        compiler.process(element.select, literal_binds=True),
    )
def drop_supplemental_db_objects(engine: Engine):
    """Drop every helper view created by create_supplemental_db_objects().

    Each DROP uses IF EXISTS, so this is safe to run on a database where
    some (or all) of the views were never created.
    """
    for view_name in ("daily_scores", "ride_daylight",
                      "_build_ride_daylight", "lbd_athletes"):
        engine.execute("drop view if exists " + view_name)
def create_supplemental_db_objects(engine: Engine):
    """Create the helper SQL views used for scoring and reporting.

    Counterpart of drop_supplemental_db_objects(); invoked from init_model()
    when the schema is first created.
    """
    # Create VIEWS that may be helpful.
    # daily_scores: per-athlete, per-day distance and points (piecewise
    # scoring formula), with ride dates normalized to the configured timezone.
    _v_daily_scores_create = sa.DDL(
        """
        create view daily_scores as
        select
        A.team_id,
        R.athlete_id,
        sum(R.distance) as distance,
        case
        when sum(R.distance) < 1 then 0
        when sum(R.distance) < 10 then
        10 + 0.5 * (21 * sum(R.distance) -
        (sum(R.distance) * sum(R.distance)))
        else 65 + sum(R.distance) - 10
        end as points,
        date(CONVERT_TZ(R.start_date, R.timezone,'{0}')) as ride_date
        from
        rides R join athletes A on A.id = R.athlete_id
        group by
        A.id,
        A.team_id,
        ride_date
        ;
        """.format(
            model_config.TIMEZONE
        )
    )
    engine.execute(_v_daily_scores_create)
    # _build_ride_daylight: intermediate view joining rides to weather for
    # start/end/sunrise/sunset times (consumed by ride_daylight below).
    _v_buid_ride_daylight = sa.DDL(
        """
        create view _build_ride_daylight as
        select R.id as ride_id, date(R.start_date) as ride_date,
        sec_to_time(R.elapsed_time) as elapsed,
        sec_to_time(R.moving_time) as moving,
        TIME(R.start_date) as start_time,
        TIME(date_add(R.start_date, interval R.elapsed_time second)) as end_time,
        W.sunrise, W.sunset
        from rides R
        join ride_weather W on W.ride_id = R.id
        ;
        """
    )
    engine.execute(_v_buid_ride_daylight)
    # ride_daylight: moving time spent before sunrise / after sunset per ride.
    _v_ride_daylight = sa.DDL(
        """
        create view ride_daylight as
        select ride_id, ride_date, start_time, end_time, sunrise, sunset, moving,
        IF(start_time < sunrise, LEAST(TIMEDIFF(sunrise, start_time), moving), sec_to_time(0)) as before_sunrise,
        IF(end_time > sunset, LEAST(TIMEDIFF(end_time, sunset), moving), sec_to_time(0)) as after_sunset
        from _build_ride_daylight
        ;
        """
    )
    engine.execute(_v_ride_daylight)
    # lbd_athletes: athletes eligible for the leaderboard (excludes teams
    # flagged with leaderboard_exclude).
    _v_leaderboard_athletes = sa.DDL(
        """
        create view lbd_athletes as select a.id, a.name, a.display_name, a.team_id from athletes a
        join teams T on T.id=a.team_id where not T.leaderboard_exclude
        ;
        """
    )
    engine.execute(_v_leaderboard_athletes)
    # weekly_stats: per-athlete weekly ride days, distance, points, and the
    # team-distance contribution capped at 100 miles.
    _v_100_mile_team_score = sa.DDL(
        """
        create or replace VIEW `weekly_stats` AS
        select
        `daily_scores`.`athlete_id` AS `athlete_id`,
        `teams`.`id` as `team_id`,
        `teams`.`name` as `team_name`,
        week(`daily_scores`.`ride_date`,3) AS `week_num`,
        (
        select sum(
        case
        when `daily_scores`.`distance` >= 1
        then 1
        else 0
        end
        )
        ) as `days`,
        sum(`daily_scores`.`distance`) AS `distance`,
        sum(`daily_scores`.`points`) AS `points`,
        (select case
        when sum(`daily_scores`.`distance`) < 100
        then sum(`daily_scores`.`distance`)
        else (100)
        end
        ) as team_distance
        from
        `daily_scores` join `teams`
        on `teams`.`id` = `daily_scores`.`team_id`
        where not `teams`.`leaderboard_exclude`
        group by
        `daily_scores`.`team_id`,
        `daily_scores`.`athlete_id`,
        week(`daily_scores`.`ride_date`,3)
        ;
        """
    )
    engine.execute(_v_100_mile_team_score)
    # variance_by_day: per-athlete population variance of daily distance,
    # broken out by day of week.
    _v_daily_variance = sa.DDL(
        """
        create or replace view variance_by_day as
        select
        ds.athlete_id,
        sum(case when ds.distance >= 1 then 1 else 0 end) ride_days,
        sum(distance) total_miles,
        var_pop(case when dayofweek(ds.ride_date)=1
        then ds.distance end) sun_var_pop,
        var_pop(case when dayofweek(ds.ride_date)=2
        then ds.distance end) mon_var_pop,
        var_pop(case when dayofweek(ds.ride_date)=3
        then ds.distance end) tue_var_pop,
        var_pop(case when dayofweek(ds.ride_date)=4
        then ds.distance end) wed_var_pop,
        var_pop(case when dayofweek(ds.ride_date)=5
        then ds.distance end) thu_var_pop,
        var_pop(case when dayofweek(ds.ride_date)=6
        then ds.distance end) fri_var_pop,
        var_pop(case when dayofweek(ds.ride_date)=7
        then ds.distance end) sat_var_pop
        from
        daily_scores ds
        group by ds.athlete_id;
        """
    )
    engine.execute(_v_daily_variance)
| 2.53125 | 3 |
gym/envs/debugging/__init__.py | enterstudio/gym | 49 | 12772333 | <reponame>enterstudio/gym<gh_stars>10-100
from gym.envs.debugging.one_round_deterministic_reward import OneRoundDeterministicRewardEnv
from gym.envs.debugging.two_round_deterministic_reward import TwoRoundDeterministicRewardEnv
from gym.envs.debugging.one_round_nondeterministic_reward import OneRoundNondeterministicRewardEnv
from gym.envs.debugging.two_round_nondeterministic_reward import TwoRoundNondeterministicRewardEnv
| 1.15625 | 1 |
Ch9/09-FileHandling-02.py | akuks/Python3.6---Novice-to-Ninja | 0 | 12772334 | # Define the fileName as a variable
fileToWrite = 'outputFile.txt'
fileHandle = open(fileToWrite, 'w')
i = 0
while i < 10:
fileHandle.write("This is line Number " + str(i) + "\n")
i += 1
fileHandle.close()
| 3.03125 | 3 |
library/telegram/session_backend/orm.py | RobbiNespu/hyperboria | 54 | 12772335 | <filename>library/telegram/session_backend/orm.py
import datetime
from typing import (
TYPE_CHECKING,
Any,
Optional,
Tuple,
Union,
)
from sqlalchemy import orm
from telethon import utils
from telethon.crypto import AuthKey
from telethon.sessions.memory import (
MemorySession,
_SentFileType,
)
from telethon.tl.types import (
InputDocument,
InputPhoto,
PeerChannel,
PeerChat,
PeerUser,
updates,
)
if TYPE_CHECKING:
from .sqlalchemy import AlchemySessionContainer
class AlchemySession(MemorySession):
    """Telethon session persisted through SQLAlchemy tables.

    Every query is scoped to ``session_id`` so one database can hold many
    independent Telethon sessions; commits are delegated to the owning
    AlchemySessionContainer via save().
    """
    def __init__(self, container: 'AlchemySessionContainer', session_id: str) -> None:
        super().__init__()
        self.container = container
        self.db = container.db
        self.engine = container.db_engine
        # Table/model classes are shared with the container.
        self.Version, self.Session, self.Entity, self.SentFile, self.UpdateState = (
            container.Version, container.Session, container.Entity,
            container.SentFile, container.UpdateState)
        self.session_id = session_id
        self._load_session()
    def _load_session(self) -> None:
        """Populate DC address and auth key from the stored Session row, if any."""
        sessions = self._db_query(self.Session).all()
        session = sessions[0] if sessions else None
        if session:
            self._dc_id = session.dc_id
            self._server_address = session.server_address
            self._port = session.port
            self._auth_key = AuthKey(data=session.auth_key)
    def clone(self, to_instance=None) -> MemorySession:
        # NOTE(review): `to_instance` is ignored; a fresh MemorySession is
        # always used as the clone target -- confirm this is intentional.
        return super().clone(MemorySession())
    def _get_auth_key(self) -> Optional[AuthKey]:
        """Return the auth key stored for this session, or None."""
        sessions = self._db_query(self.Session).all()
        session = sessions[0] if sessions else None
        if session and session.auth_key:
            return AuthKey(data=session.auth_key)
        return None
    def set_dc(self, dc_id: str, server_address: str, port: int) -> None:
        """Switch data center, persist it, and reload the matching auth key."""
        super().set_dc(dc_id, server_address, port)
        self._update_session_table()
        self._auth_key = self._get_auth_key()
    def get_update_state(self, entity_id: int) -> Optional[updates.State]:
        """Return the stored updates.State for an entity, or None."""
        row = self.UpdateState.query.get((self.session_id, entity_id))
        if row:
            # Dates are persisted as epoch seconds.
            date = datetime.datetime.utcfromtimestamp(row.date)
            return updates.State(row.pts, row.qts, date, row.seq, row.unread_count)
        return None
    def set_update_state(self, entity_id: int, row: Any) -> None:
        """Upsert the per-entity update state (no-op when row is falsy)."""
        if row:
            self.db.merge(self.UpdateState(session_id=self.session_id, entity_id=entity_id,
                                           pts=row.pts, qts=row.qts, date=row.date.timestamp(),
                                           seq=row.seq,
                                           unread_count=row.unread_count))
            self.save()
    @MemorySession.auth_key.setter
    def auth_key(self, value: AuthKey) -> None:
        # Setting the key also persists the whole session row.
        self._auth_key = value
        self._update_session_table()
    def _update_session_table(self) -> None:
        """Replace this session's row with the current in-memory DC/auth state."""
        self.Session.query.filter(self.Session.session_id == self.session_id).delete()
        self.db.add(self.Session(session_id=self.session_id, dc_id=self._dc_id,
                                 server_address=self._server_address, port=self._port,
                                 auth_key=(self._auth_key.key if self._auth_key else b'')))
    def _db_query(self, dbclass: Any, *args: Any) -> orm.Query:
        """Query `dbclass` filtered to this session_id plus any extra criteria."""
        return dbclass.query.filter(
            dbclass.session_id == self.session_id, *args
        )
    def save(self) -> None:
        """Commit pending changes through the owning container."""
        self.container.save()
    def close(self) -> None:
        # Nothing to do here, connection is managed by AlchemySessionContainer.
        pass
    def delete(self) -> None:
        """Remove every row belonging to this session from all four tables."""
        self._db_query(self.Session).delete()
        self._db_query(self.Entity).delete()
        self._db_query(self.SentFile).delete()
        self._db_query(self.UpdateState).delete()
    def _entity_values_to_row(self, id: int, hash: int, username: str, phone: str, name: str
                              ) -> Any:
        """Build an Entity ORM row scoped to this session."""
        return self.Entity(session_id=self.session_id, id=id, hash=hash,
                           username=username, phone=phone, name=name)
    def process_entities(self, tlo: Any) -> None:
        """Persist (merge) all entities found in a Telegram TL object."""
        rows = self._entities_to_rows(tlo)
        if not rows:
            return
        for row in rows:
            self.db.merge(row)
        self.save()
    def get_entity_rows_by_phone(self, key: str) -> Optional[Tuple[int, int]]:
        """Return (id, hash) of the entity with this phone, or None."""
        row = self._db_query(self.Entity,
                             self.Entity.phone == key).one_or_none()
        return (row.id, row.hash) if row else None
    def get_entity_rows_by_username(self, key: str) -> Optional[Tuple[int, int]]:
        """Return (id, hash) of the entity with this username, or None."""
        row = self._db_query(self.Entity,
                             self.Entity.username == key).one_or_none()
        return (row.id, row.hash) if row else None
    def get_entity_rows_by_name(self, key: str) -> Optional[Tuple[int, int]]:
        """Return (id, hash) of the entity with this display name, or None."""
        row = self._db_query(self.Entity,
                             self.Entity.name == key).one_or_none()
        return (row.id, row.hash) if row else None
    def get_entity_rows_by_id(self, key: int, exact: bool = True) -> Optional[Tuple[int, int]]:
        """Look up an entity by id; when not exact, try the key as user,
        chat and channel peer ids."""
        if exact:
            query = self._db_query(self.Entity, self.Entity.id == key)
        else:
            ids = (
                utils.get_peer_id(PeerUser(key)),
                utils.get_peer_id(PeerChat(key)),
                utils.get_peer_id(PeerChannel(key))
            )
            query = self._db_query(self.Entity, self.Entity.id.in_(ids))
        row = query.one_or_none()
        return (row.id, row.hash) if row else None
    def get_file(self, md5_digest: str, file_size: int, cls: Any) -> Optional[Tuple[int, int]]:
        """Return (id, access_hash) of a previously-cached upload, or None."""
        row = self._db_query(self.SentFile,
                             self.SentFile.md5_digest == md5_digest,
                             self.SentFile.file_size == file_size,
                             self.SentFile.type == _SentFileType.from_type(
                                 cls).value).one_or_none()
        return (row.id, row.hash) if row else None
    def cache_file(self, md5_digest: str, file_size: int,
                   instance: Union[InputDocument, InputPhoto]) -> None:
        """Remember an uploaded file so Telethon can reuse it later."""
        if not isinstance(instance, (InputDocument, InputPhoto)):
            raise TypeError("Cannot cache {} instance".format(type(instance)))
        self.db.merge(
            self.SentFile(session_id=self.session_id, md5_digest=md5_digest, file_size=file_size,
                          type=_SentFileType.from_type(type(instance)).value,
                          id=instance.id, hash=instance.access_hash))
        self.save()
| 2.125 | 2 |
cluster_viz/make_tree.py | sunlightlabs/fcc-net-neutrality-comments | 18 | 12772336 | <gh_stars>10-100
from collections import defaultdict
import csv, json, operator
# NOTE: this script targets Python 2 (dict.iteritems below, print statement
# at the bottom of the file).
SAMPLE_THRESHOLD = 150000
SAMPLE_SIZE = 20000  # presumably consumed by the downstream visualization -- not used in this script
NUM_LEVELS = 11
ADD_CID = False  # when True, append a synthetic leaf id "<leaf_prefix>-<cluster>" to each path
leaf_prefix = str(NUM_LEVELS - 1)
# map out the parent-child relationships
children = defaultdict(set)
levels = defaultdict(set)
for row in csv.DictReader(open("cluster_tree_table.csv")):
    if row['level_0']:
        path = [row['level_%s' % i] for i in range(NUM_LEVELS - 1 if ADD_CID else NUM_LEVELS)] + (['%s-%s' % (leaf_prefix, row['cluster'])] if ADD_CID else [])
        for i in range(NUM_LEVELS - 1):
            children[path[i]].add(path[i+1])
            levels[i].add(path[i])
# add all the items to the leaves
docs = defaultdict(list)
keywords = defaultdict(list)
tree_data = json.load(open("clustered_docs.json"))
for node_id, node_data in tree_data.iteritems():
    # Docs and keywords are (value, weight) pairs, sorted by weight descending.
    docs['%s-%s' % (leaf_prefix, node_id) if ADD_CID else node_id] = sorted(node_data['doc_id'], key=lambda d: d[1], reverse=True)
    keywords['%s-%s' % (leaf_prefix, node_id) if ADD_CID else node_id] = sorted(node_data.get('keywords', [(node_id, 1)]), key=lambda k: k[1], reverse=True)
# roll it up
for level in range(NUM_LEVELS - 2, -1, -1):
    for node_id in levels[level]:
        doc_lists = [docs[child_id] for child_id in children[node_id]]
        docs[node_id] = sorted(reduce(operator.add, doc_lists), key=lambda d: d[1], reverse=True)
        keyword_lists = [keywords[child_id] for child_id in children[node_id]]
        keywords[node_id] = sorted(reduce(operator.add, keyword_lists), key=lambda k: k[1], reverse=True)
def get_subtree(node_id):
    """Recursively build the JSON-serializable subtree rooted at node_id.

    Returns None for empty nodes so they are pruned from the output;
    'children' is only present when at least one child survives pruning.
    """
    node = {
        'id': node_id,
        'size': len(docs[node_id]),
        'keywords': [k[0] for k in keywords[node_id][:5]],
        'sample': len(docs[node_id]) > SAMPLE_THRESHOLD,
    }
    surviving_children = [subtree
                          for subtree in (get_subtree(c) for c in children[node_id])
                          if subtree]
    if surviving_children:
        node['children'] = surviving_children
    return node if node['size'] else None
# A single root ('0-1') is flattened to its children; otherwise emit a forest.
tree = get_subtree('0-1')['children'] if len(levels[0]) == 1 else [get_subtree(node_id) for node_id in levels[0]]
print json.dumps(tree, indent=4)
# Write the tree and the doc lists for the visualization front-end.
outf1 = open('tree.json', 'wb')
json.dump(tree, outf1, indent=4)
outf1.close()
outf2 = open('docs.json', 'wb')
json.dump({'docs': docs, 'sample_threshold': SAMPLE_THRESHOLD}, outf2, indent=4)
outf2.close()
demos/map_depth_to_color_and_3d_demo.py | thecreativecontrollab/KinZ-Python | 13 | 12772337 | <gh_stars>10-100
"""
Mapping from depth to color demo
How to use:
- Clic on the depth image to show the corresponding point on color image
and the 3D coordinates
"""
import numpy as np
import cv2
import kinz
depth_points = []
def mouse_event(event, x, y, flags, param):
    """OpenCV mouse callback: remember each left-click position.

    Appends the clicked (x, y) to the module-level `depth_points` list;
    all other mouse events are ignored.
    """
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    depth_points.append((x, y))
def main():
    """Interactive demo loop: click on the depth window, see the mapped
    point on the color window and its 3D coordinates on stdout.
    Press 'c' to clear clicked points, Esc to quit."""
    global depth_points
    # Create Kinect object and initialize
    kin = kinz.Kinect(resolution=720, wfov=True, binned=True)
    depth_window_name = 'Click on the Depth image'
    color_window_name = 'Mapped coordinates in Color'
    cv2.namedWindow(color_window_name, cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow(depth_window_name, cv2.WINDOW_AUTOSIZE)
    cv2.setMouseCallback(depth_window_name, mouse_event)
    points_3d = []
    prev_points_3d = []
    while True:
        if kin.get_frames(get_color=True, get_depth=True, get_ir=False,
                          align_depth=True):
            color_data = kin.get_color_data()
            depth_data = kin.get_depth_data()
            # Copy the buffers so the SDK can recycle its own memory.
            depth_image = np.array(depth_data.buffer, copy = True)
            color_image = np.array(color_data.buffer, copy = True)
            color_image = cv2.cvtColor(color_image, cv2.COLOR_BGRA2BGR)
            # Project depth image points to color image points
            # depth_points is a list of value pairs [[xd1,yd1], [xd2,yd2], ...]
            # color_points have the same format as depth_points
            # map_coords_depth_to_color return [-1, -1] if there is no depth information
            # at the desire location, or the transformation failed.
            color_points = kin.map_coords_depth_to_color(depth_points)
            # Backproject depth image points to 3D depth camera space
            # points_3d is a list of triplets of X, Y, Z points in
            # the depth camera reference
            # if depth_coords is [[x1,y1], [x2,y2], ...]
            # points_3d is [[X1, Y1, Z1], [X2, Y2, Z2], ...]
            # points_3d contains [0, 0, 0] if for desired depth coordinates
            # there is not depth available.
            # E.g. if at [x2, y2] the depth is 0, points_3d = [[X1, Y1, Z1], [0, 0, 0]]
            # we select 0 because depth = 0 means no depth data.
            points_3d = kin.map_coords_depth_to_3D(depth_points)
            if points_3d != prev_points_3d:
                print("3D point:", points_3d)
            # Draw mapped points in color image
            for p in color_points:
                cv2.circle(color_image,(p[0], p[1]), 8, (0,0,255), -1)
            # Apply colormap on depth image for visualization
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
            # Draw depth points
            for p in depth_points:
                cv2.circle(depth_colormap,(p[0], p[1]), 5, (0,0,255), -1)
            # Resize color image for visualization purposes
            color_small = cv2.resize(color_image, None, fx=0.5, fy=0.5)
            cv2.imshow(depth_window_name, depth_colormap)
            cv2.imshow(color_window_name, color_small)
            prev_points_3d = points_3d
        k = cv2.waitKey(1) & 0xFF
        if k == ord('c'):
            # 'c' clears all clicked points
            depth_points = []
        if k ==27:
            # Esc exits the loop
            break
    kin.close()  # close Kinect
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
NOTES/c1-notes.py | frawst/cs207-chaykowj | 0 | 12772338 | <filename>NOTES/c1-notes.py
NOTES = """
(c) 2017 <NAME>
PROVIDED UNDER MIT LICENSE
SCHOOLOGY.COM
ACCESS CODE: GNH9N-KZ2C2
RIC 115 <-- OFFICE HOURS:
M 4-6 PM
W 12-6 PM
##########################################
# RICE LAB
#
# TEXT BOOK = ARDX ARDUINO EXPERIMENTER'S KIT - OOMLOUT
#
# CLASS PROJECT --MUST-- BUILD OFF OF WORK ALREADY DONE BY OTHER PEOPLE
#
# CTECH MAKER SPACE UNDER RIDDELL !!!!
##########################################
'FIRST FINAL EXAM QUESTION':
- SEE THE TOPICS MAP PROVIDED "TOPIC_MAP_EXAMQ_1_2.JPG"
HACKER:
A MORE LEGITIMATE DEFINITION: "SOMEONE WHO MAKES A DEVICE DO SOMETHING OTHER
THAN IT WAS ORIGINALLY INTENDED TO DO."
SOCIETY DEFINITION (DO NOT USE): "SOMEONE WHO BREAKS INTO COMPUTERS FOR MALICIOUS
PURPOSES."
RICHARD STALLMAN'S DEFINITION: "SOMEONE WHO ENJOYS PLAYFUL CLEVERNESS"
"MAKER MOVEMENT"
'MAKING' BECOMES MAINSTREAM THANKS TO NEW TECHNOLOGIES ENTERING PUB. DOMAIN:
1. FUSED DEPOSITION MOULDING
2. ARDUINO
THE ARDUINO
- ORIGINALLY CALLED 'WIRING'
-
"""
if __name__ == '__main__':
    # Run directly: dump the notes.
    print(NOTES)
    print('Done.')
else:
    # Imported as a module: report where to find the notes.
    # (Merged the original pair of mutually-exclusive `if` statements into a
    # single if/else; behavior is identical since __name__ cannot change.)
    # NOTE(review): these messages still reference "first-day-notes.py";
    # confirm the intended module name matches this file before release.
    print('first-day-notes.py successfully imported.')
    print('first-day-notes.NOTES the class notes.')
examples/eg02r__exploring_conwhat_atlases.py | GriffithsLab/conwhat | 1 | 12772339 | <reponame>GriffithsLab/conwhat<gh_stars>1-10
"""
Exploring the ConWhAt Atlases
===============================
"""
# There are four different atlas types in ConWhAt, corresponding to the 2 ontology types (Tract-based / Connectivity-based) and 2 representation types (Volumetric / Streamlinetric).
#
# (More on this schema [here](http://conwhat.readthedocs.io/en/latest/about_conwhat/ontology_and_representation.html))
# sphinx_gallery_thumbnail_number = 3
###################################################################################################
# Setup
# ---------------------
# ConWhAt stuff
from conwhat import VolConnAtlas,StreamConnAtlas,VolTractAtlas,StreamTractAtlas
from conwhat.viz.volume import plot_vol_scatter,plot_vol_and_rois_nilearn
# Neuroimaging stuff
import nibabel as nib
from nilearn.plotting import plot_stat_map,plot_surf_roi
from nilearn.datasets import fetch_surf_fsaverage
from nilearn.surface import load_surf_mesh
# Viz stuff
from matplotlib import pyplot as plt
import seaborn as sns
# Generic stuff
import glob, numpy as np, pandas as pd, networkx as nx
# suppress warnings for docs build
import warnings
###################################################################################################
# Load and explore
# ---------------------
# We'll start with the scale 33 lausanne 2008 volumetric connectivity-based atlas.
# Define the atlas name and top-level directory location
# NOTE: this is a sphinx-gallery example -- bare expressions such as
# `vca.atlas_name` below are display cells in the rendered docs, not dead code.
atlas_dir = 'conwhat_atlases'
atlas_name = 'CWL2k8Sc33Vol3d100s_v01'
# Initialize the atlas class
vca = VolConnAtlas(atlas_dir=atlas_dir + '/' + atlas_name,
                   atlas_name=atlas_name)
# This atlas object contains various pieces of general information
vca.atlas_name
vca.atlas_dir
# Information about each atlas entry is contained in the `vfms` attribute, which returns a pandas dataframe
vca.vfms.head()
# Additionally, connectivity-based atlases also contain a `networkx` graph object `vca.Gnx`,
# which contains information about each connectome edge
vca.Gnx.edges[(10,35)]
# Individual atlas entry nifti images can be grabbed like so
img = vca.get_vol_from_vfm(1637)
fig = plt.figure(figsize=(12,3))
plot_stat_map(img, figure=fig,colorbar=True,vmax=0.5)
# Or alternatively as a 3D scatter plot, along with the x,y,z bounding box
vca.bbox.loc[1637]
ax = plot_vol_scatter(vca.get_vol_from_vfm(1),c='r',bg_img='nilearn_destrieux',
                      bg_params={'s': 0.1, 'c':'k'},figsize=(20, 15))
ax.set_xlim([0,200]); ax.set_ylim([0,200]); ax.set_zlim([0,200]);
# We can also view the weights matrix like so:
# (log1p compresses the heavy-tailed connection weights for display)
fig, ax = plt.subplots(figsize=(16,12))
sns.heatmap(np.log1p(vca.weights),xticklabels=vca.region_labels,
            yticklabels=vca.region_labels,ax=ax);
plt.tight_layout()
# The `vca` object also contains x,y,z bounding boxes for each structure
# We also stored additional useful information about the ROIs in the associated parcellation, including cortical/subcortical labels
vca.cortex
# ...hemisphere labels
vca.hemispheres
# ...and region mappings to freesurfer's fsaverage brain
vca.region_mapping_fsav_lh
vca.region_mapping_fsav_rh
# which can be used for, e.g. plotting ROI data on a surface
fsav = fetch_surf_fsaverage('fsaverage7');
fsav_lhp = load_surf_mesh(fsav['pial_left']);
vtx,tri = fsav_lhp.coordinates,fsav_lhp.faces
disp = plot_surf_roi([vtx,tri],vca.region_mapping_fsav_lh);
kalmus/tkinter_windows/ReshapeBarcodeWindow.py | yc015/KALMUS | 7 | 12772340 | """ ReshapeBarcodeWindow Class """
import tkinter
import copy
import cv2
from kalmus.tkinter_windows.gui_utils import update_graph, resource_path
class ReshapeBarcodeWindow():
"""
ReshapeBarcodeWindow Class
GUI window for user to reshape the selected barcode into the desirable shape
"""
    def __init__(self, barcode_1, barcode_2, axes, canvas):
        """
        Initialize the Reshape/Resize configuration window.

        :param barcode_1: The barcode 1
        :param barcode_2: The barcode 2
        :param axes: The display axes in the MainWindow of the kalmus
        :param canvas: The display canvas in the MainWindow of the kalmus
        """
        self.barcode_1 = barcode_1
        self.barcode_2 = barcode_2
        self.axes = axes
        self.canvas = canvas
        # Initialize the window
        self.window = tkinter.Tk()
        self.window.wm_title("Reshape/Resize Barcode Config")
        self.window.iconbitmap(resource_path("kalmus_icon.ico"))
        # Reshape/Resize option
        self.config_option = tkinter.StringVar(self.window)
        self.config_option.set("Reshape") # initialize
        # Prompt for the resize parameters specification
        params_label = tkinter.Label(self.window, text="Config Params: ")
        params_label.grid(row=0, column=0, columnspan=1, sticky=tkinter.W)
        # Label (text) prompt and entry for user to specify the resize parameters
        column_length_label = tkinter.Label(self.window, text="Frames per Column: ")
        column_length_label.grid(row=1, column=0, sticky=tkinter.W)
        self.column_length_entry = tkinter.Entry(self.window, textvariable="-1", width=5)
        self.column_length_entry.grid(row=1, column=1, padx=15)
        self.resize_x_label = tkinter.Label(self.window, text="Scale Width by (ratio): ")
        self.resize_x_label.grid(row=2, column=0, sticky=tkinter.W)
        self.resize_x_entry = tkinter.Entry(self.window, textvariable="-2", width=5, state="disabled")
        self.resize_x_entry.grid(row=2, column=1, padx=15)
        self.resize_y_label = tkinter.Label(self.window, text="Scale Height by (ratio): ")
        self.resize_y_label.grid(row=3, column=0, sticky=tkinter.W)
        self.resize_y_entry = tkinter.Entry(self.window, textvariable="-3", width=5, state="disabled")
        self.resize_y_entry.grid(row=3, column=1, padx=15)
        # Label prompt for displaying the width and height of the currently selected barcode
        self.size_label = tkinter.Label(self.window, text="Current Width = {:d}\nCurrent Height = {:d}"
                                        .format(self.barcode_1.get_barcode().shape[1],
                                                self.barcode_1.get_barcode().shape[0]))
        self.size_label.grid(row=4, column=0, columnspan=1)
        # Button to process the resize
        self.process_button = tkinter.Button(self.window, text="Process", command=self.reshape_resize_barcode)
        self.process_button.grid(row=4, column=2, sticky=tkinter.W)
        # Label prompt for the Resize type selection
        config_label = tkinter.Label(self.window, text="Config options: ")
        config_label.grid(row=0, column=2, columnspan=1)
        # Radio button for selecting the resize type
        # (each selection enables/disables the matching parameter entries)
        radio_reshape = tkinter.Radiobutton(self.window, text="Reshape", variable=self.config_option,
                                            value="Reshape", anchor='w',
                                            command=self.reshape)
        radio_reshape.grid(row=1, column=2, sticky=tkinter.W)
        radio_reshape.select()
        radio_scaling = tkinter.Radiobutton(self.window, text="Scaling", variable=self.config_option,
                                            value="Scaling", anchor='w',
                                            command=self.scale)
        radio_scaling.grid(row=2, column=2, sticky=tkinter.W)
        radio_resize = tkinter.Radiobutton(self.window, text="Resize", variable=self.config_option,
                                           value="Resize", anchor='w',
                                           command=self.resize)
        radio_resize.grid(row=3, column=2, sticky=tkinter.W)
        # Label prompt for selecting which barcode to resize
        which_barcode_label = tkinter.Label(self.window, text="Barcode: ")
        which_barcode_label.grid(row=0, column=3, columnspan=1)
        # Option variable
        self.barcode_option = tkinter.StringVar(self.window)
        self.barcode_option.set("Barcode 1")
        # Radio button for selecting which barcode to resize
        radio_barcode_1 = tkinter.Radiobutton(self.window, text="Barcode 1", variable=self.barcode_option,
                                              value="Barcode 1", command=self.update_size_label)
        radio_barcode_1.grid(row=1, column=3)
        radio_barcode_1.select()
        radio_barcode_2 = tkinter.Radiobutton(self.window, text="Barcode 2", variable=self.barcode_option,
                                              value="Barcode 2", command=self.update_size_label)
        radio_barcode_2.grid(row=2, column=3)
def update_size_label(self):
"""
Update the size label if the currently selected barcode is changed
"""
if self.barcode_option.get() == "Barcode 1":
text = "Current Width = {:d}\nCurrent Height = {:d}".format(
self.barcode_1.get_barcode().shape[1], self.barcode_1.get_barcode().shape[0])
elif self.barcode_option.get() == "Barcode 2":
text = "Current Width = {:d}\nCurrent Height = {:d}".format(
self.barcode_2.get_barcode().shape[1], self.barcode_2.get_barcode().shape[0])
self.size_label['text'] = text
def reshape(self):
"""
Enable or disable the input parameters entry if the reshape radio button is selected
"""
self.column_length_entry.config(state='normal')
self.resize_x_entry.config(state='disabled')
self.resize_y_entry.config(state='disabled')
def scale(self):
"""
Enable or disable the input parameters entry and update the corresponding text
if the scale radio button is selected
"""
self.resize_x_label['text'] = "Scale Width by (ratio): "
self.resize_y_label['text'] = "Scale Height by (ratio): "
self.column_length_entry.config(state='disabled')
self.resize_x_entry.config(state='normal')
self.resize_y_entry.config(state='normal')
def resize(self):
"""
Enable or disable the input parameters entry and update the corresponding text
if the resize radio button is selected
"""
self.resize_x_label['text'] = "Resize Width to (pixels): "
self.resize_y_label['text'] = "Resize Height to (pixels): "
self.column_length_entry.config(state='disabled')
self.resize_x_entry.config(state='normal')
self.resize_y_entry.config(state='normal')
    def reshape_resize_barcode(self):
        """
        Reshape or resize the barcode using the given parameters

        Reads the operation type (Reshape/Resize/Scaling) and the target
        barcode from the radio-button selections, applies the operation,
        rescales the barcode's scale factor, redraws the plots, and closes
        the dialog window. If the required parameter entries are blank the
        method returns early and the window stays open.
        """
        # Get the reshape/resize type from the user selection
        option = self.config_option.get()
        # Get which barcode to reshape/resize
        if self.barcode_option.get() == "Barcode 1":
            barcode = self.barcode_1
        elif self.barcode_option.get() == "Barcode 2":
            barcode = self.barcode_2
        # Save the current barcode size
        # (pixel count before the operation, used to update the scale factor)
        old_barcode_size = barcode.get_barcode().shape[0] * barcode.get_barcode().shape[1]
        # Reshape/resize the currently selected barcode using the given type with parameters
        # NOTE(review): the int()/float() conversions below raise ValueError on
        # non-numeric input — confirm the entries are validated upstream.
        if option == "Reshape":
            frames_per_column_str = self.column_length_entry.get()
            # Check if the reshape parameter is given
            # If not given, return and do not process the reshape
            if len(frames_per_column_str) == 0:
                return
            frames_per_column = int(frames_per_column_str)
            barcode.reshape_barcode(frames_per_column)
            self.update_scale_factor(barcode, old_barcode_size)
            self.updated_new_barcode()
        elif option == "Resize":
            barcode_shape = barcode.get_barcode().shape
            # Blank entries default to the current width/height
            resize_x, resize_y = self._check_resize_entry(barcode_shape[1], barcode_shape[0])
            if resize_x is None:
                return
            # Nearest-neighbour keeps the per-frame colour blocks crisp
            resized_barcode = cv2.resize(barcode.get_barcode(),
                                         dsize=(int(resize_x), int(resize_y)),
                                         interpolation=cv2.INTER_NEAREST)
            barcode.barcode = resized_barcode
            self.update_scale_factor(barcode, old_barcode_size)
            self.updated_new_barcode()
        elif option == "Scaling":
            # Blank entries default to a ratio of 1 (no change in that axis)
            resize_x, resize_y = self._check_resize_entry(1, 1)
            if resize_x is None:
                return
            # dsize=(0, 0) tells OpenCV to derive the output size from fx/fy
            resized_barcode = cv2.resize(barcode.get_barcode(),
                                         dsize=(0, 0),
                                         fx=float(resize_x),
                                         fy=float(resize_y),
                                         interpolation=cv2.INTER_NEAREST)
            barcode.barcode = resized_barcode
            self.update_scale_factor(barcode, old_barcode_size)
            self.updated_new_barcode()
        # Quit the window
        self.window.destroy()
def _check_resize_entry(self, default_x, default_y):
"""
Check if the resize parameter is given
If one of the parameter is not given assume that dimension is unchanged
If both are not given, return and do not process the resize.
:param default_x: Default x dimension
:param default_y: Default y dimension
:return: Processed resize x and y parameters from the user input
"""
resize_x_str = self.resize_x_entry.get()
resize_y_str = self.resize_y_entry.get()
if len(resize_x_str) == 0 and len(resize_y_str) == 0:
return None, None
if len(resize_x_str) == 0:
resize_x = str(default_x)
else:
resize_x = resize_x_str
if len(resize_y_str) == 0:
resize_y = str(default_y)
else:
resize_y = resize_y_str
return resize_x, resize_y
def update_scale_factor(self, barcode, old_barcode_size):
"""
Update the scale factor of the barcode
:param barcode: The barcode to update
:param old_barcode_size: The old size of that barcode
"""
barcode.scale_factor *= (old_barcode_size / (barcode.get_barcode().shape[0] * barcode.get_barcode().shape[1]))
    def updated_new_barcode(self):
        """
        Update the resized/reshaped barcode to the MainWindow of the kalmus
        """
        # Clear the display axes
        self.axes[0][0].cla()
        self.axes[1][0].cla()
        self.axes[0][1].cla()
        self.axes[1][1].cla()
        # Update the displayed barcode and redraw the canvas
        # If barcode_1 has become the wider one, swap the two barcodes'
        # states via their __dict__ attributes. Copying __dict__ (rather
        # than rebinding the names) preserves object identity, so every
        # other reference to barcode_1/barcode_2 sees the swap too.
        # NOTE(review): presumably this keeps barcode_1 as the narrower
        # barcode for display layout — confirm against update_graph.
        if self.barcode_1.get_barcode().shape[1] > self.barcode_2.get_barcode().shape[1]:
            temp = copy.deepcopy(self.barcode_1)
            self.barcode_1.__dict__ = self.barcode_2.__dict__.copy()
            self.barcode_2.__dict__ = temp.__dict__.copy()
        update_graph(barcode_1=self.barcode_1, barcode_2=self.barcode_2, axes=self.axes)
        # Redraw the canvas
        self.canvas.draw()
| 3.40625 | 3 |
system/t09_repo/add.py | Yelp/aptly | 0 | 12772341 | import tempfile
import shutil
import os
import inspect
from lib import BaseTest
class AddRepo1Test(BaseTest):
    """
    add package to local repo: .deb file

    A single binary package is imported; its blob must land in the pool
    under its MD5-prefixed path.
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo1 -distribution=squeeze repo1",
    ]
    runCmd = "aptly repo add repo1 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb"
    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo1", "repo_show")
        # check pool
        self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
class AddRepo2Test(BaseTest):
    """
    add package to local repo: .dsc file

    Source packages are imported from two .dsc files; every file referenced
    by the .dsc (diff.gz, orig.tar.gz) must be pulled into the pool as well.
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo2 -distribution=squeeze repo2",
    ]
    runCmd = "aptly repo add repo2 ${files}/pyspi_0.6.1-1.3.dsc ${files}/pyspi-0.6.1-1.3.stripped.dsc"
    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo2", "repo_show")
        # check pool
        self.check_exists('pool/2e/77/0b28df948f3197ed0b679bdea99f_pyspi_0.6.1-1.3.diff.gz')
        self.check_exists('pool/d4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc')
        self.check_exists('pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz')
        self.check_exists('pool/28/9d/3aefa970876e9c43686ce2b02f47_pyspi-0.6.1-1.3.stripped.dsc')
class AddRepo3Test(BaseTest):
    """
    add package to local repo: directory

    Adding a directory imports every recognized package found inside it.
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo3 -distribution=squeeze repo3",
    ]
    runCmd = "aptly repo add repo3 ${files}"
    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo3", "repo_show")
        # check pool
        self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
        self.check_exists('pool/2e/77/0b28df948f3197ed0b679bdea99f_pyspi_0.6.1-1.3.diff.gz')
        self.check_exists('pool/d4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc')
        self.check_exists('pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz')
        self.check_exists('pool/28/9d/3aefa970876e9c43686ce2b02f47_pyspi-0.6.1-1.3.stripped.dsc')
class AddRepo4Test(BaseTest):
    """
    add package to local repo: complex directory + remove

    Builds a nested scratch directory of fixture packages and adds it with
    -remove-files: consumed package files must be deleted from the source
    tree, unrelated files must be left behind.
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo4 -distribution=squeeze repo4",
    ]
    runCmd = "aptly repo add -remove-files repo4 "
    def prepare(self):
        super(AddRepo4Test, self).prepare()
        # Directory holding the fixture packages shipped with the test harness
        files_dir = os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files")
        self.tempSrcDir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.tempSrcDir, "01"), 0o755)
        os.makedirs(os.path.join(self.tempSrcDir, "02", "03"), 0o755)
        shutil.copy(os.path.join(files_dir, "libboost-program-options-dev_1.49.0.1_i386.deb"),
                    os.path.join(self.tempSrcDir, "01"))
        shutil.copy(os.path.join(files_dir, "pyspi_0.6.1-1.3.dsc"),
                    os.path.join(self.tempSrcDir, "02", "03"))
        shutil.copy(os.path.join(files_dir, "pyspi_0.6.1.orig.tar.gz"),
                    os.path.join(self.tempSrcDir, "02", "03"))
        shutil.copy(os.path.join(files_dir, "pyspi_0.6.1-1.3.diff.gz"),
                    os.path.join(self.tempSrcDir, "02", "03"))
        # a non-package file that -remove-files must NOT delete
        shutil.copy(os.path.join(files_dir, "pyspi_0.6.1-1.3.diff.gz"),
                    os.path.join(self.tempSrcDir, "02", "03", "other.file"))
        self.runCmd += self.tempSrcDir
    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo4", "repo_show")
        try:
            # check pool
            self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
            self.check_exists('pool/2e/77/0b28df948f3197ed0b679bdea99f_pyspi_0.6.1-1.3.diff.gz')
            self.check_exists('pool/d4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc')
            self.check_exists('pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz')
            # consumed package files must have been removed from the source tree
            path = os.path.join(self.tempSrcDir, "01", "libboost-program-options-dev_1.49.0.1_i386.deb")
            if os.path.exists(path):
                raise Exception("path %s shouldn't exist" % (path, ))
            path = os.path.join(self.tempSrcDir, "02", "03", "pyspi_0.6.1.orig.tar.gz")
            if os.path.exists(path):
                raise Exception("path %s shouldn't exist" % (path, ))
            # the unrelated file must still be there
            path = os.path.join(self.tempSrcDir, "02", "03", "other.file")
            if not os.path.exists(path):
                raise Exception("path %s doesn't exist" % (path, ))
        finally:
            # always clean up the scratch directory, even when a check fails
            # (previously the directory leaked if any check above raised)
            shutil.rmtree(self.tempSrcDir)
class AddRepo5Test(BaseTest):
    """
    add package to local repo: some source files missing

    The .dsc references pyspi_0.6.1-1.3.diff.gz, which is deliberately not
    copied into the scratch tree, so the add must fail (exit code 1).
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo5 -distribution=squeeze repo5",
    ]
    runCmd = "aptly repo add repo5 "
    expectedCode = 1
    def outputMatchPrepare(self, s):
        # strip the machine-specific scratch path so output matches the gold file
        return s.replace(self.tempSrcDir, "")
    def prepare(self):
        super(AddRepo5Test, self).prepare()
        self.tempSrcDir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.tempSrcDir, "02", "03"), 0o755)
        shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.dsc"),
                    os.path.join(self.tempSrcDir, "02", "03"))
        shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1.orig.tar.gz"),
                    os.path.join(self.tempSrcDir, "02", "03"))
        self.runCmd += self.tempSrcDir
    def check(self):
        # NOTE(review): if either check below raises, the scratch directory
        # leaks — consider try/finally around rmtree.
        self.check_output()
        self.check_cmd_output("aptly repo show repo5", "repo_show")
        shutil.rmtree(self.tempSrcDir)
class AddRepo6Test(BaseTest):
    """
    add package to local repo: missing file

    Pointing `repo add` at a nonexistent path must fail (exit code 1).
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo6 -distribution=squeeze repo6",
    ]
    runCmd = "aptly repo add repo6 no-such-file"
    expectedCode = 1
class AddRepo7Test(BaseTest):
    """
    add package to local repo: missing repo

    Adding to a repo that was never created must fail (exit code 1).
    """
    runCmd = "aptly repo add repo7 ${files}"
    expectedCode = 1
class AddRepo8Test(BaseTest):
    """
    add package to local repo: conflict in packages

    A second .dsc with the same name/version but different contents must be
    rejected (exit code 1) and leave the repo unchanged.
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo8 -distribution=squeeze repo8",
        "aptly repo add repo8 ${files}/pyspi_0.6.1-1.3.dsc",
    ]
    runCmd = "aptly repo add repo8 ${testfiles}/pyspi_0.6.1-1.3.conflict.dsc"
    expectedCode = 1
    def outputMatchPrepare(self, s):
        # normalize machine-specific test/fixture paths out of the output
        return s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__), ""). \
            replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"), "")
    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo8", "repo_show")
class AddRepo9Test(BaseTest):
    """
    add package to local repo: conflict in files

    A pool blob is pre-created with wrong contents at the path the import
    would use, forcing a file checksum conflict (exit code 1).
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo9 -distribution=squeeze repo9",
    ]
    runCmd = "aptly repo add repo9 ${files}/pyspi_0.6.1-1.3.dsc"
    gold_processor = BaseTest.expand_environ
    expectedCode = 1
    def outputMatchPrepare(self, s):
        # normalize machine-specific test/fixture paths out of the output
        return s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__), ""). \
            replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"), "")
    def prepare(self):
        super(AddRepo9Test, self).prepare()
        # plant a bogus blob where pyspi_0.6.1.orig.tar.gz would be stored
        os.makedirs(os.path.join(os.environ["HOME"], ".aptly", "pool/64/06/"))
        with open(os.path.join(os.environ["HOME"], ".aptly", "pool/64/06/9ee828c50b1c597d10a3fefbba27_pyspi_0.6.1.orig.tar.gz"), "w") as f:
            f.write("abcd")
class AddRepo10Test(BaseTest):
    """
    add package to local repo: double import

    Re-adding an already-imported package must be a no-op, not an error.
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo10 -distribution=squeeze repo10",
        "aptly repo add repo10 ${files}",
    ]
    runCmd = "aptly repo add repo10 ${files}/pyspi_0.6.1-1.3.dsc"
    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo10", "repo_show")
class AddRepo11Test(BaseTest):
    """
    add package to local repo: conflict in packages + -force-replace

    With -force-replace the conflicting package replaces the original
    instead of failing (contrast with AddRepo8Test).
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo11 -distribution=squeeze repo11",
        "aptly repo add repo11 ${files}/pyspi_0.6.1-1.3.dsc",
    ]
    runCmd = "aptly repo add -force-replace repo11 ${testfiles}/pyspi_0.6.1-1.3.conflict.dsc"
    def outputMatchPrepare(self, s):
        # normalize machine-specific test/fixture paths out of the output
        return s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__), ""). \
            replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"), "")
    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo11", "repo_show")
class AddRepo12Test(BaseTest):
    """
    add package to local repo: .udeb file

    Debian installer micro-packages (.udeb) must import like regular .debs.
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo12 -distribution=squeeze repo12",
    ]
    runCmd = "aptly repo add repo12 ${udebs}/dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb"
    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo12", "repo_show")
        # check pool
        self.check_exists('pool/ef/ae/69921b97494e40437712053b60a5_dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb')
class AddRepo13Test(BaseTest):
    """
    add package to local repo: .udeb and .deb files

    Mixed directories of .udeb and regular packages import in one command.
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo13 -distribution=squeeze repo13",
    ]
    runCmd = "aptly repo add repo13 ${udebs} ${files}"
    def check(self):
        self.check_output()
        self.check_cmd_output("aptly repo show -with-packages repo13", "repo_show")
        # check pool
        self.check_exists('pool/ef/ae/69921b97494e40437712053b60a5_dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb')
        self.check_exists('pool/d4/94/aaf526f1ec6b02f14c2f81e060a5_pyspi_0.6.1-1.3.dsc')
class AddRepo14Test(BaseTest):
    """
    add same package to local repo twice and make sure the file doesn't get truncated.

    The repo is published and the published pool is re-imported; the blob in
    the internal pool must survive intact (regression test for truncation).
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo14 -distribution=squeeze repo14",
        "aptly repo add repo14 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb",
        "aptly publish repo -distribution=test1 -skip-signing repo14"
    ]
    runCmd = "aptly repo add repo14 $aptlyroot/public/pool/"
    def check(self):
        super(AddRepo14Test, self).check()
        # check pool
        self.check_exists('pool/c7/6b/4bd12fd92e4dfe1b55b18a67a669_libboost-program-options-dev_1.49.0.1_i386.deb')
class AddRepo15Test(BaseTest):
    """
    add package with wrong case in stanza and missing fields

    A malformed control stanza must be rejected (exit code 1).
    """
    fixtureCmds = [
        "aptly repo create -comment=Repo15 -distribution=squeeze repo15",
    ]
    runCmd = "aptly repo add repo15 ${testfiles}"
    expectedCode = 1
    def outputMatchPrepare(self, s):
        # normalize machine-specific test/fixture paths out of the output
        return s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(self.__class__)), self.__class__.__name__), ""). \
            replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files"), "")
class AddRepo16Test(BaseTest):
    """
    add package to local repo: some source files missing, but already in the pool

    The missing diff.gz was already imported into the pool via repo1, so
    adding the incomplete source tree to repo2 must still succeed.
    """
    fixtureCmds = [
        "aptly repo create repo1",
        "aptly repo create repo2",
        "aptly repo add repo1 ${files}"
    ]
    runCmd = "aptly repo add repo2 "
    def outputMatchPrepare(self, s):
        # strip the machine-specific scratch path so output matches the gold file
        return s.replace(self.tempSrcDir, "")
    def prepare(self):
        super(AddRepo16Test, self).prepare()
        self.tempSrcDir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.tempSrcDir, "02", "03"), 0o755)
        shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1-1.3.dsc"),
                    os.path.join(self.tempSrcDir, "02", "03"))
        shutil.copy(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "pyspi_0.6.1.orig.tar.gz"),
                    os.path.join(self.tempSrcDir, "02", "03"))
        self.runCmd += self.tempSrcDir
    def check(self):
        # NOTE(review): scratch directory leaks if a check raises — consider try/finally.
        self.check_output()
        self.check_cmd_output("aptly repo show repo2", "repo_show")
        shutil.rmtree(self.tempSrcDir)
| 2.3125 | 2 |
Challenges/30/tests/test_hashtable.py | makkahwi/data-structures-and-algorithms | 0 | 12772342 | <reponame>makkahwi/data-structures-and-algorithms
from hashtable.hashtable import Hashtable
import pytest
def test_initializing():
    """A new Hashtable starts with size 100 and an all-None table."""
    table = Hashtable()
    assert table.size == 100
    assert table.table == [None] * 100
# 7. Successfully hash a key to an in-range value
def test_key_hash():
    """hash() maps "key1" deterministically to index 82."""
    assert Hashtable().hash("key1") == 82
# 1. Setting a key/value to your hashtable results in the value being in the data structure
def test_value_set(ht):
    ht.set("key4", "value4")
    assert ht.get("key4") == [("key4", "value4")]
# 2. Retrieving based on a key returns the value stored
def test_key_based_retrieve(ht):
    assert ht.get("key1") == [("key1", "value1")]
# 5. Successfully handle a collision within the hashtable
# 6. Successfully retrieve a value from a bucket within the hashtable that has a collision
def test_key_based_retrieve_collision(ht):
    ht.set("key3", "value33")
    # both entries for "key3" live in the same bucket, in insertion order
    assert ht.get("key3") == [("key3", "value3"), ("key3", "value33")]
# 3. Successfully returns null for a key that does not exist in the hashtable
def test_wrong_key_get(ht):
    assert ht.get("key4") is None
# 4. Successfully returns a list of all unique keys that exist in the hashtable
def test_unique_keys(ht):
    # NOTE(review): asserts a specific ordering — presumably bucket-index
    # order of the hash function; confirm it is deterministic across runs.
    assert ht.keys() == ["key2", "key3", "key1"]
def test_contains(ht):
    """contains() reports True for a stored key."""
    assert ht.contains("key1") is True
def test_contains_wrong(ht):
    """contains() reports False for a key that was never stored."""
    assert ht.contains("key4") is False
@pytest.fixture
def ht():
    """Hashtable pre-populated with three pairs, inserted key3, key2, key1."""
    table = Hashtable()
    for key, value in (("key3", "value3"), ("key2", "value2"), ("key1", "value1")):
        table.set(key, value)
    return table
| 3.8125 | 4 |
examples/Ex4_fit_line_SPT0346-52_CO21.py | dchx/visilens | 16 | 12772343 | """
Example 4, to be run in python. In this example, we fit the CO
emission using the data made from Ex2 (which you should be
able to download from the same place you got this script).
For these data, I just fixed the lens properties to those
we got from the ALMA 870um data, because the ALMA data have
way higher signal-to-noise than these do. You could also do
one big joint fit, if you needed to.
"""
import numpy as np
import matplotlib.pyplot as pl; pl.ioff()
import sys
sys.path.append('/Users/jspilker/Research/visilens')
import visilens as vl
import time
import glob
# We match the file names made by our Example 2 script.
center = '0'    # channel centre velocity, km/s (matches Ex2 output filenames)
width = '200'   # channel width, km/s
# Read in the data from each day's observations...
datasets = []
for f in sorted(glob.glob('spt0346linedata/spt0346_*_'+center+'_dv'+width+'*.bin')):
    datasets.append(vl.read_visdata(f))
# and concatenate it all together. We could have done this earlier when
# we split out the data, but this way we keep open the option of re-scaling the
# amplitudes on different days if we ever have enough s/n for that.
atcadata = vl.concatvis(datasets)
atcadata.filename = 'All, '+center+'km/s, '+width+'km/s wide'
# A plot name helper
plotfbase = 'SPT0346-52_CO21_'+center+'_dv'+width
# We set the lens parameters to the ALMA best-fit values.
# Lens positions are relative to the pointing center offset
# ALMA pointed at 03:46:41.19, -52:05:05.5, ATCA at 03:46:41.13, -52:05:02.1
# to calculate offset, use astropy.coordinates & astropy.units:
# import astropy.units as u; import astropy.coordinates as ac
# alma = ac.SkyCoord('03h46m41.19s','-52d05m05.5s')
# atca = ac.SkyCoord('03h46m41.13s','-52d05m02.1s')
# x = xLens*u.arcsec - (alma.ra - atca.ra).to(u.arcsec)*np.cos(alma.dec)
# y = (alma.dec + yLens*u.arcsec - atca.dec).to(u.arcsec)
# Best-fit lens to combined data, sersic + shear
# ALMA xL = 0.806, yL = 3.036 --> xL_atca = 0.253, yL_atca = -0.364
# All lens parameters are fixed to the ALMA 870um best fit (higher s/n)
lens = [vl.SIELens(z=0.8,
          x={'value':0.253,'fixed':True,'prior':[-0.221,0.779]},
          y={'value':-0.364,'fixed':True,'prior':[-0.828,0.172]},
          M={'value':2.811e11,'fixed':True,'prior':[1e10,5e13]},
          e={'value':0.515,'fixed':True,'prior':[0.4,0.6]},
          PA={'value':70.90,'fixed':True,'prior':[60, 95]}),
        vl.ExternalShear(
          shear={'value':0.119,'fixed':True},
          shearangle={'value':122.13,'fixed':True})]
# We're just going to model the source as a simple symmetric Gaussian;
# only the source parameters are free in this fit.
src = vl.GaussSource(z=5.65,
          xoff={'value':0.142,'fixed':False,'prior':[-1., 1.]},
          yoff={'value':0.30,'fixed':False,'prior':[-1., 1.]},
          flux={'value':0.000273,'fixed':False,'prior':[0.,0.002]},
          width={'value':0.108,'fixed':False,'prior':[0.02,0.5]})
# Set up the gridding for the moedling.
# The ATCA primary beam is ~70arcsec, but a 38arcsec box keeps
# our grid to 512x512 instead of 1024x1024, and our shortest
# baselines don't care about this anyway.
xmax = 38.
# finely-gridded region around the lensed emission (arcsec)
highresbox = [0.3-2.5, 0.3+2.5, -0.36-2.5, -0.36+2.5]
emitres, fieldres = 0.03, 0.15
# In this case, we don't have much signal-to-noise, so we won't
# allow flux scaling or astrometric shifts
scaleamp, shiftphase = False, False
# Similarly, we don't allow antenna-based phase errors. If these
# were significant, we'd see them in the delays across the ATCA
# bandwidth. Also, we observed many-hour tracks, so atmospheric
# effects have largely averaged out.
modelcal = False
# Do some setup for the MCMC things and multiprocessing
nwalkers,nburn,nstep = 300,200,200
nthreads=4 # use 4 cores for the calculations
mpirun = False  # single-machine run; set True when launched under mpirun
# Now we do the actual MCMC calculations, as in the previous example
t1 = time.time()
mcmcresult = vl.LensModelMCMC(atcadata,lens,src,xmax=xmax,highresbox=highresbox,\
fieldres=fieldres,emitres=emitres,scaleamp=scaleamp,shiftphase=shiftphase,\
modelcal=modelcal,nwalkers=nwalkers,nburn=nburn,nstep=nstep,nthreads=nthreads,pool=None,mpirun=mpirun)
t2 = time.time()
print "total time: {0:.1f} hours for {1:.0f} samples".format((t2-t1)/3600.,nwalkers*(nburn+nstep))
# We just dump out the results of the mcmc run.
import pickle
import gzip
pickle.dump(mcmcresult,gzip.open('chains_'+plotfbase+'.pzip','w'))
# Plot triangle degeneracy plot.
f,axesarray = vl.TrianglePlot_MCMC(mcmcresult,plotmag=True,plotnuisance=True)
f.savefig(plotfbase+'_triangle.png')
pl.close()

# Plot images, and we're done!
# Fix: `plotpanels` was referenced below but never defined, which raised a
# NameError at runtime; define the flag here (set False to skip the panels).
plotpanels = True
if plotpanels:
    f,axarr = vl.plot_images(atcadata,mcmcresult,imsize=500,pixsize=0.07,
          limits=[-5,5,-5,5],mapcontours=np.array([-4,4,6,8,10]))
    axarr[0][0].text(0.,-0.2,"Data contours: steps of 2$\sigma$ starting at $\pm$4; "\
          "Residual contours: Steps of 1$\sigma$ starting at $\pm$2",\
          transform=axarr[0][0].transAxes)
    f.savefig(plotfbase+'_panels.png')
    pl.close()
| 3.0625 | 3 |
python_venv_legacy/mediapipe/calculators/util/landmarks_smoothing_calculator_pb2.py | 4sunshine/mediapipe | 0 | 12772344 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/calculators/util/landmarks_smoothing_calculator.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.framework import calculator_options_pb2 as mediapipe_dot_framework_dot_calculator__options__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/calculators/util/landmarks_smoothing_calculator.proto',
package='mediapipe',
syntax='proto2',
serialized_pb=_b('\n?mediapipe/calculators/util/landmarks_smoothing_calculator.proto\x12\tmediapipe\x1a,mediapipe/framework/calculator_options.proto\"\xb9\x03\n#LandmarksSmoothingCalculatorOptions\x12L\n\tno_filter\x18\x01 \x01(\x0b\x32\x37.mediapipe.LandmarksSmoothingCalculatorOptions.NoFilterH\x00\x12X\n\x0fvelocity_filter\x18\x02 \x01(\x0b\x32=.mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilterH\x00\x1a\n\n\x08NoFilter\x1am\n\x0eVelocityFilter\x12\x16\n\x0bwindow_size\x18\x01 \x01(\x05:\x01\x35\x12\x1a\n\x0evelocity_scale\x18\x02 \x01(\x02:\x02\x31\x30\x12\'\n\x18min_allowed_object_scale\x18\x03 \x01(\x02:\x05\x31\x65-062]\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\x85\xb4\xa5\x9b\x01 \x01(\x0b\x32..mediapipe.LandmarksSmoothingCalculatorOptionsB\x10\n\x0e\x66ilter_options')
,
dependencies=[mediapipe_dot_framework_dot_calculator__options__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_LANDMARKSSMOOTHINGCALCULATOROPTIONS_NOFILTER = _descriptor.Descriptor(
name='NoFilter',
full_name='mediapipe.LandmarksSmoothingCalculatorOptions.NoFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=332,
serialized_end=342,
)
_LANDMARKSSMOOTHINGCALCULATOROPTIONS_VELOCITYFILTER = _descriptor.Descriptor(
name='VelocityFilter',
full_name='mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='window_size', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilter.window_size', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='velocity_scale', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilter.velocity_scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(10),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_allowed_object_scale', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilter.min_allowed_object_scale', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1e-06),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=344,
serialized_end=453,
)
_LANDMARKSSMOOTHINGCALCULATOROPTIONS = _descriptor.Descriptor(
name='LandmarksSmoothingCalculatorOptions',
full_name='mediapipe.LandmarksSmoothingCalculatorOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='no_filter', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.no_filter', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='velocity_filter', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.velocity_filter', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='ext', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.ext', index=0,
number=325671429, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[_LANDMARKSSMOOTHINGCALCULATOROPTIONS_NOFILTER, _LANDMARKSSMOOTHINGCALCULATOROPTIONS_VELOCITYFILTER, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='filter_options', full_name='mediapipe.LandmarksSmoothingCalculatorOptions.filter_options',
index=0, containing_type=None, fields=[]),
],
serialized_start=125,
serialized_end=566,
)
_LANDMARKSSMOOTHINGCALCULATOROPTIONS_NOFILTER.containing_type = _LANDMARKSSMOOTHINGCALCULATOROPTIONS
_LANDMARKSSMOOTHINGCALCULATOROPTIONS_VELOCITYFILTER.containing_type = _LANDMARKSSMOOTHINGCALCULATOROPTIONS
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.fields_by_name['no_filter'].message_type = _LANDMARKSSMOOTHINGCALCULATOROPTIONS_NOFILTER
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.fields_by_name['velocity_filter'].message_type = _LANDMARKSSMOOTHINGCALCULATOROPTIONS_VELOCITYFILTER
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.oneofs_by_name['filter_options'].fields.append(
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.fields_by_name['no_filter'])
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.fields_by_name['no_filter'].containing_oneof = _LANDMARKSSMOOTHINGCALCULATOROPTIONS.oneofs_by_name['filter_options']
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.oneofs_by_name['filter_options'].fields.append(
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.fields_by_name['velocity_filter'])
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.fields_by_name['velocity_filter'].containing_oneof = _LANDMARKSSMOOTHINGCALCULATOROPTIONS.oneofs_by_name['filter_options']
DESCRIPTOR.message_types_by_name['LandmarksSmoothingCalculatorOptions'] = _LANDMARKSSMOOTHINGCALCULATOROPTIONS
LandmarksSmoothingCalculatorOptions = _reflection.GeneratedProtocolMessageType('LandmarksSmoothingCalculatorOptions', (_message.Message,), dict(
NoFilter = _reflection.GeneratedProtocolMessageType('NoFilter', (_message.Message,), dict(
DESCRIPTOR = _LANDMARKSSMOOTHINGCALCULATOROPTIONS_NOFILTER,
__module__ = 'mediapipe.calculators.util.landmarks_smoothing_calculator_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.LandmarksSmoothingCalculatorOptions.NoFilter)
))
,
VelocityFilter = _reflection.GeneratedProtocolMessageType('VelocityFilter', (_message.Message,), dict(
DESCRIPTOR = _LANDMARKSSMOOTHINGCALCULATOROPTIONS_VELOCITYFILTER,
__module__ = 'mediapipe.calculators.util.landmarks_smoothing_calculator_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.LandmarksSmoothingCalculatorOptions.VelocityFilter)
))
,
DESCRIPTOR = _LANDMARKSSMOOTHINGCALCULATOROPTIONS,
__module__ = 'mediapipe.calculators.util.landmarks_smoothing_calculator_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.LandmarksSmoothingCalculatorOptions)
))
_sym_db.RegisterMessage(LandmarksSmoothingCalculatorOptions)
_sym_db.RegisterMessage(LandmarksSmoothingCalculatorOptions.NoFilter)
_sym_db.RegisterMessage(LandmarksSmoothingCalculatorOptions.VelocityFilter)
_LANDMARKSSMOOTHINGCALCULATOROPTIONS.extensions_by_name['ext'].message_type = _LANDMARKSSMOOTHINGCALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_LANDMARKSSMOOTHINGCALCULATOROPTIONS.extensions_by_name['ext'])
# @@protoc_insertion_point(module_scope)
| 1.289063 | 1 |
pssvi/cubature.py | AntixK/parallel-SSVI | 0 | 12772345 | import objax
from jax import vmap, grad, jacrev
import jax.numpy as np
from jax.scipy.linalg import cholesky, cho_factor
from .utils import inv, solve, gaussian_first_derivative_wrt_mean, gaussian_second_derivative_wrt_mean
from numpy.polynomial.hermite import hermgauss
import numpy as onp
import itertools
class Cubature(objax.Module):
    """
    Base class for numerical-integration (cubature) rules.

    If the integration dimension is supplied at construction time, the
    sigma-points and weights are computed once and cached; otherwise they
    are recomputed on every call.
    """
    def __init__(self, dim=None):
        self.store = dim is not None
        if self.store:  # dimension known up front: cache points and weights
            self.x, self.w = self.get_cubature_points_and_weights(dim)

    def __call__(self, dim):
        if not self.store:
            return self.get_cubature_points_and_weights(dim)
        return self.x, self.w

    def get_cubature_points_and_weights(self, dim):
        """Subclasses must return (sigma_points, weights) for the given dimension."""
        raise NotImplementedError
class GaussHermite(Cubature):
    """Gauss-Hermite cubature with `num_cub_points` quadrature points per dimension."""
    def __init__(self, dim=None, num_cub_points=20):
        # number of 1-D quadrature points; the rule uses num_cub_points**dim sigma-points
        self.num_cub_points = num_cub_points
        super().__init__(dim)

    def get_cubature_points_and_weights(self, dim):
        return gauss_hermite(dim, self.num_cub_points)
class UnscentedThirdOrder(Cubature):
    """Symmetric third-order cubature rule (2*dim + 1 sigma-points)."""
    def get_cubature_points_and_weights(self, dim):
        return symmetric_cubature_third_order(dim)
class UnscentedFifthOrder(Cubature):
    """Symmetric fifth-order cubature rule (2*dim**2 + 1 sigma-points)."""
    def get_cubature_points_and_weights(self, dim):
        return symmetric_cubature_fifth_order(dim)
class Unscented(UnscentedFifthOrder):
    # Alias: the default "unscented" rule is the fifth-order symmetric rule.
    pass
def mvhermgauss(H: int, D: int):
    """
    This function is adapted from GPflow: https://github.com/GPflow/GPflow

    Return the evaluation locations 'xn', and weights 'wn' for a multivariate
    Gauss-Hermite quadrature.

    The outputs can be used to approximate the following type of integral:
    int exp(-x)*f(x) dx ~ sum_i w[i,:]*f(x[i,:])

    :param H: Number of Gauss-Hermite evaluation points.
    :param D: Number of input dimensions. Needs to be known at call-time.
    :return: eval_locations 'x' (H**DxD), weights 'w' (H**D)
    """
    nodes_1d, weights_1d = hermgauss(H)
    # Cartesian product of the 1-D rule across all D dimensions: H**D points
    x = np.array(list(itertools.product(nodes_1d, repeat=D)))
    # multivariate weight = product of the per-dimension 1-D weights
    weight_grid = np.array(list(itertools.product(weights_1d, repeat=D)))
    w = np.prod(weight_grid, 1)
    return x, w
def gauss_hermite(dim=1, num_quad_pts=20):
    """Return sigma points and weights for Gauss-Hermite cubature.

    The raw Hermite rule integrates against exp(-x'x); scaling the nodes by
    sqrt(2) and the weights by pi**(-dim/2) converts it into an expectation
    under a standard normal density.
    """
    nodes, raw_weights = mvhermgauss(num_quad_pts, dim)
    sigma_pts = np.sqrt(2) * nodes.T                    # nodes for the Gaussian measure
    weights = raw_weights.T * np.pi ** (-0.5 * dim)     # normalise weights by 1/pi^(dim/2)
    return sigma_pts, weights
def symmetric_cubature_third_order(dim=1, kappa=None):
    """
    Return weights and sigma-points for the symmetric cubature rule of order 3
    (the unscented transform). Uses 2*dim + 1 sigma-points.

    :param dim: input dimensionality.
    :param kappa: spread parameter; defaults to 0, which gives the standard
        cubature Kalman filter (CKF) rule.
    :return: sigma_pts, weights
    """
    if kappa is None:
        # kappa = 1 - dim
        kappa = 0  # CKF
    w0 = kappa / (dim + kappa)    # weight of the central point
    wm = 1 / (2 * (dim + kappa))  # weight of each symmetric point
    u = onp.sqrt(dim + kappa)     # spread of the symmetric points
    if (dim == 1) and (kappa == 0):
        weights = onp.array([w0, wm, wm])
        sigma_pts = onp.array([0., u, -u])
    elif (dim == 2) and (kappa == 0):
        weights = onp.array([w0, wm, wm, wm, wm])
        sigma_pts = onp.block([[0., u, 0., -u, 0.],
                               [0., 0., u, 0., -u]])
    elif (dim == 3) and (kappa == 0):
        weights = onp.array([w0, wm, wm, wm, wm, wm, wm])
        sigma_pts = onp.block([[0., u, 0., 0., -u, 0., 0.],
                               [0., 0., u, 0., 0., -u, 0.],
                               [0., 0., 0., u, 0., 0., -u]])
    else:
        # General case. Return a 1-D weight vector for consistency with the
        # hand-written special cases above (previously this branch produced a
        # (1, 2*dim+1) row vector, unlike every other rule in this module).
        weights = onp.concatenate([onp.array([w0]), wm * onp.ones(2 * dim)])
        sigma_pts = u * onp.block([onp.zeros([dim, 1]), onp.eye(dim), -onp.eye(dim)])
    return sigma_pts, weights
def symmetric_cubature_fifth_order(dim=1):
    """
    Return weights and sigma-points for the symmetric cubature rule of order 5.
    Uses 2*dim**2 + 1 sigma-points.
    """
    # Moments and weights from the McNamee & Stenger construction.
    I0, I2, I4, I22 = 1, 1, 3, 1
    u = onp.sqrt(I4 / I2)
    A0 = I0 - dim * (I2 / I4) ** 2 * (I4 - 0.5 * (dim - 1) * I22)  # central point
    A1 = 0.5 * (I2 / I4) ** 2 * (I4 - (dim - 1) * I22)             # axis points
    A2 = 0.25 * (I2 / I4) ** 2 * I22                               # diagonal pairs
    # Small dimensions are written out explicitly to save compute.
    if dim == 1:
        weights = onp.array([A0, A1, A1])
        sigma_pts = onp.array([0., u, -u])
    elif dim == 2:
        weights = onp.array([A0, A1, A1, A1, A1, A2, A2, A2, A2])
        sigma_pts = onp.block([[0., u, -u, 0., 0., u, -u, u, -u],
                               [0., 0., 0., u, -u, u, -u, -u, u]])
    elif dim == 3:
        weights = onp.array([A0, A1, A1, A1, A1, A1, A1, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2])
        sigma_pts = onp.block([[0., u, -u, 0., 0., 0., 0., u, -u, u, -u, u, -u, u, -u, 0., 0., 0., 0.],
                               [0., 0., 0., u, -u, 0., 0., u, -u, -u, u, 0., 0., 0., 0., u, -u, u, -u],
                               [0., 0., 0., 0., 0., u, -u, 0., 0., 0., 0., u, -u, -u, u, u, -u, -u, u]])
    else:
        # General case: build the fully symmetric point sets for the origin,
        # the single-generator axis points and the double-generator pairs.
        origin_set = sym_set(dim, [])
        axis_set = sym_set(dim, [u])
        pair_set = sym_set(dim, [u, u])
        sigma_pts = onp.concatenate([origin_set, axis_set, pair_set], axis=1)
        weights = onp.concatenate([A0 * onp.ones(origin_set.shape[1]),
                                   A1 * onp.ones(axis_set.shape[1]),
                                   A2 * onp.ones(pair_set.shape[1])])
    return sigma_pts, weights
def sym_set(n, gen=None):
    """Build the fully symmetric set of points in n dimensions generated by
    the entries of ``gen``: every placement of the generator values onto the
    coordinate axes, with both signs.

    Supports ``len(gen)`` in {0, 1, 2}; for two generators both entries must
    be (numerically) equal — this is all the fifth-order rule above needs.
    Returns an (n, num_points) array with one point per column.
    """
    if (gen is None) or (len(gen) == 0):
        # No generators: the set is just the origin.
        U = onp.zeros([n, 1])
    else:
        lengen = len(gen)
        if lengen == 1:
            # +/- u on each of the n axes -> 2n points.
            U = onp.zeros([n, 2 * n])
        elif lengen == 2:
            # +/- (u, u) on each ordered axis pair -> 2n(n-1) points.
            U = onp.zeros([n, 2 * n * (n - 1)])
        else:
            raise NotImplementedError
        ind = 0
        for i in range(n):
            u = onp.zeros(n)
            u[i] = gen[0]
            if lengen > 1:
                if abs(gen[0] - gen[1]) < 1e-10:
                    # Place the remaining generator on the axes after i by
                    # recursing on the (n-i-1)-dimensional tail, then store
                    # each completed point together with its negation.
                    V = sym_set(n-i-1, gen[1:])
                    for j in range(V.shape[1]):
                        u[i+1:] = V[:, j]
                        U[:, 2*ind] = u
                        U[:, 2*ind + 1] = -u
                        ind += 1
                else:
                    # Distinct generator values are not required by the
                    # cubature rules implemented in this module.
                    raise NotImplementedError
                    # V = sym_set(n-1, gen[1:])
                    # for j in range(V.shape[1]):
                    #     u[:i-1, i+1:] = V[:, j]
                    #     U = onp.concatenate([U, u, -u])
                    #     ind += 1
            else:
                # Single generator: the point and its negation on axis i.
                U[:, 2*i] = u
                U[:, 2*i+1] = -u
    return U
def variational_expectation_cubature(likelihood, y, post_mean, post_cov, cubature=None):
    """
    Computes the "variational expectation" via cubature, i.e. the
    expected log-likelihood, and its derivatives w.r.t. the posterior mean
        E[log p(yₙ|fₙ)] = ∫ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
    :param likelihood: the likelihood model
    :param y: observed data (yₙ) [scalar]
    :param post_mean: posterior mean (mₙ) [scalar]
    :param post_cov: posterior variance (vₙ) [scalar]
    :param cubature: the function to compute sigma points and weights to use during cubature
    :return:
        exp_log_lik: the expected log likelihood, E[log p(yₙ|fₙ)]  [scalar]
        dE_dm: derivative of E[log p(yₙ|fₙ)] w.r.t. mₙ  [scalar]
        d2E_dm2: second derivative of E[log p(yₙ|fₙ)] w.r.t. mₙ  [scalar]
    """
    if cubature is None:
        x, w = gauss_hermite(post_mean.shape[0], 20)  # Gauss-Hermite sigma points and weights
    else:
        x, w = cubature(post_mean.shape[0])
    # fsigᵢ=xᵢ√(vₙ) + mₙ: scale locations according to cavity dist.
    sigma_points = cholesky(post_cov) @ np.atleast_2d(x) + post_mean
    # pre-compute wᵢ log p(yₙ|fsigᵢ)
    weighted_log_likelihood_eval = w * likelihood.evaluate_log_likelihood(y, sigma_points)
    # Compute expected log likelihood via cubature:
    # E[log p(yₙ|fₙ)] = ∫ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
    #                 ≈ ∑ᵢ wᵢ p(yₙ|fsigᵢ)
    exp_log_lik = np.sum(
        weighted_log_likelihood_eval
    )
    # Compute first derivative via cubature:
    # dE[log p(yₙ|fₙ)]/dmₙ = ∫ (fₙ-mₙ) vₙ⁻¹ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
    #                      ≈ ∑ᵢ wᵢ (fₙ-mₙ) vₙ⁻¹ log p(yₙ|fsigᵢ)
    # NOTE(review): only the diagonal of post_cov is inverted here, so the
    # derivative formulas assume a diagonal posterior covariance — confirm.
    invv = np.diag(post_cov)[:, None] ** -1
    dE_dm = np.sum(
        invv * (sigma_points - post_mean)
        * weighted_log_likelihood_eval, axis=-1
    )[:, None]
    # Compute second derivative via cubature (deriv. w.r.t. var = 0.5 * 2nd deriv. w.r.t. mean):
    # dE[log p(yₙ|fₙ)]/dvₙ = ∫ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹]/2 log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
    #                      ≈ ∑ᵢ wᵢ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹]/2 log p(yₙ|fsigᵢ)
    dE_dv = np.sum(
        (0.5 * (invv ** 2 * (sigma_points - post_mean) ** 2) - 0.5 * invv)
        * weighted_log_likelihood_eval, axis=-1
    )
    # d²E/dm² = 2 dE/dv (identity for Gaussian expectations), as a diagonal matrix.
    dE_dv = np.diag(dE_dv)
    d2E_dm2 = 2 * dE_dv
    return exp_log_lik, dE_dm, d2E_dm2
def log_density_cubature(likelihood, y, mean, cov, cubature=None):
    """
    Approximate the log marginal density via cubature:
        logZₙ = log ∫ p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
    :param likelihood: the likelihood model
    :param y: observed data (yₙ) [scalar]
    :param mean: cavity mean (mₙ) [scalar]
    :param cov: cavity covariance (cₙ) [scalar]
    :param cubature: the function to compute sigma points and weights to use during cubature
    :return:
        lZ: the log density, logZₙ [scalar]
    """
    if cubature is None:
        sigma_locs, sigma_wts = gauss_hermite(mean.shape[0], 20)  # default Gauss-Hermite rule
    else:
        sigma_locs, sigma_wts = cubature(mean.shape[0])
    chol, _ = cho_factor(cov)
    # Shift/scale the unit sigma points to the cavity distribution N(mean, cov).
    f_sig = chol @ np.atleast_2d(sigma_locs) + mean
    # Zₙ = ∫ p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ ≈ ∑ᵢ wᵢ p(yₙ|fsigᵢ)
    Z = np.sum(sigma_wts * likelihood.evaluate_likelihood(y, f_sig), axis=-1)
    # Clip before the log for numerical safety.
    return np.log(np.maximum(Z, 1e-8))
def moment_match_cubature(likelihood, y, cav_mean, cav_cov, power=1.0, cubature=None):
    """
    TODO: N.B. THIS VERSION ALLOWS MULTI-DIMENSIONAL MOMENT MATCHING, BUT CAN BE UNSTABLE
    Perform moment matching via cubature.
    Moment matching involves computing the log partition function, logZₙ, and its derivatives w.r.t. the cavity mean
        logZₙ = log ∫ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
    with EP power a.
    :param likelihood: the likelihood model
    :param y: observed data (yₙ) [scalar]
    :param cav_mean: cavity mean (mₙ) [scalar]
    :param cav_cov: cavity covariance (cₙ) [scalar]
    :param power: EP power / fraction (a) [scalar]
    :param cubature: the function to compute sigma points and weights to use during cubature
    :return:
        lZ: the log partition function, logZₙ  [scalar]
        dlZ: first derivative of logZₙ w.r.t. mₙ [scalar]
        d2lZ: second derivative of logZₙ w.r.t. mₙ [scalar]
    """
    if cubature is None:
        x, w = gauss_hermite(cav_mean.shape[0], 20)  # Gauss-Hermite sigma points and weights
    else:
        x, w = cubature(cav_mean.shape[0])
    cav_cho, low = cho_factor(cav_cov)
    # fsigᵢ=xᵢ√cₙ + mₙ: scale locations according to cavity dist.
    sigma_points = cav_cho @ np.atleast_2d(x) + cav_mean
    # pre-compute wᵢ pᵃ(yₙ|fsigᵢ); the EP power a enters as an exponent here
    weighted_likelihood_eval = w * likelihood.evaluate_likelihood(y, sigma_points) ** power
    # Compute partition function via cubature:
    # Zₙ = ∫ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
    #    ≈ ∑ᵢ wᵢ pᵃ(yₙ|fsigᵢ)
    Z = np.sum(
        weighted_likelihood_eval, axis=-1
    )
    # Clip Z before taking the log / reciprocal for numerical safety.
    lZ = np.log(np.maximum(Z, 1e-8))
    Zinv = 1.0 / np.maximum(Z, 1e-8)
    # Compute derivative of partition function via cubature:
    # dZₙ/dmₙ = ∫ (fₙ-mₙ) vₙ⁻¹ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
    #         ≈ ∑ᵢ wᵢ (fₙ-mₙ) vₙ⁻¹ pᵃ(yₙ|fsigᵢ)
    # vmap maps the Gaussian-derivative helper over the sigma-point axis.
    d1 = vmap(
        gaussian_first_derivative_wrt_mean, (1, None, None, 1)
    )(sigma_points[..., None], cav_mean, cav_cov, weighted_likelihood_eval)
    dZ = np.sum(d1, axis=0)
    # dlogZₙ/dmₙ = (dZₙ/dmₙ) / Zₙ
    dlZ = Zinv * dZ
    # Compute second derivative of partition function via cubature:
    # d²Zₙ/dmₙ² = ∫ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹] pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
    #           ≈ ∑ᵢ wᵢ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹] pᵃ(yₙ|fsigᵢ)
    d2 = vmap(
        gaussian_second_derivative_wrt_mean, (1, None, None, 1)
    )(sigma_points[..., None], cav_mean, cav_cov, weighted_likelihood_eval)
    d2Z = np.sum(d2, axis=0)
    # d²logZₙ/dmₙ² = d[(dZₙ/dmₙ) / Zₙ]/dmₙ
    #              = (d²Zₙ/dmₙ² * Zₙ - (dZₙ/dmₙ)²) / Zₙ²
    #              = d²Zₙ/dmₙ² / Zₙ - (dlogZₙ/dmₙ)²
    d2lZ = -dlZ @ dlZ.T + Zinv * d2Z
    return lZ, dlZ, d2lZ
# def statistical_linear_regression_cubature(likelihood, mean, cov, cubature=None):
# """
# Perform statistical linear regression (SLR) using cubature.
# We aim to find a likelihood approximation p(yₙ|fₙ) ≈ 𝓝(yₙ|Afₙ+b,Ω).
# TODO: this currently assumes an additive noise model (ok for our current applications), make more general
# """
# if cubature is None:
# x, w = gauss_hermite(mean.shape[0], 20) # Gauss-Hermite sigma points and weights
# else:
# x, w = cubature(mean.shape[0])
# # fsigᵢ=xᵢ√(vₙ) + mₙ: scale locations according to cavity dist.
# sigma_points = cholesky(cov) @ np.atleast_2d(x) + mean
# lik_expectation, lik_covariance = likelihood.conditional_moments(sigma_points)
# # Compute muₙ via cubature:
# # muₙ = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ]
# mu = np.sum(
# w * lik_expectation, axis=-1
# )[:, None]
# # Compute variance S via cubature:
# # S = ∫ [(E[yₙ|fₙ]-muₙ) (E[yₙ|fₙ]-muₙ)' + Cov[yₙ|fₙ]] 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ [(E[yₙ|fsigᵢ]-muₙ) (E[yₙ|fsigᵢ]-muₙ)' + Cov[yₙ|fₙ]]
# # TODO: allow for multi-dim cubature
# S = np.sum(
# w * ((lik_expectation - mu) * (lik_expectation - mu) + lik_covariance), axis=-1
# )[:, None]
# # Compute cross covariance C via cubature:
# # C = ∫ (fₙ-mₙ) (E[yₙ|fₙ]-muₙ)' 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ (fsigᵢ -mₙ) (E[yₙ|fsigᵢ]-muₙ)'
# C = np.sum(
# w * (sigma_points - mean) * (lik_expectation - mu), axis=-1
# )[:, None]
# # compute equivalent likelihood noise, omega
# omega = S - C.T @ solve(cov, C)
# # Compute derivative of z via cubature:
# # d_mu = ∫ E[yₙ|fₙ] vₙ⁻¹ (fₙ-mₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ] vₙ⁻¹ (fsigᵢ-mₙ)
# prec = inv(cov)
# d_mu = np.sum(
# # w * lik_expectation * (solve(cov, sigma_points - mean)), axis=-1
# w * lik_expectation * (prec @ (sigma_points - mean)), axis=-1
# )[None, :]
# # Second derivative:
# # d2_mu = -∫ E[yₙ|fₙ] vₙ⁻¹ 𝓝(fₙ|mₙ,vₙ) dfₙ + ∫ E[yₙ|fₙ] (vₙ⁻¹ (fₙ-mₙ))² 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ] ((vₙ⁻¹ (fsigᵢ-mₙ))² - vₙ⁻¹)
# d2_mu = np.sum(
# w * lik_expectation * (prec @ (sigma_points - mean) ** 2 - prec), axis=-1
# )[None, :]
# return mu, omega, d_mu, d2_mu
def statistical_linear_regression_cubature(likelihood, mean, cov, cubature=None):
    """
    Perform statistical linear regression (SLR) using cubature: find a
    Gaussian likelihood approximation p(yₙ|fₙ) ≈ 𝓝(yₙ|Afₙ+b,Ω).
    TODO: this currently assumes an additive noise model (ok for our current
    applications); make more general.
    """
    slr_mean, omega = expected_conditional_mean(likelihood, mean, cov, cubature)
    jac = expected_conditional_mean_dm(likelihood, mean, cov, cubature)
    hess = expected_conditional_mean_dm2(likelihood, mean, cov, cubature)
    # Reshape the mean to a column vector and the Jacobian to a row vector.
    return slr_mean.reshape(-1, 1), omega, jac.reshape(1, -1), hess
def expected_conditional_mean(likelihood, mean, cov, cubature=None):
    """
    Compute Eq[E[y|f]] = ∫ E[y|f] 𝓝(f|mean,cov) df via cubature, together with
    the equivalent observation noise Ω used by statistical linear regression.
    """
    if cubature is None:
        locs, wts = gauss_hermite(mean.shape[0], 20)  # default Gauss-Hermite rule
    else:
        locs, wts = cubature(mean.shape[0])
    # Shift/scale the unit sigma points to the input distribution N(mean, cov).
    f_sig = cholesky(cov) @ np.atleast_2d(locs) + mean
    cond_mean, cond_cov = likelihood.conditional_moments(f_sig)
    # muₙ = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ]
    mu = np.sum(wts * cond_mean, axis=-1)[:, None]
    # S ≈ ∑ᵢ wᵢ [(E[yₙ|fsigᵢ]-muₙ)² + Cov[yₙ|fsigᵢ]]
    # TODO: CHECK MULTI-DIM — this assumes a scalar observation model.
    S = np.sum(
        wts * ((cond_mean - mu) ** 2 + cond_cov), axis=-1
    )[:, None]
    # Cross covariance C ≈ ∑ᵢ wᵢ (fsigᵢ-mₙ)(E[yₙ|fsigᵢ]-muₙ)'
    C = np.sum(
        wts * (f_sig - mean) * (cond_mean - mu), axis=-1
    )[:, None]
    # Equivalent likelihood noise: Ω = S - C' cov⁻¹ C
    omega = S - C.T @ solve(cov, C)
    return np.squeeze(mu), omega
def expected_conditional_mean_dm(likelihood, mean, cov, cubature=None):
    """First derivative of Eq[E[y|f]] w.r.t. the input mean (Ω is aux output)."""
    jac, _ = grad(expected_conditional_mean, argnums=1, has_aux=True)(
        likelihood, mean, cov, cubature
    )
    return np.squeeze(jac)
def expected_conditional_mean_dm2(likelihood, mean, cov, cubature=None):
    """Second derivative of Eq[E[y|f]] w.r.t. the input mean."""
    return jacrev(expected_conditional_mean_dm, argnums=1)(
        likelihood, mean, cov, cubature
    )
def predict_cubature(likelihood, mean_f, var_f, cubature=None):
    """
    Predict in observation space given the predictive mean and variance of the
    latent function, by pushing the latent Gaussian through the likelihood's
    conditional moments via cubature.
    """
    if cubature is None:
        locs, wts = gauss_hermite(mean_f.shape[0], 20)  # default Gauss-Hermite rule
    else:
        locs, wts = cubature(mean_f.shape[0])
    chol_f, _ = cho_factor(var_f)
    # Shift/scale the unit sigma points to the latent distribution.
    f_sig = chol_f @ np.atleast_2d(locs) + mean_f
    cond_mean, cond_cov = likelihood.conditional_moments(f_sig)
    # E[y]   = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ]
    expected_y = np.sum(wts * cond_mean, axis=-1)
    # E[y²]  = ∫ (Cov[yₙ|fₙ] + E[yₙ|fₙ]²) 𝓝(fₙ|mₙ,vₙ) dfₙ
    expected_y_squared = np.sum(wts * (cond_cov + cond_mean ** 2), axis=-1)
    # Cov[y] = E[y²] - E[y]²
    covariance_y = expected_y_squared - expected_y ** 2
    return expected_y, covariance_y
tests/test_cqcconnection.py | Doomsk/CQC-Python | 16 | 12772346 | import pytest
from cqc.util import parse_cqc_message
from cqc.pythonLib import CQCConnection, qubit
from cqc.pythonLib import CQCMixConnection
from cqc.cqcHeader import (
CQCCmdHeader,
CQCHeader,
CQCType,
CQC_CMD_H,
CQC_CMD_NEW,
CQC_CMD_RELEASE,
)
from utilities import get_header
from test_cases_cqcconnection.flush import (
commands_to_apply_flush, get_expected_headers_flush
)
from test_cases_cqcconnection.cqc_mix import (
commands_to_apply_bit_flip_code,
get_expected_headers_bit_flip_code,
commands_to_apply_simple_mix,
get_expected_headers_simple_mix,
commands_to_apply_mix_with_factory,
get_expected_headers_mix_with_factory,
commands_to_apply_mix_if_else,
get_expected_headers_mix_if_else,
commands_to_apply_mix_nested_if_else,
get_expected_headers_mix_nested_if_else,
)
def get_expected_headers_simple_h():
    """Headers expected for the create-qubit / Hadamard / release sequence."""
    hdr_tp_cmd = get_header(
        CQCHeader,
        version=2,
        tp=CQCType.COMMAND,
        app_id=0,
        length=CQCCmdHeader.HDR_LENGTH,
    )
    # (qubit_id, instruction) for each command message, in send order.
    cmd_specs = [
        (0, CQC_CMD_NEW),
        (1, CQC_CMD_H),
        (1, CQC_CMD_RELEASE),
    ]
    expected_headers = []
    for qubit_id, instr in cmd_specs:
        cmd_hdr = get_header(
            CQCCmdHeader,
            qubit_id=qubit_id,
            instr=instr,
            notify=True,
            action=False,
            block=True,
        )
        expected_headers.append(hdr_tp_cmd + cmd_hdr)
    return expected_headers
def commands_to_apply_simple_h(cqc):
    """Create a qubit on the connection and apply a Hadamard gate to it."""
    qubit(cqc).H()
@pytest.mark.parametrize("conn_type, commands_to_apply, get_expected_headers", [
    (CQCConnection, commands_to_apply_simple_h, get_expected_headers_simple_h),
    (CQCConnection, commands_to_apply_flush, get_expected_headers_flush),
    (CQCMixConnection, commands_to_apply_bit_flip_code, get_expected_headers_bit_flip_code),
    (CQCMixConnection, commands_to_apply_simple_mix, get_expected_headers_simple_mix),
    (CQCMixConnection, commands_to_apply_mix_with_factory, get_expected_headers_mix_with_factory),
    (CQCMixConnection, commands_to_apply_mix_if_else, get_expected_headers_mix_if_else),
    (CQCMixConnection, commands_to_apply_mix_nested_if_else, get_expected_headers_mix_nested_if_else),
])
def test_commands(conn_type, commands_to_apply, get_expected_headers, monkeypatch, mock_socket, mock_read_message):
    """Run a command scenario over a mocked socket and compare the bytes sent
    with the expected serialized CQC headers."""
    # logging.getLogger().setLevel(logging.DEBUG)
    with conn_type("Test", socket_address=('localhost', 8000), use_classical_communication=False) as cqc:
        commands_to_apply(cqc)
    expected_messages = get_expected_headers()
    # Collect the raw payload of every send() call made on the mocked socket.
    send_calls = list(filter(lambda call: call.name == 'send', cqc._s.calls))
    sent_messages = [call.args[0] for call in send_calls]
    full_msg = {}
    # Parse and print what we expect and what we got
    for name, messages in zip(["EXPECTED", "GOT"], [expected_messages, sent_messages]):
        print("\n{}:".format(name))
        for msg in messages:
            print('[')
            for hdr in parse_cqc_message(msg):
                print("    {}".format(hdr))
            print('\n]')
        full_msg[name] = b''.join([msg for msg in messages])
    # Check if full messages are equal
    assert full_msg["EXPECTED"] == full_msg["GOT"]
    for got, expected in zip(sent_messages, expected_messages):
        # Excluding None gives the opportunity to not specify all expected headers but still check the number of them
        if expected is not None:
            assert got == expected
| 2.015625 | 2 |
udps.py | arunlakshmanan/pathgen_processing | 8 | 12772347 | <filename>udps.py<gh_stars>1-10
#!/usr/bin/env python
#Edit however required
import socket
import struct
import math
# Broadcast simulated robot poses as UDP datagrams until interrupted.
UDP = "localhost"
PORT = 10000
xs = [1.0, -1.0, 1.0]       # x positions, one per robot
ys = [1.0, -1.0, -1.0]      # y positions, one per robot
yaw = [1.0, -0.5, -3.141]   # headings, one per robot
status = 1.0
nbots = 3
count = 1
# Create the socket once (the original opened a new, never-closed socket on
# every message) and guarantee it is released on exit.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    while True:
        for i in range(nbots):
            # Payload: x, y, yaw, robot id (offset by 2), status, sequence count.
            msg = struct.pack('<dddddd', xs[i], ys[i], yaw[i], i + 2, status, count)
            sock.sendto(msg, (UDP, PORT))
            count += 1
except KeyboardInterrupt:
    # Only a deliberate interrupt is expected; other errors now propagate
    # instead of being silently swallowed by a bare except.
    print("\nClosing Ports\n")
finally:
    sock.close()
steampipe_alchemy/models/aws_vpc_route.py | RyanJarv/steampipe_alchemy | 9 | 12772348 | from sqlalchemy import Column
from sqlalchemy.types import JSON, Text, Boolean, TIMESTAMP, BigInteger
from sqlalchemy.dialects import postgresql as psql
from steampipe_alchemy.mixins import FormatMixins
from steampipe_alchemy import Base
class AwsVpcRoute(Base, FormatMixins):
    """SQLAlchemy model mirroring the Steampipe ``aws_vpc_route`` foreign table.

    All columns are declared nullable to match the table's schema.
    NOTE(review): ``title`` is marked as the primary key, presumably only to
    satisfy SQLAlchemy's mapper requirement — it is not obviously unique in
    the underlying data; confirm before relying on identity-map behaviour.
    """
    __tablename__ = 'aws_vpc_route'
    destination_ipv6_cidr_block = Column('destination_ipv6_cidr_block', psql.CIDR, nullable=True)
    destination_cidr_block = Column('destination_cidr_block', psql.CIDR, nullable=True)
    akas = Column('akas', JSON, nullable=True)
    destination_prefix_list_id = Column('destination_prefix_list_id', Text, nullable=True)
    egress_only_internet_gateway_id = Column('egress_only_internet_gateway_id', Text, nullable=True)
    gateway_id = Column('gateway_id', Text, nullable=True)
    instance_id = Column('instance_id', Text, nullable=True)
    instance_owner_id = Column('instance_owner_id', Text, nullable=True)
    local_gateway_id = Column('local_gateway_id', Text, nullable=True)
    nat_gateway_id = Column('nat_gateway_id', Text, nullable=True)
    network_interface_id = Column('network_interface_id', Text, nullable=True)
    transit_gateway_id = Column('transit_gateway_id', Text, nullable=True)
    vpc_peering_connection_id = Column('vpc_peering_connection_id', Text, nullable=True)
    origin = Column('origin', Text, nullable=True)
    title = Column('title', Text, primary_key=True, nullable=True)
    partition = Column('partition', Text, nullable=True)
    region = Column('region', Text, nullable=True)
    route_table_id = Column('route_table_id', Text, nullable=True)
    account_id = Column('account_id', Text, nullable=True)
    state = Column('state', Text, nullable=True)
    carrier_gateway_id = Column('carrier_gateway_id', Text, nullable=True)
apps/equipment/views/basicinfor_view.py | kane-zh/MES_server | 0 | 12772349 | from rest_framework import viewsets
from rest_framework import filters
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.authentication import SessionAuthentication
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.permissions import IsAuthenticated
from django.core import exceptions
from django.db.models import Q
from rest_framework.mixins import (CreateModelMixin,
ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
)
from apps.equipment.serializes.basicinfor_serialize import *
from apps.equipment.filters.basicinfor_filters import *
from apps.commonFunction import StandardResultsSetPagination
class EquipmentAuditRecordView(ListModelMixin,RetrieveModelMixin, viewsets.GenericViewSet):
    """
    Operation records for this app (read-only list / detail).
    """
    pagination_class = StandardResultsSetPagination
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_class = EquipmentAuditRecordFilters
    search_fields = ["uri", "uri_id"]
    ordering_fields = ["id","update_time"]
    # NOTE(review): IsOwnerOrReadOnly looks like a permission class, but it is
    # listed under authentication_classes (likely pulled in via the star
    # import) — confirm this is intentional.
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, IsOwnerOrReadOnly]
    permission_classes = [IsAuthenticated, ]
    # Override to return a different serializer class per action.
    def get_serializer_class(self):
        if self.action == "list":
            return EquipmentAuditRecordSerialize_List
        elif self.action == "retrieve":
            return EquipmentAuditRecordSerialize_Retrieve
        return EquipmentAuditRecordSerialize_List
    # Override to scope the queryset by user role and action.
    def get_queryset(self):
        if self.request.user.is_superuser:  # superusers can see all records
            return EquipmentAuditRecordModel.objects.all().order_by("-id")
        user = self.request.user.username
        condtions1 = {'user__iexact': user}  # ordinary users only see their own records
        if self.action == "list":  # listing requires the view permission
            if not self.request.user.has_perm('equipment.view_equipmentauditrecordmodel'):
                raise exceptions.PermissionDenied
        if self.action == "retrieve":  # detail view requires the read permission
            if not self.request.user.has_perm('equipment.read_equipmentauditrecordmodel'):
                raise exceptions.PermissionDenied
        return EquipmentAuditRecordModel.objects.filter(Q(**condtions1))
class EquipmentAlterRecordView(CreateModelMixin, viewsets.GenericViewSet):
    """
    Audit/review records for this app (create only).
    """
    queryset = EquipmentAlterRecordModel.objects.all().order_by("-id")
    serializer_class = EquipmentAlterRecordSerialize_Create
    # NOTE(review): IsOwnerOrReadOnly in authentication_classes — see
    # EquipmentAuditRecordView; confirm it is an authentication class.
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, IsOwnerOrReadOnly]
    permission_classes = [IsAuthenticated, ]
class EquipmentImageView(CreateModelMixin,viewsets.GenericViewSet):
    """
    Image attachments for this app (create only).
    """
    queryset = EquipmentImageModel.objects.all().order_by("-id")
    serializer_class = EquipmentImageSerialize_Create
    # NOTE(review): IsOwnerOrReadOnly in authentication_classes — see
    # EquipmentAuditRecordView; confirm it is an authentication class.
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, IsOwnerOrReadOnly]
    permission_classes = [IsAuthenticated, ]
class EquipmentFileView(CreateModelMixin, viewsets.GenericViewSet):
    """
    File attachments for this app (create only).
    """
    queryset = EquipmentFileModel.objects.all().order_by("-id")
    serializer_class = EquipmentFileSerialize_Create
    # NOTE(review): IsOwnerOrReadOnly in authentication_classes — see
    # EquipmentAuditRecordView; confirm it is an authentication class.
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, IsOwnerOrReadOnly]
    permission_classes = [IsAuthenticated, ]
class EquipmentVendorDefinitionView(CreateModelMixin, ListModelMixin,
                                    RetrieveModelMixin, UpdateModelMixin,
                                    viewsets.GenericViewSet):
    """
    Equipment vendor definitions.
    """
    pagination_class = StandardResultsSetPagination
    filter_backends = (DjangoFilterBackend, filters.SearchFilter,filters.OrderingFilter)
    filter_class = EquipmentVendorDefinitionFilters
    search_fields = ["name","code","company_name","company_abbre"]
    ordering_fields = ["id","update_time"]
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
    permission_classes = [IsAuthenticated, ]
    # Override to return a different serializer class per action.
    def get_serializer_class(self):
        if self.action == "create":
            return EquipmentVendorDefinitionSerialize_Create
        elif self.action == "list":
            return EquipmentVendorDefinitionSerialize_List
        elif self.action == "retrieve":
            return EquipmentVendorDefinitionSerialize_Retrieve
        elif self.action == "update":
            return EquipmentVendorDefinitionSerialize_Update
        elif self.action == "partial_update":
            return EquipmentVendorDefinitionSerialize_Partial
        return EquipmentVendorDefinitionSerialize_List
    # Override to scope the queryset by user role and action.  The three
    # condition dicts are OR-ed; emptying one drops that branch (an empty Q()
    # is ignored when combined with |).
    def get_queryset(self):
        if self.request.user.is_superuser:
            return EquipmentVendorDefinitionModel.objects.all().order_by("-id")  # superusers see everything
        user = self.request.user.username
        condtions1 = {'create_user__iexact': user,
                      'state__in': ("新建", "审核中", "使用中")  # the creator sees records in these states
                      }
        condtions2 = {'auditor__iexact': user,
                      'state__in': ("审核中", "使用中",)  # the auditor sees records in these states
                      }
        condtions3 = {'state__in': ("使用中",)  # everyone else only sees in-use records
                      }
        if self.action == "list":  # listing
            if not self.request.user.has_perm('equipment.view_equipmentvendordefinitionmodel'):  # no list permission
                condtions3 = {}  # drop the public branch for users without the view permission
        if self.action == "retrieve":  # detail view
            if not self.request.user.has_perm('equipment.read_equipmentvendordefinitionmodel'):  # no detail permission
                condtions3 = {}  # drop the public branch for users without the read permission
        if self.action == "update":  # full update
            condtions2 = {}
            condtions3 = {}  # only the creator may update
        if self.action == "partial_update":  # partial update
            condtions3 = {}  # only the creator and the auditor may partially update
        return EquipmentVendorDefinitionModel.objects.filter(Q(**condtions1) | Q(**condtions2) | Q(**condtions3)).order_by("-id")
class PartsTypeDefinitionView(CreateModelMixin, ListModelMixin,
                              RetrieveModelMixin, UpdateModelMixin,
                              viewsets.GenericViewSet):
    """
    Parts type definitions.
    """
    pagination_class = StandardResultsSetPagination
    filter_backends = (DjangoFilterBackend, filters.SearchFilter,filters.OrderingFilter)
    filter_class = PartsTypeDefinitionFilters
    search_fields = ["name","code"]
    ordering_fields = ["id","update_time"]
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
    permission_classes = [IsAuthenticated, ]
    # Override to return a different serializer class per action.
    def get_serializer_class(self):
        if self.action == "create":
            return PartsTypeDefinitionSerialize_Create
        elif self.action == "list":
            return PartsTypeDefinitionSerialize_List
        elif self.action == "retrieve":
            return PartsTypeDefinitionSerialize_Retrieve
        elif self.action == "update":
            return PartsTypeDefinitionSerialize_Update
        elif self.action == "partial_update":
            return PartsTypeDefinitionSerialize_Partial
        return PartsTypeDefinitionSerialize_List
    # Override to scope the queryset by user role and action.  The three
    # condition dicts are OR-ed; emptying one drops that branch (an empty Q()
    # is ignored when combined with |).
    def get_queryset(self):
        if self.request.user.is_superuser:
            return PartsTypeDefinitionModel.objects.all().order_by("-id")  # superusers see everything
        user = self.request.user.username
        condtions1 = {'create_user__iexact': user,
                      'state__in': ("新建", "审核中", "使用中")  # the creator sees records in these states
                      }
        condtions2 = {'auditor__iexact': user,
                      'state__in': ("审核中", "使用中",)  # the auditor sees records in these states
                      }
        condtions3 = {'state__in': ("使用中",)  # everyone else only sees in-use records
                      }
        if self.action == "list":  # listing
            if not self.request.user.has_perm('equipment.view_partstypedefinitionmodel'):  # no list permission
                condtions3 = {}  # drop the public branch for users without the view permission
        if self.action == "retrieve":  # detail view
            if not self.request.user.has_perm('equipment.read_partstypedefinitionmodel'):  # no detail permission
                condtions3 = {}  # drop the public branch for users without the read permission
        if self.action == "update":  # full update
            condtions2 = {}
            condtions3 = {}  # only the creator may update
        if self.action == "partial_update":  # partial update
            condtions3 = {}  # only the creator and the auditor may partially update
        return PartsTypeDefinitionModel.objects.filter(Q(**condtions1) | Q(**condtions2) | Q(**condtions3)).order_by("-id")
class PartsTypeDefinitionViews(ListModelMixin,viewsets.GenericViewSet):
    """
    Parts type hierarchy (top-level categories; children are nested by the serializer).
    """
    serializer_class = PartsTypeDefinitionSerialize_First
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
    permission_classes = [IsAuthenticated, ]
    def get_queryset(self):
        # Only superusers or users holding the model's view permission may
        # read the category tree; classes="一级类别" selects top-level entries.
        if (self.request.user.is_superuser or self.request.user.has_perm('equipment.view_partstypedefinitionmodel')):
            return PartsTypeDefinitionModel.objects.filter(classes="一级类别")
        else:
            raise exceptions.PermissionDenied
class PartsInforDefinitionView(CreateModelMixin, ListModelMixin,
                               RetrieveModelMixin, UpdateModelMixin,
                               viewsets.GenericViewSet):
    """
    Parts information definitions.
    """
    pagination_class = StandardResultsSetPagination
    filter_backends = (DjangoFilterBackend, filters.SearchFilter,filters.OrderingFilter)
    filter_class = PartsInforDefinitionFilters
    search_fields = ["name","code",]
    ordering_fields = ["id","update_time"]
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
    permission_classes = [IsAuthenticated, ]
    # Override to return a different serializer class per action.
    def get_serializer_class(self):
        if self.action == "create":
            return PartsInforDefinitionSerialize_Create
        elif self.action == "list":
            return PartsInforDefinitionSerialize_List
        elif self.action == "retrieve":
            return PartsInforDefinitionSerialize_Retrieve
        elif self.action == "update":
            return PartsInforDefinitionSerialize_Update
        elif self.action == "partial_update":
            return PartsInforDefinitionSerialize_Partial
        return PartsInforDefinitionSerialize_List
    # Override to scope the queryset by user role and action.  The three
    # condition dicts are OR-ed; emptying one drops that branch (an empty Q()
    # is ignored when combined with |).
    def get_queryset(self):
        if self.request.user.is_superuser:
            return PartsInforDefinitionModel.objects.all().order_by("-id")  # superusers see everything
        user = self.request.user.username
        condtions1 = {'create_user__iexact': user,
                      'state__in': ("新建", "审核中", "使用中")  # the creator sees records in these states
                      }
        condtions2 = {'auditor__iexact': user,
                      'state__in': ("审核中", "使用中",)  # the auditor sees records in these states
                      }
        condtions3 = {'state__in': ("使用中",)  # everyone else only sees in-use records
                      }
        if self.action == "list":  # listing
            if not self.request.user.has_perm('equipment.view_partsinfordefinitionmodel'):  # no list permission
                condtions3 = {}  # drop the public branch for users without the view permission
        if self.action == "retrieve":  # detail view
            if not self.request.user.has_perm('equipment.read_partsinfordefinitionmodel'):  # no detail permission
                condtions3 = {}  # drop the public branch for users without the read permission
        if self.action == "update":  # full update
            condtions2 = {}
            condtions3 = {}  # only the creator may update
        if self.action == "partial_update":  # partial update
            condtions3 = {}  # only the creator and the auditor may partially update
        return PartsInforDefinitionModel.objects.filter(Q(**condtions1) | Q(**condtions2) | Q(**condtions3)).order_by("-id")
class EquipmentTypeDefinitionView(CreateModelMixin, ListModelMixin,
                                  RetrieveModelMixin, UpdateModelMixin,
                                  viewsets.GenericViewSet):
    """
    Equipment type definitions.
    """
    pagination_class = StandardResultsSetPagination
    filter_backends = (DjangoFilterBackend, filters.SearchFilter,filters.OrderingFilter)
    filter_class = EquipmentTypeDefinitionFilters
    search_fields = ["name","code",]
    ordering_fields = ["id","update_time"]
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
    permission_classes = [IsAuthenticated, ]
    # Override to return a different serializer class per action.
    def get_serializer_class(self):
        if self.action == "create":
            return EquipmentTypeDefinitionSerialize_Create
        elif self.action == "list":
            return EquipmentTypeDefinitionSerialize_List
        elif self.action == "retrieve":
            return EquipmentTypeDefinitionSerialize_Retrieve
        elif self.action == "update":
            return EquipmentTypeDefinitionSerialize_Update
        elif self.action == "partial_update":
            return EquipmentTypeDefinitionSerialize_Partial
        return EquipmentTypeDefinitionSerialize_List
    # Override to scope the queryset by user role and action.  The three
    # condition dicts are OR-ed; emptying one drops that branch (an empty Q()
    # is ignored when combined with |).
    def get_queryset(self):
        if self.request.user.is_superuser:
            return EquipmentTypeDefinitionModel.objects.all().order_by("-id")  # superusers see everything
        user = self.request.user.username
        condtions1 = {'create_user__iexact': user,
                      'state__in': ("新建", "审核中", "使用中")  # the creator sees records in these states
                      }
        condtions2 = {'auditor__iexact': user,
                      'state__in': ("审核中", "使用中",)  # the auditor sees records in these states
                      }
        condtions3 = {'state__in': ("使用中",)  # everyone else only sees in-use records
                      }
        if self.action == "list":  # listing
            if not self.request.user.has_perm('equipment.view_equipmenttypedefinitionmodel'):  # no list permission
                condtions3 = {}  # drop the public branch for users without the view permission
        if self.action == "retrieve":  # detail view
            if not self.request.user.has_perm('equipment.read_equipmenttypedefinitionmodel'):  # no detail permission
                condtions3 = {}  # drop the public branch for users without the read permission
        if self.action == "update":  # full update
            condtions2 = {}
            condtions3 = {}  # only the creator may update
        if self.action == "partial_update":  # partial update
            condtions3 = {}  # only the creator and the auditor may partially update
        return EquipmentTypeDefinitionModel.objects.filter(Q(**condtions1) | Q(**condtions2) | Q(**condtions3)).order_by("-id")
class EquipmentTypeDefinitionViews(ListModelMixin, viewsets.GenericViewSet):
    """
    Equipment type hierarchy: lists only top-level categories.
    """
    serializer_class = EquipmentTypeDefinitionSerialize_First
    # Session or JWT authentication; authenticated users only.
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
    permission_classes = [IsAuthenticated, ]

    def get_queryset(self):
        """Return first-level categories; requires superuser or view permission."""
        requester = self.request.user
        if not (requester.is_superuser or requester.has_perm('equipment.view_equipmenttypedefinitionmodel')):
            raise exceptions.PermissionDenied
        return EquipmentTypeDefinitionModel.objects.filter(classes="一级类别")
class EquipmentAccountView(CreateModelMixin, ListModelMixin,
                           RetrieveModelMixin, UpdateModelMixin,
                           viewsets.GenericViewSet):
    """
    Equipment account (设备台账) definition viewset.
    """
    # Standard page-number pagination for list responses.
    pagination_class = StandardResultsSetPagination
    # Enable field filtering, keyword search and result ordering.
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_class = EquipmentAccountFilters
    # Keyword search over name and code.
    search_fields = ["name", "code"]
    # Clients may order by id, last-update time, or activation/purchase dates.
    ordering_fields = ["id", "update_time", "dataOfActivation", "dataOfPurchase"]
    # Session or JWT authentication; authenticated users only.
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
    permission_classes = [IsAuthenticated, ]

    def get_serializer_class(self):
        """Pick the serializer class that matches the current DRF action."""
        serializer_by_action = {
            "create": EquipmentAccountSerialize_Create,
            "list": EquipmentAccountSerialize_List,
            "retrieve": EquipmentAccountSerialize_Retrieve,
            "update": EquipmentAccountSerialize_Update,
            "partial_update": EquipmentAccountSerialize_Partial,
        }
        # Fall back to the list serializer for any other action.
        return serializer_by_action.get(self.action, EquipmentAccountSerialize_List)

    def get_queryset(self):
        """Scope visible account records by the caller's role and action.

        Superusers see everything. Creators see their own records in the
        states 新建/审核中/使用中; auditors see records they audit in
        审核中/使用中; everyone else sees only records in 使用中, subject
        to the relevant view/read permission.
        """
        if self.request.user.is_superuser:
            # Superusers are unrestricted.
            return EquipmentAccountModel.objects.all().order_by("-id")
        user = self.request.user.username
        # Records the creator may see: new, under review, or in use.
        creator_scope = {
            'create_user__iexact': user,
            'state__in': ("新建", "审核中", "使用中"),
        }
        # Records the auditor may see: under review or in use.
        auditor_scope = {
            'auditor__iexact': user,
            'state__in': ("审核中", "使用中"),
        }
        # Records any other user may see: in use only.
        public_scope = {'state__in': ("使用中",)}
        # NOTE: an emptied dict yields an empty Q(), which Django drops when
        # combined with "|" — effectively removing that branch from the filter.
        if self.action == "list" and not self.request.user.has_perm('equipment.view_equipmentaccountmodel'):
            # Without list permission, ordinary users cannot browse the list.
            public_scope = {}
        if self.action == "retrieve" and not self.request.user.has_perm('equipment.read_equipmentaccountmodel'):
            # Without read permission, ordinary users cannot open details.
            public_scope = {}
        if self.action == "update":
            # Full updates are restricted to the creator.
            auditor_scope = {}
            public_scope = {}
        if self.action == "partial_update":
            # Partial updates are restricted to the creator and the auditor.
            public_scope = {}
        return EquipmentAccountModel.objects.filter(
            Q(**creator_scope) | Q(**auditor_scope) | Q(**public_scope)
        ).order_by("-id")
class MaintainRecordTypeDefinitionView(CreateModelMixin, ListModelMixin,
                                       RetrieveModelMixin, UpdateModelMixin,
                                       viewsets.GenericViewSet):
    """
    Maintenance record type (维护记录类型) definition viewset.
    """
    # Standard page-number pagination for list responses.
    pagination_class = StandardResultsSetPagination
    # Enable field filtering, keyword search and result ordering.
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_class = MaintainRecordTypeDefinitionFilters
    # Keyword search over name and code.
    search_fields = ["name", "code"]
    # Clients may order by id or last-update time.
    ordering_fields = ["id", "update_time"]
    # Session or JWT authentication; authenticated users only.
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
    permission_classes = [IsAuthenticated, ]

    def get_serializer_class(self):
        """Pick the serializer class that matches the current DRF action."""
        serializer_by_action = {
            "create": MaintainRecordTypeDefinitionSerialize_Create,
            "list": MaintainRecordTypeDefinitionSerialize_List,
            "retrieve": MaintainRecordTypeDefinitionSerialize_Retrieve,
            "update": MaintainRecordTypeDefinitionSerialize_Update,
            "partial_update": MaintainRecordTypeDefinitionSerialize_Partial,
        }
        # Fall back to the list serializer for any other action.
        return serializer_by_action.get(self.action, MaintainRecordTypeDefinitionSerialize_List)

    def get_queryset(self):
        """Scope visible record-type records by the caller's role and action.

        Superusers see everything. Creators see their own records in the
        states 新建/审核中/使用中; auditors see records they audit in
        审核中/使用中; everyone else sees only records in 使用中, subject
        to the relevant view/read permission.
        """
        if self.request.user.is_superuser:
            # Superusers are unrestricted.
            return MaintainRecordTypeDefinitionModel.objects.all().order_by("-id")
        user = self.request.user.username
        # Records the creator may see: new, under review, or in use.
        creator_scope = {
            'create_user__iexact': user,
            'state__in': ("新建", "审核中", "使用中"),
        }
        # Records the auditor may see: under review or in use.
        auditor_scope = {
            'auditor__iexact': user,
            'state__in': ("审核中", "使用中"),
        }
        # Records any other user may see: in use only.
        public_scope = {'state__in': ("使用中",)}
        # NOTE: an emptied dict yields an empty Q(), which Django drops when
        # combined with "|" — effectively removing that branch from the filter.
        if self.action == "list" and not self.request.user.has_perm('equipment.view_maintainrecordtypedefinitionmodel'):
            # Without list permission, ordinary users cannot browse the list.
            public_scope = {}
        if self.action == "retrieve" and not self.request.user.has_perm('equipment.read_maintainrecordtypedefinitionmodel'):
            # Without read permission, ordinary users cannot open details.
            public_scope = {}
        if self.action == "update":
            # Full updates are restricted to the creator.
            auditor_scope = {}
            public_scope = {}
        if self.action == "partial_update":
            # Partial updates are restricted to the creator and the auditor.
            public_scope = {}
        return MaintainRecordTypeDefinitionModel.objects.filter(
            Q(**creator_scope) | Q(**auditor_scope) | Q(**public_scope)
        ).order_by("-id")
class MaintainRecordTypeDefinitionViews(ListModelMixin, viewsets.GenericViewSet):
    """
    Maintenance record type hierarchy: lists only top-level categories.
    """
    serializer_class = MaintainRecordTypeDefinitionSerialize_First
    # Session or JWT authentication; authenticated users only.
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
    permission_classes = [IsAuthenticated, ]

    def get_queryset(self):
        """Return first-level categories; requires superuser or view permission."""
        requester = self.request.user
        if not (requester.is_superuser or requester.has_perm('equipment.view_maintainrecordtypedefinitionmodel')):
            raise exceptions.PermissionDenied
        return MaintainRecordTypeDefinitionModel.objects.filter(classes="一级类别")
class EquipmentBoardView(CreateModelMixin, ListModelMixin,
                         RetrieveModelMixin, UpdateModelMixin,
                         viewsets.GenericViewSet):
    """
    Equipment board (设备看板) definition viewset.
    """
    # Standard page-number pagination for list responses.
    pagination_class = StandardResultsSetPagination
    # Enable field filtering, keyword search and result ordering.
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_class = EquipmentBoardFilters
    # Keyword search over name and code.
    search_fields = ["name", "code"]
    # Clients may order by id or last-update time.
    ordering_fields = ["id", "update_time"]
    # Session or JWT authentication; authenticated users only.
    authentication_classes = [SessionAuthentication, JSONWebTokenAuthentication, ]
    permission_classes = [IsAuthenticated, ]

    def get_serializer_class(self):
        """Pick the serializer class that matches the current DRF action."""
        serializer_by_action = {
            "create": EquipmentBoardSerialize_Create,
            "list": EquipmentBoardSerialize_List,
            "retrieve": EquipmentBoardSerialize_Retrieve,
            "update": EquipmentBoardSerialize_Update,
            "partial_update": EquipmentBoardSerialize_Partial,
        }
        # Fall back to the list serializer for any other action.
        return serializer_by_action.get(self.action, EquipmentBoardSerialize_List)

    def get_queryset(self):
        """Scope visible board records by the caller's role and action.

        Superusers see everything. Creators see their own records in the
        states 新建/审核中/使用中; auditors see records they audit in
        审核中/使用中; everyone else sees only records in 使用中, subject
        to the relevant view/read permission.
        """
        if self.request.user.is_superuser:
            # Superusers are unrestricted.
            return EquipmentBoardModel.objects.all().order_by("-id")
        user = self.request.user.username
        # Records the creator may see: new, under review, or in use.
        creator_scope = {
            'create_user__iexact': user,
            'state__in': ("新建", "审核中", "使用中"),
        }
        # Records the auditor may see: under review or in use.
        auditor_scope = {
            'auditor__iexact': user,
            'state__in': ("审核中", "使用中"),
        }
        # Records any other user may see: in use only.
        public_scope = {'state__in': ("使用中",)}
        # NOTE: an emptied dict yields an empty Q(), which Django drops when
        # combined with "|" — effectively removing that branch from the filter.
        if self.action == "list" and not self.request.user.has_perm('equipment.view_equipmentboardmodel'):
            # Without list permission, ordinary users cannot browse the list.
            public_scope = {}
        if self.action == "retrieve" and not self.request.user.has_perm('equipment.read_equipmentboardmodel'):
            # Without read permission, ordinary users cannot open details.
            public_scope = {}
        if self.action == "update":
            # Full updates are restricted to the creator.
            auditor_scope = {}
            public_scope = {}
        if self.action == "partial_update":
            # Partial updates are restricted to the creator and the auditor.
            public_scope = {}
        # Fix: the original return line carried a fused extraction artifact
        # ("| 1.875 | 2") that OR-ed the queryset with floats and would raise
        # a TypeError at runtime; it has been removed.
        return EquipmentBoardModel.objects.filter(
            Q(**creator_scope) | Q(**auditor_scope) | Q(**public_scope)
        ).order_by("-id")
from RandomGenerator.randomInt import randomInt
from numpy import random
def randomIntSeed(start, end, seed):
    """Return a random int in [start, end] generated from *seed*.

    Temporarily seeds numpy's global RNG, then restores the previous
    global state so callers' randomness is left undisturbed.
    """
    saved_state = random.get_state()
    random.seed(seed)
    try:
        return randomInt(start, end)
    finally:
        # Always restore the global RNG state, even if randomInt raises.
        random.set_state(saved_state)