hexsha: string (len 40) | size: int64 (7 to 1.04M) | ext: string (10 classes) | lang: string (1 class) | max_stars_repo_path: string (len 4-247) | max_stars_repo_name: string (len 4-125) | max_stars_repo_head_hexsha: string (len 40-78) | max_stars_repo_licenses: list (len 1-10) | max_stars_count: int64 (1 to 368k, nullable) | max_stars_repo_stars_event_min_datetime: string (len 24, nullable) | max_stars_repo_stars_event_max_datetime: string (len 24, nullable) | max_issues_repo_path: string (len 4-247) | max_issues_repo_name: string (len 4-125) | max_issues_repo_head_hexsha: string (len 40-78) | max_issues_repo_licenses: list (len 1-10) | max_issues_count: int64 (1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime: string (len 24, nullable) | max_issues_repo_issues_event_max_datetime: string (len 24, nullable) | max_forks_repo_path: string (len 4-247) | max_forks_repo_name: string (len 4-125) | max_forks_repo_head_hexsha: string (len 40-78) | max_forks_repo_licenses: list (len 1-10) | max_forks_count: int64 (1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime: string (len 24, nullable) | max_forks_repo_forks_event_max_datetime: string (len 24, nullable) | content: string (len 1 to 1.04M) | avg_line_length: float64 (1.77 to 618k) | max_line_length: int64 (1 to 1.02M) | alphanum_fraction: float64 (0 to 1) | original_content: string (len 7 to 1.04M) | filtered:remove_function_no_docstring: int64 (-102 to 942k) | filtered:remove_class_no_docstring: int64 (-354 to 977k) | filtered:remove_delete_markers: int64 (0 to 60.1k)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fd6a3f22c2d3733edb4c735642f81e6a28bf18d0 | 2,008 | py | Python | gamechangerml/api/utils/pathselect.py | ekmixon/gamechanger-ml | e7967261a4b2f21b06347020cd7e6a010538eb8f | [
"MIT"
] | null | null | null | gamechangerml/api/utils/pathselect.py | ekmixon/gamechanger-ml | e7967261a4b2f21b06347020cd7e6a010538eb8f | [
"MIT"
] | 76 | 2021-07-24T02:33:16.000Z | 2022-03-20T22:40:46.000Z | gamechangerml/api/utils/pathselect.py | ekmixon/gamechanger-ml | e7967261a4b2f21b06347020cd7e6a010538eb8f | [
"MIT"
] | null | null | null | import os
import logging
from gamechangerml.api.fastapi.model_config import Config
logger = logging.getLogger()
| 30.424242 | 68 | 0.593127 | import os
import logging
from gamechangerml.api.fastapi.model_config import Config
logger = logging.getLogger()
def get_model_paths():
model_dict = {}
# QEXP MODEL
try:
qexp_names = [
f
for f in os.listdir(Config.LOCAL_PACKAGED_MODELS_DIR)
if ("qexp_" in f) and ("tar" not in f)
]
qexp_names.sort(reverse=True)
if len(qexp_names) > 0:
QEXP_MODEL_PATH = os.path.join(
Config.LOCAL_PACKAGED_MODELS_DIR, qexp_names[0]
)
else:
print("defaulting INDEX_PATH to qexp")
QEXP_MODEL_PATH = os.path.join(
Config.LOCAL_PACKAGED_MODELS_DIR, "qexp_20201217"
)
except Exception as e:
logger.error(e)
logger.info("Cannot get QEXP model path")
# TRANSFORMER MODEL PATH
try:
LOCAL_TRANSFORMERS_DIR = os.path.join(
Config.LOCAL_PACKAGED_MODELS_DIR, "transformers"
)
except Exception as e:
logger.error(e)
logger.info("Cannot get TRANSFORMER model path")
# SENTENCE INDEX
# get largest file name with sent_index prefix (by date)
try:
sent_index_name = [
f
for f in os.listdir(Config.LOCAL_PACKAGED_MODELS_DIR)
if ("sent_index" in f) and ("tar" not in f)
]
sent_index_name.sort(reverse=True)
if len(sent_index_name) > 0:
INDEX_PATH = os.path.join(
Config.LOCAL_PACKAGED_MODELS_DIR, sent_index_name[0]
)
else:
print("defaulting INDEX_PATH to sent_index")
INDEX_PATH = os.path.join(
Config.LOCAL_PACKAGED_MODELS_DIR, "sent_index")
except Exception as e:
logger.error(e)
logger.info("Cannot get Sentence Index model path")
model_dict = {
"transformers": LOCAL_TRANSFORMERS_DIR,
"sentence": INDEX_PATH,
"qexp": QEXP_MODEL_PATH,
}
return model_dict
| 1,871 | 0 | 23 |
f9029a8b137073befb100973174a9b005eea8484 | 3,712 | py | Python | astropy/io/fits/py3compat.py | REMeyer/astropy | 28c49fb618538a01812e586cd07bccdf0591a6c6 | [
"BSD-3-Clause"
] | 3 | 2018-03-20T15:09:16.000Z | 2021-05-27T11:17:33.000Z | astropy/io/fits/py3compat.py | REMeyer/astropy | 28c49fb618538a01812e586cd07bccdf0591a6c6 | [
"BSD-3-Clause"
] | null | null | null | astropy/io/fits/py3compat.py | REMeyer/astropy | 28c49fb618538a01812e586cd07bccdf0591a6c6 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import warnings
from ...extern import six
from ...utils.compat.numpycompat import NUMPY_LT_1_10
from ...utils.exceptions import AstropyUserWarning
if not six.PY2:
# Stuff to do if Python 3
# Make the decode_ascii utility function actually work
from . import util
import numpy
util.encode_ascii = encode_ascii
util.decode_ascii = decode_ascii
# Here we monkey patch (yes, I know) numpy to fix a few numpy Python 3
# bugs. The only behavior that's modified is that bugs are fixed, so that
# should be OK.
# Fix chararrays; this is necessary in numpy 1.9.x and below
# The fix for this is in https://github.com/numpy/numpy/pull/5982 and is
# available as of Numpy 1.10
if NUMPY_LT_1_10:
_chararray = numpy.char.chararray
for m in [numpy, numpy.char, numpy.core.defchararray,
numpy.core.records]:
m.chararray = chararray
| 40.791209 | 78 | 0.56681 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import warnings
from ...extern import six
from ...utils.compat.numpycompat import NUMPY_LT_1_10
from ...utils.exceptions import AstropyUserWarning
if not six.PY2:
# Stuff to do if Python 3
# Make the decode_ascii utility function actually work
from . import util
import numpy
def encode_ascii(s):
if isinstance(s, str):
return s.encode('ascii')
elif (isinstance(s, numpy.ndarray) and
issubclass(s.dtype.type, numpy.str_)):
ns = numpy.char.encode(s, 'ascii').view(type(s))
if ns.dtype.itemsize != s.dtype.itemsize / 4:
ns = ns.astype((numpy.bytes_, s.dtype.itemsize / 4))
return ns
elif (isinstance(s, numpy.ndarray) and
not issubclass(s.dtype.type, numpy.bytes_)):
raise TypeError('string operation on non-string array')
return s
util.encode_ascii = encode_ascii
def decode_ascii(s):
if isinstance(s, bytes):
try:
return s.decode('ascii')
except UnicodeDecodeError:
warnings.warn('non-ASCII characters are present in the FITS '
'file header and have been replaced by "?" '
'characters', AstropyUserWarning)
s = s.decode('ascii', errors='replace')
return s.replace(u'\ufffd', '?')
elif (isinstance(s, numpy.ndarray) and
issubclass(s.dtype.type, numpy.bytes_)):
# np.char.encode/decode annoyingly don't preserve the type of the
# array, hence the view() call
# It also doesn't necessarily preserve widths of the strings,
# hence the astype()
if s.size == 0:
# Numpy apparently also has a bug where calling np.char.decode on an
# empty string array returns an empty float64 array with the wrong
# dtype, so construct the empty unicode array manually
dt = s.dtype.str.replace('S', 'U')
ns = numpy.array([], dtype=dt).view(type(s))
else:
ns = numpy.char.decode(s, 'ascii').view(type(s))
if ns.dtype.itemsize / 4 != s.dtype.itemsize:
ns = ns.astype((numpy.str_, s.dtype.itemsize))
return ns
elif (isinstance(s, numpy.ndarray) and
not issubclass(s.dtype.type, numpy.str_)):
# Don't silently pass through on non-string arrays; we don't want
# to hide errors where things that are not stringy are attempting
# to be decoded
raise TypeError('string operation on non-string array')
return s
util.decode_ascii = decode_ascii
# Here we monkey patch (yes, I know) numpy to fix a few numpy Python 3
# bugs. The only behavior that's modified is that bugs are fixed, so that
# should be OK.
# Fix chararrays; this is necessary in numpy 1.9.x and below
# The fix for this is in https://github.com/numpy/numpy/pull/5982 and is
# available as of Numpy 1.10
if NUMPY_LT_1_10:
_chararray = numpy.char.chararray
class chararray(_chararray):
def __getitem__(self, obj):
val = numpy.ndarray.__getitem__(self, obj)
if isinstance(val, numpy.character):
temp = val.rstrip()
if numpy.char._len(temp) == 0:
val = ''
else:
val = temp
return val
for m in [numpy, numpy.char, numpy.core.defchararray,
numpy.core.records]:
m.chararray = chararray
| 2,600 | 7 | 119 |
0b745278ebefd72e35f673d94c6f0761fa275970 | 1,492 | py | Python | pyPLM/Widgets/GraphicPathItem.py | vtta2008/pipelineTool | 2431d2fc987e3b31f2a6a63427fee456fa0765a0 | [
"Apache-2.0"
] | 7 | 2020-10-11T21:21:50.000Z | 2022-03-07T03:37:51.000Z | pyPLM/Widgets/GraphicPathItem.py | vtta2008/pipelineTool | 2431d2fc987e3b31f2a6a63427fee456fa0765a0 | [
"Apache-2.0"
] | null | null | null | pyPLM/Widgets/GraphicPathItem.py | vtta2008/pipelineTool | 2431d2fc987e3b31f2a6a63427fee456fa0765a0 | [
"Apache-2.0"
] | 3 | 2019-03-11T21:54:52.000Z | 2019-11-25T11:23:17.000Z | # -*- coding: utf-8 -*-
"""
Script Name: GraphicPathItem.py
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
from PySide2.QtWidgets import QGraphicsPathItem
from pyPLM.models import DamgSignals
from pyPLM.settings import AppSettings
# -------------------------------------------------------------------------------------------------------------
# Created by panda on 3/12/2019 - 4:12 AM
# © 2017 - 2018 DAMGteam. All rights reserved | 29.84 | 111 | 0.476542 | # -*- coding: utf-8 -*-
"""
Script Name: GraphicPathItem.py
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
from PySide2.QtWidgets import QGraphicsPathItem
from pyPLM.models import DamgSignals
from pyPLM.settings import AppSettings
class GraphicPathItem(QGraphicsPathItem):
Type = 'DAMGGRAPHICVIEW'
key = 'GraphicView'
_name = 'DAMG Graphic View'
def __init__(self, *__args):
QGraphicsPathItem.__init__(self)
self.settings = AppSettings(self)
self.signals = DamgSignals()
def setValue(self, key, value):
return self.settings.initSetValue(key, value, self.key)
def getValue(self, key, decode=None):
if decode is None:
return self.settings.initValue(key, self.key)
else:
return self.settings.initValue(key, self.key, decode)
@property
def name(self):
return self._name
@name.setter
def name(self, newName):
self._name = newName
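# Example usage (hypothetical key/value): item = GraphicPathItem();
# item.setValue('geometry', value) stores the value via AppSettings under the
# 'GraphicView' key group, and item.getValue('geometry') reads it back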
# -------------------------------------------------------------------------------------------------------------
# Created by panda on 3/12/2019 - 4:12 AM
# © 2017 - 2018 DAMGteam. All rights reserved | 501 | 377 | 23 |
0af582c4ced2b971bfa928c00cc6694228c5eb53 | 1,262 | py | Python | plugins/extractor.py | yasirstream3/Streams-Extractor | e8ea1fdeaf823027e6283d67cb3cc3f01183204a | [
"MIT"
] | null | null | null | plugins/extractor.py | yasirstream3/Streams-Extractor | e8ea1fdeaf823027e6283d67cb3cc3f01183204a | [
"MIT"
] | null | null | null | plugins/extractor.py | yasirstream3/Streams-Extractor | e8ea1fdeaf823027e6283d67cb3cc3f01183204a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @trojanzhex
from pyrogram import filters
from pyrogram import Client as trojanz
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from config import Config
from script import Script
@trojanz.on_message(filters.private & (filters.document | filters.video))
| 35.055556 | 196 | 0.649762 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @trojanzhex
from pyrogram import filters
from pyrogram import Client as trojanz
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from config import Config
from script import Script
@trojanz.on_message(filters.private & (filters.document | filters.video))
async def confirm_dwnld(client, message):
if message.from_user.id not in Config.AUTH_USERS:
return await message.reply("🚫 Mohon maaf, bot ini hanya bisa digunakan oleh orang tertentu saja.\n\n<i>Sorry, this bot only can used by authorized user.</i>\n\n<b>Owner</b>: @YasirArisM")
media = message
filetype = media.document or media.video
if filetype.mime_type.startswith("video/"):
await message.reply_text(
"**What you want me to do??\n\nApa yang ingin kamu lakukan?**",
quote=True,
reply_markup=InlineKeyboardMarkup([
[InlineKeyboardButton(text="Download and Process", callback_data="download_file")],
[InlineKeyboardButton(text="Cancel", callback_data="close")]
])
)
else:
await message.reply_text(
"Invalid Media",
quote=True
)
| 898 | 0 | 23 |
7a724e7e0294769414354070ef463fbef9db0d53 | 335 | py | Python | Clients/PythonCatalyst/Testing/Legacy/CatalystLoadVTKmFilterPlugin.py | xj361685640/ParaView | 0a27eef5abc5a0c0472ab0bc806c4db881156e64 | [
"Apache-2.0",
"BSD-3-Clause"
] | 815 | 2015-01-03T02:14:04.000Z | 2022-03-26T07:48:07.000Z | Clients/PythonCatalyst/Testing/Legacy/CatalystLoadVTKmFilterPlugin.py | xj361685640/ParaView | 0a27eef5abc5a0c0472ab0bc806c4db881156e64 | [
"Apache-2.0",
"BSD-3-Clause"
] | 9 | 2015-04-28T20:10:37.000Z | 2021-08-20T18:19:01.000Z | Clients/PythonCatalyst/Testing/Legacy/CatalystLoadVTKmFilterPlugin.py | xj361685640/ParaView | 0a27eef5abc5a0c0472ab0bc806c4db881156e64 | [
"Apache-2.0",
"BSD-3-Clause"
] | 328 | 2015-01-22T23:11:46.000Z | 2022-03-14T06:07:52.000Z | from paraview.simple import *
# Load the distributed plugin.
LoadDistributedPlugin("VTKmFilters" , remote=False, ns=globals())
assert VTKmContour
| 25.769231 | 71 | 0.802985 | from paraview.simple import *
# Load the distributed plugin.
LoadDistributedPlugin("VTKmFilters" , remote=False, ns=globals())
assert VTKmContour
def DoCoProcessing(datadescription):
print("in DoCoProcessing")
def RequestDataDescription(datadescription):
datadescription.GetInputDescriptionByName('input').GenerateMeshOn()
| 141 | 0 | 46 |
6289cc75ced36679c8f4d8730c871cd06399e07d | 18,788 | py | Python | train.py | pokonglai/ods-net | d9cc443c6b8fbf778e43e9044c38474d060221a3 | [
"MIT"
] | 22 | 2019-03-27T22:22:12.000Z | 2022-03-10T20:55:31.000Z | train.py | pokonglai/ods-net | d9cc443c6b8fbf778e43e9044c38474d060221a3 | [
"MIT"
] | null | null | null | train.py | pokonglai/ods-net | d9cc443c6b8fbf778e43e9044c38474d060221a3 | [
"MIT"
] | 3 | 2019-12-11T09:19:48.000Z | 2022-02-21T20:48:36.000Z | from __future__ import print_function
import os
import time
import sys
sys.path.append("..")
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
gpu_frac = 0.7
def get_session(gpu_fraction=gpu_frac):
'''E.g. if you have 6 GB of GPU memory and want to allocate ~2 GB, set gpu_fraction=0.3'''
num_threads = os.environ.get('OMP_NUM_THREADS')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
if num_threads:
return tf.Session(config=tf.ConfigProto(
gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
else:
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
## attempt to limit the memory usage of TF
KTF.set_session(get_session(gpu_fraction=gpu_frac))
import keras
from keras.utils import plot_model
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import numpy as np
import cv2
import matplotlib.pyplot as plt
from utils import *
from get_config import *
from get_models import *
from data_loader import *
class ETATimer:
''' Simple class to store a timer for the ETA of one epoch/validation run '''
if __name__ == '__main__':
train_with_multibatch()
| 49.835544 | 202 | 0.624175 | from __future__ import print_function
import os
import time
import sys
sys.path.append("..")
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
gpu_frac = 0.7
def get_session(gpu_fraction=gpu_frac):
'''E.g. if you have 6 GB of GPU memory and want to allocate ~2 GB, set gpu_fraction=0.3'''
num_threads = os.environ.get('OMP_NUM_THREADS')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
if num_threads:
return tf.Session(config=tf.ConfigProto(
gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
else:
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
## attempt to limit the memory usage of TF
KTF.set_session(get_session(gpu_fraction=gpu_frac))
import keras
from keras.utils import plot_model
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import numpy as np
import cv2
import matplotlib.pyplot as plt
from utils import *
from get_config import *
from get_models import *
from data_loader import *
class ETATimer:
''' Simple class to store a timer for the ETA of one epoch/validation run '''
def __init__(self, n_images):
self.n_images = n_images
self.all_times = []
self.avg_time = 0
def update(self, time_per_batch):
self.all_times.append(time_per_batch)
self.avg_time = np.mean(self.all_times)
def get_eta(self, n_processed):
return self.avg_time * (self.n_images - n_processed)
def reset(self):
self.all_times = []
self.avg_time = 0
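# Example usage (hypothetical timings): for a 100-batch epoch,
# eta = ETATimer(100); eta.update(0.5)  # one batch took 0.5 s
# eta.get_eta(1) -> 0.5 * (100 - 1) = 49.5 s estimated remaining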
def prep_data_for_network(img_array, hparams):
# reshape the raw numpy data into what's needed for keras networks
n_channels = 1
if len(img_array.shape) == 4: # 4 implies --> batch size, rows, cols, channels
n_channels = img_array.shape[3]
if K.image_data_format() == 'channels_first': return img_array.reshape(img_array.shape[0], n_channels, hparams["img_rows"], hparams["img_cols"])
else: return img_array.reshape(img_array.shape[0], hparams["img_rows"], hparams["img_cols"], n_channels)
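# e.g. a (batch, rows, cols, 3) RGB batch is reshaped to (batch, 3, rows, cols)
# under Keras' 'channels_first' setting and left in NHWC layout otherwise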
class KerasModel_DataInBatches:
def __init__(self, model_name, img_input_shape, hparams):
self.model_name = model_name
self.hyper_params = hparams
self.img_input_shape = img_input_shape
self.model = get_model(model_name, self.img_input_shape, hparams)
if hparams["output_model_structure"]: plot_model(self.model, to_file=os.path.join("./model_vis/",self.model_name+'.png'), show_shapes=True)
def train(self, input_datapaths, strOutputModelPath):
train_start_time = time.time()
## create the log file
log_file_train = open(os.path.join(strOutputModelPath, self.model_name+'_'+str(self.hyper_params["img_rows"])+"x"+str(self.hyper_params["img_cols"])+'_training.log'), 'w')
log_file_val = open(os.path.join(strOutputModelPath, self.model_name+'_'+str(self.hyper_params["img_rows"])+"x"+str(self.hyper_params["img_cols"])+'_validation.log'), 'w')
## create the data batching generator functions
fpaths_train, fpaths_val = input_datapaths
batches_train = data_batcher(self.hyper_params["batch_size"], fpaths_train)
batches_test = data_batcher(self.hyper_params["batch_size"], fpaths_val)
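## data_batcher is a generator; each next() call below yields one list of
## batch_size file paths to load for a single batch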
## loss function outputs are keyed by layer name; writing that to the console takes up
## too much space, so create a mapping from each output layer name to a shorter keyword
output_layers = ['depth_map', 'normal_map']
output_layer_shortnames = {}
output_layer_shortnames['depth_map'] = 'D'
output_layer_shortnames['normal_map'] = 'N'
## do the same for the accuracy metrics
acc_measures = ['rmse', 'median_dist', 'log10_error']
acc_measures_shortnames = {}
acc_measures_shortnames['rmse'] = 'rmse'
acc_measures_shortnames['median_dist'] = 'med'
acc_measures_shortnames['log10_error'] = 'log10'
## save the model with the best values for the depth map
best_val_results = {}
best_val_results['depth_map_loss'] = sys.maxsize
best_val_results['depth_map_rmse'] = sys.maxsize
best_val_results['depth_map_median_dist'] = sys.maxsize
best_val_results['depth_map_log10_error'] = sys.maxsize
## write the header to log_file
str_header = "epoch,"
for l in output_layers:
str_header += l+"_loss,"
for am in acc_measures:
str_header += l+"_"+am+","
str_header+="\n"
log_file_train.write(str_header)
log_file_val.write(str_header)
ndigits_iter = number_of_digits(self.hyper_params["epochs"])
eta_train = ETATimer(self.hyper_params["batches_per_epoch"])
eta_val = ETATimer(self.hyper_params["batches_per_val_run"])
for i in range(self.hyper_params["epochs"]):
str_iter_msg = str(i+1) + "/" + str(self.hyper_params["epochs"])
iter_start_time = time.time()
## store all the loss/acc values and average them as we work through an epoch
latest_loss_train = {}
batch_loss_train = {}
latest_loss_val = {}
batch_loss_val = {}
for l in output_layers:
latest_loss_train[l+"_loss"] = -1
latest_loss_val[l+"_loss"] = -1
batch_loss_train[l+"_loss"] = []
batch_loss_val[l+"_loss"] = []
latest_acc_train = {}
batch_acc_train = {}
latest_acc_val = {}
batch_acc_val = {}
for l in output_layers:
for am in acc_measures:
latest_acc_train[l+"_"+am] = -1
latest_acc_val[l+"_"+am] = -1
batch_acc_train[l+"_"+am] = []
batch_acc_val[l+"_"+am] = []
eta_train.reset()
eta_val.reset()
batch_start_time = time.time()
for bs in range(self.hyper_params["batches_per_epoch"]):
single_batch_start_time = time.time()
training_imgs_fpaths = next(batches_train)
## left-right --> center depth and normals
imgs_train_L, imgs_train_R, imgs_gt_depth, imgs_gt_norms = load_LR_norm_hdf5_batch(training_imgs_fpaths, self.hyper_params["data_aug_params"])
imgs_L = prep_data_for_network(np.array(imgs_train_L), self.hyper_params)
imgs_R = prep_data_for_network(np.array(imgs_train_R), self.hyper_params)
imgs_D = prep_data_for_network(np.array(imgs_gt_depth), self.hyper_params)
imgs_N = prep_data_for_network(np.array(imgs_gt_norms), self.hyper_params)
x_train = [ imgs_L, imgs_R ]
y_train = [ imgs_D, imgs_N ]
## train this batch
fit_hist = self.model.fit(x_train, y_train, batch_size=self.hyper_params["batch_size"], epochs=1, verbose=0)
## record the training losses and accuracy
for l in output_layers:
batch_loss_train[l+"_loss"].append(fit_hist.history[l+"_loss"][0])
for am in acc_measures:
batch_acc_train[l+"_"+am].append(fit_hist.history[l+"_"+am][0])
for l in output_layers:
latest_loss_train[l+"_loss"] = sum(batch_loss_train[l+"_loss"]) / len(batch_loss_train[l+"_loss"])
for am in acc_measures:
latest_acc_train[l+"_"+am] = sum(batch_acc_train[l+"_"+am]) / len(batch_acc_train[l+"_"+am])
batch_end_time = time.time()
batch_elapsed_time = batch_end_time - batch_start_time
single_batch_elasped_time = batch_end_time - single_batch_start_time
## build the display string with the new updated average values for training loss and accuracy
str_loss = ""
str_acc = ""
for l in output_layers:
str_loss += output_layer_shortnames[l]+"_loss=" + "{:5.6f}".format(latest_loss_train[l+"_loss"]) + " "
for am in acc_measures:
str_acc += output_layer_shortnames[l]+"_"+acc_measures_shortnames[am]+"=" +"{:5.6f}".format(latest_acc_train[l+"_"+am]) + " "
td, th, tm, ts = time_in_seconds_to_d_h_m_s(batch_elapsed_time) ## total elapsed time for training only
str_time = str(int(th)) + "h - " + str(int(tm)) + "m - " + "{:3.2f}".format(ts)+"s"
eta_train.update(single_batch_elasped_time)
eta_td, eta_th, eta_tm, eta_ts = time_in_seconds_to_d_h_m_s(eta_train.get_eta(bs+1) ) ## estimated time remaining
str_eta_time = str(int(eta_th)) + "h - " + str(int(eta_tm)) + "m - " + "{:3.2f}".format(eta_ts)+"s"
loss_msg = " [ "+str_loss+"]+[ "+str_acc+"] >> time = " + str_time + " - ETA (" + str_eta_time + ") " # extra spaces to erase the extra chars at the end if the string is shorter
print_text_progress_bar(bs/(self.hyper_params["batches_per_epoch"]-1), bar_name="## T "+str_iter_msg + " >> ", bar_length=5, debug_msg=loss_msg)
print()
## now load all of the validation data blocks and evaluate the network loss
batch_start_time = time.time()
for bs in range(self.hyper_params["batches_per_val_run"]):
single_batch_start_time = time.time()
validation_imgs_fpaths = next(batches_test)
## left-right --> center depth and normals
imgs_test_L, imgs_test_R, imgs_gt_depth, imgs_gt_norms = load_LR_norm_hdf5_batch(validation_imgs_fpaths, self.hyper_params["data_aug_params"])
imgs_L = prep_data_for_network(np.array(imgs_test_L), self.hyper_params)
imgs_R = prep_data_for_network(np.array(imgs_test_R), self.hyper_params)
imgs_D = prep_data_for_network(np.array(imgs_gt_depth), self.hyper_params)
imgs_N = prep_data_for_network(np.array(imgs_gt_norms), self.hyper_params)
x_test = [ imgs_L, imgs_R ]
y_test = [ imgs_D, imgs_N ]
## evaluate this batch
eval_score = self.model.evaluate(x_test, y_test, verbose=0)
## ODSNet.metrics_names --> ['loss', 'depth_map_loss', 'normal_map_loss', 'depth_map_rmse', 'normal_map_rmse']
# print(self.model.metrics_names)
# print(eval_score)
for eval_idx in range(len(self.model.metrics_names)):
mname = self.model.metrics_names[eval_idx]
if mname in batch_loss_val: batch_loss_val[mname].append(eval_score[eval_idx])
elif mname in batch_acc_val: batch_acc_val[mname].append(eval_score[eval_idx])
for l in output_layers:
latest_loss_val[l+"_loss"] = sum(batch_loss_val[l+"_loss"]) / len(batch_loss_val[l+"_loss"])
for am in acc_measures:
latest_acc_val[l+"_"+am] = sum(batch_acc_val[l+"_"+am]) / len(batch_acc_val[l+"_"+am])
batch_end_time = time.time()
batch_elapsed_time = batch_end_time - batch_start_time
single_batch_elasped_time = batch_end_time - single_batch_start_time
## build the display string with the new updated average values for validation loss and accuracy
str_loss = ""
str_acc = ""
for l in output_layers:
str_loss += output_layer_shortnames[l]+"_loss=" + "{:5.6f}".format(latest_loss_val[l+"_loss"]) + " "
for am in acc_measures:
str_acc += output_layer_shortnames[l]+"_"+acc_measures_shortnames[am]+"=" +"{:5.6f}".format(latest_acc_val[l+"_"+am]) + " "
td, th, tm, ts = time_in_seconds_to_d_h_m_s(batch_elapsed_time) ## total elapsed time for training only
str_time = str(int(th)) + "h - " + str(int(tm)) + "m - " + "{:3.2f}".format(ts)+"s"
eta_val.update(single_batch_elasped_time)
eta_td, eta_th, eta_tm, eta_ts = time_in_seconds_to_d_h_m_s(eta_val.get_eta(bs+1) ) ## estimated time remaining
str_eta_time = str(int(eta_th)) + "h - " + str(int(eta_tm)) + "m - " + "{:3.2f}".format(eta_ts)+"s"
acc_msg = " [ "+str_loss+"]+[ "+str_acc+"] >> time = " + str_time + " - ETA (" + str_eta_time + ") " # extra spaces to erase the extra chars at the end if the string is shorter
print_text_progress_bar(bs/(self.hyper_params["batches_per_val_run"]-1), bar_name="$$ V "+str_iter_msg + " >> ", bar_length=5, debug_msg=acc_msg)
print()
## display the total elapsed time for an iteration
iter_end_time = time.time()
iter_elapsed_time = iter_end_time - iter_start_time
td, th, tm, ts = time_in_seconds_to_d_h_m_s(iter_elapsed_time)
str_time = str(int(th)) + "h - " + str(int(tm)) + "m - " + "{:3.2f}".format(ts)+"s "
print("Total elapsed time = " + str_time)
print()
## log the training and valudation stats for this iteration
log_line = str(i)+","
for l in output_layers:
log_line+=str(latest_loss_train[l+"_loss"])+","
for am in acc_measures:
log_line+=str(latest_acc_train[l+"_"+am])+","
log_line += "\n"
log_file_train.write(log_line)
log_file_train.flush()
os.fsync(log_file_train.fileno())
log_line = str(i)+","
for l in output_layers:
log_line+=str(latest_loss_val[l+"_loss"])+","
for am in acc_measures:
log_line+=str(latest_acc_val[l+"_"+am])+","
log_line += "\n"
log_file_val.write(log_line)
log_file_val.flush()
os.fsync(log_file_val.fileno())
## making sure we save the best model for the depth map loss and accuracy measures
if self.hyper_params["save_best"]:
if best_val_results['depth_map_loss'] > latest_loss_val["depth_map_loss"]:
print("Lowest validation LOSS model = iter ", i)
best_val_results['depth_map_loss'] = latest_loss_val["depth_map_loss"]
self.save(strOutputModelPath, self.model_name+"_"+str(self.hyper_params["img_rows"])+"x"+str(self.hyper_params["img_cols"]) + "_lowest_loss")
if best_val_results['depth_map_rmse'] > latest_acc_val["depth_map_rmse"]:
print("Lowest validation RMSE model = iter ", i)
best_val_results['depth_map_rmse'] = latest_acc_val["depth_map_rmse"]
self.save(strOutputModelPath, self.model_name+"_"+str(self.hyper_params["img_rows"])+"x"+str(self.hyper_params["img_cols"]) + "_lowest_rmse")
if best_val_results['depth_map_median_dist'] > latest_acc_val["depth_map_median_dist"]:
print("Lowest validation MEDIAN DIST model = iter ", i)
best_val_results['depth_map_median_dist'] = latest_acc_val["depth_map_median_dist"]
self.save(strOutputModelPath, self.model_name+"_"+str(self.hyper_params["img_rows"])+"x"+str(self.hyper_params["img_cols"]) + "_lowest_med")
if best_val_results['depth_map_log10_error'] > latest_acc_val["depth_map_log10_error"]:
print("Lowest validation LOG_10 model = iter ", i)
best_val_results['depth_map_log10_error'] = latest_acc_val["depth_map_log10_error"]
self.save(strOutputModelPath, self.model_name+"_"+str(self.hyper_params["img_rows"])+"x"+str(self.hyper_params["img_cols"]) + "_lowest_log10")
print()
## we have a valid save_period, so check if it is time to save the current model to disk
if self.hyper_params["save_period"] > 0:
if (i % self.hyper_params["save_period"]) == 0:
self.save(strOutputModelPath, self.model_name+"_"+str(i).zfill(ndigits_iter)+"_"+str(self.hyper_params["img_rows"])+"x"+str(self.hyper_params["img_cols"]))
log_file_train.close()
log_file_val.close()
train_end_time = time.time()
td, th, tm, ts = time_in_seconds_to_d_h_m_s(train_end_time - train_start_time)
str_time = str(int(td)) + "d - " + str(int(th)) + "h - " + str(int(tm)) + "m - " + "{:3.2f}".format(ts)+"s"
print()
print("Total training time for '" + self.model_name + "' ==> " + str_time)
print()
def save(self, folder_path, filename, show_debug_msg=False):
full_path = os.path.join(folder_path, filename + ".h5")
if show_debug_msg:
print()
print("*** Saving model to: " + full_path, end=' ', flush=True)
self.model.save(full_path)
if show_debug_msg:
print(" ... DONE!!")
print()
def train_with_multibatch():
hyper_params = get_training_config()
input_shape = (hyper_params["img_rows"], hyper_params["img_cols"], 3)
# str_model_name = "ODS_Net" ## regular SepUNet
str_model_name = "ODS_Net_borderloss" ## with the Lpano loss function
strTopFolder = "./data/" # input folder containing file lists
# fpaths_train = load_filelist(strTopFolder, "all_areas_train_v2.txt") # actual file list fot training
# fpaths_val = load_filelist(strTopFolder, "all_areas_val_v2.txt")
fpaths_train = load_filelist(strTopFolder, "sample_train.txt") # small sample of data to demo the code
fpaths_val = load_filelist(strTopFolder, "sample_val.txt")
strOutputModelPath = "./training_output/" # folder that will we dump log files and trained models which were saved according to save_period in hyper_params
## create the wrapper object that will take the folder of h5 images and feed it into the network in batches (as specified in hyper_params)
kmodel = KerasModel_DataInBatches(str_model_name, input_shape, hyper_params)
kmodel.train([fpaths_train, fpaths_val], strOutputModelPath) ## just LR -> L depth normals
## output final model
kmodel.save(strOutputModelPath, str_model_name+"_"+str(hyper_params["img_rows"])+"x"+str(hyper_params["img_cols"]))
if __name__ == '__main__':
train_with_multibatch()
| 17,147 | 10 | 263 |
7acfc08fadc93b69b6501043db89181dfb9a03ea | 22,541 | py | Python | LLC_Membranes/setup/solvation_equilibration.py | shirtsgroup/LLC_Membranes | e94694f298909352d7e9d912625314a1e46aa5b6 | [
"MIT"
] | 4 | 2019-06-18T15:26:49.000Z | 2021-08-11T18:57:39.000Z | LLC_Membranes/setup/solvation_equilibration.py | shirtsgroup/LLC_Membranes | e94694f298909352d7e9d912625314a1e46aa5b6 | [
"MIT"
] | 2 | 2019-08-22T20:11:46.000Z | 2019-08-22T22:35:17.000Z | LLC_Membranes/setup/solvation_equilibration.py | shirtsgroup/LLC_Membranes | e94694f298909352d7e9d912625314a1e46aa5b6 | [
"MIT"
] | 4 | 2019-07-06T15:41:53.000Z | 2021-01-27T17:59:13.000Z | #!/usr/bin/env python
import argparse
import subprocess
import sqlite3 as sql
from LLC_Membranes.llclib import topology, file_rw
from LLC_Membranes.analysis import solute_partitioning
from LLC_Membranes.setup import lc_class, equil, solvate_tails
import mdtraj as md
import numpy as np
import os
if __name__ == "__main__":
os.environ["GMX_MAXBACKUP"] = "-1" # stop GROMACS from making backups
args = initialize().parse_args()
sys = System(args.build_monomer, args.weight_percent, args.ratio, solute='HOH', nopores=args.nopores,
ncolumns=args.ncolumns, monomers_per_column=args.monomers_per_column, p2p=args.p2p,
parallel_displaced=args.parallel_displaced, dbwl=args.dbwl, random_seed=args.random_seed,
mpi=args.mpi, nproc=args.nproc, tolerance=args.tolerance)
while not sys.converged:
sys.query_database(database='water_content.db', guess_range=args.guess_range, guess_stride=args.guess_stride)
sys.equilibrate(input_files=True, length=args.length_nvt, force=args.forces[0],
restraint_residue=args.restraint_residue, restraint_axis=args.restraint_axis,
ring_restraints=args.ring_restraints)
sys.calculate_pore_water(args.forces[0])
sys.write_final_pore_configuration()
sys.place_water_tails(args.output)
sys.full_equilibration(args.forces, fully_solvated=args.output, l_berendsen=args.length_berendsen,
l_nvt=args.length_nvt, l_pr=args.length_Parrinello_Rahman,
restraint_axis=args.restraint_axis, restraint_residue=args.restraint_residue,
ring_restraints=args.ring_restraints)
| 50.540359 | 121 | 0.626458 | #!/usr/bin/env python
import argparse
import subprocess
import sqlite3 as sql
from LLC_Membranes.llclib import topology, file_rw
from LLC_Membranes.analysis import solute_partitioning
from LLC_Membranes.setup import lc_class, equil, solvate_tails
import mdtraj as md
import numpy as np
import os
def initialize():
parser = argparse.ArgumentParser(description='Solvate system and adjust so there is a certain wt % of water')
parser.add_argument('-ratio', '--ratio', default=2, type=float, help='Ratio of water in pores to water in tails')
parser.add_argument('-wt', '--weight_percent', default=10, type=float, help='Total weight percent of water')
parser.add_argument('-tol', '--tolerance', default=1, type=int, help='Number of water molecules')
parser.add_argument('-guess_range', default=[.4, 1], nargs='+', help='If water_content.db has no entries for the '
'build monomer, an initial radius will be randomly selected from this range')
parser.add_argument('-guess_stride', default=0.2, type=float, help='How far above/below the highest/lowest value to'
'make the next guess at pore radius if you need more/less water than the bounds of the water '
'content database (nm)')
parser.add_argument('-o', '--output', default='solvated_final.gro', help='Name of fully solvated output file')
parser.add_argument('-seed', '--random_seed', default=0, type=int, help='Numpy random seed')
# parallelization
parser.add_argument('-mpi', '--mpi', action="store_true", help='Run MD simulations in parallel')
parser.add_argument('-np', '--nproc', default=4, help='Number of MPI processes')
# same flags as to build.py
parser.add_argument('-b', '--build_monomer', default='NAcarb11V', type=str, help='Name of single monomer used to '
'build full system')
parser.add_argument('-m', '--monomers_per_column', default=20, type=int, help='Number of monomers to stack in each '
'column')
parser.add_argument('-c', '--ncolumns', default=5, type=int, help='Number of columns used to build each pore')
parser.add_argument('-r', '--pore_radius', default=.6, type=float, help='Initial guess at pore radius (nm)')
parser.add_argument('-p', '--p2p', default=4.5, type=float, help='Initial pore-to-pore distance (nm)')
parser.add_argument('-n', '--nopores', default=4, type=int, help='Number of pores (only works with 4 currently)')
parser.add_argument('-d', '--dbwl', default=.37, type=float, help='Distance between vertically stacked monomers '
'(nm)')
parser.add_argument('-pd', '--parallel_displaced', default=0, type=float, help='Angle of wedge formed between line '
'extending from pore center to monomer and line from pore center to vertically adjacent monomer '
'head group.')
parser.add_argument('-angles', '--angles', nargs='+', default=[90, 90, 60], type=float, help='Angles between '
'box vectors')
# flags unique to equil.py
parser.add_argument('-ring_restraints', '--ring_restraints', default=["C", "C1", "C2", "C3", "C4", "C5"], nargs='+',
help='Name of atoms used to restrain head groups during initial equilibration.')
parser.add_argument('-forces', default=[1000000, 3162, 56, 8, 3, 2, 1, 0], help='Sequence of force constants to '
'apply to ring restraints')
parser.add_argument('--restraint_residue', default='HII', type=str, help='Name of residue to which ring_restraint '
'atoms belong')
parser.add_argument('--restraint_axis', default='xyz', type=str, help='Axes along which to apply position '
'restraints')
parser.add_argument('-l_nvt', '--length_nvt', default=50, type=int, help='Length of restrained NVT simulations '
'(ps)')
parser.add_argument('-lb', '--length_berendsen', default=5000, type=int, help='Length of simulation using berendsen '
'pressure control')
parser.add_argument('-lpr', '--length_Parrinello_Rahman', default=400000, type=int, help='Length of simulation '
'using Parrinello-Rahman pressure control')
return parser
class System(object):
def __init__(self, build_monomer, weight_percent, ratio, tolerance=1, solute='HOH', nopores=4, ncolumns=5,
monomers_per_column=20, p2p=4.5, parallel_displaced=0, dbwl=0.37, random_seed=0, mpi=False, nproc=4):
""" Get unit cell build parameters and determine the number of water molecules required for each region in order
to have the correct final composition.
:param build_monomer: Name of liquid crystal monomer to use to build unit cell (no file extension)
:param weight_percent: Weight percent of water in the whole system
:param ratio: Ratio of water in the pores to water in the tails
:param tolerance: Acceptable error in the total number of water molecules placed in the pore region
:param solute: name of solute used to solvate system
:param nopores: Number of pores in unit cell
:param ncolumns: Number of stacked monomer columns per pore
:param monomers_per_column: Number of monomers stacked into a column
:param p2p: Distance between pores (nm)
:param parallel_displaced: Angle of wedge formed between line extending from pore center to monomer and line \
from pore center to vertically adjacent monomer head group
:param dbwl: Distance between stacked monomers (nm)
:param random_seed: Monomer columns are randomly displaced in the z-direction when the initial configuration \
is built. Specify a random seed so that the same displacement is achieved each time the radius is changed. I \
think this helps with convergence, but it hasn't been extensively tested.
:param mpi: Run the MPI version of GROMACS
:param nproc: number of MPI processes if mpi = True
:type build_monomer: str
:type weight_percent: float
:type ratio: float
:type tolerance: int
:type solute: str
:type nopores: int
:type ncolumns: int
:type monomers_per_column: int
:type p2p: float
:type parallel_displaced: float
:type dbwl: float
:type random_seed: int
:type mpi: bool
:type nproc: int
"""
# Initialize variables needed later
self.location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.parallel_displaced = parallel_displaced
self.dbwl = dbwl
self.random_seed = random_seed
self.monomers_per_column = monomers_per_column
self.ncolumns = ncolumns
self.p2p = p2p
self.nopores = nopores
self.mpi = mpi
self.np = nproc
self.tolerance = tolerance
self.build_monomer = topology.LC(build_monomer)
t = md.load('%s/../top/topologies/%s.gro' % (self.location, build_monomer))
residues = set([a.residue.name for a in t.topology.atoms])
# since liquid crystals with counterions contain multiple residues
self.build_monomer_mw = 0
for r in residues:
self.build_monomer_mw += topology.Residue(r).MW
nmon = self.nopores * self.ncolumns * self.monomers_per_column # number of build monomers in the system
self.dry_mass = nmon * self.build_monomer_mw # mass of dry system
# calculate required water in pores and tails
self.water = topology.Residue(solute)
self.weight_percent = weight_percent / 100.0 # convert to fraction
self.total_water = int((self.weight_percent * nmon * self.build_monomer_mw) / (self.water.MW *
(1 - self.weight_percent)))
tail_water = self.total_water / (ratio + 1)
self.pore_water = int(ratio * tail_water)
self.tail_water = int(tail_water)
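# e.g. ratio=2 with total_water=300 splits into pore_water=200 and tail_water=100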
self.r = 0 # pore radius
self.converged = False
self.solvated = None # an object that will describe solvated systems
def query_database(self, database='water_content.db', guess_range=(0.4, 1), guess_stride=0.2):
""" Read an SQL database, of number of water molecules versus pore radius, to inform the next choice of pore
radius. The decision process bisects the two radii with water contents closest to the desired concentration. If
there is a single data point, an educated guess is made. If there are no data points, then make a random guess.
:param database: Name of database
:param guess_range: If water_content.db has no entries for the build monomer, an initial radius will be \
randomly selected from this range
:param guess_stride: How far above/below the highest/lowest value to make the next guess at pore radius if you \
need more/less water than the bounds of the water content database (nm)
:type database: str
:type guess_range: tuple
:type guess_stride: float
"""
# read database of pore radii and associated water contents
connection = sql.connect("%s/%s" % (self.location, database)) # database created in this directory
crsr = connection.cursor()
sql_command = "select nwater, radius from radii where monomer = '%s' and pd_angle = %.2f and dbwl = %.2f and " \
"mon_per_col = %d and seed = %s;" % (self.build_monomer.name, self.parallel_displaced, self.dbwl,
self.monomers_per_column, self.random_seed)
try:
sql_output = crsr.execute(sql_command).fetchall()
except sql.OperationalError:
sql_output = []
if sql_output:
# assumes nwater scales with radii (which might be erroneous for data points that are close together)
nwater = sorted([x[0] for x in sql_output])
radii = sorted([float(x[1]) for x in sql_output])
print(radii, nwater, self.pore_water)
if len(nwater) == 1:
if self.pore_water > nwater[0]:
self.r = radii[0] + guess_stride
elif self.pore_water < nwater[0]:
self.r = radii[0] - guess_stride
else:
self.r = radii[0]
elif self.pore_water < min(nwater):
self.r = min(radii) - guess_stride
elif self.pore_water > max(nwater):
self.r = max(radii) + guess_stride
else:
bin = np.digitize(self.pore_water, nwater)
upper_bound, lower_bound = nwater[bin], nwater[
bin - 1] # upper bound is exclusive. Lower bound is inclusive
# calculate water content if an entry doesn't already exist
if abs(self.pore_water - lower_bound) > self.tolerance:
# linearly interpolate what the next pore radius should be based on desired amount of water in pore
interpolation = (self.pore_water - lower_bound) / (upper_bound - lower_bound)
self.r = radii[bin - 1] + (radii[bin] - radii[bin - 1]) * interpolation
else:
self.r = radii[bin - 1]
else:
self.r = (guess_range[1] - guess_range[0]) * np.random.sample() + guess_range[0]
connection.close()
def build(self):
equil.build('%s.gro' % self.build_monomer.name, 'initial.gro', self.monomers_per_column, self.ncolumns,
self.r, self.p2p, self.dbwl, self.parallel_displaced,
nopores=self.nopores, seed=self.random_seed)
def restrain(self, force, restraint_axis='xyz', ring_restraints=("C", "C1", "C2", "C3", "C4", "C5")):
equil.restrain('initial.gro', self.build_monomer.name, force, restraint_axis, ring_restraints)
def input_files(self, gro, ensemble, length=50, restraint_residue='HII'):
equil.generate_input_files(gro, ensemble, length, restraints=restraint_residue)
def put_in_box(self, gro, tpr):
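# pipe selection group '0' (the whole System) into gmx trjconv to wrap all
# atoms back into the triclinic unit cell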
p1 = subprocess.Popen(['echo', '0'], stdout=subprocess.PIPE)
trjconv = "gmx trjconv -f %s -o %s -pbc atom -s %s -ur tric" % (gro, gro, tpr)
p2 = subprocess.Popen(trjconv.split(), stdin=p1.stdout)
p2.wait()
def equilibrate(self, input_files=True, length=50, force=1000000, restraint_residue='HII', restraint_axis='xyz',
ring_restraints=("C", "C1", "C2", "C3", "C4", "C5")):
""" Simulate the unit cell with restraints placed on the head group
:param input_files: Generate GROMACS .mdp and topology files
:param length: Simulation length (ps)
:param force: Force with which to restrain head groups kJ mol^-1 nm^-2
:param restraint_residue: Name of residue to which position restraints will be applied
:param restraint_axis: Axis/axes along which head groups should be restrained
:param ring_restraints: Names of head group atoms
:type input_files: bool
:type length: int
:type force: float or int
:type restraint_residue: str
:type restraint_axis: str
:type ring_restraints: tuple
"""
self.build() # build initial configuration
if input_files: # doesn't need to be done every time
self.restrain(force, restraint_axis=restraint_axis, ring_restraints=ring_restraints)
self.input_files('initial.gro', 'nvt', length=length, restraint_residue=restraint_residue)
nrg = 1
while nrg > 0:
self.build()
equil.simulate('em.mdp', 'topol.top', 'initial.gro', 'em', mpi=self.mpi, np=self.np, restrained=True)
nrg = equil.check_energy(logname='em.log')
if nrg > 0: # choose a new random seed if energy minimization doesn't work
self.random_seed = np.random.randint(0, 4294967295)
cp = 'cp em.gro %s.gro' % force
p = subprocess.Popen(cp.split())
p.wait()
self.put_in_box('%s.gro' % force, 'em.tpr')
def calculate_pore_water(self, config):
""" Determine the total number of water molecules within the pore radius. Update database with system
configuration parameters.
:param config: Name of .gro configuration of which to calculate pore water content
:type config: str
"""
# solvate the system
if self.mpi:
gmx = "mpirun -np %s gmx_mpi" % self.np
else:
gmx = "gmx"
cmd = "%s solvate -cp %s.gro -cs spc216.gro -o solvated.gro -p topol.top" % (gmx, config)
subprocess.call(cmd.split())
self.put_in_box('solvated.gro', 'solvated.gro')
self.solvated = solute_partitioning.System('solvated.gro', self.build_monomer.name, 'SOL')
self.solvated.locate_pore_centers()
# radius based on reference atom, but partition based on pore_defining_atoms. Need to make choice or leave it
self.solvated.partition(self.r)
if abs(self.pore_water - len(self.solvated.pore_water[0])) <= self.tolerance:
self.converged = True
if self.pore_water != self.solvated.pore_water[0]: # avoid duplicates
self.update_database()
def update_database(self, database='water_content.db'):
connection = sql.connect("%s/%s" % (self.location, database)) # database created in this directory
crsr = connection.cursor()
sql_command = "insert into radii (monomer, radius, mon_per_col, nwater, pd_angle, dbwl, seed) values ('%s'," \
"%.6f, %d, %d, %.2f, %.2f, %d);" % (self.build_monomer.name, self.r, self.monomers_per_column,
len(self.solvated.pore_water[0]), self.parallel_displaced,
self.dbwl, self.random_seed)
crsr.execute(sql_command)
connection.commit()
connection.close()
def write_final_pore_configuration(self):
""" Write the configuration with the correct number of water molecules placed in the pore region, then
rewrite the topology files.
"""
# only do this if we have converged on the correct number of waters
water_indices = []
for i in self.solvated.tail_water[0]: # tail_water indices are given as the center of mass of each water
water_indices += self.solvated.residue_indices[(self.water.natoms * i): self.water.natoms * (i + 1)].tolist()
keep = np.full(self.solvated.pos.shape[1], True, dtype=bool) # array of True. Booleans are fast
keep[water_indices] = False # set pore indices to False
# change all 'HOH' to 'SOL' because mdtraj changed it
res = np.array(self.solvated.res)
res[np.where(np.array(self.solvated.res) == 'HOH')[0]] = 'SOL'
file_rw.write_gro_pos(self.solvated.pos[0, keep, :], 'solvated_pores.gro', ids=np.array(self.solvated.ids)[keep],
res=res[keep], box=self.solvated.box)
# rewrite topology files
self.input_files('solvated_pores.gro', 'nvt')
def place_water_tails(self, output):
""" Place the desired number of water molecules in the tails. This is done by randomly inserting water molecules
in close proximity to the tails, one-by-one. A short energy minimization is performed between each insertion.
:param output: Name of final configuration
:type output: str
"""
tails = solvate_tails.System('solvated_pores.gro', 'topol.top', self.build_monomer.name, rbounds=[0.3, 1],
restraints=True, mpi=self.mpi, nproc=self.np)
tails.insert_all_water(self.tail_water, output=output, final_topname='topol.top')
def full_equilibration(self, forces, fully_solvated='solvated_final.gro', l_nvt=50, l_berendsen=5000, l_pr=400000,
restraint_residue='HII', restraint_axis='xyz',
ring_restraints=("C", "C1", "C2", "C3", "C4", "C5")):
""" Simulate the unit cell with a sequence of decreasing restraints placed on the head group
:param forces: sequence of forces to apply to ring_restraints (:math:`\dfrac{kJ}{mol~nm^2}`)
:param fully_solvated: name of fully solvated coordinate file
:param l_nvt: Length of short restrained simulations (ps)
:param l_berendsen: Length of equilibration simulation run with berendsen pressure control (ps)
:param l_pr: Length of long equilibration simulation run with Parrinello-Rahman pressure control (ps)
:param restraint_residue: Name of residue to which position restraints will be applied
:param restraint_axis: Axis/axes along which head groups should be restrained
:param ring_restraints: Names of head group atoms
:type forces: tuple or list
:type fully_solvated: str
:type l_nvt: int
:type l_berendsen: int
:type l_pr: int
:type restraint_residue: str
:type restraint_axis: str
:type ring_restraints: tuple
"""
equil.simulate('nvt.mdp', 'topol.top', '%s' % fully_solvated, 'nvt', mpi=self.mpi, np=self.np, restrained=True)
cp = "cp nvt.gro em.gro"
subprocess.Popen(cp.split()).wait()
cp = "cp nvt.gro %s.gro" % forces[0]
subprocess.Popen(cp.split()).wait()
equil.generate_input_files('nvt.gro', 'nvt', l_nvt, genvel=False, restraints=restraint_residue)
for f in forces[1:]:
equil.restrain(fully_solvated, self.build_monomer.name, f, restraint_axis, ring_restraints)
equil.simulate('nvt.mdp', 'topol.top', 'em.gro', 'nvt', mpi=self.mpi, np=self.np, restrained=True)
cp = "cp nvt.gro %s.gro" % f
subprocess.Popen(cp.split()).wait()
cp = "cp nvt.trr %s.trr" % f
subprocess.Popen(cp.split()).wait()
equil.generate_input_files(fully_solvated, 'npt', l_berendsen, genvel=False, barostat='berendsen', frames=50)
equil.simulate('npt.mdp', 'topol.top', 'nvt.gro', 'berendsen', mpi=self.mpi, np=self.np)
equil.generate_input_files('berendsen.gro', 'npt', l_pr, genvel=False, barostat='Parrinello-Rahman', frames=500)
equil.simulate('npt.mdp', 'topol.top', 'berendsen.gro', 'PR', mpi=self.mpi, np=self.np)
if __name__ == "__main__":
os.environ["GMX_MAXBACKUP"] = "-1" # stop GROMACS from making backups
args = initialize().parse_args()
sys = System(args.build_monomer, args.weight_percent, args.ratio, solute='HOH', nopores=args.nopores,
ncolumns=args.ncolumns, monomers_per_column=args.monomers_per_column, p2p=args.p2p,
parallel_displaced=args.parallel_displaced, dbwl=args.dbwl, random_seed=args.random_seed,
mpi=args.mpi, nproc=args.nproc, tolerance=args.tolerance)
while not sys.converged:
sys.query_database(database='water_content.db', guess_range=args.guess_range, guess_stride=args.guess_stride)
sys.equilibrate(input_files=True, length=args.length_nvt, force=args.forces[0],
restraint_residue=args.restraint_residue, restraint_axis=args.restraint_axis,
ring_restraints=args.ring_restraints)
sys.calculate_pore_water(args.forces[0])
sys.write_final_pore_configuration()
sys.place_water_tails(args.output)
sys.full_equilibration(args.forces, fully_solvated=args.output, l_berendsen=args.length_berendsen,
l_nvt=args.length_nvt, l_pr=args.length_Parrinello_Rahman,
restraint_axis=args.restraint_axis, restraint_residue=args.restraint_residue,
ring_restraints=args.ring_restraints)
| 6,046 | 14,728 | 46 |
b13f24bb3851a6fe39d0c308d4a22e0c6710f0e7 | 480 | py | Python | anarchoApp/anarcho/routes/swagger.py | nrudenko/anarcho | 99930bf720ea56518a9c4c2efcf99d8987531193 | [
"MIT"
] | 7 | 2015-03-03T15:59:58.000Z | 2019-10-10T06:15:40.000Z | anarchoApp/anarcho/routes/swagger.py | nrudenko/anarcho | 99930bf720ea56518a9c4c2efcf99d8987531193 | [
"MIT"
] | 68 | 2015-02-03T21:39:00.000Z | 2021-06-20T22:41:11.000Z | anarchoApp/anarcho/routes/swagger.py | nrudenko/anarcho | 99930bf720ea56518a9c4c2efcf99d8987531193 | [
"MIT"
] | 4 | 2015-02-25T09:49:49.000Z | 2015-10-29T09:07:23.000Z | from anarcho import app
from flask import jsonify
from flask_swagger import swagger
@app.route('/swagger/spec.json', methods=['GET'])
def spec():
"""
Returns the swagger spec.
Read more at https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
:return:
"""
swag = swagger(app)
swag['info']['title'] = app.name
swag['consumes'] = ['application/json']
swag['produces'] = ['application/json']
return jsonify(swag)
| 26.666667 | 98 | 0.664583 | from anarcho import app
from flask import jsonify
from flask_swagger import swagger
@app.route('/swagger/spec.json', methods=['GET'])
def spec():
"""
Returns the swagger spec.
Read more at https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
:return:
"""
swag = swagger(app)
swag['info']['title'] = app.name
swag['consumes'] = ['application/json']
swag['produces'] = ['application/json']
return jsonify(swag)
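# Example: a GET request to /swagger/spec.json returns the generated spec as
# JSON, e.g. {"swagger": "2.0", "info": {"title": ...}, "consumes": ["application/json"], ...}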
| 0 | 0 | 0 |
46c897109e481f3c93beaf9b400390c02e0a8b97 | 739 | py | Python | python/Interval-List-Intersections/two_pointers.py | yutong-xie/Leetcode-with-python | 6578f288a757bf76213030b73ec3319a7baa2661 | [
"MIT"
] | null | null | null | python/Interval-List-Intersections/two_pointers.py | yutong-xie/Leetcode-with-python | 6578f288a757bf76213030b73ec3319a7baa2661 | [
"MIT"
] | null | null | null | python/Interval-List-Intersections/two_pointers.py | yutong-xie/Leetcode-with-python | 6578f288a757bf76213030b73ec3319a7baa2661 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2020, Yutong Xie, UIUC.
Using Two Pointers to find the interval intersection.
'''
| 21.114286 | 57 | 0.419486 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2020, Yutong Xie, UIUC.
Using Two Pointers to find the interval intersection.
'''
class Solution(object):
def intervalIntersection(self, A, B):
"""
:type A: List[List[int]]
:type B: List[List[int]]
:rtype: List[List[int]]
"""
if not A or not B:
return []
a, b = 0, 0
ans = []
while a < len(A) and b < len(B):
low = max(A[a][0], B[b][0])
high = min(A[a][1], B[b][1])
if low <= high:
ans.append([low, high])
if A[a][1] < B[b][1]:
a += 1
else:
b += 1
return ans
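# Example: Solution().intervalIntersection([[0, 2], [5, 10]], [[1, 5], [8, 12]])
# returns [[1, 2], [5, 5], [8, 10]]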
| 0 | 564 | 23 |
e26c1de61513b9dd04386c2769fcd9c9212d69dd | 19,754 | py | Python | src/plugin.py | saitho/galaxy_blizzard_plugin | 9716c44e9db261d85baddc019d3f9e5eb1f28cf7 | [
"MIT"
] | null | null | null | src/plugin.py | saitho/galaxy_blizzard_plugin | 9716c44e9db261d85baddc019d3f9e5eb1f28cf7 | [
"MIT"
] | null | null | null | src/plugin.py | saitho/galaxy_blizzard_plugin | 9716c44e9db261d85baddc019d3f9e5eb1f28cf7 | [
"MIT"
] | null | null | null | import asyncio
import json
import os
import sys
import multiprocessing
import webbrowser
import requests
import requests.cookies
import logging as log
import subprocess
import time
from galaxy.api.consts import LocalGameState, Platform
from galaxy.api.plugin import Plugin, create_and_run_plugin
from galaxy.api.types import Achievement, Game, LicenseInfo, LocalGame
from galaxy.api.errors import ( AuthenticationRequired,
BackendTimeout, BackendNotAvailable, BackendError, NetworkError, UnknownError, InvalidCredentials
)
from version import __version__ as version
from process import ProcessProvider
from local_client_base import ClientNotInstalledError
from local_client import LocalClient
from backend import BackendClient, AccessTokenExpired
from definitions import Blizzard, DataclassJSONEncoder, License_Map, ClassicGame
from consts import SYSTEM
from consts import Platform as pf
from http_client import AuthenticatedHttpClient
# async def get_unlocked_achievements(self, game_id):
# if not self.website_client.is_authenticated():
# raise AuthenticationRequired()
# try:
# if game_id == "21298":
# return await self._get_sc2_achievements()
# elif game_id == "5730135":
# return await self._get_wow_achievements()
# else:
# return []
# except requests.Timeout:
# raise BackendTimeout()
# except requests.ConnectionError:
# raise NetworkError()
# except Exception as e:
# log.exception(str(e))
# return []
if __name__ == "__main__":
main()
| 42.665227 | 136 | 0.621849 | import asyncio
import json
import os
import sys
import multiprocessing
import webbrowser
import requests
import requests.cookies
import logging as log
import subprocess
import time
from galaxy.api.consts import LocalGameState, Platform
from galaxy.api.plugin import Plugin, create_and_run_plugin
from galaxy.api.types import Achievement, Game, LicenseInfo, LocalGame
from galaxy.api.errors import ( AuthenticationRequired,
BackendTimeout, BackendNotAvailable, BackendError, NetworkError, UnknownError, InvalidCredentials
)
from version import __version__ as version
from process import ProcessProvider
from local_client_base import ClientNotInstalledError
from local_client import LocalClient
from backend import BackendClient, AccessTokenExpired
from definitions import Blizzard, DataclassJSONEncoder, License_Map, ClassicGame
from consts import SYSTEM
from consts import Platform as pf
from http_client import AuthenticatedHttpClient
class BNetPlugin(Plugin):
def __init__(self, reader, writer, token):
super().__init__(Platform.Battlenet, version, reader, writer, token)
self.local_client = LocalClient(self._update_statuses)
self.authentication_client = AuthenticatedHttpClient(self)
self.backend_client = BackendClient(self, self.authentication_client)
self.owned_games_cache = []
self.watched_running_games = set()
self.local_games_called = False
async def _notify_about_game_stop(self, game, starting_timeout):
if not self.local_games_called:
return
id_to_watch = game.info.id
if id_to_watch in self.watched_running_games:
log.debug(f'Game {id_to_watch} is already watched. Skipping')
return
try:
self.watched_running_games.add(id_to_watch)
await asyncio.sleep(starting_timeout)
ProcessProvider().update_games_processes([game])
            log.info(f'Setting up process watcher for {game._processes}')
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, game.wait_until_game_stops)
finally:
self.update_local_game_status(LocalGame(id_to_watch, LocalGameState.Installed))
self.watched_running_games.remove(id_to_watch)
def _update_statuses(self, refreshed_games, previous_games):
if not self.local_games_called:
return
for blizz_id, refr in refreshed_games.items():
prev = previous_games.get(blizz_id, None)
if prev is None:
if refr.playable:
log.debug('Detected playable game')
state = LocalGameState.Installed
else:
log.debug('Detected installation begin')
state = LocalGameState.None_
elif refr.playable and not prev.playable:
log.debug('Detected playable game')
state = LocalGameState.Installed
elif refr.last_played != prev.last_played:
log.debug('Detected launched game')
state = LocalGameState.Installed | LocalGameState.Running
asyncio.create_task(self._notify_about_game_stop(refr, 5))
else:
continue
log.info(f'Changing game {blizz_id} state to {state}')
self.update_local_game_status(LocalGame(blizz_id, state))
for blizz_id, prev in previous_games.items():
refr = refreshed_games.get(blizz_id, None)
if refr is None:
log.debug('Detected uninstalled game')
state = LocalGameState.None_
self.update_local_game_status(LocalGame(blizz_id, state))
def log_out(self):
if self.backend_client:
asyncio.create_task(self.authentication_client.shutdown())
self.authentication_client.user_details = None
self.owned_games_cache = []
async def open_battlenet_browser(self):
url = self.authentication_client.blizzard_battlenet_download_url
log.info(f'Opening battle.net website: {url}')
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, lambda x: webbrowser.open(x, autoraise=True), url)
async def install_game(self, game_id):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
installed_game = self.local_client.get_installed_games().get(game_id, None)
if installed_game and os.access(installed_game.install_path, os.F_OK):
log.warning("Received install command on an already installed game")
return await self.launch_game(game_id)
if game_id in Blizzard.legacy_game_ids:
if SYSTEM == pf.WINDOWS:
platform = 'windows'
elif SYSTEM == pf.MACOS:
platform = 'macos'
webbrowser.open(f"https://www.blizzard.com/download/confirmation?platform={platform}&locale=enUS&version=LIVE&id={game_id}")
return
try:
self.local_client.refresh()
log.info(f'Installing game of id {game_id}')
self.local_client.install_game(game_id)
except ClientNotInstalledError as e:
log.warning(e)
await self.open_battlenet_browser()
except Exception as e:
log.exception(f"Installing game {game_id} failed: {e}")
def _open_battlenet_at_id(self, game_id):
try:
self.local_client.refresh()
self.local_client.open_battlenet(game_id)
except Exception as e:
log.exception(f"Opening battlenet client on specific game_id {game_id} failed {e}")
try:
self.local_client.open_battlenet()
except Exception as e:
log.exception(f"Opening battlenet client failed {e}")
async def uninstall_game(self, game_id):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
if game_id == 'wow_classic':
            # Attempting to uninstall classic WoW through the protocol only yields a message
            # that the game cannot be uninstalled this way and that Battle.net should be used.
return self._open_battlenet_at_id(game_id)
if SYSTEM == pf.MACOS:
self._open_battlenet_at_id(game_id)
else:
try:
installed_game = self.local_client.get_installed_games().get(game_id, None)
if installed_game is None or not os.access(installed_game.install_path, os.F_OK):
log.error(f'Cannot uninstall {Blizzard[game_id].uid}')
self.update_local_game_status(LocalGame(game_id, LocalGameState.None_))
return
if not isinstance(installed_game.info, ClassicGame):
if self.local_client.uninstaller is None:
raise FileNotFoundError('Uninstaller not found')
uninstall_tag = installed_game.uninstall_tag
client_lang = self.local_client.config_parser.locale_language
self.local_client.uninstaller.uninstall_game(installed_game, uninstall_tag, client_lang)
except Exception as e:
log.exception(f'Uninstalling game {game_id} failed: {e}')
async def launch_game(self, game_id):
if not self.local_games_called:
await self.get_local_games()
try:
if self.local_client.get_installed_games() is None:
log.error(f'Launching game that is not installed: {game_id}')
return await self.install_game(game_id)
game = self.local_client.get_installed_games().get(game_id, None)
if game is None:
log.error(f'Launching game that is not installed: {game_id}')
return await self.install_game(game_id)
if isinstance(game.info, ClassicGame):
log.info(f'Launching game of id: {game_id}, {game} at path {os.path.join(game.install_path, game.info.exe)}')
if SYSTEM == pf.WINDOWS:
subprocess.Popen(os.path.join(game.install_path, game.info.exe))
elif SYSTEM == pf.MACOS:
if not game.info.bundle_id:
log.warning(f"{game.name} has no bundle id, help by providing us bundle id of this game")
subprocess.Popen(['open', '-b', game.info.bundle_id])
self.update_local_game_status(LocalGame(game_id, LocalGameState.Installed | LocalGameState.Running))
asyncio.create_task(self._notify_about_game_stop(game, 6))
return
self.local_client.refresh()
log.info(f'Launching game of id: {game_id}, {game}')
await self.local_client.launch_game(game, wait_sec=60)
self.update_local_game_status(LocalGame(game_id, LocalGameState.Installed | LocalGameState.Running))
self.local_client.close_window()
asyncio.create_task(self._notify_about_game_stop(game, 3))
except ClientNotInstalledError as e:
log.warning(e)
await self.open_battlenet_browser()
except TimeoutError as e:
log.warning(str(e))
except Exception as e:
log.exception(f"Launching game {game_id} failed: {e}")
async def authenticate(self, stored_credentials=None):
try:
if stored_credentials:
auth_data = self.authentication_client.process_stored_credentials(stored_credentials)
try:
await self.authentication_client.create_session()
await self.backend_client.refresh_cookies()
auth_status = await self.backend_client.validate_access_token(auth_data.access_token)
except (BackendNotAvailable, BackendError, NetworkError, UnknownError, BackendTimeout) as e:
raise e
except Exception:
raise InvalidCredentials()
if self.authentication_client.validate_auth_status(auth_status):
self.authentication_client.user_details = await self.backend_client.get_user_info()
return self.authentication_client.parse_user_details()
else:
return self.authentication_client.authenticate_using_login()
except Exception as e:
raise e
async def pass_login_credentials(self, step, credentials, cookies):
if "logout&app=oauth" in credentials['end_uri']:
# 2fa expired, repeat authentication
return self.authentication_client.authenticate_using_login()
if self.authentication_client.attempted_to_set_battle_tag:
self.authentication_client.user_details = await self.backend_client.get_user_info()
return self.authentication_client.parse_auth_after_setting_battletag()
cookie_jar = self.authentication_client.parse_cookies(cookies)
auth_data = await self.authentication_client.get_auth_data_login(cookie_jar, credentials)
try:
await self.authentication_client.create_session()
await self.backend_client.refresh_cookies()
except (BackendNotAvailable, BackendError, NetworkError, UnknownError, BackendTimeout) as e:
raise e
except Exception:
raise InvalidCredentials()
auth_status = await self.backend_client.validate_access_token(auth_data.access_token)
if not ("authorities" in auth_status and "IS_AUTHENTICATED_FULLY" in auth_status["authorities"]):
raise InvalidCredentials()
self.authentication_client.user_details = await self.backend_client.get_user_info()
self.authentication_client.set_credentials()
return self.authentication_client.parse_battletag()
async def get_owned_games(self):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
def _parse_classic_games(classic_games):
for classic_game in classic_games["classicGames"]:
log.info(f"looking for {classic_game} in classic games")
try:
blizzard_game = Blizzard[classic_game["localizedGameName"].replace(u'\xa0', ' ')]
log.info(f"match! {blizzard_game}")
classic_game["titleId"] = blizzard_game.uid
classic_game["gameAccountStatus"] = "Good"
except KeyError:
continue
return classic_games
def _get_not_added_free_games(owned_games):
owned_games_ids = []
for game in owned_games:
if "titleId" in game:
owned_games_ids.append(str(game["titleId"]))
return [{"titleId": game.blizzard_id,
"localizedGameName": game.name,
"gameAccountStatus": "Free"}
for game in Blizzard.free_games if game.blizzard_id not in owned_games_ids]
try:
games = await self.backend_client.get_owned_games()
classic_games = _parse_classic_games(await self.backend_client.get_owned_classic_games())
owned_games = games["gameAccounts"] + classic_games["classicGames"]
# Add wow classic if retail wow is present in owned games
for owned_game in owned_games.copy():
if 'titleId' in owned_game:
if owned_game['titleId'] == 5730135:
owned_games.append({'titleId': 'wow_classic',
'localizedGameName': 'World of Warcraft Classic',
'gameAccountStatus': owned_game['gameAccountStatus']})
free_games_to_add = _get_not_added_free_games(owned_games)
owned_games += free_games_to_add
self.owned_games_cache = owned_games
return [
Game(
str(game["titleId"]),
game["localizedGameName"],
[],
LicenseInfo(License_Map[game["gameAccountStatus"]]),
)
for game in self.owned_games_cache if "titleId" in game
]
except Exception as e:
log.exception(f"failed to get owned games: {repr(e)}")
raise
async def get_local_games(self):
timeout = time.time() + 2
try:
translated_installed_games = []
while not self.local_client.games_finished_parsing():
await asyncio.sleep(0.1)
if time.time() >= timeout:
break
running_games = self.local_client.get_running_games()
installed_games = self.local_client.get_installed_games()
log.info(f"Installed games {installed_games.items()}")
log.info(f"Running games {running_games}")
for id_, game in installed_games.items():
if game.playable:
state = LocalGameState.Installed
if id_ in running_games:
state |= LocalGameState.Running
else:
state = LocalGameState.None_
translated_installed_games.append(LocalGame(id_, state))
self.local_client.installed_games_cache = installed_games
return translated_installed_games
except Exception as e:
log.exception(f"failed to get local games: {str(e)}")
raise
finally:
self.local_games_called = True
async def _get_wow_achievements(self):
achievements = []
try:
characters_data = await self.backend_client.get_wow_character_data()
characters_data = characters_data["characters"]
wow_character_data = await asyncio.gather(
*[
self.backend_client.get_wow_character_achievements(character["realm"], character["name"])
for character in characters_data
],
return_exceptions=True,
)
for data in wow_character_data:
if isinstance(data, requests.Timeout) or isinstance(data, requests.ConnectionError):
raise data
wow_achievement_data = [
list(
zip(
data["achievements"]["achievementsCompleted"],
data["achievements"]["achievementsCompletedTimestamp"],
)
)
for data in wow_character_data
if type(data) is dict
]
already_in = set()
for char_ach in wow_achievement_data:
for ach in char_ach:
if ach[0] not in already_in:
achievements.append(Achievement(achievement_id=ach[0], unlock_time=int(ach[1] / 1000)))
already_in.add(ach[0])
except (AccessTokenExpired, BackendError) as e:
log.exception(str(e))
with open('wow.json', 'w') as f:
f.write(json.dumps(achievements, cls=DataclassJSONEncoder))
return achievements
async def _get_sc2_achievements(self):
account_data = await self.backend_client.get_sc2_player_data(self.authentication_client.user_details["id"])
# TODO what if more sc2 accounts?
assert len(account_data) == 1
account_data = account_data[0]
profile_data = await self.backend_client.get_sc2_profile_data(
account_data["regionId"], account_data["realmId"],
account_data["profileId"]
)
sc2_achievement_data = [
Achievement(achievement_id=achievement["achievementId"], unlock_time=achievement["completionDate"])
for achievement in profile_data["earnedAchievements"]
if achievement["isComplete"]
]
with open('sc2.json', 'w') as f:
f.write(json.dumps(sc2_achievement_data, cls=DataclassJSONEncoder))
return sc2_achievement_data
# async def get_unlocked_achievements(self, game_id):
# if not self.website_client.is_authenticated():
# raise AuthenticationRequired()
# try:
# if game_id == "21298":
# return await self._get_sc2_achievements()
# elif game_id == "5730135":
# return await self._get_wow_achievements()
# else:
# return []
# except requests.Timeout:
# raise BackendTimeout()
# except requests.ConnectionError:
# raise NetworkError()
# except Exception as e:
# log.exception(str(e))
# return []
async def launch_platform_client(self):
if self.local_client.is_running():
log.info("Launch platform client called but client is already running")
return
self.local_client.open_battlenet()
await self.local_client.prevent_battlenet_from_showing()
async def shutdown_platform_client(self):
await self.local_client.shutdown_platform_client()
async def shutdown(self):
log.info("Plugin shutdown.")
await self.authentication_client.shutdown()
def main():
multiprocessing.freeze_support()
create_and_run_plugin(BNetPlugin, sys.argv)
if __name__ == "__main__":
main()
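# Editor's note (illustration, not part of the plugin): the status updates above
# combine LocalGameState members as bit flags, e.g.
# LocalGameState.Installed | LocalGameState.Running.  A self-contained sketch of
# the same pattern with enum.IntFlag:
#
#     import enum
#
#     class State(enum.IntFlag):
#         None_ = 0
#         Installed = 1
#         Running = 2
#
#     state = State.Installed | State.Running
#     assert State.Installed in state and State.Running in state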
| 17,569 | 4 | 531 |
4377c782264c94cece405287180a0c40430af740 | 2,055 | py | Python | delta/layers/ops/kernels/synthfiltbank_op_test.py | headhunterChris/delta | a916e06f55213dcd1fea39a5950927dfed1483c7 | [
"Apache-2.0"
] | 1 | 2021-01-11T13:25:19.000Z | 2021-01-11T13:25:19.000Z | delta/layers/ops/kernels/synthfiltbank_op_test.py | lhhriver/delta | a916e06f55213dcd1fea39a5950927dfed1483c7 | [
"Apache-2.0"
] | null | null | null | delta/layers/ops/kernels/synthfiltbank_op_test.py | lhhriver/delta | a916e06f55213dcd1fea39a5950927dfed1483c7 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' synthesis filter bank Op unit-test '''
import os
from pathlib import Path
import tensorflow as tf
from absl import logging
from delta.layers.ops import py_x_ops
from delta.data import feat as feat_lib
class SfbOpTest(tf.test.TestCase):
''' synthesis filter bank op unittest'''
def setUp(self):
'''set up'''
self.wavpath = str(
Path(os.environ['MAIN_ROOT']).joinpath(
'delta/layers/ops/data/sm1_cln.wav'))
def tearDown(self):
'''tear down'''
def test_sfb(self):
''' test sfb op'''
with self.session():
sample_rate, input_data = feat_lib.load_wav(self.wavpath, sr=16000)
power_spc, phase_spc = py_x_ops.analyfiltbank(input_data, sample_rate)
logging.info('power_spc: {}'.format(power_spc.eval().shape))
logging.info('phase_spc: {}'.format(phase_spc.eval().shape))
output = py_x_ops.synthfiltbank(power_spc.eval(), phase_spc.eval(),
sample_rate)
self.assertEqual(tf.rank(output).eval(), 1)
      # the first ~400 samples differ because of the overlap-and-add resynthesis
self.assertAllClose(
output.eval().flatten()[500:550],
input_data[500:550],
rtol=1e-4,
atol=1e-4)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
tf.test.main()
| 31.136364 | 80 | 0.655961 | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' synthesis filter bank Op unit-test '''
import os
from pathlib import Path
import tensorflow as tf
from absl import logging
from delta.layers.ops import py_x_ops
from delta.data import feat as feat_lib
class SfbOpTest(tf.test.TestCase):
''' synthesis filter bank op unittest'''
def setUp(self):
'''set up'''
self.wavpath = str(
Path(os.environ['MAIN_ROOT']).joinpath(
'delta/layers/ops/data/sm1_cln.wav'))
def tearDown(self):
'''tear down'''
def test_sfb(self):
''' test sfb op'''
with self.session():
sample_rate, input_data = feat_lib.load_wav(self.wavpath, sr=16000)
power_spc, phase_spc = py_x_ops.analyfiltbank(input_data, sample_rate)
logging.info('power_spc: {}'.format(power_spc.eval().shape))
logging.info('phase_spc: {}'.format(phase_spc.eval().shape))
output = py_x_ops.synthfiltbank(power_spc.eval(), phase_spc.eval(),
sample_rate)
self.assertEqual(tf.rank(output).eval(), 1)
      # the first ~400 samples differ because of the overlap-and-add resynthesis
self.assertAllClose(
output.eval().flatten()[500:550],
input_data[500:550],
rtol=1e-4,
atol=1e-4)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
tf.test.main()
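# Editor's note (not part of the original test): the assertion above skips the
# first samples because synthesis rebuilds the waveform by overlap-add, and the
# first frames have no preceding window to overlap with.  A minimal NumPy
# sketch of overlap-add reconstruction, for illustration only:
#
#     import numpy as np
#
#     def overlap_add(frames, hop):
#         n_frames, frame_len = frames.shape
#         out = np.zeros((n_frames - 1) * hop + frame_len)
#         for i, frame in enumerate(frames):
#             out[i * hop:i * hop + frame_len] += frame  # sum overlapping regions
#         return out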
| 0 | 0 | 0 |
419729da0eec2d3259b159ccea87b89282cddc66 | 267 | py | Python | request_to_pay/userapi/urls.py | alex-kozin/request-to-pay | 30cb5526eff0c35b9e6508d45832fd00d93d403c | [
"MIT"
] | 1 | 2020-05-07T23:39:41.000Z | 2020-05-07T23:39:41.000Z | request_to_pay/userapi/urls.py | Mystery3051/request-to-pay | 30cb5526eff0c35b9e6508d45832fd00d93d403c | [
"MIT"
] | null | null | null | request_to_pay/userapi/urls.py | Mystery3051/request-to-pay | 30cb5526eff0c35b9e6508d45832fd00d93d403c | [
"MIT"
] | 1 | 2020-03-09T04:12:45.000Z | 2020-03-09T04:12:45.000Z | from django.urls import path
from rest_framework.authtoken import views as drf_views
from . import views
from . import api_views
urlpatterns = [
path('users/', api_views.UserList.as_view()),
path('users/<int:id>/', api_views.UserRetrieveUpdate.as_view()),
]
| 26.7 | 68 | 0.745318 | from django.urls import path
from rest_framework.authtoken import views as drf_views
from . import views
from . import api_views
urlpatterns = [
path('users/', api_views.UserList.as_view()),
path('users/<int:id>/', api_views.UserRetrieveUpdate.as_view()),
]
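# Editor's sketch (hypothetical; the real api_views module is not shown in this
# file).  UserList / UserRetrieveUpdate are presumably DRF generic views along
# the lines of:
#
#     from django.contrib.auth.models import User
#     from rest_framework import generics
#
#     class UserList(generics.ListAPIView):
#         queryset = User.objects.all()
#         serializer_class = UserSerializer  # assumed serializer
#
#     class UserRetrieveUpdate(generics.RetrieveUpdateAPIView):
#         queryset = User.objects.all()
#         serializer_class = UserSerializer  # assumed serializer
#         lookup_field = 'id'  # matches the '<int:id>' URL kwarg above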
| 0 | 0 | 0 |
1629437f0d3436af92c37675ac71a98e706b3aad | 501 | py | Python | lambdata_tomfox1/what_to_do.py | tomfox1/lambdata | 63abf251172c9bd28f5d8d02c7284f316298cbc5 | [
"MIT"
] | null | null | null | lambdata_tomfox1/what_to_do.py | tomfox1/lambdata | 63abf251172c9bd28f5d8d02c7284f316298cbc5 | [
"MIT"
] | 4 | 2020-03-24T17:21:08.000Z | 2021-06-02T00:09:05.000Z | lambdata_tomfox1/what_to_do.py | tomfox1/lambdata | 63abf251172c9bd28f5d8d02c7284f316298cbc5 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
class Cake:
"""example"""
x=2
class Datacleaner:
""""class for cleaning our data"""
def replace_values(self, value=np.nan, new=0):
"""replace values in a dataframe"""
self.df = self.df.replace(value, new)
| 18.555556 | 50 | 0.576846 | import numpy as np
import pandas as pd
class Cake:
"""example"""
x=2
def __init__(self):
self.baked = False
def bake(self):
self.baked = True
print("All done!")
class Datacleaner:
""""class for cleaning our data"""
def __init__(self, df):
self.df = df
def replace_values(self, value=np.nan, new=0):
"""replace values in a dataframe"""
self.df = self.df.replace(value, new)
def impute_values(self):
pass
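# A short usage sketch (editor's addition):
if __name__ == "__main__":
    frame = pd.DataFrame({"a": [1.0, np.nan, 3.0]})
    cleaner = Datacleaner(frame)
    cleaner.replace_values()  # NaN -> 0 by default
    print(cleaner.df)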
| 111 | 0 | 107 |
ef2fe4cbb2c804d35ac7b2dfc28c686831c8090d | 2,059 | py | Python | pastepwn/analyzers/tests/googleapikeyanalyzer_test.py | palaparthi/pastepwn | 62b8b4e0100a55aa0db6f3441a272770e9b9d23b | [
"MIT"
] | null | null | null | pastepwn/analyzers/tests/googleapikeyanalyzer_test.py | palaparthi/pastepwn | 62b8b4e0100a55aa0db6f3441a272770e9b9d23b | [
"MIT"
] | null | null | null | pastepwn/analyzers/tests/googleapikeyanalyzer_test.py | palaparthi/pastepwn | 62b8b4e0100a55aa0db6f3441a272770e9b9d23b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from unittest import mock
from pastepwn.analyzers.googleapikeyanalyzer import GoogleApiKeyAnalyzer
if __name__ == '__main__':
unittest.main()
| 33.209677 | 76 | 0.687227 | # -*- coding: utf-8 -*-
import unittest
from unittest import mock
from pastepwn.analyzers.googleapikeyanalyzer import GoogleApiKeyAnalyzer
class TestGoogleApiKeyAnalyzer(unittest.TestCase):
def setUp(self):
self.analyzer = GoogleApiKeyAnalyzer(None)
self.paste = mock.Mock()
def test_match_positive(self):
"""Test if positives are recognized"""
# google key dump
self.paste.body = "AIzaSyCTmst6SvsOAQanZKNt-2pt6nuLoFf2kSA"
self.assertTrue(self.analyzer.match(self.paste))
# google key dump
self.paste.body = "AIzaSyBKNst9JE89f4lHuNXQFTUgZKh8VZpvR6M"
self.assertTrue(self.analyzer.match(self.paste))
# google key dump
self.paste.body = "AIzammmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm"
self.assertTrue(self.analyzer.match(self.paste))
# google key dump
self.paste.body = "AIza00000000000000000000000000000000000"
self.assertTrue(self.analyzer.match(self.paste))
# key in the middle of a string
self.paste.body = "api key: AIza00000000000000000000000000000000000"
self.assertTrue(self.analyzer.match(self.paste))
def test_match_negative(self):
"""Test if negatives are not recognized"""
self.paste.body = ""
self.assertFalse(self.analyzer.match(self.paste))
self.paste.body = None
self.assertFalse(self.analyzer.match(self.paste))
# Invalid start
self.paste.body = "aiza00000000000000000000000000000000000"
self.assertFalse(self.analyzer.match(self.paste))
# Invalid start
self.paste.body = "000000000000000000000000000000000000000"
self.assertFalse(self.analyzer.match(self.paste))
# Invalid length
self.paste.body = "AIzammmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm"
self.assertFalse(self.analyzer.match(self.paste))
# Invalid length
self.paste.body = "AIzammmmmmmmm"
self.assertFalse(self.analyzer.match(self.paste))
if __name__ == '__main__':
unittest.main()
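# Editor's note: the analyzer under test is not shown here, but the cases above
# imply 'AIza' followed by exactly 35 key characters, with no key character on
# either side.  A plausible (assumed, not necessarily pastepwn's actual) pattern:
#
#     import re
#     GOOGLE_API_KEY = re.compile(
#         r"(?<![0-9A-Za-z\-_])AIza[0-9A-Za-z\-_]{35}(?![0-9A-Za-z\-_])")
#     assert GOOGLE_API_KEY.search("key: AIza" + "0" * 35)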
| 79 | 1,768 | 23 |
530ce985f0f9a000385d2e117083af25e10d22fd | 122 | py | Python | learning_logs/admin.py | kevinbowen777/learning_log | ec81918ae92d4859591f840274dd88a37afa4998 | [
"MIT"
] | null | null | null | learning_logs/admin.py | kevinbowen777/learning_log | ec81918ae92d4859591f840274dd88a37afa4998 | [
"MIT"
] | null | null | null | learning_logs/admin.py | kevinbowen777/learning_log | ec81918ae92d4859591f840274dd88a37afa4998 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Entry, Topic
admin.site.register(Topic)
admin.site.register(Entry)
| 17.428571 | 32 | 0.803279 | from django.contrib import admin
from .models import Entry, Topic
admin.site.register(Topic)
admin.site.register(Entry)
| 0 | 0 | 0 |
0de32e1f3cb80e4c22764e4a88bea375a2cf3b59 | 1,009 | py | Python | matterbabble/images.py | DeclanHoare/matterbabble | cbca58faa26e9df09eabf0f6730ad241c74d0ae9 | [
"Apache-2.0"
] | 11 | 2019-03-02T22:45:44.000Z | 2021-07-09T14:46:02.000Z | matterbabble/images.py | DeclanHoare/matterbabble | cbca58faa26e9df09eabf0f6730ad241c74d0ae9 | [
"Apache-2.0"
] | 3 | 2019-09-18T20:00:57.000Z | 2020-12-17T21:12:18.000Z | matterbabble/images.py | DeclanHoare/matterbabble | cbca58faa26e9df09eabf0f6730ad241c74d0ae9 | [
"Apache-2.0"
] | null | null | null | # images.py - Discourse only sends the real addresses of some embedded
# images in the 'cooked' HTML. This module helps to extract them from
# there and turn them into real links in the Markdown.
import html.parser
import io
import commonmark
import commonmark_extensions.plaintext
| 28.828571 | 76 | 0.718533 | # images.py - Discourse only sends the real addresses of some embedded
# images in the 'cooked' HTML. This module helps to extract them from
# there and turn them into real links in the Markdown.
import html.parser
import io
import commonmark
import commonmark_extensions.plaintext
class html_image_ripper(html.parser.HTMLParser):
def __init__(self):
super().__init__()
self.images = []
def handle_starttag(self, tag, attrs):
if tag == "img" and ("class", "emoji") not in attrs:
self.images.append(dict(attrs)["src"])
def replace_images(msg, addr):
parser = commonmark.Parser()
ast = parser.parse(msg["data"]["raw"])
ripper = html_image_ripper()
ripper.feed(msg["data"]["cooked"])
for cur, entering in ast.walker():
if cur.t == "image" and entering:
cur.t = "link"
dest = ripper.images.pop(0)
if dest.startswith("/"):
dest = addr + dest
cur.destination = dest
renderer = commonmark_extensions.plaintext.CommonMarkToCommonMarkRenderer()
return renderer.render(ast)
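# A minimal usage sketch (editor's addition; the message shape is inferred from
# the lookups above, not taken from Discourse documentation):
if __name__ == "__main__":
    msg = {"data": {
        "raw": "here is a picture ![pic]()",
        "cooked": '<p>here is a picture <img src="/uploads/x.png"></p>',
    }}
    # relative upload paths get prefixed with the forum address
    print(replace_images(msg, "https://forum.example.com"))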
| 601 | 27 | 94 |
ba6b7797b88ddb185bff4fc2a5b1692f0c59ef03 | 190 | py | Python | pksampler/prof.py | patrickkidd/pksampler-0.3 | ffe5f1fde1d86052da34d9ee9c44934461c441e2 | [
"MIT"
] | null | null | null | pksampler/prof.py | patrickkidd/pksampler-0.3 | ffe5f1fde1d86052da34d9ee9c44934461c441e2 | [
"MIT"
] | null | null | null | pksampler/prof.py | patrickkidd/pksampler-0.3 | ffe5f1fde1d86052da34d9ee9c44934461c441e2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import profile
from pstats import Stats
from Main import main
profile.run('main()', 'pksampler.profile')
Stats('pksampler.profile').sort_stats('calls').print_stats()
| 21.111111 | 60 | 0.747368 | #!/usr/bin/env python
import profile
from pstats import Stats
from Main import main
profile.run('main()', 'pksampler.profile')
Stats('pksampler.profile').sort_stats('calls').print_stats()
| 0 | 0 | 0 |
2256cbd602479d97aaf72395f0531d2dd11d5f58 | 1,173 | py | Python | Dev/pieces_script/knight.py | nicolaspaquette/Projet-Synthese-Chess-GamByte | 81e0a3e98efee7db3d0411c376f8b40d43408f81 | [
"Unlicense"
] | null | null | null | Dev/pieces_script/knight.py | nicolaspaquette/Projet-Synthese-Chess-GamByte | 81e0a3e98efee7db3d0411c376f8b40d43408f81 | [
"Unlicense"
] | null | null | null | Dev/pieces_script/knight.py | nicolaspaquette/Projet-Synthese-Chess-GamByte | 81e0a3e98efee7db3d0411c376f8b40d43408f81 | [
"Unlicense"
] | null | null | null | from piece import piece
| 36.65625 | 249 | 0.620631 | from piece import piece
class knight(piece):
def __init__(self, color):
self.color = color
self.sign = "N"
self.name = "knight"
self.as_moved = False
self.initialized_row = None
self.initialized_column = None
self.can_be_captured_en_passant = False
self.value = 320
def get_valid_positions(self, board_positions, row, column):
valid_positions = []
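        # the eight L-shaped knight offsets as (row_delta, column_delta) pairs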
directions = [(-2,-1), (-1,-2), (-2,1), (-1,2), (1,-2), (2,-1), (1,2), (2,1)]
for direction in directions:
checking_row = row
checking_column = column
checking_row += direction[0]
checking_column += direction[1]
            if 0 <= checking_row < 8 and 0 <= checking_column < 8:
                target = board_positions[checking_row][checking_column].get_piece()
                if target is None or target.color != self.color:  # and target.name != "king"
                    valid_positions.append((checking_row, checking_column))
return valid_positions
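# A small usage sketch (editor's addition).  Square is a stand-in for the real
# board cell type, which only needs a get_piece() method here:
if __name__ == "__main__":
    class Square:
        def __init__(self, piece_on_square=None):
            self._piece = piece_on_square
        def get_piece(self):
            return self._piece
    board = [[Square() for _ in range(8)] for _ in range(8)]
    # a knight on an empty board at (4, 4) has all eight L-shaped targets
    print(knight("white").get_valid_positions(board, 4, 4))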
| 1,073 | -1 | 76 |
d4655db5fcec6fbc5e892216a03ce89b59c68c73 | 303 | py | Python | test/test_tokenizer.py | zhupengjia/nlptools | a0afc4873ee1b3adb383d38075ad5ae5e0293055 | [
"MIT"
] | 1 | 2020-10-26T02:32:25.000Z | 2020-10-26T02:32:25.000Z | test/test_tokenizer.py | zhupengjia/nlptools | a0afc4873ee1b3adb383d38075ad5ae5e0293055 | [
"MIT"
] | null | null | null | test/test_tokenizer.py | zhupengjia/nlptools | a0afc4873ee1b3adb383d38075ad5ae5e0293055 | [
"MIT"
] | 1 | 2020-10-26T02:34:05.000Z | 2020-10-26T02:34:05.000Z | #!/usr/bin/env python3
from nlptools.text.tokenizer import Tokenizer_BERT
from nlptools.utils import zload
import sys
s = Tokenizer_BERT(bert_model_name='/home/pzhu/.pytorch_pretrained_bert/bert-base-uncased')
txt = 'Who was Jim Henson ? Jim Henson was a puppeteer'
print(s.seg(txt))
print(s.vocab)
| 23.307692 | 91 | 0.782178 | #!/usr/bin/env python3
from nlptools.text.tokenizer import Tokenizer_BERT
from nlptools.utils import zload
import sys
s = Tokenizer_BERT(bert_model_name='/home/pzhu/.pytorch_pretrained_bert/bert-base-uncased')
txt = 'Who was Jim Henson ? Jim Henson was a puppeteer'
print(s.seg(txt))
print(s.vocab)
| 0 | 0 | 0 |
b3faaa0d7ca9f9489c2a9ef4da2ab108da3ff7ac | 1,165 | py | Python | pyfibot/modules/available/module_urlsafety.py | aapa/pyfibot | a8a4330d060b05f0ce63cbcfc6915afb8141955f | [
"BSD-3-Clause"
] | null | null | null | pyfibot/modules/available/module_urlsafety.py | aapa/pyfibot | a8a4330d060b05f0ce63cbcfc6915afb8141955f | [
"BSD-3-Clause"
] | null | null | null | pyfibot/modules/available/module_urlsafety.py | aapa/pyfibot | a8a4330d060b05f0ce63cbcfc6915afb8141955f | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals, print_function, division
import requests
from datetime import datetime
from bs4 import BeautifulSoup
import re
| 37.580645 | 93 | 0.611159 | from __future__ import unicode_literals, print_function, division
import requests
from datetime import datetime
from bs4 import BeautifulSoup
import re
def handle_url(bot, user, channel, url, msg):
s = requests.Session()
s.get('http://www.unmaskparasites.com/token/')
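    # the results endpoint expects a 't' field holding the current UTC time,
    # formatted below as 'YYYY-M-D-H-M-S'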
dt = datetime.utcnow()
dt_str = '%i-%i-%i-%i-%i-%i' % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
r = s.post('http://www.unmaskparasites.com/results/', {'siteUrl': url, 't': dt_str})
    bs = BeautifulSoup(r.text, 'html.parser')  # explicit parser avoids bs4's guessing warning
try:
span = bs.find('div', {'class': 'conclusion'}).find('span')
except AttributeError:
return
state = span.text.replace('<', '').replace('>', '').strip()
if state != 'clean':
info = bs.find('div', {'class': 'brief_report'}).text.strip()
info = re.sub(r'\(.*?\)', '', info).strip()
        # To cut down on false positives: quite a lot of sites report exactly
        # one suspicious inline script, so treat that case as noise.
if info == '1 suspicious inline script found.':
return
return bot.say(channel, 'Warning: Site seems to be %s! (%s)' % (state, info))
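# Editor's note (illustrative): pyfibot calls handle_url() for every URL seen on
# a channel.  A manual invocation would look roughly like
#
#     handle_url(bot, 'nick', '#channel', 'http://example.com/', msg=None)
#
# where bot.say(channel, text) is the only bot method used above.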
| 989 | 0 | 23 |
07b2ac58471b95575cdf5f1ec218e92687d496dc | 5,429 | py | Python | kivymd/stackfloatingbuttons.py | Bitmessage/KivyMD | 035ddf62208be143f35f82d5228a292eb083c757 | [
"MIT"
] | 1 | 2020-03-31T21:33:15.000Z | 2020-03-31T21:33:15.000Z | kivymd/stackfloatingbuttons.py | Bitmessage/KivyMD | 035ddf62208be143f35f82d5228a292eb083c757 | [
"MIT"
] | null | null | null | kivymd/stackfloatingbuttons.py | Bitmessage/KivyMD | 035ddf62208be143f35f82d5228a292eb083c757 | [
"MIT"
] | 1 | 2020-07-12T03:03:08.000Z | 2020-07-12T03:03:08.000Z | # -*- coding: utf-8 -*-
"""
Stack Floating Buttons
======================
Copyright © 2010-2018 HeaTTheatR
For suggestions and questions:
<kivydevelopment@gmail.com>
This file is distributed under the terms of the same license,
as the Kivy framework.
Example
-------
from kivy.app import App
from kivy.lang import Builder
from kivy.factory import Factory
from kivymd.toast import toast
from kivymd.theming import ThemeManager
from kivymd.stackfloatingbuttons import MDStackFloatingButtons
Builder.load_string('''
#:import Toolbar kivymd.toolbar.Toolbar
<ExampleFloatingButtons@BoxLayout>
orientation: 'vertical'
Toolbar:
title: 'Stack Floating Buttons'
md_bg_color: app.theme_cls.primary_color
elevation: 10
left_action_items: [['menu', lambda x: None]]
''')
class Example(App):
theme_cls = ThemeManager()
theme_cls.primary_palette = 'Teal'
title = "Example Stack Floating Buttons"
create_stack_floating_buttons = False
floating_data = {
'Python': 'language-python',
'Php': 'language-php',
'C++': 'language-cpp'}
def set_my_language(self, instance_button):
toast(instance_button.icon)
def build(self):
screen = Factory.ExampleFloatingButtons()
        # Use this condition, otherwise the stack will be created each time.
if not self.create_stack_floating_buttons:
screen.add_widget(MDStackFloatingButtons(
icon='lead-pencil',
floating_data={
'Python': 'language-python',
'Php': 'language-php',
'C++': 'language-cpp'},
callback=self.set_my_language))
self.create_stack_floating_buttons = True
return screen
Example().run()
"""
from kivy.animation import Animation
from kivy.core.window import Window
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.properties import StringProperty, DictProperty, ObjectProperty
from kivy.metrics import dp
from kivymd.cards import MDCard
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import MDFloatingActionButton kivymd.button.MDFloatingActionButton
<FloatingButton@MDFloatingActionButton>
x: Window.width - (self.width + dp(21))
y: dp(25)
size_hint: None, None
size: dp(46), dp(46)
elevation: 5
md_bg_color: app.theme_cls.primary_color
on_release: self.parent.callback(self)
<MDFloatingLabel>
size_hint: None, None
height: dp(20)
width: label.texture_size[0]
border_color_a: .5
md_bg_color: app.theme_cls.primary_color
x: -self.width
Label:
id: label
color: 0, 0, 0, 1
bold: True
markup: True
text: ' %s ' % root.text
<MDStackFloatingButtons>
FloatingButton:
id: f_btn_1
icon: list(root.floating_data.values())[0]
FloatingButton:
id: f_btn_2
icon: list(root.floating_data.values())[1]
FloatingButton:
id: f_btn_3
icon: list(root.floating_data.values())[2]
MDFloatingLabel:
id: f_lbl_1
text: list(root.floating_data.keys())[0]
y: dp(117)
MDFloatingLabel:
id: f_lbl_2
text: list(root.floating_data.keys())[1]
y: dp(170)
MDFloatingLabel:
id: f_lbl_3
text: list(root.floating_data.keys())[2]
y: dp(226)
MDFloatingActionButton:
icon: root.icon
size: dp(56), dp(56)
x: Window.width - (self.width + dp(15))
md_bg_color: app.theme_cls.primary_color
y: dp(15)
on_release: root.show_floating_buttons()
''')
| 26.876238 | 78 | 0.638055 | # -*- coding: utf-8 -*-
"""
Stack Floating Buttons
======================
Copyright © 2010-2018 HeaTTheatR
For suggestions and questions:
<kivydevelopment@gmail.com>
This file is distributed under the terms of the same license,
as the Kivy framework.
Example
-------
from kivy.app import App
from kivy.lang import Builder
from kivy.factory import Factory
from kivymd.toast import toast
from kivymd.theming import ThemeManager
from kivymd.stackfloatingbuttons import MDStackFloatingButtons
Builder.load_string('''
#:import Toolbar kivymd.toolbar.Toolbar
<ExampleFloatingButtons@BoxLayout>
orientation: 'vertical'
Toolbar:
title: 'Stack Floating Buttons'
md_bg_color: app.theme_cls.primary_color
elevation: 10
left_action_items: [['menu', lambda x: None]]
''')
class Example(App):
theme_cls = ThemeManager()
theme_cls.primary_palette = 'Teal'
title = "Example Stack Floating Buttons"
create_stack_floating_buttons = False
floating_data = {
'Python': 'language-python',
'Php': 'language-php',
'C++': 'language-cpp'}
def set_my_language(self, instance_button):
toast(instance_button.icon)
def build(self):
screen = Factory.ExampleFloatingButtons()
        # Use this condition, otherwise the stack will be created each time.
if not self.create_stack_floating_buttons:
screen.add_widget(MDStackFloatingButtons(
icon='lead-pencil',
floating_data={
'Python': 'language-python',
'Php': 'language-php',
'C++': 'language-cpp'},
callback=self.set_my_language))
self.create_stack_floating_buttons = True
return screen
Example().run()
"""
from kivy.animation import Animation
from kivy.core.window import Window
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.properties import StringProperty, DictProperty, ObjectProperty
from kivy.metrics import dp
from kivymd.cards import MDCard
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import MDFloatingActionButton kivymd.button.MDFloatingActionButton
<FloatingButton@MDFloatingActionButton>
x: Window.width - (self.width + dp(21))
y: dp(25)
size_hint: None, None
size: dp(46), dp(46)
elevation: 5
md_bg_color: app.theme_cls.primary_color
on_release: self.parent.callback(self)
<MDFloatingLabel>
size_hint: None, None
height: dp(20)
width: label.texture_size[0]
border_color_a: .5
md_bg_color: app.theme_cls.primary_color
x: -self.width
Label:
id: label
color: 0, 0, 0, 1
bold: True
markup: True
text: ' %s ' % root.text
<MDStackFloatingButtons>
FloatingButton:
id: f_btn_1
icon: list(root.floating_data.values())[0]
FloatingButton:
id: f_btn_2
icon: list(root.floating_data.values())[1]
FloatingButton:
id: f_btn_3
icon: list(root.floating_data.values())[2]
MDFloatingLabel:
id: f_lbl_1
text: list(root.floating_data.keys())[0]
y: dp(117)
MDFloatingLabel:
id: f_lbl_2
text: list(root.floating_data.keys())[1]
y: dp(170)
MDFloatingLabel:
id: f_lbl_3
text: list(root.floating_data.keys())[2]
y: dp(226)
MDFloatingActionButton:
icon: root.icon
size: dp(56), dp(56)
x: Window.width - (self.width + dp(15))
md_bg_color: app.theme_cls.primary_color
y: dp(15)
on_release: root.show_floating_buttons()
''')
class MDFloatingLabel(MDCard):
text = StringProperty()
class MDStackFloatingButtons(FloatLayout):
icon = StringProperty('checkbox-blank-circle')
callback = ObjectProperty(lambda x: None)
floating_data = DictProperty()
show = False
in_progress = False
def __init__(self, **kwargs):
super(MDStackFloatingButtons, self).__init__(**kwargs)
self.lbl_list = [self.ids.f_lbl_1, self.ids.f_lbl_2, self.ids.f_lbl_3]
self.btn_list = [self.ids.f_btn_1, self.ids.f_btn_2, self.ids.f_btn_3]
def set_in_progress(self, instance_anim, instance):
if instance is self.ids.f_btn_3:
self.in_progress = False
def show_floating_buttons(self):
step = dp(46)
if self.in_progress:
return
self.in_progress = True
for i, btn in enumerate(self.btn_list):
step += dp(56)
anim = Animation(y=step, d=.5, t='out_elastic')
anim.bind(on_complete=self.set_in_progress)
anim.start(btn)
        self.show = not self.show
        if self.show:
            self.show_floating_labels()
        else:
            self.hide_floating_labels()
def show_floating_labels(self):
i = 0
for lbl in self.lbl_list:
i += .3
pos_x = Window.width - (lbl.width + dp(46 + 21 * 1.5))
Animation(x=pos_x, d=i, t='out_elastic').start(lbl)
def hide_floating_buttons(self):
for btn in self.btn_list:
Animation(y=25, d=.5, t='in_elastic').start(btn)
def hide_floating_labels(self):
i = 1
for lbl in self.lbl_list:
i -= .3
Animation(x=-lbl.width, d=i, t='out_elastic').start(lbl)
self.hide_floating_buttons()
| 1,319 | 393 | 46 |
07ac001b79bc7e3de70e03db5d82008ecdb963cf | 4,450 | py | Python | src/main/python/apache/aurora/client/api/instance_watcher.py | jeremyvdw/aurora | fa9d83a7ef3a96c522884089a471bbb0bef74c48 | [
"Apache-2.0"
] | 479 | 2015-03-27T22:59:49.000Z | 2022-03-09T08:40:49.000Z | src/main/python/apache/aurora/client/api/instance_watcher.py | jeremyvdw/aurora | fa9d83a7ef3a96c522884089a471bbb0bef74c48 | [
"Apache-2.0"
] | 69 | 2015-05-26T20:06:29.000Z | 2020-01-13T19:18:59.000Z | src/main/python/apache/aurora/client/api/instance_watcher.py | jeremyvdw/aurora | fa9d83a7ef3a96c522884089a471bbb0bef74c48 | [
"Apache-2.0"
] | 226 | 2015-03-27T20:02:59.000Z | 2022-03-09T08:40:53.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from threading import Event
from twitter.common import log
from .health_check import StatusHealthCheck
from .task_util import StatusHelper
from gen.apache.aurora.api.ttypes import ScheduleStatus, TaskQuery
| 35.887097 | 100 | 0.702472 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from threading import Event
from twitter.common import log
from .health_check import StatusHealthCheck
from .task_util import StatusHelper
from gen.apache.aurora.api.ttypes import ScheduleStatus, TaskQuery
class Instance(object):
def __init__(self, birthday=None, finished=False):
self.birthday = birthday
self.finished = finished
self.healthy = False
def set_healthy(self, value):
self.healthy = value
self.finished = True
def __str__(self):
return ('[birthday=%s, healthy=%s, finished=%s]' % (self.birthday, self.healthy, self.finished))
class InstanceWatcher(object):
def __init__(self,
scheduler,
job_key,
watch_secs,
health_check_interval_seconds,
clock=time,
terminating_event=None):
self._scheduler = scheduler
self._job_key = job_key
self._watch_secs = watch_secs
self._health_check_interval_seconds = health_check_interval_seconds
self._clock = clock
self._terminating = terminating_event or Event()
self._status_helper = StatusHelper(self._scheduler, self._create_query)
def watch(self, instance_ids, health_check=None):
"""Watches a set of instances and detects failures based on a delegated health check.
Arguments:
instance_ids -- set of instances to watch.
Returns a set of instances that are considered failed.
"""
log.info('Watching instances: %s' % instance_ids)
instance_ids = set(instance_ids)
health_check = health_check or StatusHealthCheck()
instance_states = {}
def finished_instances():
return dict((s_id, s) for s_id, s in instance_states.items() if s.finished)
def set_instance_healthy(instance_id, now):
if instance_id not in instance_states:
instance_states[instance_id] = Instance(now)
instance = instance_states.get(instance_id)
if now > (instance.birthday + self._watch_secs):
log.info('Instance %s has been up and healthy for at least %d seconds' % (
instance_id, self._watch_secs))
instance.set_healthy(True)
def set_instance_unhealthy(instance_id):
log.info('Instance %s is unhealthy' % instance_id)
if instance_id in instance_states:
# An instance that was previously healthy and currently unhealthy has failed.
instance_states[instance_id].set_healthy(False)
else:
# An instance never passed a health check (e.g.: failed before the first health check).
instance_states[instance_id] = Instance(finished=True)
while not self._terminating.is_set():
running_tasks = self._status_helper.get_tasks(instance_ids, retry=True)
now = self._clock.time()
tasks_by_instance = dict((task.assignedTask.instanceId, task) for task in running_tasks)
for instance_id in instance_ids:
if instance_id not in finished_instances():
running_task = tasks_by_instance.get(instance_id)
if running_task is not None:
task_healthy = health_check.health(running_task)
if task_healthy:
set_instance_healthy(instance_id, now)
else:
set_instance_unhealthy(instance_id)
log.debug('Instances health: %s' % ['%s: %s' % val for val in instance_states.items()])
# Return if all tasks are finished.
if set(finished_instances().keys()) == instance_ids:
return set([s_id for s_id, s in instance_states.items() if not s.healthy])
self._terminating.wait(self._health_check_interval_seconds)
def terminate(self):
"""Requests immediate termination of the watch cycle."""
self._terminating.set()
def _create_query(self, instance_ids):
query = TaskQuery()
query.jobKeys = set([self._job_key])
query.statuses = set([ScheduleStatus.RUNNING])
query.instanceIds = instance_ids
return query
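# Editor's usage sketch (illustrative only; scheduler and job_key come from the
# surrounding Aurora client and are assumed here):
#
#     watcher = InstanceWatcher(scheduler, job_key,
#                               watch_secs=45,
#                               health_check_interval_seconds=5)
#     failed = watcher.watch({0, 1, 2})
#     if failed:
#         log.error('Instances failed health checks: %s' % failed)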
| 1,863 | 1,695 | 120 |
d4e10b56dfc10e33e596ebc0f85e97ca8ab2b554 | 9,044 | py | Python | hydrushelper.py | rousvit/velky_rybnik | c6f4b9273baf7fb0e634e938bd4cbef94073009f | [
"MIT"
] | null | null | null | hydrushelper.py | rousvit/velky_rybnik | c6f4b9273baf7fb0e634e938bd4cbef94073009f | [
"MIT"
] | null | null | null | hydrushelper.py | rousvit/velky_rybnik | c6f4b9273baf7fb0e634e938bd4cbef94073009f | [
"MIT"
] | 1 | 2020-08-07T15:34:03.000Z | 2020-08-07T15:34:03.000Z | """
Hydrus helper is a module containing various functions for working with Hydrus
output files and processing them in Jupyter Notebooks.
For now the functions are:
- copy_output():
    It will copy the selected files from the temporary working directory of a
    Hydrus project to a Hydrus/project_name subdirectory.
"""
# some imports first
import glob
import os
import sys
import pathlib
from shutil import copyfile
import pandas as pd
from tkinter import *
from tkinter.filedialog import askopenfilename
def copy_output():
"""
    Copy specified Hydrus output file(s) for a specified project
    to the working folder of this function (script)
    (another destination cannot be specified yet; should be implemented later).
    Works only for the Temp working directory,
    which exists only if some project is open.
Parameters
----------
(project_name) : string
Need to be entered after a prompt of this function.
        All available project names will be listed.
(file_name) : string
Need to be entered after a prompt of this function.
        All available output files will be listed.
Returns
-------
copy of the original file(s)
"""
working_path = "C:\\HYDRUS\\Temp\\~Hydrus3D_2xx\\"
cwd = os.getcwd()
# creating and printing list of all projects in Hydrus working folder
out_p = [p.split("\\")[-1] for p in glob.glob(working_path + "*")]
out_p.remove('Hydrus3D_2xx.tmp')
print("List of projects in the Hydrus working directory:")
for p in out_p:
print(p)
print("")
# choice of project with desired output files
project = input("Enter the projet name from the printed list: ")
print("")
while project not in out_p:
print("There is no such project name %s" % project)
project = input(
"Check the list again and enter an existing projet name: "
)
print("")
# creating and printing list of all output files
out_f = [f.split("\\")[-1] for f in glob.glob(
working_path + project + "\\" + "*.out")]
print("List of output files in the %s working directory" % project)
for f in out_f:
print(f.split("\\")[-1])
print("")
# creating the list of files to copy from user input
files = input("Enter the file name(s) you want to copy: ")
if "," in files:
files = [x.strip() for x in files.split(',')]
else:
files = [x.strip() for x in files.split(' ')]
# check if all the file names are right
result = all(elem in out_f for elem in files)
print("")
# if not run it again with check
while not result:
print("Some error is in file(s) name(s)")
files = input(
"Check the list again and enter the output files names: ")
print("")
if "," in files:
files = [x.strip() for x in files.split(',')]
else:
files = [x.strip() for x in files.split(' ')]
result = all(elem in out_f for elem in files)
# finally the copy of files will be done
for name in files:
print("Filename: %s" % name)
source = working_path + project + "\\" + name
print("Source: %s" % source)
pathlib.Path(
cwd + "\\hydrus\\" + project + "\\"
).mkdir(parents=True, exist_ok=True)
destination = cwd + "\\hydrus\\" + project + "\\" + name
print("Destination: %s" % destination)
copyfile(source, destination)
print("file %s succesefuly copied to %s" % (name, destination))
def read_file(proc_type='flow'):
"""
    Reads the specific composition of a Hydrus output file and
    converts it to a pandas dataframe.
    It currently supports the "v_Mean.out", "Cum_Q.out" and "ObsNod.out" file types.
args:
proc_type: string
Optional argument for better processing of the Hydrus output according
your simulation type.
Values for choice are:
- flow - only flow simulation (default value)
- tracer - solution simulation with one solute (tracer)
- cwm1 - biokinetic simulation with CWM1 model
- cw2d - biokinetic simulation with CW2D model
Parameters
----------
filepath : string
Full or relative path to the file
Returns
-------
pandas dataframe
"""
proc_types = ['flow', 'tracer', 'cwm1', 'cw2d']
if proc_type not in proc_types:
raise ValueError(
"Invalid process type. Expected one of: %s" % proc_types
)
root = Tk()
root.update()
filepath = askopenfilename()
root.destroy()
filepath = filepath.replace('/', '\\')
# dictionaries for renaming columns in the returned dataframe
v_mean_col = {
"Time": "time",
"rAtm": "pot_surface_flux_atm",
"rRoot": "pot_transp_rate",
"vAtm": "act_surface_flux_atm",
"vRoot": "act_transp_rate",
"vKode3": "total_bottom flux",
"vKode1": "total_boundary_flux",
"vSeep": "total_seepage_flux",
"vKode5": "total_b_node_flux",
"Runoff": "average_surface_ runoff",
"Evapor": "average_evapor_flux",
"Infiltr": "average_infil_flux",
"SnowLayer": "surface_snow_layer"
}
cum_q_col = {
"Time": "time",
"CumQAP": "c_pot_surface_flux_atm",
"CumQRP": "c_pot_transp_rate",
"CumQA": "c_act_surface_flux_atm",
"CumQR": "c_act_transp_rate",
"CumQ3": "c_total_bottom flux",
"CumQ1": "c_total_boundary_flux",
"CumQS": "c_total_seepage_flux",
"CumQ5": "c_total_b_node_flux",
"cRunoff": "c_surface_ runoff",
"cEvapor": "c_evapor_flux",
"cInfiltr": "c_infil_flux",
}
obsnode_col = {
"hNew": "hNew.0",
"theta": "theta.0",
"Temp": "Temp.0",
"Conc": "Conc.0",
"Sorb": "Sorb.0"
}
col_cwm1 = {
"Conc.0": "oxygen.1",
"Conc.1": "readillyCOD.1",
"Conc.2": "acetat.1",
"Conc.3": "in_sol_COD.1",
"Conc.4": "NH4.1",
"Conc.5": "NO3.1",
"Conc.6": "SSO4.1",
"Conc.7": "H2S.1",
"Conc.8": "slowlyCOD.1",
"Conc.9": "in_part_COD.1",
"Sorb.10": "heterotrophic.1",
"Sorb.11": "autotrophic.1",
"Sorb.12": "fermenting.1",
"Sorb.13": "methanogenic.1",
"Sorb.14": "sulphate_reducing.1",
"Sorb.15": "sulphide_oxidising.1",
}
if "v_Mean.out" in filepath:
data = pd.read_csv(filepath,
engine='python',
skiprows=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12],
sep=" ",
skipinitialspace=True,
skipfooter=1
)
data = data.loc[:, (data != 0).any(axis=0)]
data = data.rename(v_mean_col, axis='columns')
print("Data from %s were read into the Pandas DataFrame" % filepath)
return data
elif "Cum_Q.out" in filepath:
data = pd.read_csv(filepath,
engine='python',
skiprows=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12],
sep=" ",
skipinitialspace=True,
skipfooter=1
)
data = data.loc[:, (data != 0).any(axis=0)]
data = data.rename(cum_q_col, axis='columns')
print("Data from %s were read into the Pandas DataFrame" % filepath)
return data
elif "ObsNod.out" in filepath:
if proc_type == "cwm1":
data = pd.read_csv(filepath,
engine='python',
skiprows=[0, 1, 2, 3, 4],
sep=" ",
skipinitialspace=True,
skipfooter=1
)
data = data.rename(obsnode_col, axis='columns')
data = data.rename(col_cwm1, axis='columns')
data = data.loc[:, (data != 0).any(axis=0)]
else:
data = pd.read_csv(filepath,
engine='python',
skiprows=[0, 1, 2, 3, 4],
sep=" ",
skipinitialspace=True,
skipfooter=1
)
data = data.rename(obsnode_col, axis='columns')
data = data.loc[:, (data != 0).any(axis=0)]
print("Data from %s were read into the Pandas DataFrame" % filepath)
return data
else:
print("Sorry, data reader for this file type is not yet implemented.")
| 33.372694 | 79 | 0.525321 | """
Hydrus helper is a module containing various functions for working with Hydrus
output files and processing them in Jupyter Notebooks.
For now the functions are:
- copy_output():
    It will copy the selected files from the temporary working directory of a
    Hydrus project to a Hydrus/project_name subdirectory.
"""
# some imports first
import glob
import os
import sys
import pathlib
from shutil import copyfile
import pandas as pd
from tkinter import *
from tkinter.filedialog import askopenfilename
def copy_output():
"""
    Copy specified Hydrus output file(s) for a specified project
    to the working folder of this function (script)
    (another destination cannot be specified yet; should be implemented later).
    Works only for the Temp working directory,
    which exists only if some project is open.
Parameters
----------
(project_name) : string
Need to be entered after a prompt of this function.
        All available project names will be listed.
(file_name) : string
Need to be entered after a prompt of this function.
        All available output files will be listed.
Returns
-------
copy of the original file(s)
"""
working_path = "C:\\HYDRUS\\Temp\\~Hydrus3D_2xx\\"
cwd = os.getcwd()
# creating and printing list of all projects in Hydrus working folder
out_p = [p.split("\\")[-1] for p in glob.glob(working_path + "*")]
out_p.remove('Hydrus3D_2xx.tmp')
print("List of projects in the Hydrus working directory:")
for p in out_p:
print(p)
print("")
# choice of project with desired output files
project = input("Enter the projet name from the printed list: ")
print("")
while project not in out_p:
print("There is no such project name %s" % project)
project = input(
"Check the list again and enter an existing projet name: "
)
print("")
# creating and printing list of all output files
out_f = [f.split("\\")[-1] for f in glob.glob(
working_path + project + "\\" + "*.out")]
print("List of output files in the %s working directory" % project)
for f in out_f:
print(f.split("\\")[-1])
print("")
# creating the list of files to copy from user input
files = input("Enter the file name(s) you want to copy: ")
if "," in files:
files = [x.strip() for x in files.split(',')]
else:
files = [x.strip() for x in files.split(' ')]
# check if all the file names are right
result = all(elem in out_f for elem in files)
print("")
# if not run it again with check
while not result:
print("Some error is in file(s) name(s)")
files = input(
"Check the list again and enter the output files names: ")
print("")
if "," in files:
files = [x.strip() for x in files.split(',')]
else:
files = [x.strip() for x in files.split(' ')]
result = all(elem in out_f for elem in files)
# finally the copy of files will be done
for name in files:
print("Filename: %s" % name)
source = working_path + project + "\\" + name
print("Source: %s" % source)
pathlib.Path(
cwd + "\\hydrus\\" + project + "\\"
).mkdir(parents=True, exist_ok=True)
destination = cwd + "\\hydrus\\" + project + "\\" + name
print("Destination: %s" % destination)
copyfile(source, destination)
print("file %s succesefuly copied to %s" % (name, destination))
def read_file(proc_type='flow'):
"""
    Reads the specific composition of a Hydrus output file and
    converts it to a pandas dataframe.
    It currently supports the "v_Mean.out", "Cum_Q.out" and "ObsNod.out" file types.
args:
proc_type: string
Optional argument for better processing of the Hydrus output according
your simulation type.
Values for choice are:
- flow - only flow simulation (default value)
- tracer - solution simulation with one solute (tracer)
- cwm1 - biokinetic simulation with CWM1 model
- cw2d - biokinetic simulation with CW2D model
Parameters
----------
filepath : string
Full or relative path to the file
Returns
-------
pandas dataframe
"""
proc_types = ['flow', 'tracer', 'cwm1', 'cw2d']
if proc_type not in proc_types:
raise ValueError(
"Invalid process type. Expected one of: %s" % proc_types
)
root = Tk()
root.update()
filepath = askopenfilename()
root.destroy()
filepath = filepath.replace('/', '\\')
# dictionaries for renaming columns in the returned dataframe
v_mean_col = {
"Time": "time",
"rAtm": "pot_surface_flux_atm",
"rRoot": "pot_transp_rate",
"vAtm": "act_surface_flux_atm",
"vRoot": "act_transp_rate",
"vKode3": "total_bottom flux",
"vKode1": "total_boundary_flux",
"vSeep": "total_seepage_flux",
"vKode5": "total_b_node_flux",
"Runoff": "average_surface_ runoff",
"Evapor": "average_evapor_flux",
"Infiltr": "average_infil_flux",
"SnowLayer": "surface_snow_layer"
}
cum_q_col = {
"Time": "time",
"CumQAP": "c_pot_surface_flux_atm",
"CumQRP": "c_pot_transp_rate",
"CumQA": "c_act_surface_flux_atm",
"CumQR": "c_act_transp_rate",
"CumQ3": "c_total_bottom flux",
"CumQ1": "c_total_boundary_flux",
"CumQS": "c_total_seepage_flux",
"CumQ5": "c_total_b_node_flux",
"cRunoff": "c_surface_ runoff",
"cEvapor": "c_evapor_flux",
"cInfiltr": "c_infil_flux",
}
obsnode_col = {
"hNew": "hNew.0",
"theta": "theta.0",
"Temp": "Temp.0",
"Conc": "Conc.0",
"Sorb": "Sorb.0"
}
col_cwm1 = {
"Conc.0": "oxygen.1",
"Conc.1": "readillyCOD.1",
"Conc.2": "acetat.1",
"Conc.3": "in_sol_COD.1",
"Conc.4": "NH4.1",
"Conc.5": "NO3.1",
"Conc.6": "SSO4.1",
"Conc.7": "H2S.1",
"Conc.8": "slowlyCOD.1",
"Conc.9": "in_part_COD.1",
"Sorb.10": "heterotrophic.1",
"Sorb.11": "autotrophic.1",
"Sorb.12": "fermenting.1",
"Sorb.13": "methanogenic.1",
"Sorb.14": "sulphate_reducing.1",
"Sorb.15": "sulphide_oxidising.1",
}
if "v_Mean.out" in filepath:
data = pd.read_csv(filepath,
engine='python',
skiprows=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12],
sep=" ",
skipinitialspace=True,
skipfooter=1
)
data = data.loc[:, (data != 0).any(axis=0)]
data = data.rename(v_mean_col, axis='columns')
print("Data from %s were read into the Pandas DataFrame" % filepath)
return data
elif "Cum_Q.out" in filepath:
data = pd.read_csv(filepath,
engine='python',
skiprows=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12],
sep=" ",
skipinitialspace=True,
skipfooter=1
)
data = data.loc[:, (data != 0).any(axis=0)]
data = data.rename(cum_q_col, axis='columns')
print("Data from %s were read into the Pandas DataFrame" % filepath)
return data
elif "ObsNod.out" in filepath:
if proc_type == "cwm1":
data = pd.read_csv(filepath,
engine='python',
skiprows=[0, 1, 2, 3, 4],
sep=" ",
skipinitialspace=True,
skipfooter=1
)
data = data.rename(obsnode_col, axis='columns')
data = data.rename(col_cwm1, axis='columns')
data = data.loc[:, (data != 0).any(axis=0)]
else:
data = pd.read_csv(filepath,
engine='python',
skiprows=[0, 1, 2, 3, 4],
sep=" ",
skipinitialspace=True,
skipfooter=1
)
data = data.rename(obsnode_col, axis='columns')
data = data.loc[:, (data != 0).any(axis=0)]
print("Data from %s were read into the Pandas DataFrame" % filepath)
return data
else:
print("Sorry, data reader for this file type is not yet implemented.")
| 0 | 0 | 0 |
e7e86c8ef5cb6885489c37dea948c68b596a961e | 969 | py | Python | iahr/commands/chat/chat.py | B1Z0N/iahr | 0f198a47406726c08018afb17f13ff8c31244eff | [
"MIT"
] | 8 | 2020-07-10T08:09:21.000Z | 2021-06-01T23:47:29.000Z | iahr/commands/chat/chat.py | B1Z0N/iahr | 0f198a47406726c08018afb17f13ff8c31244eff | [
"MIT"
] | 1 | 2022-03-12T00:40:59.000Z | 2022-03-12T00:40:59.000Z | iahr/commands/chat/chat.py | B1Z0N/iahr | 0f198a47406726c08018afb17f13ff8c31244eff | [
"MIT"
] | null | null | null | from telethon import events
from iahr.reg import TextSender, VoidSender, MultiArgs, any_send
from iahr.config import IahrConfig
from iahr.utils import AccessList, EventService
from .utils import mention, local, CHAT_TAG
##################################################
# Routines themselves
##################################################
@VoidSender(about=local['tagall']['about'], tags={CHAT_TAG})
| 34.607143 | 75 | 0.585139 | from telethon import events
from iahr.reg import TextSender, VoidSender, MultiArgs, any_send
from iahr.config import IahrConfig
from iahr.utils import AccessList, EventService
from .utils import mention, local, CHAT_TAG
##################################################
# Routines themselves
##################################################
@VoidSender(about=local['tagall']['about'], tags={CHAT_TAG})
async def tagall(event):
cid = await EventService.chatid_from(event)
users = await event.client.get_participants(cid)
max_users = int(IahrConfig.CUSTOM.get('tagall_max_number', 50))
if len(users) > max_users:
await any_send(event, local['tagall']['toomuch'].format(max_users))
else:
await any_send(event,
' '.join(
EventService.mention(u, with_link=True)
for u in users if not u.bot and not u.is_self),
parse_mode='html')
| 537 | 0 | 22 |
94791b784d3c42061301b27e1c8fbf6772fe6f5c | 4,410 | py | Python | vmm/ext/postconf.py | kmohrf/vmm | 5e0dc8c9502d07681bfaca8634ed5b083deae77b | [
"BSD-3-Clause"
] | 4 | 2020-03-08T08:45:35.000Z | 2021-10-17T11:05:17.000Z | vmm/ext/postconf.py | kmohrf/vmm | 5e0dc8c9502d07681bfaca8634ed5b083deae77b | [
"BSD-3-Clause"
] | null | null | null | vmm/ext/postconf.py | kmohrf/vmm | 5e0dc8c9502d07681bfaca8634ed5b083deae77b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
# Copyright (c) 2008 - 2014, Pascal Volk
# See COPYING for distribution information.
"""
vmm.ext.postconf
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Wrapper class for Postfix's postconf.
Postconf instances can be used to read actual values of configuration
parameters or edit the value of a configuration parameter.
postconf.read(parameter) -> value
postconf.edit(parameter, value)
"""
import re
from gettext import gettext as _
from subprocess import Popen, PIPE
from vmm.errors import VMMError
from vmm.constants import VMM_ERROR
class Postconf:
"""Wrapper class for Postfix's postconf."""
__slots__ = ("_bin", "_val")
_parameter_re = re.compile(r"^\w+$", re.ASCII)
_variables_re = re.compile(r"\$\b\w+\b", re.ASCII)
def __init__(self, postconf_bin):
"""Creates a new Postconf instance.
Argument:
`postconf_bin` : str
absolute path to the Postfix postconf binary.
"""
self._bin = postconf_bin
self._val = ""
def edit(self, parameter, value):
"""Set the `parameter`'s value to `value`.
Arguments:
`parameter` : str
the name of a Postfix configuration parameter
`value` : str
the parameter's new value.
"""
self._check_parameter(parameter)
stderr = Popen(
(self._bin, "-e", parameter + "=" + str(value)), stderr=PIPE
).communicate()[1]
if stderr:
raise VMMError(stderr.strip().decode(), VMM_ERROR)
def read(self, parameter, expand_vars=True):
"""Returns the parameter's value.
If expand_vars is True (default), all variables in the value will be
expanded:
e.g. mydestination: mail.example.com, localhost.example.com, localhost
Otherwise the value may contain one or more variables.
e.g. mydestination: $myhostname, localhost.$mydomain, localhost
Arguments:
`parameter` : str
the name of a Postfix configuration parameter.
`expand_vars` : bool
indicates if variables should be expanded or not, default True
"""
self._check_parameter(parameter)
self._val = self._read(parameter)
if expand_vars:
self._expand_vars()
return self._val
def _check_parameter(self, parameter):
"""Check that the `parameter` looks like a configuration parameter.
If not, a VMMError will be raised."""
if not self.__class__._parameter_re.match(parameter):
raise VMMError(
_(
"The value '%s' does not look like a valid "
"Postfix configuration parameter name."
)
% parameter,
VMM_ERROR,
)
def _expand_vars(self):
"""Expand the $variables in self._val to their values."""
while True:
pvars = set(self.__class__._variables_re.findall(self._val))
if not pvars:
break
if len(pvars) > 1:
self._expand_multi_vars(self._read_multi(pvars))
continue
pvars = pvars.pop()
self._val = self._val.replace(pvars, self._read(pvars[1:]))
def _expand_multi_vars(self, old_new):
"""Replace all $vars in self._val with their values."""
for old, new in old_new.items():
self._val = self._val.replace("$" + old, new)
def _read(self, parameter):
"""Ask postconf for the value of a single configuration parameter."""
stdout, stderr = Popen(
[self._bin, "-h", parameter], stdout=PIPE, stderr=PIPE
).communicate()
if stderr:
raise VMMError(stderr.strip().decode(), VMM_ERROR)
return stdout.strip().decode()
def _read_multi(self, parameters):
"""Ask postconf for multiple configuration parameters. Returns a dict
parameter: value items."""
cmd = [self._bin]
cmd.extend(parameter[1:] for parameter in parameters)
stdout, stderr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
if stderr:
raise VMMError(stderr.strip().decode(), VMM_ERROR)
par_val = {}
for line in stdout.decode().splitlines():
par, val = line.split(" = ")
par_val[par] = val
return par_val
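# A minimal usage sketch, following the read/edit calls documented in the
# module docstring (the postconf path is an assumption -- adjust it to your
# Postfix installation):
# pc = Postconf('/usr/sbin/postconf')
# print(pc.read('mydestination'))         # value with $variables expanded
# print(pc.read('mydestination', False))  # raw value, $variables kept
# pc.edit('myhostname', 'mail.example.com')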
| 33.157895 | 78 | 0.592971 |
| 0 | 0 | 0 |
b66c0b88e0def2798a3fd1381205daae2963cfa4 | 11,174 | py | Python | journal_paper/sys_id_arx_jan.py | Eramismus/CommunityDRSims | 63d2fc77a53ea17800834f8000013616d7eab9c9 | [
"MIT"
] | 5 | 2019-10-24T12:49:22.000Z | 2022-01-06T10:50:16.000Z | journal_paper/sys_id_arx_jan.py | Eramismus/CommunityDRSims | 63d2fc77a53ea17800834f8000013616d7eab9c9 | [
"MIT"
] | 1 | 2021-05-18T10:25:54.000Z | 2021-05-18T10:26:52.000Z | journal_paper/sys_id_arx_jan.py | Eramismus/CommunityDRSims | 63d2fc77a53ea17800834f8000013616d7eab9c9 | [
"MIT"
] | 4 | 2018-11-16T08:41:34.000Z | 2021-11-01T12:39:30.000Z | """ Sixth version, make the code easier and more modifiable """
# Define the main programme
from funcs import store_namespace
from funcs import load_namespace
from funcs import emulate_jmod
import os
import datetime
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#from multiprocessing import Pool
from mpcpy import units
from mpcpy import variables
from mpcpy import models_mod as models
from scipy.optimize import curve_fit
from scipy.linalg import expm
from numpy.linalg import inv
from sklearn.metrics import mean_squared_error, r2_score
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.graphics.tsaplots import plot_acf
from Simulator_HP_mod3 import SimHandler
if __name__ == "__main__":
# Naming conventions for the simulation
community = 'ResidentialCommunityUK_rad_2elements'
sim_id = 'MinEne'
model_id = 'R2CW_HP'
bldg_list = load_namespace(os.path.join('file_path_to_folder', 'teaser_bldgs_residential'))
bldg_index_start = 0
bldg_index_end = 30
emulate = 0 # Emulate or not?
old_sim_param = 1 # Choose initial guesses
# Overall options
date = '1/1/2017 '
start = '1/1/2017 00:00:00'
end = '1/9/2017 00:00:00'
train_start = start
valid_start = '1/6/2017 00:00:00'
train_end = valid_start
valid_end = '1/9/2017 00:00:00'
meas_sampl = '300'
mon = 'jan'
folder = 'path_to_file\\results_sysid_new_'+mon
# Features to use in training
exog = ['weaTDryBul_delay1', 'weaHGloHor_delay1','PowerCompr', 'PowerCompr_delay1', 'T_in_delay13']
target = 'TAir'
features_dict = {}
exog_list = []
j = 0
for item in exog:
exog_list.append(item)
ind_exog = item
ar = []
for i in range(5):
ar.append('T_in_delay'+str(i+1))
features_dict['ARX_lag_'+str(i+1)+'_exog'+str(j)] = exog_list + ar
features_dict['ARX_lag_'+str(i+1)+'_'+ind_exog] = [ind_exog] + ar
j += 1
# Instantiate Simulator
Sim_list = []
i = 0
for bldg in bldg_list[bldg_index_start:bldg_index_end]:
i = i+1
Sim = SimHandler(sim_start = start,
sim_end = end,
meas_sampl = meas_sampl
)
Sim.building = bldg+'_'+model_id
Sim.fmupath_emu = os.path.join(Sim.simu_path, 'fmus', community, community+'_'+bldg+'_'+bldg+'_Models_'+bldg+'_House_mpc.fmu')
Sim.fmupath_ref = os.path.join(Sim.simu_path, 'fmus', community, community+'_'+bldg+'_'+bldg+'_Models_'+bldg+'_House_PI.fmu')
Sim.moinfo_emu = (os.path.join(Sim.mod_path, community, bldg,bldg+'_Models',bldg+'_House_mpc.mo'), community+'.'+bldg+'.'+bldg+'_Models.'+bldg+'_House_mpc',
{}
)
Sim.moinfo_emu_ref = (os.path.join(Sim.mod_path, community, bldg,bldg+'_Models',bldg+'_House_PI.mo'), community+'.'+bldg+'.'+bldg+'_Models.'+bldg+'_House_PI',
{}
)
if emulate == 1:
# Initialise exogenous data sources
if i == 1:
Sim.update_weather(start, end)
index = pd.date_range(start, end, freq = meas_sampl+'S', tz=Sim.weather.tz_name)
else:
Sim.weather = Sim_list[i-2].weather
#Sim.sim_start= '1/1/2017 00:00'
Sim.get_control()
#Sim.sim_start= start
Sim.get_other_input(start,end)
Sim.get_constraints(start,end,upd_control=1)
Sim.param_file = os.path.join(Sim.simu_path,'csvs','Parameters_R2CW.csv')
Sim.get_params()
if i > 1:
Sim.control = Sim_list[i-2].control
store_namespace(os.path.join(folder, 'sysid_control_'+Sim.building+'_'+mon), Sim.control)
else:
store_namespace(os.path.join(folder, 'sysid_control_'+Sim.building+'_'+mon), Sim.control)
# Initialise models
Sim.init_models(use_ukf=1, use_fmu_mpc=1, use_fmu_emu=1) # Use for initialising
# Add to list of simulations
Sim_list.append(Sim)
index = pd.date_range(start, end, freq = meas_sampl+'S')
train_dict = {}
test_dict = {}
results_dict = {}
for Sim in Sim_list:
if emulate == 1:
            # Emulate to get data
emulate_jmod(Sim.emu, Sim.meas_vars_emu, Sim.meas_sampl, start, end)
# Handle data
print(Sim.emu.display_measurements('Measured'))
measurements = Sim.emu.display_measurements('Measured')
index = pd.to_datetime(measurements.index)
measurements.index = index
weather = Sim.weather.display_data().resample(meas_sampl+'S').ffill()
#print(weather)
weather.index = index
df = pd.concat([measurements, weather],axis=1)[start:end]
df['PowerCompr'] = df['PowerCompr']/1000.0
df['TAir'] = df['TAir']-273.15
for j in range(1,20):
df['T_in_delay'+str(j)] = df['TAir'].shift(periods=j)
df['PowerCompr_delay'+str(j)] = df['PowerCompr'].shift(periods=j)
df['weaTDryBul_delay'+str(j)] = df['weaTDryBul'].shift(periods=j)
df = df.fillna(method='bfill')
# Remove the lags from the beginning
train_start = datetime.datetime.strptime(start, '%m/%d/%Y %H:%M:%S') + datetime.timedelta(seconds = 10*float(meas_sampl))
# Split the dataset
df_train = df[train_start:train_end]
df_test = df[valid_start:valid_end]
            train_dict[Sim.building] = df_train
            test_dict[Sim.building] = df_test
store_namespace(os.path.join(folder, 'all_data_'+mon+'_'+Sim.building), df)
store_namespace(os.path.join(folder, 'train_data_'+mon+'_'+Sim.building), df_train)
store_namespace(os.path.join(folder, 'test_data_'+mon+'_'+Sim.building), df_test)
else:
df = load_namespace(os.path.join('results_sysid_test_'+mon, 'all_data_'+mon+'_'+Sim.building))
# Remove the lags from the beginning
#train_start = start.strftime('%m/%d/%Y %H:%M:%S')
train_start = datetime.datetime.strptime(start, '%m/%d/%Y %H:%M:%S') + datetime.timedelta(seconds = 10*float(meas_sampl))
for j in range(1,20):
df['T_in_delay'+str(j)] = df['TAir'].shift(periods=j)
df['PowerCompr_delay'+str(j)] = df['PowerCompr'].shift(periods=j)
df['weaTDryBul_delay'+str(j)] = df['weaTDryBul'].shift(periods=j)
df['weaHGloHor_delay'+str(j)] = df['weaHGloHor'].shift(periods=j)
df = df.fillna(method='bfill')
# Split the dataset
df_train = df[train_start:train_end]
df_test = df[valid_start:valid_end]
            train_dict[Sim.building] = df_train
            test_dict[Sim.building] = df_test
store_namespace(os.path.join(folder, 'train_data_'+mon+'_'+Sim.building), df_train)
store_namespace(os.path.join(folder, 'test_data_'+mon+'_'+Sim.building), df_test)
#print(df_train['weaTDryBul'])
'''Identify parameters '''
train_data = df_train
i = 0
for case in features_dict.keys():
while True:
try:
features = features_dict[case]
feats = features + [target]
Sim.init_ARX_model(features, target, train_data)
Sim.ARX_model.evaluate()
# Make some predictions
test_x = df_train[features].values
Sim.ARX_model.predict(test_x)
preds = Sim.ARX_model.predictions
if not os.path.exists(os.path.join(folder,case)):
os.makedirs(os.path.join(folder,case))
store_namespace(os.path.join(folder, case, 'sysid_ARXmodel_'+mon+'_'+Sim.building), Sim.ARX_model)
store_namespace(os.path.join(folder, case, 'sysid_ARXparams_results_'+mon+'_'+Sim.building), Sim.ARX_model.fit_results.params)
store_namespace(os.path.join(folder, case, 'sysid_ARXparams_IDpreds_'+mon+'_'+Sim.building), preds)
results_dict[case+'_'+Sim.building] = {}
results_dict[case+'_'+Sim.building]['AIC'] = Sim.ARX_model.fit_results.aic
results_dict[case+'_'+Sim.building]['BIC'] = Sim.ARX_model.fit_results.bic
results_dict[case+'_'+Sim.building]['MSE-total'] = Sim.ARX_model.fit_results.mse_total
results_dict[case+'_'+Sim.building]['MSE-model'] = Sim.ARX_model.fit_results.mse_model
results_dict[case+'_'+Sim.building]['MSE-resid'] = Sim.ARX_model.fit_results.mse_resid
results_dict[case+'_'+Sim.building]['R2-ID'] = Sim.ARX_model.fit_results.rsquared
'''Validate'''
# Make some predictions
test_x = df_test[features].values
Sim.ARX_model.predict(test_x)
preds = Sim.ARX_model.predictions
mse = mean_squared_error(df_test['TAir'].values, preds)
rscore = r2_score(df_test['TAir'].values, preds)
print('Mean squared error - validation')
print(mean_squared_error(df_test['TAir'].values, preds))
print('R^2 score - validation')
print(r2_score(df_test['TAir'].values, preds))
results_dict[case+'_'+Sim.building]['MSE-model-valid'] = mse
results_dict[case+'_'+Sim.building]['R2-valid'] = rscore
store_namespace(os.path.join(folder, case, 'sysid_ARXparams_validpreds_'+mon+'_'+Sim.building), preds)
store_namespace(os.path.join(folder, case, 'sysid_ARXparams_validMSE_'+mon+'_'+Sim.building), mse)
store_namespace(os.path.join(folder, case, 'sysid_ARXparams_validR2_'+mon+'_'+Sim.building), rscore)
break
                except Exception:
print('%%%%%%%%%%%%%%%%%% Failed, trying again! %%%%%%%%%%%%%%%%%%%%%%')
continue
results_pd = pd.DataFrame.from_dict(results_dict, orient='index')
results_pd.to_csv(os.path.join(folder, 'model_selection_all.csv'))
| 40.050179 | 168 | 0.553786 |
| 0 | 0 | 0 |
d965f57e81d16ba37cdd89e8f34eab2538372a01 | 1,006 | py | Python | src/chains/migrations/0001_initial.py | tharsis/safe-config-service | 5335fd006d05fba5b13b477daca9f6ef6d64b818 | [
"MIT"
] | 8 | 2021-07-27T13:21:27.000Z | 2022-02-12T22:46:26.000Z | src/chains/migrations/0001_initial.py | tharsis/safe-config-service | 5335fd006d05fba5b13b477daca9f6ef6d64b818 | [
"MIT"
] | 203 | 2021-04-28T08:23:29.000Z | 2022-03-29T15:50:27.000Z | src/chains/migrations/0001_initial.py | tharsis/safe-config-service | 5335fd006d05fba5b13b477daca9f6ef6d64b818 | [
"MIT"
] | 23 | 2021-06-25T07:22:31.000Z | 2022-03-29T02:24:46.000Z | # Generated by Django 3.2.3 on 2021-06-02 14:44
from typing import List, Tuple
from django.db import migrations, models
| 31.4375 | 86 | 0.535785 | # Generated by Django 3.2.3 on 2021-06-02 14:44
from typing import List, Tuple
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies: List[Tuple[str, str]] = []
operations = [
migrations.CreateModel(
name="Chain",
fields=[
(
"id",
models.PositiveBigIntegerField(
primary_key=True, serialize=False, verbose_name="Chain Id"
),
),
("name", models.CharField(max_length=255, verbose_name="Chain name")),
("rpc_url", models.URLField()),
("block_explorer_url", models.URLField(null=True)),
("currency_name", models.CharField(max_length=255, null=True)),
("currency_symbol", models.CharField(max_length=255)),
("currency_decimals", models.IntegerField(default=18)),
],
),
]
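# Illustrative note: Django applies this initial migration through its
# migration runner, e.g. `python manage.py migrate chains`.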
| 0 | 861 | 23 |
b25958053b88bd12e08c6de8d2b4247d1ac2d567 | 9,224 | py | Python | ask-smapi-model/ask_smapi_model/v1/skill/manifest/skill_manifest_publishing_information.py | Signal-Kinetics/alexa-apis-for-python | abb8d3dce18a5510c48b215406ed36c024f01495 | [
"Apache-2.0"
] | null | null | null | ask-smapi-model/ask_smapi_model/v1/skill/manifest/skill_manifest_publishing_information.py | Signal-Kinetics/alexa-apis-for-python | abb8d3dce18a5510c48b215406ed36c024f01495 | [
"Apache-2.0"
] | null | null | null | ask-smapi-model/ask_smapi_model/v1/skill/manifest/skill_manifest_publishing_information.py | Signal-Kinetics/alexa-apis-for-python | abb8d3dce18a5510c48b215406ed36c024f01495 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_smapi_model.v1.skill.manifest.distribution_countries import DistributionCountriesV1
from ask_smapi_model.v1.skill.manifest.skill_manifest_localized_publishing_information import SkillManifestLocalizedPublishingInformationV1
from ask_smapi_model.v1.skill.manifest.distribution_mode import DistributionModeV1
from ask_smapi_model.v1.skill.manifest.manifest_gadget_support import ManifestGadgetSupportV1
class SkillManifestPublishingInformation(object):
"""
Defines the structure for publishing information in the skill manifest.
:param name: Name of the skill that is displayed to customers in the Alexa app.
:type name: (optional) str
:param description: Description of the skill's purpose and feature and how it works. Should describe any prerequisites like hardware or account requirements and detailed steps for the customer to get started. For Flash Briefing skill list the feeds offered within the skill. Use a conversational tone and correct grammar and punctuation. This description displays to customers on the skill detail card in the Alexa app.
:type description: (optional) str
:param locales: Defines the structure for locale specific publishing information in the skill manifest.
:type locales: (optional) dict(str, ask_smapi_model.v1.skill.manifest.skill_manifest_localized_publishing_information.SkillManifestLocalizedPublishingInformation)
:param is_available_worldwide: True if the skill should be distributed in all countries where Amazon distributes skill false otherwise.
:type is_available_worldwide: (optional) bool
:param distribution_mode:
:type distribution_mode: (optional) ask_smapi_model.v1.skill.manifest.distribution_mode.DistributionMode
:param gadget_support:
:type gadget_support: (optional) ask_smapi_model.v1.skill.manifest.manifest_gadget_support.ManifestGadgetSupport
:param testing_instructions: Special instructions provided by the developer to test the skill.
:type testing_instructions: (optional) str
:param category: Category that best describes a skill. Indicates the filter category for the skill in the Alexa App.
:type category: (optional) str
:param distribution_countries: Selected list of countries provided by the skill owner where Amazon can distribute the skill.
:type distribution_countries: (optional) list[ask_smapi_model.v1.skill.manifest.distribution_countries.DistributionCountries]
"""
deserialized_types = {
'name': 'str',
'description': 'str',
'locales': 'dict(str, ask_smapi_model.v1.skill.manifest.skill_manifest_localized_publishing_information.SkillManifestLocalizedPublishingInformation)',
'is_available_worldwide': 'bool',
'distribution_mode': 'ask_smapi_model.v1.skill.manifest.distribution_mode.DistributionMode',
'gadget_support': 'ask_smapi_model.v1.skill.manifest.manifest_gadget_support.ManifestGadgetSupport',
'testing_instructions': 'str',
'category': 'str',
'distribution_countries': 'list[ask_smapi_model.v1.skill.manifest.distribution_countries.DistributionCountries]'
} # type: Dict
attribute_map = {
'name': 'name',
'description': 'description',
'locales': 'locales',
'is_available_worldwide': 'isAvailableWorldwide',
'distribution_mode': 'distributionMode',
'gadget_support': 'gadgetSupport',
'testing_instructions': 'testingInstructions',
'category': 'category',
'distribution_countries': 'distributionCountries'
} # type: Dict
supports_multiple_types = False
def __init__(self, name=None, description=None, locales=None, is_available_worldwide=None, distribution_mode=None, gadget_support=None, testing_instructions=None, category=None, distribution_countries=None):
# type: (Optional[str], Optional[str], Optional[Dict[str, SkillManifestLocalizedPublishingInformationV1]], Optional[bool], Optional[DistributionModeV1], Optional[ManifestGadgetSupportV1], Optional[str], Optional[str], Optional[List[DistributionCountriesV1]]) -> None
"""Defines the structure for publishing information in the skill manifest.
:param name: Name of the skill that is displayed to customers in the Alexa app.
:type name: (optional) str
:param description: Description of the skill's purpose and feature and how it works. Should describe any prerequisites like hardware or account requirements and detailed steps for the customer to get started. For Flash Briefing skill list the feeds offered within the skill. Use a conversational tone and correct grammar and punctuation. This description displays to customers on the skill detail card in the Alexa app.
:type description: (optional) str
:param locales: Defines the structure for locale specific publishing information in the skill manifest.
:type locales: (optional) dict(str, ask_smapi_model.v1.skill.manifest.skill_manifest_localized_publishing_information.SkillManifestLocalizedPublishingInformation)
:param is_available_worldwide: True if the skill should be distributed in all countries where Amazon distributes skill false otherwise.
:type is_available_worldwide: (optional) bool
:param distribution_mode:
:type distribution_mode: (optional) ask_smapi_model.v1.skill.manifest.distribution_mode.DistributionMode
:param gadget_support:
:type gadget_support: (optional) ask_smapi_model.v1.skill.manifest.manifest_gadget_support.ManifestGadgetSupport
:param testing_instructions: Special instructions provided by the developer to test the skill.
:type testing_instructions: (optional) str
:param category: Category that best describes a skill. Indicates the filter category for the skill in the Alexa App.
:type category: (optional) str
:param distribution_countries: Selected list of countries provided by the skill owner where Amazon can distribute the skill.
:type distribution_countries: (optional) list[ask_smapi_model.v1.skill.manifest.distribution_countries.DistributionCountries]
"""
self.__discriminator_value = None # type: str
self.name = name
self.description = description
self.locales = locales
self.is_available_worldwide = is_available_worldwide
self.distribution_mode = distribution_mode
self.gadget_support = gadget_support
self.testing_instructions = testing_instructions
self.category = category
self.distribution_countries = distribution_countries
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, SkillManifestPublishingInformation):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
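# A minimal usage sketch (the field values below are illustrative
# assumptions, not values taken from this repository):
# info = SkillManifestPublishingInformation(name='My Skill',
#                                           testing_instructions='...')
# print(info.to_dict())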
| 54.579882 | 431 | 0.714983 |
| 0 | 0 | 0 |
eeffbf129aecabf37617744f43a2c93210836eb0 | 6,882 | py | Python | xquantum/qiskit/qiskit.py | caccio/CrossQuantumClassifier | eede8a2d46220d9a9f2fbe8977abd98801c11d5a | [
"Apache-2.0"
] | null | null | null | xquantum/qiskit/qiskit.py | caccio/CrossQuantumClassifier | eede8a2d46220d9a9f2fbe8977abd98801c11d5a | [
"Apache-2.0"
] | null | null | null | xquantum/qiskit/qiskit.py | caccio/CrossQuantumClassifier | eede8a2d46220d9a9f2fbe8977abd98801c11d5a | [
"Apache-2.0"
] | null | null | null | import xquantum as xq
import math
from qiskit.circuit.library.standard_gates import RYGate
from qiskit import QuantumCircuit, execute, Aer, IBMQ
# https://algassert.com/quirk#circuit={%22cols%22:[[1,{%22id%22:%22Ryft%22,%22arg%22:%221.982313%22}],[],[{%22id%22:%22Ryft%22,%22arg%22:%221.714144%22},%22%E2%80%A2%22],[],[{%22id%22:%22Ryft%22,%22arg%22:%221.910633%22},%22%E2%97%A6%22],[],[%22Amps2%22],[],[1,1,%22H%22],[%22%E2%80%A2%22,%22X%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/2%22},1,%22%E2%80%A2%22],[%22%E2%80%A2%22,%22X%22,%22%E2%80%A2%22],[%22X%22,%22%E2%80%A2%22,%22%E2%80%A2%22],[1,{%22id%22:%22Ryft%22,%22arg%22:%22pi/3%22},%22%E2%80%A2%22],[%22X%22,%22%E2%80%A2%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/4%22},1,%22%E2%80%A2%22],[%22Z%22,1,%22%E2%80%A2%22],[1,1,%22X%22],[%22%E2%80%A2%22,%22X%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/2+pi%22},1,%22%E2%80%A2%22],[],[%22%E2%80%A2%22,%22X%22,%22%E2%80%A2%22],[%22X%22,%22%E2%80%A2%22,%22%E2%80%A2%22],[1,{%22id%22:%22Ryft%22,%22arg%22:%22pi/3%22},%22%E2%80%A2%22],[%22X%22,%22%E2%80%A2%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/4%22},1,%22%E2%80%A2%22],[1,1,%22X%22],[1,1,%22H%22],[%22Density%22,%22Density%22,%22Density%22]],%22init%22:[0,0,1]}
# https://algassert.com/quirk#circuit={%22cols%22:[[1,{%22id%22:%22Ryft%22,%22arg%22:%221.982313%22}],[],[{%22id%22:%22Ryft%22,%22arg%22:%221.714144%22},%22%E2%80%A2%22],[],[{%22id%22:%22Ryft%22,%22arg%22:%221.910633%22},%22%E2%97%A6%22],[],[%22Amps2%22],[],[%22%E2%80%A2%22,%22X%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/2%22}],[%22%E2%80%A2%22,%22X%22],[%22X%22,%22%E2%80%A2%22],[1,{%22id%22:%22Ryft%22,%22arg%22:%22pi/3%22}],[%22X%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/4%22}],[%22Amps2%22]]}
| 68.138614 | 1,126 | 0.684685 | import xquantum as xq
import math
from qiskit.circuit.library.standard_gates import RYGate
from qiskit import QuantumCircuit, execute, Aer, IBMQ
def getQiskitBackend(engine):
    # Return an IBMQ provider backend when account credentials are
    # configured; otherwise fall back to the local Aer simulator.
if 'providerGroup' in engine.config.keys():
if engine.config['providerGroup'] == 'open' and engine.config['providerProject'] == 'main':
IBMQ.save_account(engine.config['account'], overwrite=True)
IBMQ.load_account()
provider = IBMQ.get_provider(group=engine.config['providerGroup'], project=engine.config['providerProject'])
return provider.get_backend(engine.config['providerBackend'])
else:
return Aer.get_backend('qasm_simulator') # statevector_simulator qasm_simulator
def testModel(engine, circuitModel, hyperParams, params, normalizedFeatureSet):
return _testParallelModel(engine, circuitModel, hyperParams, params, normalizedFeatureSet)
def classifiedLableProbability(engine, circuitModel, hyperParams, parameters, normalizedFeatures):
return _classifiedLableProbability(engine, circuitModel, hyperParams, parameters, normalizedFeatures)
def circuitDerivativeByParams(engine, circuitModel, hyperParams, parameters, normalizedFeatures):
return _circuitDerivativeByParams(engine, circuitModel, hyperParams, parameters, normalizedFeatures)
def _testParallelModel(engine, circuitModel, hyperParams, params, normalizedFeatureSet):
    # Build one measurement circuit per feature vector and execute them as
    # a single batched job, returning [p0, p0 + bias] per sample.
size = (len(normalizedFeatureSet[0]) - 1).bit_length()
ecs = []
circuitGate = _applyCircuit(size, circuitModel, params)
for nf in normalizedFeatureSet:
mc = QuantumCircuit(size, 1)
mc.initialize(nf, list(range(size)))
mc.append(circuitGate, list(range(size)))
mc.measure(circuitModel.measureQubitIndex, 0)
ecs.append(mc)
backend = getQiskitBackend(engine)
job = execute(ecs, backend, shots=hyperParams.shots)
counts = [job.result().get_counts(qc) for qc in ecs]
p0s = [(c['0'] if '0' in c.keys() else 0) / hyperParams.shots for c in counts]
return [[p0, (p0 + params[circuitModel.biasParameterIndex])] for p0 in p0s]
def _testIterativeModel(engine, circuitModel, hyperParams, params, normalizedFeatureSet):
return [_classifiedLableProbability(engine, circuitModel, hyperParams, params, nf) for nf in normalizedFeatureSet]
def _classifiedLableProbability(engine, circuitModel, hyperParams, parameters, normalizedFeatures):
size = (len(normalizedFeatures) - 1).bit_length()
qc = QuantumCircuit(size, 1)
qc.initialize(normalizedFeatures, list(range(size)))
qc.append(_applyCircuit(size, circuitModel, parameters), list(range(size)))
qc.measure(circuitModel.measureQubitIndex, 0)
backend = getQiskitBackend(engine)
job = execute(qc, backend, shots=hyperParams.shots)
count = job.result().get_counts(qc)
p0 = (count['0'] if '0' in count.keys() else 0) / hyperParams.shots
pi = p0 + parameters[circuitModel.biasParameterIndex]
return (p0, pi)
# https://algassert.com/quirk#circuit={%22cols%22:[[1,{%22id%22:%22Ryft%22,%22arg%22:%221.982313%22}],[],[{%22id%22:%22Ryft%22,%22arg%22:%221.714144%22},%22%E2%80%A2%22],[],[{%22id%22:%22Ryft%22,%22arg%22:%221.910633%22},%22%E2%97%A6%22],[],[%22Amps2%22],[],[1,1,%22H%22],[%22%E2%80%A2%22,%22X%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/2%22},1,%22%E2%80%A2%22],[%22%E2%80%A2%22,%22X%22,%22%E2%80%A2%22],[%22X%22,%22%E2%80%A2%22,%22%E2%80%A2%22],[1,{%22id%22:%22Ryft%22,%22arg%22:%22pi/3%22},%22%E2%80%A2%22],[%22X%22,%22%E2%80%A2%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/4%22},1,%22%E2%80%A2%22],[%22Z%22,1,%22%E2%80%A2%22],[1,1,%22X%22],[%22%E2%80%A2%22,%22X%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/2+pi%22},1,%22%E2%80%A2%22],[],[%22%E2%80%A2%22,%22X%22,%22%E2%80%A2%22],[%22X%22,%22%E2%80%A2%22,%22%E2%80%A2%22],[1,{%22id%22:%22Ryft%22,%22arg%22:%22pi/3%22},%22%E2%80%A2%22],[%22X%22,%22%E2%80%A2%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/4%22},1,%22%E2%80%A2%22],[1,1,%22X%22],[1,1,%22H%22],[%22Density%22,%22Density%22,%22Density%22]],%22init%22:[0,0,1]}
def _circuitDerivativeByParams(engine, circuitModel, hyperParams, parameters, normalizedFeatures):
return _iterativeCircuitDerivativeByParams(engine, circuitModel, hyperParams, parameters, normalizedFeatures)
def _iterativeCircuitDerivativeByParams(engine, circuitModel, hyperParams, parameters, normalizedFeatures):
    # Estimate the derivative w.r.t. each parameter with a Hadamard test on
    # an ancilla, comparing the circuit against a copy whose i-th parameter
    # is shifted by pi (see the quirk links above for the circuit layout).
size = (len(normalizedFeatures) - 1).bit_length()
dpar = [0] * len(parameters)
circuitGate = _applyCircuit(size, circuitModel, parameters).control(1)
for i in range(len(parameters)):
if i == circuitModel.biasParameterIndex:
dpar[i] = 1
else:
shiftedParams = list(
map(lambda j: parameters[j] if j != i else parameters[j] + math.pi, range(len(parameters))))
shiftedGate = _applyCircuit(size, circuitModel, shiftedParams).control(1)
# the size-th qubit is the ancilla
qc = QuantumCircuit(size + 1, 1)
qc.initialize(normalizedFeatures, list(range(size)))
qc.h(size)
qc.append(circuitGate, [size] + list(range(size)))
qc.cz(size, circuitModel.measureQubitIndex)
qc.x(size)
qc.append(shiftedGate, [size] + list(range(size)))
qc.x(size)
qc.h(size)
qc.measure(size, 0)
backend = getQiskitBackend(engine)
job = execute(qc, backend, shots=hyperParams.shots)
count = job.result().get_counts(qc)
p0 = (count['0'] if '0' in count.keys() else 0)
dpar[i] = (p0 / hyperParams.shots) - 0.5 # (2*p0-1)*0.5
return dpar
# https://algassert.com/quirk#circuit={%22cols%22:[[1,{%22id%22:%22Ryft%22,%22arg%22:%221.982313%22}],[],[{%22id%22:%22Ryft%22,%22arg%22:%221.714144%22},%22%E2%80%A2%22],[],[{%22id%22:%22Ryft%22,%22arg%22:%221.910633%22},%22%E2%97%A6%22],[],[%22Amps2%22],[],[%22%E2%80%A2%22,%22X%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/2%22}],[%22%E2%80%A2%22,%22X%22],[%22X%22,%22%E2%80%A2%22],[1,{%22id%22:%22Ryft%22,%22arg%22:%22pi/3%22}],[%22X%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/4%22}],[%22Amps2%22]]}
def _applyCircuit(n, circuitModel, parameters):
    # Build the variational circuit: each network unit is a single-qubit
    # rotation sandwiched between two CNOTs to its fixed qubit; the whole
    # circuit is returned as a reusable gate.
qc = QuantumCircuit(n)
for un in circuitModel.network:
qc.cx(un.rotationQubitIndex, un.fixedQubitIndex)
if un.rotationAxis == xq.Pauli.X: qc.rx(parameters[un.rotationParameterIndex], un.rotationQubitIndex)
if un.rotationAxis == xq.Pauli.Y: qc.ry(parameters[un.rotationParameterIndex], un.rotationQubitIndex)
if un.rotationAxis == xq.Pauli.Z: qc.rz(parameters[un.rotationParameterIndex], un.rotationQubitIndex)
qc.cx(un.rotationQubitIndex, un.fixedQubitIndex)
# qc.ry(parameters[circuitModel.measureParameterIndex], circuitModel.measureQubitIndex)
return qc.to_gate()
| 4,865 | 0 | 228 |
995f8ac5d23b650899de03512a6648e3a140ecd8 | 644 | py | Python | Webapp/bot/ChangeJS.py | Agulaa/Brovie_test | 1a3de8d370cdfd73108ddbbe99a4130027f86e7f | [
"MIT"
] | 2 | 2018-05-15T19:45:56.000Z | 2019-09-18T11:34:05.000Z | Webapp/bot/ChangeJS.py | Agulaa/Brovie_test | 1a3de8d370cdfd73108ddbbe99a4130027f86e7f | [
"MIT"
] | null | null | null | Webapp/bot/ChangeJS.py | Agulaa/Brovie_test | 1a3de8d370cdfd73108ddbbe99a4130027f86e7f | [
"MIT"
] | 3 | 2018-05-15T19:46:05.000Z | 2021-04-28T20:14:57.000Z | from bot.Bot import Bot
from bot.api_ai import find_respond
import json
# change = Change()
# jso = {
#
# "respond": "Bye"
# }
# result = change.respond(jso)
# print(result)
| 18.941176 | 66 | 0.582298 | from bot.Bot import Bot
from bot.api_ai import find_respond
import json
class Change(object):
    """Route a bot 'respond' JSON payload to the matching Bot action."""
def __init__(self):
self.Bot = Bot()
def respond(self, jso):
js = json.loads(jso)
action, film, respond = find_respond(js['respond'])
if action == 'film':
result = self.Bot.choose_film_by_genres(film, respond)
elif action == 'hello' or action == 'bye':
result = self.Bot.greetings(respond)
else:
result = self.Bot.error()
return result
# change = Change()
# jso = {
#
# "respond": "Bye"
# }
# result = change.respond(jso)
# print(result)
| 386 | 0 | 77 |
cafa0355b3e04304900b30d3d763236c9d27dfea | 1,447 | py | Python | WideDTA/train_w.py | Sunitach10/MolPro | 7e550b8a73103cff34fdf976a19722e967b08ab2 | [
"MIT"
] | 4 | 2019-09-10T09:53:01.000Z | 2021-04-01T10:54:11.000Z | WideDTA/train_w.py | Sunitach10/MolPro | 7e550b8a73103cff34fdf976a19722e967b08ab2 | [
"MIT"
] | 1 | 2021-03-13T18:08:43.000Z | 2021-03-13T18:08:43.000Z | WideDTA/train_w.py | Sunitach10/MolPro | 7e550b8a73103cff34fdf976a19722e967b08ab2 | [
"MIT"
] | 1 | 2020-09-24T06:08:54.000Z | 2020-09-24T06:08:54.000Z | import data_w
from data_w import widedata
from data_w import*
import model_w
from model_w import*
from model_w import WideCNN
#dataset=widedata(ligand_path, protein_path,keys,motif_path,affinity_path)
dataset = widedata(ligand_path, protein_path,keys,motif_path,affinity_path)
train_loader, test_loader = load_splitset(dataset, .2)
modelw=WideCNN()
trainwide=train_w(modelw,train_loader)
torch.save(modelw,'wide.pt')
if __name__ == '__main__':
print(trainwide[0])
print(trainwide[1])
| 23.721311 | 76 | 0.563234 | import data_w
from data_w import widedata
from data_w import*
import model_w
from model_w import*
from model_w import WideCNN
#dataset=widedata(ligand_path, protein_path,keys,motif_path,affinity_path)
dataset = widedata(ligand_path, protein_path,keys,motif_path,affinity_path)
train_loader, test_loader = load_splitset(dataset, .2)
modelw=WideCNN()
class rmsloss(nn.Module):
def __init__(self):
super().__init__()
self.mse=nn.MSELoss()
def forward(self,yhat,y):
return torch.sqrt(self.mse(yhat,y))
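# Quick sanity check for the loss above (illustrative, not part of the
# original training script): the RMSE of a constant offset equals the offset.
# >>> rmsloss()(torch.zeros(4), torch.full((4,), 2.0))  # tensor(2.)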
def train_w(model,train):
epochs=3
criterion = rmsloss()
optimizer=optim.Adam(model.parameters(),lr=0.003)
for epoch in range(epochs):
outw = []
losw = []
k=0
for i,j in train:
m = i[0]
m = m.reshape((1, 10, 10))
p = i[1]
p = p.reshape((1, 6729, 594))
mt = i[2]
mt = mt.reshape((1, 1076, 32))
out = model(p, m, mt)
outw.append(out)
optimizer.zero_grad()
loss = criterion(out, j)
losw.append(loss)
loss.backward()
optimizer.step()
k += 1
if k == 500:
break
return outw,losw
trainwide=train_w(modelw,train_loader)
torch.save(modelw,'wide.pt')
if __name__ == '__main__':
print(trainwide[0])
print(trainwide[1])
| 813 | 4 | 104 |
e6b318f5d430006b66f8af2c0f6b44f56363a814 | 561 | py | Python | resources/scripts/CycleValue.py | freneticmonkey/epsilonc | 0fb7c6c4c6342a770e2882bfd67ed34719e79066 | [
"MIT"
] | null | null | null | resources/scripts/CycleValue.py | freneticmonkey/epsilonc | 0fb7c6c4c6342a770e2882bfd67ed34719e79066 | [
"MIT"
] | null | null | null | resources/scripts/CycleValue.py | freneticmonkey/epsilonc | 0fb7c6c4c6342a770e2882bfd67ed34719e79066 | [
"MIT"
] | null | null | null | import math | 18.7 | 48 | 0.643494 | import math
class CycleValue(object):
def __init__(self):
self._time_value = 0
self._freq = 100
self._deg = 0
self._dir = True
def set_dir(self, direction):
self._dir = direction
def set_freq(self, freq):
self._freq = freq
def get_value(self, dt):
self._time_value += dt
if self._time_value > ( 1.0 / self._freq ):
self._time_value = 0
self._deg += 1
if self._deg >= 360:
self._deg = 0
v = math.cos( (self._deg * math.pi) / 180.0 )
# If the direction is flipped, flip the result
if not self._dir:
v = -v
return v | 428 | 4 | 118 |
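# Illustrative usage (not part of the original script): with freq=100 the
# internal degree counter advances once per 1/100 s of accumulated dt, and
# get_value returns cos(deg in degrees), negated when the direction flips.
# cv = CycleValue()
# cv.set_freq(100)
# samples = [cv.get_value(0.02) for _ in range(5)]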
5f01b33d377458285c562958fdab8080b868ffa2 | 973 | py | Python | example/example.py | pbvarga1/example-pytest-bdd-qt | 985a0f90bc2723711c74821d778b766a79ecbe83 | [
"MIT"
] | null | null | null | example/example.py | pbvarga1/example-pytest-bdd-qt | 985a0f90bc2723711c74821d778b766a79ecbe83 | [
"MIT"
] | null | null | null | example/example.py | pbvarga1/example-pytest-bdd-qt | 985a0f90bc2723711c74821d778b766a79ecbe83 | [
"MIT"
] | null | null | null | from itertools import cycle
import sys
# from qtpy import QtWidgets, QtCore
from PyQt5 import QtWidgets, QtCore
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
ex = ExampleWindow()
ex.show()
sys.exit(app.exec_())
| 27.027778 | 52 | 0.633094 | from itertools import cycle
import sys
# from qtpy import QtWidgets, QtCore
from PyQt5 import QtWidgets, QtCore
class ExampleWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._central_widget = QtWidgets.QWidget()
self._words = cycle([
'foo',
'bar',
'baz'
])
layout = QtWidgets.QHBoxLayout()
self.btn = QtWidgets.QPushButton('Press Me')
self.line_edit = QtWidgets.QLineEdit()
layout.addWidget(self.btn)
layout.addWidget(self.line_edit)
self.btn.clicked.connect(self.clicked)
self._central_widget.setLayout(layout)
self.setCentralWidget(self._central_widget)
def clicked(self):
self.line_edit.setText(next(self._words))
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
ex = ExampleWindow()
ex.show()
sys.exit(app.exec_())
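# Illustrative pytest-qt check (assumes the qtbot fixture provided by
# pytest-qt; not part of the original example):
# def test_button_cycles_words(qtbot):
#     window = ExampleWindow()
#     qtbot.addWidget(window)
#     qtbot.mouseClick(window.btn, QtCore.Qt.LeftButton)
#     assert window.line_edit.text() == 'foo'   # first word in the cycle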
| 615 | 22 | 81 |
62597d147d35c561e57add64789ddf5367a8af5b | 737 | py | Python | menu.py | WellingtonFSouza1/SGI | 89746bc1d9745931fd9b451e575b92c8197fcc65 | [
"Apache-2.0"
] | null | null | null | menu.py | WellingtonFSouza1/SGI | 89746bc1d9745931fd9b451e575b92c8197fcc65 | [
"Apache-2.0"
] | null | null | null | menu.py | WellingtonFSouza1/SGI | 89746bc1d9745931fd9b451e575b92c8197fcc65 | [
"Apache-2.0"
] | null | null | null | from os import system
| 24.566667 | 65 | 0.458616 | from os import system
def menu():
print('*' * 50)
print(f'{"*":<0}{" Sistema Gestão Integrado ":^48}{"*":>0}')
print('*' * 50)
    print(f'* [1]ATUALIZAR{"*":>36}')
print(f'* [2]BUSCAR{"*":>39}')
print(f'* [3]CADASTRAR{"*":>36}')
print(f'* [4]EXCLUIR{"*":>38}')
print(f'* [5]SAIR{"*":>41}')
print('*' * 50)
option = str(input())
if option.isnumeric() and 0 < int(option) < 6:
system('cls')
return option
else:
while True:
print('Opção invalida.\ntente novamente:', end='')
option = str(input())
system('cls')
if option.isnumeric() and 0 < int(option) < 6:
return option
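# Typical call site (illustrative): menu() blocks until the user enters a
# valid choice and returns it as a string '1'..'5'.
# opcao = menu()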
| 690 | 0 | 25 |
ed71ff4de8c59ae554116c478eac57cdf5539922 | 2,628 | py | Python | IPython/nbconvert/filters/tests/test_highlight.py | chebee7i/ipython | 85b169fa3afc3d374973295c7f1409ededddbaca | [
"BSD-3-Clause-Clear"
] | 2 | 2015-02-10T18:00:31.000Z | 2015-05-01T02:53:46.000Z | IPython/nbconvert/filters/tests/test_highlight.py | chebee7i/ipython | 85b169fa3afc3d374973295c7f1409ededddbaca | [
"BSD-3-Clause-Clear"
] | null | null | null | IPython/nbconvert/filters/tests/test_highlight.py | chebee7i/ipython | 85b169fa3afc3d374973295c7f1409ededddbaca | [
"BSD-3-Clause-Clear"
] | 1 | 2021-05-22T13:52:12.000Z | 2021-05-22T13:52:12.000Z | """
Module with tests for Highlight
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from ...tests.base import TestsBase
from ..highlight import Highlight2Html, Highlight2Latex
from IPython.config import Config
import xml
#-----------------------------------------------------------------------------
# Class
#-----------------------------------------------------------------------------
highlight2html = Highlight2Html()
highlight2latex = Highlight2Latex()
c = Config()
c.Highlight2Html.default_language='ruby'
highlight2html_ruby = Highlight2Html(config=c)
class TestHighlight(TestsBase):
"""Contains test functions for highlight.py"""
    #Hello world test and magics test
tests = [
"""
#Hello World Example
def say(text):
print(text)
end
say('Hello World!')
""",
"""
%%pylab
plot(x,y, 'r')
"""
]
tokens = [
['Hello World Example', 'say', 'text', 'print', 'def'],
['pylab', 'plot']]
def test_highlight2html(self):
"""highlight2html test"""
for index, test in enumerate(self.tests):
self._try_highlight(highlight2html, test, self.tokens[index])
def test_highlight2latex(self):
"""highlight2latex test"""
for index, test in enumerate(self.tests):
self._try_highlight(highlight2latex, test, self.tokens[index])
def _try_highlight(self, method, test, tokens):
"""Try highlighting source, look for key tokens"""
results = method(test)
for token in tokens:
assert token in results
| 29.52809 | 79 | 0.497717 | """
Module with tests for Highlight
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from ...tests.base import TestsBase
from ..highlight import Highlight2Html, Highlight2Latex
from IPython.config import Config
import xml
#-----------------------------------------------------------------------------
# Class
#-----------------------------------------------------------------------------
highlight2html = Highlight2Html()
highlight2latex = Highlight2Latex()
c = Config()
c.Highlight2Html.default_language='ruby'
highlight2html_ruby = Highlight2Html(config=c)
class TestHighlight(TestsBase):
"""Contains test functions for highlight.py"""
    #Hello world test and magics test
tests = [
"""
#Hello World Example
def say(text):
print(text)
end
say('Hello World!')
""",
"""
%%pylab
plot(x,y, 'r')
"""
]
tokens = [
['Hello World Example', 'say', 'text', 'print', 'def'],
['pylab', 'plot']]
def test_highlight2html(self):
"""highlight2html test"""
for index, test in enumerate(self.tests):
self._try_highlight(highlight2html, test, self.tokens[index])
def test_highlight2latex(self):
"""highlight2latex test"""
for index, test in enumerate(self.tests):
self._try_highlight(highlight2latex, test, self.tokens[index])
def test_parse_html_many_lang(self):
ht = highlight2html(self.tests[0])
rb = highlight2html_ruby(self.tests[0])
for lang,tkns in [
( ht, ('def','print') ),
( rb, ('def','end' ) )
]:
root = xml.etree.ElementTree.fromstring(lang)
assert self._extract_tokens(root,'k') == set(tkns)
def _extract_tokens(self, root, cls):
return set(map(lambda x:x.text,root.findall(".//*[@class='"+cls+"']")))
def _try_highlight(self, method, test, tokens):
"""Try highlighting source, look for key tokens"""
results = method(test)
for token in tokens:
assert token in results
| 454 | 0 | 54 |
d98bef884724a2bb1c2ec7e92ad1c1b1590c4e7d | 1,165 | py | Python | figpptx/win32com_misc.py | Sillte/figpptx | bf5539b09eeef4e6a17bb4483f62f29d286138b2 | [
"MIT"
] | null | null | null | figpptx/win32com_misc.py | Sillte/figpptx | bf5539b09eeef4e6a17bb4483f62f29d286138b2 | [
"MIT"
] | null | null | null | figpptx/win32com_misc.py | Sillte/figpptx | bf5539b09eeef4e6a17bb4483f62f29d286138b2 | [
"MIT"
] | null | null | null | """Here, I'd like to gather utility function / classes related to `pywin32com`.
Notice that this library is not library, but recipes for reading codes of `pywin32com`.
"""
from win32com.client import DispatchEx, CDispatch
from win32com.client.selecttlb import EnumTlbs
from win32com.client.makepy import GenerateFromTypeLibSpec
from win32com import __gen_path__
# print(__gen_path__) # In this folder, the generated object resides.
def assure_generation():
"""We would like to `generated` pywin32 PowerPoint module.
"""
if _required_generation():
_gen()
assure_generation()
| 32.361111 | 89 | 0.678112 | """Here, I'd like to gather utility function / classes related to `pywin32com`.
Notice that this is not a library, but a collection of recipes for reading the code of `pywin32com`.
"""
from win32com.client import DispatchEx, CDispatch
from win32com.client.selecttlb import EnumTlbs
from win32com.client.makepy import GenerateFromTypeLibSpec
from win32com import __gen_path__
# print(__gen_path__) # In this folder, the generated object resides.
def assure_generation():
"""We would like to `generated` pywin32 PowerPoint module.
"""
def _gen():
targets = []
for s in EnumTlbs():
if s.desc.startswith("Microsoft PowerPoint"):
targets.append(s)
target = targets[0]
GenerateFromTypeLibSpec(target, verboseLevel=1)
def _required_generation():
app = DispatchEx("PowerPoint.Application")
if app.Presentations.__class__.__name__ == "Presentations":
return False
if isinstance(app.Presentations, CDispatch):
del app
return True
raise NotImplementedError("Error.")
if _required_generation():
_gen()
assure_generation()
| 507 | 0 | 52 |
322cde676c50164bb63a1effd28fc6734a881e28 | 469 | py | Python | ui_show.py | Amplil/Image_Genetic_algorithm | dbf3c5610df257fe2ae851cf1fb871c6027eb594 | [
"Apache-2.0"
] | 24 | 2021-04-05T13:44:29.000Z | 2022-02-15T11:04:31.000Z | ui_show.py | Amplil/Image_Genetic_algorithm | dbf3c5610df257fe2ae851cf1fb871c6027eb594 | [
"Apache-2.0"
] | 1 | 2022-01-19T01:26:48.000Z | 2022-01-19T01:26:48.000Z | ui_show.py | Amplil/Image_Genetic_algorithm | dbf3c5610df257fe2ae851cf1fb871c6027eb594 | [
"Apache-2.0"
] | 1 | 2022-01-20T08:15:54.000Z | 2022-01-20T08:15:54.000Z | from selenium import webdriver
from time import sleep
import os
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ['enable-automation'])
# Launch the browser (Chrome)
driver = webdriver.Chrome(r'chromedriver.exe', chrome_options=options) # use chromedriver.exe
# Process each URL from the list one at a time
print('file:///{}/tmp.png'.format(os.getcwd()))
driver.get('file:///{}/tmp.png'.format(os.getcwd()))
while True:
sleep(1)
driver.refresh() | 27.588235 | 93 | 0.720682 | from selenium import webdriver
from time import sleep
import os
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ['enable-automation'])
# Launch the browser (Chrome)
driver = webdriver.Chrome(r'chromedriver.exe', chrome_options=options) # use chromedriver.exe
# Process each URL from the list one at a time
print('file:///{}/tmp.png'.format(os.getcwd()))
driver.get('file:///{}/tmp.png'.format(os.getcwd()))
while True:
sleep(1)
driver.refresh() | 0 | 0 | 0 |
bd877e5952285f3ac1c5fc1b2383d1e022163a73 | 9,349 | py | Python | gcode-runner.py | hgroover/smoothie-gcode | 17afa5d7f48c9470ca71dc7f8b1842083df72627 | [
"MIT"
] | null | null | null | gcode-runner.py | hgroover/smoothie-gcode | 17afa5d7f48c9470ca71dc7f8b1842083df72627 | [
"MIT"
] | null | null | null | gcode-runner.py | hgroover/smoothie-gcode | 17afa5d7f48c9470ca71dc7f8b1842083df72627 | [
"MIT"
] | null | null | null | # gcode runner
import io, sys
import socket
import time
import re
import argparse
ADDR="192.168.1.18"
PORT=23
LAST_SENT=""
# Should be ok, error, etc.
LAST_RESPONSE=""
# Optional [Caution: Unlocked] in response to $X
LAST_RESPONSE_MSG=""
# Global values set by get_status()
STATUS=""
MPOS=[0.0, 0.0, 0.0]
WPOS=[0.0, 0.0, 0.0]
FEEDS=[0.0, 0.0]
TIMEOUT_COUNT=0
PASS_TIMEOUT_COUNT=0
# Default command-response timeout is 5 minutes
# Get available text with specified timeout in ms
#### Main entry ####
# Parse command line
TARGET_PASSES=1
# Actions to take
WITH_HOME=0
WITH_INIT=1
WITH_FILE=0
WITH_POST=1
# Arbitrary init string
#INIT_CMD='G0 X500 Y800 F2000 G0 Z0 F200\n'
#INIT_CMD='G30 Z2.0\n'
#INIT_CMD='G0 Z0\n'
INIT_CMD='G0 Z10 X10 Y10 F2000\n'
INIT_CMD='G92 Z10 X10 Y10\n'
# Input file
INPUT_FILE='limit-test1-faster.gcode'
# Post-run command
POST_CMD='G0 Z10 X10 Y10 F2000\n'
# Attempt to read entire gcode file. This may fail on really large files.
# Must test with 10's of MB and up.
try:
ifile = open(INPUT_FILE, 'r')
GCode = ifile.readlines()
ifile.close()
except:
print('Failed to open gcode input {0}'.format(INPUT_FILE))
sys.exit(1)
# Analyze for comments
total_lines = len(GCode)
comment_lines = 0
for line in GCode:
if line.startswith('('):
comment_lines = comment_lines + 1
print('Input file {0} has {1} comment lines, {2} out of {3} active (comments will not be sent)'.format(INPUT_FILE, comment_lines, total_lines - comment_lines, total_lines))
start_run = time.monotonic()
line_number = 0
try:
print('Attempting connection via {0} at {1}:{2}'.format('TCP', ADDR, PORT))
socket.setdefaulttimeout(60)
msock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
started = time.monotonic()
cres = msock.connect( (ADDR, PORT) )
elapsed = time.monotonic() - started
print('Connection time:', elapsed)
# Flush any greeting, usually Smoothie command shell
time.sleep(2)
s = get_text(msock, 5000)
print('Starting text:', s.strip())
s = get_text(msock, 1000)
if s != "":
print('Still starting:', s.strip())
start_run = time.monotonic()
# Query status - if alarm, send $X to clear and try again
#timed_cmd(msock, b'get status\n')
# Smoothie will send <status|mpos|wpos>\n[GC:...] in response to ?$G
#timed_cmd(msock, b'?$G\n')
s = get_text(msock, 1000)
if s != "":
print('Additional text: {0}'.format(s))
# Supposed to be time in milliseconds - Smoothie interprets it as seconds
#timed_cmd(msock, b'G4 P10\n')
for rpass in range(1, 1 + TARGET_PASSES):
print('starting pass', rpass, 'of', TARGET_PASSES)
start_pass = time.monotonic()
get_status(msock)
# If we interrupt a run, we may get an empty status
if STATUS == '':
print('Trying status again:')
get_status(msock, 6000)
# Try again if we timed out
if STATUS == 'Timeout':
print('Status timeout, trying again:')
get_status(msock, 10000)
print('Status:', STATUS)
if STATUS == 'Alarm':
print('Need to clear alarm')
timed_cmd(msock, '$X\n')
if LAST_RESPONSE != 'ok':
print('Did not get ok:', LAST_RESPONSE)
sys.exit(1)
elif STATUS.startswith('Failed'):
# A previous operation failed. Attempt a wait
print('A previous operation failed, attempting to clear failure...')
timed_cmd(msock, 'M400\n')
get_status(msock)
print('Response from wait: {0} {1} status: {2}'.format(LAST_RESPONSE, LAST_RESPONSE_MSG, STATUS))
if STATUS != 'Idle':
print('Unable to clear failure')
sys.exit(1)
elif STATUS != 'Idle':
print('Status must be idle, got:', STATUS)
#sys.exit(1)
break
PASS_TIMEOUT_COUNT = 0
#get_status(msock)
#if STATUS != 'Idle':
# print('Non-idle status:', STATUS)
# break
if WITH_HOME:
print('Homing...')
timed_cmd(msock, 'G28.2 X0 Y0\n')
if LAST_RESPONSE == 'error':
break
timed_cmd(msock, 'G92 X0 Y0\n')
# Wait for motion to complete
timed_cmd(msock, 'M400\n')
get_status(msock)
print('New status after home/reset: {0}\n'.format(STATUS))
if WITH_INIT and rpass == 1:
print('Sending init cmd: {0}'.format(INIT_CMD.strip()))
timed_cmd(msock, INIT_CMD)
timed_cmd(msock, 'M400\n')
get_status(msock)
print('New status after init: {0}\n'.format(STATUS))
line_number = 1
if WITH_FILE:
for line in GCode:
if not line.startswith('('):
# Smoothie switches on if spindle configured in switch mode for ANY value of S, including 0
if line.startswith('M3'):
print('Spindle control: {0}'.format(line.strip()))
# FIXME use longer timeout for M400
timed_cmd(msock, line)
if LAST_RESPONSE == 'error':
print('Exiting, error condition at line {0}'.format(line_number))
sys.exit(1)
line_number = line_number + 1
elapsed_pass = time.monotonic() - start_pass
print('pass {0} total time {1:.4f}s, timeouts: {2}'.format(rpass, elapsed_pass, PASS_TIMEOUT_COUNT))
if WITH_POST:
print('Final pass completed, sending post-run command {0}'.format(POST_CMD))
timed_cmd(msock, POST_CMD)
timed_cmd(msock, 'M400\n')
get_status(msock)
print('New status after post-run cmd: {0}\n'.format(STATUS))
elapsed_run = time.monotonic() - start_run
print('Final pass completed in {0:.4f}s, total timeout count: {1}'.format(elapsed_run, TIMEOUT_COUNT))
except OSError as e:
print('Exception:', e)
print('last cmd:', LAST_SENT, 'line number:', line_number)
elapsed_run = time.monotonic() - start_run
print('Elapsed time: {0:.4f}s'.format(elapsed_run))
sys.exit(1)
msock.close()
print('Completed')
sys.exit(0)
| 33.996364 | 172 | 0.597925 | # gcode runner
import io, sys
import socket
import time
import re
import argparse
ADDR="192.168.1.18"
PORT=23
LAST_SENT=""
# Should be ok, error, etc.
LAST_RESPONSE=""
# Optional [Caution: Unlocked] in response to $X
LAST_RESPONSE_MSG=""
# Global values set by get_status()
STATUS=""
MPOS=[0.0, 0.0, 0.0]
WPOS=[0.0, 0.0, 0.0]
FEEDS=[0.0, 0.0]
TIMEOUT_COUNT=0
PASS_TIMEOUT_COUNT=0
# Default command-response timeout is 5 minutes
def timed_cmd(ms, cmd, response_timeout_ms=300000):
global LAST_SENT
global LAST_RESPONSE
global LAST_RESPONSE_MSG
global TIMEOUT_COUNT
global PASS_TIMEOUT_COUNT
started = time.monotonic()
# cmd is now str
LAST_SENT=cmd
prev_timeout = ms.gettimeout()
ms.settimeout(response_timeout_ms / 1000)
ms.send(cmd.encode('utf-8'))
# Minimum turnaround time is 0.5s
time.sleep(0.5)
try:
s = str(ms.recvfrom(4096)[0], encoding='utf-8')
except:
TIMEOUT_COUNT = TIMEOUT_COUNT + 1
PASS_TIMEOUT_COUNT = PASS_TIMEOUT_COUNT + 1
s = '<timeout>'
ms.settimeout(prev_timeout)
elapsed = time.monotonic() - started
LAST_RESPONSE = s.strip()
m = re.match('\[([^]]+)\]\W*(\w+)', s)
if m != None:
LAST_RESPONSE_MSG = m.group(1)
LAST_RESPONSE = m.group(2)
else:
# Also try error:msg
m = re.match('(error):(.+)', s)
if m != None:
LAST_RESPONSE = m.group(1)
LAST_RESPONSE_MSG = m.group(2)
else:
LAST_RESPONSE_MSG = ""
# For status query, parse <statusword|MPos|WPos>
# FIXME set parameters for verbosity. For now report anything taking 1s or longer
if elapsed >= 1.0:
print('sent:', cmd.strip(), 'recvd:', LAST_RESPONSE, LAST_RESPONSE_MSG, 'elapsed:', elapsed)
sys.stdout.flush()
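# Examples of the reply parsing above: '[Caution: Unlocked] ok' yields
# LAST_RESPONSE_MSG='Caution: Unlocked' and LAST_RESPONSE='ok', while
# 'error:Alarm lock' yields LAST_RESPONSE='error' and
# LAST_RESPONSE_MSG='Alarm lock'.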
# Get available text with specified timeout in ms
def get_text(ms, timeout_ms):
prev_timeout = ms.gettimeout()
ms.settimeout(timeout_ms / 1000)
try:
s = str(ms.recvfrom(4096)[0], encoding='utf-8')
except:
s = ''
ms.settimeout(prev_timeout)
return s
def get_status(ms, status_timeout_ms=4000):
global STATUS
global MPOS
global WPOS
global FEEDS
prev_timeout = ms.gettimeout()
ms.settimeout(status_timeout_ms/1000)
# Smoothie sends both <status|mpos|wpos|feedrates> AND [GC:... in response to ?$G
ms.send(b'get status\n')
try:
s = str(ms.recvfrom(4096)[0], encoding='utf-8')
except:
s = 'Timeout'
    ms.settimeout(prev_timeout)
    if s == 'Timeout':
        # Return the timeout marker directly so the caller's retry check
        # (STATUS == 'Timeout') can actually trigger.
        STATUS = 'Timeout'
        return STATUS
# <Idle|MPos:10.0000,10.0000,6.0000|WPos:10.0000,10.0000,12.0000|F:1280.0,100.0>
# If run, we may have L: and S: also
pat = re.compile('<([^|]+)\|MPos:([^|]+)\|WPos:([^|]+)\|F:([^|>]+)>')
m = pat.search(s)
if m is None:
STATUS = 'Failed to parse {0}'.format(s)
m = re.search('<(\w+)\|', s)
if m is None:
STATUS = 'Failed secondary {0}'.format(s)
else:
STATUS = 'Secondary: {0} from {1}'.format(m.group(1), s)
return STATUS
STATUS = m.group(1)
mp_str = m.group(2).split(',')
wp_str = m.group(3).split(',')
f_str = m.group(4).split(',')
MPOS[0] = float(mp_str[0])
MPOS[1] = float(mp_str[1])
MPOS[2] = float(mp_str[2])
WPOS[0] = float(wp_str[0])
WPOS[1] = float(wp_str[1])
WPOS[2] = float(wp_str[2])
FEEDS[0] = float(f_str[0])
FEEDS[1] = float(f_str[1])
print('Raw status:', s.strip(), 'parsed:(', STATUS, ') prev timeout:', prev_timeout, 'MP:', MPOS, 'WP:', WPOS)
return STATUS
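# Worked example of the parsing above: the sample reply
# '<Idle|MPos:10.0000,10.0000,6.0000|WPos:10.0000,10.0000,12.0000|F:1280.0,100.0>'
# yields STATUS='Idle', MPOS=[10.0, 10.0, 6.0], WPOS=[10.0, 10.0, 12.0] and
# FEEDS=[1280.0, 100.0].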
#### Main entry ####
# Parse command line
TARGET_PASSES=1
# Actions to take
WITH_HOME=0
WITH_INIT=1
WITH_FILE=0
WITH_POST=1
# Arbitrary init string
#INIT_CMD='G0 X500 Y800 F2000 G0 Z0 F200\n'
#INIT_CMD='G30 Z2.0\n'
#INIT_CMD='G0 Z0\n'
INIT_CMD='G0 Z10 X10 Y10 F2000\n'
INIT_CMD='G92 Z10 X10 Y10\n'
# Input file
INPUT_FILE='limit-test1-faster.gcode'
# Post-run command
POST_CMD='G0 Z10 X10 Y10 F2000\n'
# Attempt to read entire gcode file. This may fail on really large files.
# Must test with 10's of MB and up.
try:
ifile = open(INPUT_FILE, 'r')
GCode = ifile.readlines()
ifile.close()
except:
print('Failed to open gcode input {0}'.format(INPUT_FILE))
sys.exit(1)
# Analyze for comments
total_lines = len(GCode)
comment_lines = 0
for line in GCode:
if line.startswith('('):
comment_lines = comment_lines + 1
print('Input file {0} has {1} comment lines, {2} out of {3} active (comments will not be sent)'.format(INPUT_FILE, comment_lines, total_lines - comment_lines, total_lines))
start_run = time.monotonic()
line_number = 0
try:
print('Attempting connection via {0} at {1}:{2}'.format('TCP', ADDR, PORT))
socket.setdefaulttimeout(60)
msock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
started = time.monotonic()
cres = msock.connect( (ADDR, PORT) )
elapsed = time.monotonic() - started
print('Connection time:', elapsed)
# Flush any greeting, usually Smoothie command shell
time.sleep(2)
s = get_text(msock, 5000)
print('Starting text:', s.strip())
s = get_text(msock, 1000)
if s != "":
print('Still starting:', s.strip())
start_run = time.monotonic()
# Query status - if alarm, send $X to clear and try again
#timed_cmd(msock, b'get status\n')
# Smoothie will send <status|mpos|wpos>\n[GC:...] in response to ?$G
#timed_cmd(msock, b'?$G\n')
s = get_text(msock, 1000)
if s != "":
print('Additional text: {0}'.format(s))
# Supposed to be time in milliseconds - Smoothie interprets it as seconds
#timed_cmd(msock, b'G4 P10\n')
for rpass in range(1, 1 + TARGET_PASSES):
print('starting pass', rpass, 'of', TARGET_PASSES)
start_pass = time.monotonic()
get_status(msock)
# If we interrupt a run, we may get an empty status
if STATUS == '':
print('Trying status again:')
get_status(msock, 6000)
# Try again if we timed out
if STATUS == 'Timeout':
print('Status timeout, trying again:')
get_status(msock, 10000)
print('Status:', STATUS)
if STATUS == 'Alarm':
print('Need to clear alarm')
timed_cmd(msock, '$X\n')
if LAST_RESPONSE != 'ok':
print('Did not get ok:', LAST_RESPONSE)
sys.exit(1)
elif STATUS.startswith('Failed'):
# A previous operation failed. Attempt a wait
print('A previous operation failed, attempting to clear failure...')
timed_cmd(msock, 'M400\n')
get_status(msock)
print('Response from wait: {0} {1} status: {2}'.format(LAST_RESPONSE, LAST_RESPONSE_MSG, STATUS))
if STATUS != 'Idle':
print('Unable to clear failure')
sys.exit(1)
elif STATUS != 'Idle':
print('Status must be idle, got:', STATUS)
#sys.exit(1)
break
PASS_TIMEOUT_COUNT = 0
#get_status(msock)
#if STATUS != 'Idle':
# print('Non-idle status:', STATUS)
# break
if WITH_HOME:
print('Homing...')
timed_cmd(msock, 'G28.2 X0 Y0\n')
if LAST_RESPONSE == 'error':
break
timed_cmd(msock, 'G92 X0 Y0\n')
# Wait for motion to complete
timed_cmd(msock, 'M400\n')
get_status(msock)
print('New status after home/reset: {0}\n'.format(STATUS))
if WITH_INIT and rpass == 1:
print('Sending init cmd: {0}'.format(INIT_CMD.strip()))
timed_cmd(msock, INIT_CMD)
timed_cmd(msock, 'M400\n')
get_status(msock)
print('New status after init: {0}\n'.format(STATUS))
line_number = 1
if WITH_FILE:
for line in GCode:
if not line.startswith('('):
# Smoothie switches on if spindle configured in switch mode for ANY value of S, including 0
if line.startswith('M3'):
print('Spindle control: {0}'.format(line.strip()))
# FIXME use longer timeout for M400
timed_cmd(msock, line)
if LAST_RESPONSE == 'error':
print('Exiting, error condition at line {0}'.format(line_number))
sys.exit(1)
line_number = line_number + 1
elapsed_pass = time.monotonic() - start_pass
print('pass {0} total time {1:.4f}s, timeouts: {2}'.format(rpass, elapsed_pass, PASS_TIMEOUT_COUNT))
if WITH_POST:
print('Final pass completed, sending post-run command {0}'.format(POST_CMD))
timed_cmd(msock, POST_CMD)
timed_cmd(msock, 'M400\n')
get_status(msock)
print('New status after post-run cmd: {0}\n'.format(STATUS))
elapsed_run = time.monotonic() - start_run
print('Final pass completed in {0:.4f}s, total timeout count: {1}'.format(elapsed_run, TIMEOUT_COUNT))
except OSError as e:
print('Exception:', e)
print('last cmd:', LAST_SENT, 'line number:', line_number)
elapsed_run = time.monotonic() - start_run
print('Elapsed time: {0:.4f}s'.format(elapsed_run))
sys.exit(1)
msock.close()
print('Completed')
sys.exit(0)
| 3,002 | 0 | 67 |
2af8f5241b6f0258789c577a88e64485638d253e | 294 | py | Python | zhangqi/20180328/h4.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | zhangqi/20180328/h4.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | zhangqi/20180328/h4.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | #4. Define a function that does the following:
# 1) Take two integers as input, e.g. 3 and 5
# 2) The function computes 3 + 33 + 333 + 3333 + 33333 (stopping after 5 terms)
a, b = eval(input("Enter two integers"))
print(sum1(a, b))
 | 10.5 | 49 | 0.578231 | #4. Define a function that does the following:
# 1) Take two integers as input, e.g. 3 and 5
# 2) The function computes 3 + 33 + 333 + 3333 + 33333 (stopping after 5 terms)
def sum1(m, n):
sumnum = 0
for i in range(1,n+1):
sumnum = sumnum + int(str(m)*i)
return sumnum
print("结果是{}".format(sumnum))
a, b = eval(input("请输入两个整型数"))
print(sum1(a, b))
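# Sanity check: for m=3, n=5 the series is 3 + 33 + 333 + 3333 + 33333 = 37035,
# so sum1(3, 5) returns 37035.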
| 118 | 0 | 23 |
7ae39ed6e0121a015b6be97a129e1fdb8ea0c637 | 1,882 | py | Python | urlcatcher/bot_plugin.py | kiwiheretic/logos-v2 | 22739221a6d431322c809b7e17aba54f37eb9617 | [
"Apache-2.0"
] | 4 | 2015-02-20T08:11:59.000Z | 2019-05-15T23:48:11.000Z | urlcatcher/bot_plugin.py | kiwiheretic/logos-v2 | 22739221a6d431322c809b7e17aba54f37eb9617 | [
"Apache-2.0"
] | 58 | 2015-01-11T02:10:09.000Z | 2022-03-20T01:20:15.000Z | urlcatcher/bot_plugin.py | kiwiheretic/logos-v2 | 22739221a6d431322c809b7e17aba54f37eb9617 | [
"Apache-2.0"
] | 1 | 2016-06-15T00:49:44.000Z | 2016-06-15T00:49:44.000Z | # test plugin
from bot.pluginDespatch import Plugin
import re
import datetime
import logging
from models import CapturedUrls
from logos.settings import LOGGING
logger = logging.getLogger(__name__)
logging.config.dictConfig(LOGGING)
| 33.017544 | 117 | 0.566419 | # test plugin
from bot.pluginDespatch import Plugin
import re
import datetime
import logging
from models import CapturedUrls
from logos.settings import LOGGING
logger = logging.getLogger(__name__)
logging.config.dictConfig(LOGGING)
class UrlCatcherPlugin(Plugin):
plugin = ("url_catcher", "Url Catcher Module")
def __init__(self, *args, **kwargs):
Plugin.__init__(self, *args, **kwargs)
self.commands = (\
(r'urls\s+(?P<room>#[a-zA-z0-9-]+)\s+(?P<count>\d+)', self.urls_display, "display a list of captured urls"),
(r'urls\s+(?P<room>#[a-zA-z0-9-]+)$', self.urls_display, "display a list of captured urls"),
)
def urls_display(self, regex, chan, nick, **kwargs):
if 'count' in regex.groupdict():
num_to_print = int(regex.group('count'))
else:
# default to 5 if no number specified
num_to_print = 5
# send the urls in order of reverse chronological
# order
room = regex.group('room')
cap_urls = CapturedUrls.objects.filter(room=room.lower()).\
order_by('-timestamp')[:num_to_print]
for url in cap_urls:
timestamp = str(url.timestamp)
self.say(nick, "{} {} -- {}".format(timestamp, url.nick, url.url))
def privmsg(self, user, channel, message):
# Capture any matching urls and keep it in buffer
url_mch = re.search('(?:https?://\S+)|www\.\S+', message, re.I)
if url_mch:
url = url_mch.group(0)
logger.info("capturing url : " + url)
timestamp = datetime.datetime.utcnow()
cap_url = CapturedUrls(timestamp=timestamp, nick=user,
room=channel.lower(),
url=url)
cap_url.save()
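            # Examples of the capture regex above (case-insensitive):
            # 'see https://t.co/x now' matches 'https://t.co/x', and a bare
            # 'www.example.com' is matched by the www.\S+ alternative.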
| 1,429 | 145 | 23 |
87855570e41d240efea1ebf14728ffd52495bae4 | 2,914 | py | Python | odata/serializers.py | Captain777747/agasownServer | 70952d083ed81a216cf12708d3403b09b9480e28 | [
"MIT"
] | 1 | 2021-03-17T21:38:55.000Z | 2021-03-17T21:38:55.000Z | odata/serializers.py | Captain777747/agasownServer | 70952d083ed81a216cf12708d3403b09b9480e28 | [
"MIT"
] | null | null | null | odata/serializers.py | Captain777747/agasownServer | 70952d083ed81a216cf12708d3403b09b9480e28 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from rest_framework import serializers
from odata.models import Product, Customer, Category, Shipper, Order, OrderDetail | 42.852941 | 173 | 0.67639 | from django.contrib.auth.models import User
from rest_framework import serializers
from odata.models import Product, Customer, Category, Shipper, Order, OrderDetail
class ProductSerializers(serializers.ModelSerializer):
class Meta:
model = Product
fields = '__all__'
class CustomerSerializers(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), required=False)
username = serializers.CharField(max_length=100)
email = serializers.EmailField()
class Meta:
model = Customer
fields = ['user', 'username', 'email', 'first_name', 'last_name', 'customer_class', 'room', 'building', 'address1', 'address2', 'city', 'state', 'postal_code',
                  'country', 'phone', 'voice_mail', 'password', 'credit_card', 'credit_card_type_id', 'card_exp_month', 'billing_address', 'billing_city',
'billing_region', 'billing_postal_code', 'billing_country', 'ship_address', 'ship_city', 'ship_region', 'ship_postal_code', 'ship_country', 'date_entered',
]
def create(self, validated_data):
username = validated_data.pop('username')
email = validated_data.pop('email')
# instance = Customer(**validated_data)
user = User.objects.create(first_name=validated_data['first_name'], last_name=validated_data['last_name'], email=email, username=username)
# instance.user = user
instance = Customer.objects.filter(user=user).update(**validated_data)
return Customer.objects.get(user=user)
def validate(self, validated_data):
username = validated_data['username']
email = validated_data['email']
if User.objects.filter(email=email):
raise serializers.ValidationError({"email" : "Email is already registered with us"})
if User.objects.filter(username=username):
raise serializers.ValidationError({"username" : "Username is already registered with us"})
# if
return validated_data
def to_representation(self, instance):
instance.username = instance.user.username
instance.email = instance.user.email
rep = super(CustomerSerializers, self).to_representation(instance)
rep['username'] = instance.user.username
rep['email'] = instance.user.email
return rep
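# Illustrative DRF flow for the serializer above (`payload` is a hypothetical
# dict carrying the extra username/email fields):
# ser = CustomerSerializers(data=payload)
# if ser.is_valid(raise_exception=True):  # runs validate()
#     customer = ser.save()               # dispatches to create()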
class CategorySerializers(serializers.ModelSerializer):
class Meta:
model = Category
fields = '__all__'
class ShipperSerializers(serializers.ModelSerializer):
class Meta:
model = Shipper
fields = '__all__'
class OrderSerializers(serializers.ModelSerializer):
class Meta:
model = Order
fields = '__all__'
class OrderDetailSerializers(serializers.ModelSerializer):
class Meta:
model = OrderDetail
fields = '__all__' | 1,225 | 1,380 | 145 |
424f4e27ebed3a8fc74e05408caaaa98bec9923f | 9,244 | py | Python | Ensembl.dat-to-topGO.db.py | evodify/calls-files-manipulations | ba861fbc052e6a13b34fe744fb47cb229572b7ef | [
"MIT"
] | 8 | 2018-01-31T09:55:30.000Z | 2021-12-17T04:49:53.000Z | Ensembl.dat-to-topGO.db.py | evodify/calls-files-manipulations | ba861fbc052e6a13b34fe744fb47cb229572b7ef | [
"MIT"
] | 3 | 2021-12-08T16:38:53.000Z | 2021-12-17T08:36:22.000Z | Ensembl.dat-to-topGO.db.py | evodify/calls-files-manipulations | ba861fbc052e6a13b34fe744fb47cb229572b7ef | [
"MIT"
] | 2 | 2020-05-22T08:24:11.000Z | 2020-06-04T13:24:11.000Z | #! /usr/bin/env python2
'''
This script converts the Ensembl.dat file to the GO reference file used in the topGO R program.
#Example input:
ID 1 standard; DNA; HTG; 122678785 BP.
XX
AC chromosome:CanFam3.1:1:1:122678785:1
XX
SV 1.CanFam3.1
XX
DT 4-SEP-2018
XX
DE Canis lupus familiaris chromosome 1 CanFam3.1 full sequence 1..122678785
DE annotated by Ensembl
XX
KW .
XX
OS Canis lupus familiaris (dog)
OC Eukaryota; Opisthokonta; Metazoa; Eumetazoa; Bilateria; Deuterostomia;
OC Chordata; Craniata; Vertebrata; Gnathostomata; Teleostomi; Euteleostomi;
OC Sarcopterygii; Dipnotetrapodomorpha; Tetrapoda; Amniota; Mammalia; Theria;
OC Eutheria; Boreoeutheria; Laurasiatheria; Carnivora; Caniformia; Canidae;
OC Canis lupus.
XX
CC This sequence was annotated by Ensembl(www.ensembl.org). Please visit the
CC Ensembl or EnsemblGenomes web site, http://www.ensembl.org/ or
CC http://www.ensemblgenomes.org/ for more information.
XX
CC All feature locations are relative to the first (5') base of the sequence
CC in this file. The sequence presented is always the forward strand of the
CC assembly. Features that lie outside of the sequence contained in this file
CC have clonal location coordinates in the format: <clone
CC accession>.<version>:<start>..<end>
XX
CC The /gene indicates a unique id for a gene, /note="transcript_id=..." a
CC unique id for a transcript, /protein_id a unique id for a peptide and
CC note="exon_id=..." a unique id for an exon. These ids are maintained
CC wherever possible between versions.
XX
CC All the exons and transcripts in Ensembl are confirmed by similarity to
CC either protein or cDNA sequences.
XX
FH Key Location/Qualifiers
FT source 1..122678785
FT /organism="Canis lupus familiaris"
FT /db_xref="taxon:9615"
FT gene 722179..735934
FT /gene=ENSCAFG00000000008.3
FT /locus_tag="TXNL4A"
FT /note="thioredoxin like 4A [Source:VGNC
FT Symbol;Acc:VGNC:48019]"
FT mRNA join(722179..722324,722691..722877,731542..731645,
FT 734838..735934)
FT /gene="ENSCAFG00000000008.3"
FT /standard_name="ENSCAFT00000000009.3"
FT CDS join(722725..722877,731542..731645,734838..735009)
FT /gene="ENSCAFG00000000008.3"
FT /protein_id="ENSCAFP00000000008.3"
FT /note="transcript_id=ENSCAFT00000000009.3"
FT /db_xref="RefSeq_mRNA_predicted:XM_005615276"
FT /db_xref="RefSeq_mRNA_predicted:XM_533363"
FT /db_xref="RefSeq_peptide_predicted:XP_005615333"
FT /db_xref="RefSeq_peptide_predicted:XP_022263670"
FT /db_xref="RefSeq_peptide_predicted:XP_533363"
FT /db_xref="Uniprot/SPTREMBL:E2R204"
FT /db_xref="EMBL:AAEX03000011"
FT /db_xref="GO:0000398"
FT /db_xref="GO:0000398"
FT /db_xref="GO:0005634"
FT /db_xref="GO:0005682"
FT /db_xref="GO:0005829"
FT /db_xref="GO:0031965"
FT /db_xref="GO:0046540"
FT /db_xref="GO:0046540"
FT /db_xref="GO:0046540"
FT /db_xref="GO:0071005"
FT /db_xref="VGNC_trans_name:TXNL4A-201"
FT /db_xref="Reactome:R-CFA-72163"
FT /db_xref="Reactome:R-CFA-72165"
FT /db_xref="Reactome:R-CFA-72172"
FT /db_xref="Reactome:R-CFA-72203"
FT /db_xref="Reactome:R-CFA-8953854"
FT /db_xref="UniParc:UPI0000447A0B"
FT /translation="MSYMLPHLHNGWQVDQAILSEEDRVVVIRFGHDWDPTCMKMDEVL
FT YSIAEKVKNFAVIYLVDITEVPDFNKMYELYDPCTVMFFFRNKHIMIDLGTGNNNKINW
FT AMEDKQEMIDIIETVYRGARKGRGLVVSPKDYSTKYRY"
FT gene complement(744461..746178)
FT /gene=ENSCAFG00000031133.1
FT /locus_tag="HSBP1L1"
FT /note="heat shock factor binding protein 1 like 1
FT [Source:VGNC Symbol;Acc:VGNC:53725]"
FT mRNA join(complement(746112..746178),complement(744461..744552))
FT /gene="ENSCAFG00000031133.1"
FT /standard_name="ENSCAFT00000045122.1"
FT CDS join(complement(746112..746178),complement(744461..744552))
FT /gene="ENSCAFG00000031133.1"
FT /protein_id="ENSCAFP00000038592.1"
FT /note="transcript_id=ENSCAFT00000045122.1"
FT /db_xref="RefSeq_mRNA_predicted:XM_003432558"
FT /db_xref="RefSeq_peptide_predicted:XP_003432606"
FT /db_xref="Uniprot/SPTREMBL:J9NZ72"
FT /db_xref="EMBL:AAEX03000011"
FT /db_xref="GO:0003714"
FT /db_xref="GO:0005634"
FT /db_xref="GO:0005737"
FT /db_xref="GO:0005829"
FT /db_xref="GO:0070370"
FT /db_xref="GO:1903507"
FT /db_xref="VGNC_trans_name:HSBP1L1-201"
FT /db_xref="UniParc:UPI00027479F7"
FT /translation="AENLFQELQEHFQALIATLNLRMEEMGSRLEDLQKNVNDLMVQAG
FT VEDPVSEQ"
FT gene complement(829658..866436)
FT /gene=ENSCAFG00000039493.1
FT misc_RNA join(complement(865924..866436),complement(846078..846445),
FT complement(841686..841787),complement(829658..829667))
FT /gene="ENSCAFG00000039493.1"
FT /db_xref="RNAcentral:URS0000A92009"
FT /note="lincRNA"
FT /standard_name="ENSCAFT00000053567.1"
FT misc_RNA join(complement(865924..866436),complement(846078..846445),
FT complement(843054..843163),complement(841759..841787))
FT /gene="ENSCAFG00000039493.1"
FT /db_xref="RNAcentral:URS0000A94E9D"
FT /note="lincRNA"
FT /standard_name="ENSCAFT00000055264.1"
FT misc_RNA join(complement(865924..866436),complement(846078..846445),
FT complement(845283..845439))
FT /gene="ENSCAFG00000039493.1"
FT /db_xref="RNAcentral:URS0000AA5288"
FT /note="lincRNA"
FT /standard_name="ENSCAFT00000058711.1"
FT gene complement(886083..886640)
FT /gene=ENSCAFG00000028976.1
FT mRNA complement(886083..886640)
FT /gene="ENSCAFG00000028976.1"
FT /standard_name="ENSCAFT00000043602.1"
FT CDS complement(886083..886640)
FT /gene="ENSCAFG00000028976.1"
FT /protein_id="ENSCAFP00000039366.1"
FT /note="transcript_id=ENSCAFT00000043602.1"
FT /db_xref="Uniprot/SPTREMBL:J9P1E2"
FT /db_xref="EMBL:AAEX03000016"
FT /db_xref="UniParc:UPI000274763D"
FT /translation="MWTGWPMGVPEHCTAPAPYTGRSAQGPSPTSGSAPGPPHTHGPPA
FT LGIPPRGPLSTQDYPPTWPPAPRTPLMWAPQQPGPPTQATSTEDHPHATPQHPGLPHPH
FT PRGPSAPRTPPCGPSHGSPALGTPPCRPLSTKDPLPPPHPKSYGGWFPGSLFRVLPGPQ
FT EDSPPNRAADAQSQHLVAFRCF"
#Example output:
ENSCAFG00000000008 GO:0000398, GO:0000398, GO:0005634, GO:0005682, GO:0005829, GO:0031965, GO:0046540, GO:0046540, GO:0046540, GO:0071005
ENSCAFG00000031133 GO:0003714, GO:0005634, GO:0005737, GO:0005829, GO:0070370, GO:1903507
#command:
$ python Ensembl.dat-to-topGO.db.py -i input.table -o output.tab
#contact:
Dmytro Kryvokhyzha dmytro.kryvokhyzha@evobio.eu
'''
############################# modules #############################
import calls # my custom module
############################# options #############################
parser = calls.CommandLineParser()
parser.add_argument('-i', '--input', help = 'name of the input file', type=str, required=True)
parser.add_argument('-o', '--output', help = 'name of the output file', type=str, required=True)
args = parser.parse_args()
############################# program #############################
geneName = "geneName"
dicts = {geneName: []}
outfile = open(args.output, 'w')
with open(args.input) as datafile:
for line in datafile:
if line.startswith("FT"):
words = line.split()
if '/gene="' in words[1]:
if dicts[geneName] != []:
GOprint = ', '.join(str(e) for e in dicts[geneName])
outfile.write("%s\t%s\n" % (list(dicts.keys())[0], GOprint))
geneName = words[1].split(".")[0].replace('/gene="', '')
dicts = {geneName: []}
elif '/db_xref="GO' in words[1]:
GO = words[1].replace('/db_xref="', '').replace('"', '')
dicts[geneName].append(GO)
# Flush the last gene collected in the loop (otherwise it is never written)
if dicts[geneName] != []:
    GOprint = ', '.join(str(e) for e in dicts[geneName])
    outfile.write("%s\t%s\n" % (list(dicts.keys())[0], GOprint))
datafile.close()
outfile.close()
print('Done!')
# dicts = {}
# keys = range(4)
# values = ["Hi", "I", "am", "John"]
# for i in keys:
# dicts[i] = values[i]
# print(dicts) | 43.810427 | 137 | 0.594331 | #! /usr/bin/env python2
'''
This script converts the Ensembl.dat file to the GO reference file used in the topGO R program.
#Example input:
ID 1 standard; DNA; HTG; 122678785 BP.
XX
AC chromosome:CanFam3.1:1:1:122678785:1
XX
SV 1.CanFam3.1
XX
DT 4-SEP-2018
XX
DE Canis lupus familiaris chromosome 1 CanFam3.1 full sequence 1..122678785
DE annotated by Ensembl
XX
KW .
XX
OS Canis lupus familiaris (dog)
OC Eukaryota; Opisthokonta; Metazoa; Eumetazoa; Bilateria; Deuterostomia;
OC Chordata; Craniata; Vertebrata; Gnathostomata; Teleostomi; Euteleostomi;
OC Sarcopterygii; Dipnotetrapodomorpha; Tetrapoda; Amniota; Mammalia; Theria;
OC Eutheria; Boreoeutheria; Laurasiatheria; Carnivora; Caniformia; Canidae;
OC Canis lupus.
XX
CC This sequence was annotated by Ensembl(www.ensembl.org). Please visit the
CC Ensembl or EnsemblGenomes web site, http://www.ensembl.org/ or
CC http://www.ensemblgenomes.org/ for more information.
XX
CC All feature locations are relative to the first (5') base of the sequence
CC in this file. The sequence presented is always the forward strand of the
CC assembly. Features that lie outside of the sequence contained in this file
CC have clonal location coordinates in the format: <clone
CC accession>.<version>:<start>..<end>
XX
CC The /gene indicates a unique id for a gene, /note="transcript_id=..." a
CC unique id for a transcript, /protein_id a unique id for a peptide and
CC note="exon_id=..." a unique id for an exon. These ids are maintained
CC wherever possible between versions.
XX
CC All the exons and transcripts in Ensembl are confirmed by similarity to
CC either protein or cDNA sequences.
XX
FH Key Location/Qualifiers
FT source 1..122678785
FT /organism="Canis lupus familiaris"
FT /db_xref="taxon:9615"
FT gene 722179..735934
FT /gene=ENSCAFG00000000008.3
FT /locus_tag="TXNL4A"
FT /note="thioredoxin like 4A [Source:VGNC
FT Symbol;Acc:VGNC:48019]"
FT mRNA join(722179..722324,722691..722877,731542..731645,
FT 734838..735934)
FT /gene="ENSCAFG00000000008.3"
FT /standard_name="ENSCAFT00000000009.3"
FT CDS join(722725..722877,731542..731645,734838..735009)
FT /gene="ENSCAFG00000000008.3"
FT /protein_id="ENSCAFP00000000008.3"
FT /note="transcript_id=ENSCAFT00000000009.3"
FT /db_xref="RefSeq_mRNA_predicted:XM_005615276"
FT /db_xref="RefSeq_mRNA_predicted:XM_533363"
FT /db_xref="RefSeq_peptide_predicted:XP_005615333"
FT /db_xref="RefSeq_peptide_predicted:XP_022263670"
FT /db_xref="RefSeq_peptide_predicted:XP_533363"
FT /db_xref="Uniprot/SPTREMBL:E2R204"
FT /db_xref="EMBL:AAEX03000011"
FT /db_xref="GO:0000398"
FT /db_xref="GO:0000398"
FT /db_xref="GO:0005634"
FT /db_xref="GO:0005682"
FT /db_xref="GO:0005829"
FT /db_xref="GO:0031965"
FT /db_xref="GO:0046540"
FT /db_xref="GO:0046540"
FT /db_xref="GO:0046540"
FT /db_xref="GO:0071005"
FT /db_xref="VGNC_trans_name:TXNL4A-201"
FT /db_xref="Reactome:R-CFA-72163"
FT /db_xref="Reactome:R-CFA-72165"
FT /db_xref="Reactome:R-CFA-72172"
FT /db_xref="Reactome:R-CFA-72203"
FT /db_xref="Reactome:R-CFA-8953854"
FT /db_xref="UniParc:UPI0000447A0B"
FT /translation="MSYMLPHLHNGWQVDQAILSEEDRVVVIRFGHDWDPTCMKMDEVL
FT YSIAEKVKNFAVIYLVDITEVPDFNKMYELYDPCTVMFFFRNKHIMIDLGTGNNNKINW
FT AMEDKQEMIDIIETVYRGARKGRGLVVSPKDYSTKYRY"
FT gene complement(744461..746178)
FT /gene=ENSCAFG00000031133.1
FT /locus_tag="HSBP1L1"
FT /note="heat shock factor binding protein 1 like 1
FT [Source:VGNC Symbol;Acc:VGNC:53725]"
FT mRNA join(complement(746112..746178),complement(744461..744552))
FT /gene="ENSCAFG00000031133.1"
FT /standard_name="ENSCAFT00000045122.1"
FT CDS join(complement(746112..746178),complement(744461..744552))
FT /gene="ENSCAFG00000031133.1"
FT /protein_id="ENSCAFP00000038592.1"
FT /note="transcript_id=ENSCAFT00000045122.1"
FT /db_xref="RefSeq_mRNA_predicted:XM_003432558"
FT /db_xref="RefSeq_peptide_predicted:XP_003432606"
FT /db_xref="Uniprot/SPTREMBL:J9NZ72"
FT /db_xref="EMBL:AAEX03000011"
FT /db_xref="GO:0003714"
FT /db_xref="GO:0005634"
FT /db_xref="GO:0005737"
FT /db_xref="GO:0005829"
FT /db_xref="GO:0070370"
FT /db_xref="GO:1903507"
FT /db_xref="VGNC_trans_name:HSBP1L1-201"
FT /db_xref="UniParc:UPI00027479F7"
FT /translation="AENLFQELQEHFQALIATLNLRMEEMGSRLEDLQKNVNDLMVQAG
FT VEDPVSEQ"
FT gene complement(829658..866436)
FT /gene=ENSCAFG00000039493.1
FT misc_RNA join(complement(865924..866436),complement(846078..846445),
FT complement(841686..841787),complement(829658..829667))
FT /gene="ENSCAFG00000039493.1"
FT /db_xref="RNAcentral:URS0000A92009"
FT /note="lincRNA"
FT /standard_name="ENSCAFT00000053567.1"
FT misc_RNA join(complement(865924..866436),complement(846078..846445),
FT complement(843054..843163),complement(841759..841787))
FT /gene="ENSCAFG00000039493.1"
FT /db_xref="RNAcentral:URS0000A94E9D"
FT /note="lincRNA"
FT /standard_name="ENSCAFT00000055264.1"
FT misc_RNA join(complement(865924..866436),complement(846078..846445),
FT complement(845283..845439))
FT /gene="ENSCAFG00000039493.1"
FT /db_xref="RNAcentral:URS0000AA5288"
FT /note="lincRNA"
FT /standard_name="ENSCAFT00000058711.1"
FT gene complement(886083..886640)
FT /gene=ENSCAFG00000028976.1
FT mRNA complement(886083..886640)
FT /gene="ENSCAFG00000028976.1"
FT /standard_name="ENSCAFT00000043602.1"
FT CDS complement(886083..886640)
FT /gene="ENSCAFG00000028976.1"
FT /protein_id="ENSCAFP00000039366.1"
FT /note="transcript_id=ENSCAFT00000043602.1"
FT /db_xref="Uniprot/SPTREMBL:J9P1E2"
FT /db_xref="EMBL:AAEX03000016"
FT /db_xref="UniParc:UPI000274763D"
FT /translation="MWTGWPMGVPEHCTAPAPYTGRSAQGPSPTSGSAPGPPHTHGPPA
FT LGIPPRGPLSTQDYPPTWPPAPRTPLMWAPQQPGPPTQATSTEDHPHATPQHPGLPHPH
FT PRGPSAPRTPPCGPSHGSPALGTPPCRPLSTKDPLPPPHPKSYGGWFPGSLFRVLPGPQ
FT EDSPPNRAADAQSQHLVAFRCF"
#Example output:
ENSCAFG00000000008 GO:0000398, GO:0000398, GO:0005634, GO:0005682, GO:0005829, GO:0031965, GO:0046540, GO:0046540, GO:0046540, GO:0071005
ENSCAFG00000031133 GO:0003714, GO:0005634, GO:0005737, GO:0005829, GO:0070370, GO:1903507
#command:
$ python Ensembl.dat-to-topGO.db.py -i input.table -o output.tab
#contact:
Dmytro Kryvokhyzha dmytro.kryvokhyzha@evobio.eu
'''
############################# modules #############################
import calls # my custom module
############################# options #############################
parser = calls.CommandLineParser()
parser.add_argument('-i', '--input', help = 'name of the input file', type=str, required=True)
parser.add_argument('-o', '--output', help = 'name of the output file', type=str, required=True)
args = parser.parse_args()
############################# program #############################
geneName = "geneName"
dicts = {geneName: []}
outfile = open(args.output, 'w')
with open(args.input) as datafile:
for line in datafile:
if line.startswith("FT"):
words = line.split()
if '/gene="' in words[1]:
if dicts[geneName] != []:
GOprint = ', '.join(str(e) for e in dicts[geneName])
outfile.write("%s\t%s\n" % (list(dicts.keys())[0], GOprint))
geneName = words[1].split(".")[0].replace('/gene="', '')
dicts = {geneName: []}
elif '/db_xref="GO' in words[1]:
GO = words[1].replace('/db_xref="', '').replace('"', '')
dicts[geneName].append(GO)
datafile.close()
outfile.close()
print('Done!')
# dicts = {}
# keys = range(4)
# values = ["Hi", "I", "am", "John"]
# for i in keys:
# dicts[i] = values[i]
# print(dicts) | 0 | 0 | 0 |
33b55b1f23f0abfd9720892d60fe0e6cc1394d65 | 8,517 | py | Python | conf_search_and_xTB/geo_utils.py | aspuru-guzik-group/kraken | 4eaad505c1343e6083032b4a3fda47e004e19734 | [
"MIT"
] | 3 | 2022-01-13T12:39:54.000Z | 2022-03-30T00:10:52.000Z | conf_search_and_xTB/geo_utils.py | aspuru-guzik-group/kraken | 4eaad505c1343e6083032b4a3fda47e004e19734 | [
"MIT"
] | null | null | null | conf_search_and_xTB/geo_utils.py | aspuru-guzik-group/kraken | 4eaad505c1343e6083032b4a3fda47e004e19734 | [
"MIT"
] | null | null | null | import os
import numpy as np
import scipy.spatial as scsp
import scipy.linalg as scli
| 35.4875 | 119 | 0.613244 | import os
import numpy as np
import scipy.spatial as scsp
import scipy.linalg as scli
def get_Ni_CO_3():
crest_best=""" Ni -2.05044275300666 0.06382544955011 0.09868120676498
P -2.80714796997979 -1.10266971180507 -1.69574169412280
C -2.69200378269657 -0.76605024888162 1.57419568293391
O -3.04257804499007 -1.20995335174270 2.55963300719774
C -2.69223663646763 1.74898458637508 -0.06255834794434
O -3.04279673881760 2.82969533618590 -0.06960307962299
C -0.24189533762829 0.01881947327896 0.02959721559736
O 0.89275735454075 0.05117679841698 0.07869727019190"""
coords=[]
elements=[]
for line in crest_best.split("\n"):
elements.append(line.split()[0].capitalize())
coords.append([float(line.split()[1]),float(line.split()[2]),float(line.split()[3])])
coords=np.array(coords)
pd_idx=0
p_idx=1
return(coords, elements, pd_idx, p_idx)
def get_Pd_NH3_Cl_Cl():
crest_best=""" Pd -1.89996002172552 -0.02498444632011 2.10982622577294
N -1.56965112209091 -2.05219215877655 2.00001618954387
As -2.21595829857879 2.00450177777031 2.22007410905701
H -2.40942129799767 2.36875215537164 1.28398287161819
H -3.02318569399418 2.18955283434028 2.82011004940424
H -1.37353382758245 2.44891664756754 2.59391276210718
Cl 0.35060095551484 0.32532669157403 2.26937306191342
Cl -4.15039897250316 -0.37607926860031 1.97331323844022"""
coords=[]
elements=[]
for line in crest_best.split("\n"):
elements.append(line.split()[0].capitalize())
coords.append([float(line.split()[1]),float(line.split()[2]),float(line.split()[3])])
coords=np.array(coords)
pd_idx=0
p_idx=1
return(coords, elements, pd_idx, p_idx)
def get_Pd_PH3_Cl_Cl():
crest_best=""" Pd -0.0000038844 0.0000159819 0.0000111133
P -1.6862635579 -1.4845823545 0.0000219312
As 1.6863052034 1.4845534610 0.0000263723
H 1.5596931931 2.8713746717 0.0001941369
H 2.5992646617 1.3913133533 -1.0337086367
H 2.5995574579 1.3910615548 1.0334736685
Cl -1.8219820508 1.3831400099 -0.0000386628
Cl 1.8219489915 -1.3831565314 -0.0000388596"""
coords=[]
elements=[]
for line in crest_best.split("\n"):
elements.append(line.split()[0].capitalize())
coords.append([float(line.split()[1]),float(line.split()[2]),float(line.split()[3])])
coords=np.array(coords)
pd_idx=0
p_idx=1
return(coords, elements, pd_idx, p_idx)
def get_Pd_Cl_Cl():
crest_best=""" Pd -0.0000038844 0.0000159819 0.0000111133
P -1.6862635579 -1.4845823545 0.0000219312
Cl -1.8219820508 1.3831400099 -0.0000386628
Cl 1.8219489915 -1.3831565314 -0.0000388596"""
coords=[]
elements=[]
for line in crest_best.split("\n"):
elements.append(line.split()[0].capitalize())
coords.append([float(line.split()[1]),float(line.split()[2]),float(line.split()[3])])
coords=np.array(coords)
pd_idx=0
p_idx=1
return(coords, elements, pd_idx, p_idx)
def rotationMatrix(vector,angle):
angle=angle/180.0*np.pi
norm=(vector[0]**2.0+vector[1]**2.0+vector[2]**2.0)**0.5
direction=vector/norm
matrix=np.zeros((3,3))
matrix[0][0]=direction[0]**2.0*(1.0-np.cos(angle))+np.cos(angle)
matrix[1][1]=direction[1]**2.0*(1.0-np.cos(angle))+np.cos(angle)
matrix[2][2]=direction[2]**2.0*(1.0-np.cos(angle))+np.cos(angle)
matrix[0][1]=direction[0]*direction[1]*(1.0-np.cos(angle))-direction[2]*np.sin(angle)
matrix[1][0]=direction[0]*direction[1]*(1.0-np.cos(angle))+direction[2]*np.sin(angle)
matrix[0][2]=direction[0]*direction[2]*(1.0-np.cos(angle))+direction[1]*np.sin(angle)
matrix[2][0]=direction[0]*direction[2]*(1.0-np.cos(angle))-direction[1]*np.sin(angle)
matrix[1][2]=direction[1]*direction[2]*(1.0-np.cos(angle))-direction[0]*np.sin(angle)
matrix[2][1]=direction[1]*direction[2]*(1.0-np.cos(angle))+direction[0]*np.sin(angle)
return(matrix)
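# Quick check of the rotation matrix above (illustrative): a 90-degree
# rotation about z maps the x axis onto the y axis.
# R = rotationMatrix(np.array([0.0, 0.0, 1.0]), 90.0)
# np.allclose(np.dot(R, [1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])  # True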
def replace(c1_i, e1_i, c2_i, e2_i, Au_index, P_index, match_Au_index, match_P_index, smiles, rotate_third_axis=True):
# copy all the initial things to not change the original arrays
c1=np.copy(c1_i)
e1=np.copy(e1_i)
c2=np.copy(c2_i)
e2=np.copy(e2_i)
clash_dist=1.0
# shift the ligand
c1-=c1[P_index]
# shift the ferrocene
c2-=c2[match_P_index]
# rotate He-P-axis of ligand
dir1=c1[Au_index]-c1[P_index]
dir1/=scli.norm(dir1)
dir2=np.array([0.0,1.0,0.0])
dir2/=scli.norm(dir2)
if np.abs(1.0-np.sum(dir1*dir2))>1e-3:
cross_dir1_dir2=np.cross(dir1,dir2)
cross_dir1_dir2/=scli.norm(cross_dir1_dir2)
angle=np.arccos(np.sum(dir1*dir2))/np.pi*180.0
rotation=rotationMatrix(cross_dir1_dir2, angle)
coords_rotated=[]
for atom in c1:
coords_rotated.append(np.dot(rotation, atom).tolist())
c1=np.array(coords_rotated)
# rotate P-He_replacement-axis of ligand
dir1=c2[match_Au_index]-c2[match_P_index]
dir1/=scli.norm(dir1)
dir2=np.array([0.0,1.0,0.0])
dir2/=scli.norm(dir2)
if np.abs(1.0-np.sum(dir1*dir2))>1e-3:
cross_dir1_dir2=np.cross(dir1,dir2)
cross_dir1_dir2/=scli.norm(cross_dir1_dir2)
angle=np.arccos(np.sum(dir1*dir2))/np.pi*180.0
rotation=rotationMatrix(cross_dir1_dir2, angle)
coords_rotated=[]
for atom in c2:
coords_rotated.append(np.dot(rotation, atom).tolist())
c2=np.array(coords_rotated)
#c2+=np.array([0.0,0.7,0.0])
    # rotatable bonds to P
#print(smi1)
#rot_bonds=get_rotatable_bonds(smi1)
#print(rot_bonds)
#print(Au_index, P_index)
if rotate_third_axis:
# rotate third axis
axis2=np.array([0.0,1.0,0.0])
axis2/=scli.norm(axis2)
#min_dist_opt=1.0
min_best=clash_dist
angle2_best=None
all_steps=[]
all_elements=[]
for angle2 in np.linspace(0.0,360.0,361):
rotation2=rotationMatrix(axis2, angle2)
# shift to zero
coords_rotated2=[]
for atom in c2:
coords_rotated2.append(np.dot(rotation2, atom))
coords_rotated2=np.array(coords_rotated2)
all_steps.append(np.copy(coords_rotated2))
all_elements.append(e2)
# shift back
mask1=np.ones((len(c1)))
mask1[Au_index]=0
mask1[P_index]=0
mask2=np.ones((len(c2)))
mask2[match_Au_index]=0
mask2[match_P_index]=0
indeces1=np.where(mask1==1)[0]
indeces2=np.where(mask2==1)[0]
min_dist=np.min(scsp.distance.cdist(c1[indeces1],coords_rotated2[indeces2]))
if min_dist>min_best: #min_dist>min_dist_opt and
min_best=min_dist
angle2_best=angle2
#print("found better RMSD: %f"%(RMSD_best))
if angle2_best == None:
#print("FAILED")
print("ERROR: Did not find a good rotation angle without clashes! %s"%(smiles))
return(False,None,None)
rotation2=rotationMatrix(axis2, angle2_best)
# shift to zero
coords_rotated_final=[]
for atom in c2:
coords_rotated_final.append(np.dot(rotation2, atom))
c2=np.array(coords_rotated_final)
c_final=[]
e_final=[]
c2_final=[]
e2_final=[]
for idx in range(len(c1)):
if idx!=P_index:
c_final.append(c1[idx].tolist())
e_final.append(e1[idx])
for idx in range(len(c2)):
if idx!=match_Au_index:
c_final.append(c2[idx].tolist())
e_final.append(e2[idx])
c2_final.append(c2[idx].tolist())
e2_final.append(e2[idx])
c_final=np.array(c_final)
#all_steps.append(np.copy(c2_final))
#all_elements.append(["K" for e in e2_final])
#all_steps.append(np.copy(c_final))
#all_elements.append(e_final)
#exportXYZs(all_steps,all_elements,"group_rotation.xyz")
e_final=[str(x) for x in e_final]
return(True, c_final, e_final)
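# Sketch of a call (illustrative; the index arguments name the anchor atoms
# in each fragment, and the SMILES string is only used in the error message):
# ok, coords, elems = replace(c1, e1, c2, e2, Au_index=0, P_index=1,
#                             match_Au_index=0, match_P_index=1,
#                             smiles='CP(C)C')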
| 8,284 | 0 | 138 |
4732b4a92f605030b3dd8dffc639c524a49610ec | 134 | py | Python | example/lambda.py | emlynoregan/phonelessapi | 2d911f341d563e08042c4a79e23df0ce62b4f1ba | [
"MIT"
] | null | null | null | example/lambda.py | emlynoregan/phonelessapi | 2d911f341d563e08042c4a79e23df0ce62b4f1ba | [
"MIT"
] | null | null | null | example/lambda.py | emlynoregan/phonelessapi | 2d911f341d563e08042c4a79e23df0ce62b4f1ba | [
"MIT"
] | null | null | null | import json
| 16.75 | 35 | 0.597015 | import json
def lambda_handler(event, context):
message = json.dumps(event);
return {
'message' : message
}
| 97 | 0 | 25 |
9866d3e5988ebcb70269f110e95af9cf52aaa3e1 | 4,800 | py | Python | fuel_plugin/testing/tests/unit/base.py | dnikishov/fuel-ostf | 4dcd99cc4bfa19f52d4b87ed321eb84ff03844da | [
"Apache-2.0"
] | null | null | null | fuel_plugin/testing/tests/unit/base.py | dnikishov/fuel-ostf | 4dcd99cc4bfa19f52d4b87ed321eb84ff03844da | [
"Apache-2.0"
] | null | null | null | fuel_plugin/testing/tests/unit/base.py | dnikishov/fuel-ostf | 4dcd99cc4bfa19f52d4b87ed321eb84ff03844da | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest2
from mock import patch, MagicMock
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from fuel_plugin.ostf_adapter import config
from fuel_plugin.ostf_adapter.nose_plugin.nose_discovery import discovery
from fuel_plugin.ostf_adapter.storage import models
from fuel_plugin.ostf_adapter import mixins
TEST_PATH = 'fuel_plugin/testing/fixture/dummy_tests'
| 35.820896 | 78 | 0.632917 | # Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest2
from mock import patch, MagicMock
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from fuel_plugin.ostf_adapter import config
from fuel_plugin.ostf_adapter.nose_plugin.nose_discovery import discovery
from fuel_plugin.ostf_adapter.storage import models
from fuel_plugin.ostf_adapter import mixins
TEST_PATH = 'fuel_plugin/testing/fixture/dummy_tests'
class BaseWSGITest(unittest2.TestCase):
@classmethod
def setUpClass(cls):
cls.dbpath = 'postgresql+psycopg2://ostf:ostf@localhost/ostf'
cls.Session = sessionmaker()
cls.engine = create_engine(cls.dbpath)
cls.ext_id = 'fuel_plugin.testing.fixture.dummy_tests.'
cls.expected = {
'cluster': {
'id': 1,
'deployment_tags': set(['ha', 'rhel', 'nova_network',
'public_on_all_nodes'])
},
'test_sets': ['general_test',
'stopped_test', 'ha_deployment_test',
'environment_variables'],
'tests': [cls.ext_id + test for test in [
('deployment_types_tests.ha_deployment_test.'
'HATest.test_ha_depl'),
('deployment_types_tests.ha_deployment_test.'
'HATest.test_ha_rhel_depl'),
'general_test.Dummy_test.test_fast_pass',
'general_test.Dummy_test.test_long_pass',
'general_test.Dummy_test.test_fast_fail',
'general_test.Dummy_test.test_fast_error',
'general_test.Dummy_test.test_fail_with_step',
'general_test.Dummy_test.test_skip',
'general_test.Dummy_test.test_skip_directly',
'stopped_test.dummy_tests_stopped.test_really_long',
'stopped_test.dummy_tests_stopped.test_one_no_so_long',
'stopped_test.dummy_tests_stopped.test_not_long_at_all',
('test_environment_variables.TestEnvVariables.'
'test_os_credentials_env_variables')
]]
}
def setUp(self):
# orm session wrapping
config.init_config([])
self.connection = self.engine.connect()
self.trans = self.connection.begin()
self.Session.configure(
bind=self.connection
)
self.session = self.Session()
test_sets = self.session.query(models.TestSet).all()
# need this if start unit tests in conjuction with integration
if not test_sets:
discovery(path=TEST_PATH, session=self.session)
mixins.cache_test_repository(self.session)
# mocking
# request mocking
self.request_mock = MagicMock()
self.request_patcher = patch(
'fuel_plugin.ostf_adapter.wsgi.controllers.request',
self.request_mock
)
self.request_patcher.start()
# engine.get_session mocking
self.request_mock.session = self.session
def tearDown(self):
# rollback changes to database
# made by tests
self.trans.rollback()
self.session.close()
self.connection.close()
# end of test_case patching
self.request_patcher.stop()
mixins.TEST_REPOSITORY = []
@property
def is_background_working(self):
is_working = True
cluster_state = self.session.query(models.ClusterState)\
.filter_by(id=self.expected['cluster']['id'])\
.one()
is_working = is_working and set(cluster_state.deployment_tags) == \
self.expected['cluster']['deployment_tags']
cluster_testing_patterns = self.session\
.query(models.ClusterTestingPattern)\
.filter_by(cluster_id=self.expected['cluster']['id'])\
.all()
for testing_pattern in cluster_testing_patterns:
is_working = is_working and \
(testing_pattern.test_set_id in self.expected['test_sets'])
is_working = is_working and set(testing_pattern.tests)\
.issubset(set(self.expected['tests']))
return is_working
| 3,610 | 157 | 23 |
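# Hedged sketch of the pattern base.py uses above: open a connection, start an
# outer transaction in setUp, bind the ORM session to that connection, and roll
# everything back in tearDown so no test ever persists data. The connection
# string here is a placeholder.
import unittest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

class RollbackPerTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.engine = create_engine("postgresql+psycopg2://user:pw@localhost/db")
        cls.Session = sessionmaker()

    def setUp(self):
        self.connection = self.engine.connect()
        self.trans = self.connection.begin()           # outer transaction
        self.session = self.Session(bind=self.connection)

    def tearDown(self):
        self.trans.rollback()                          # discard all test writes
        self.session.close()
        self.connection.close()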
5938c7cf6b91510f77334e09942060690083290c | 1,876 | py | Python | sstcam_sandbox/d191003_compress/check_pedestal_tcal.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | [
"BSD-3-Clause"
] | null | null | null | sstcam_sandbox/d191003_compress/check_pedestal_tcal.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | [
"BSD-3-Clause"
] | null | null | null | sstcam_sandbox/d191003_compress/check_pedestal_tcal.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | [
"BSD-3-Clause"
] | 1 | 2021-03-30T09:46:56.000Z | 2021-03-30T09:46:56.000Z | from CHECLabPy.core.io import TIOReader
from TargetCalibSB.pedestal import PedestalTargetCalib
import fitsio
from tqdm import tqdm
from matplotlib import pyplot as plt
import numpy as np
if __name__ == '__main__':
main()
| 31.266667 | 80 | 0.692964 | from CHECLabPy.core.io import TIOReader
from TargetCalibSB.pedestal import PedestalTargetCalib
import fitsio
from tqdm import tqdm
from matplotlib import pyplot as plt
import numpy as np
def main():
r0_path = "/Users/Jason/Downloads/tempdata/Pedestal_23deg_r0.tio"
r1_int_path = "/Users/Jason/Downloads/tempdata/Pedestal_23deg_r1_int.tio"
r1_rnd_path = "/Users/Jason/Downloads/tempdata/Pedestal_23deg_r1_rnd.tio"
tcal_path = "/Users/Jason/Downloads/tempdata/Pedestal_23deg_ped.tcal"
reader_r0 = TIOReader(r0_path, max_events=10000)
reader_r1_int = TIOReader(r1_int_path, max_events=10000)
reader_r1_rnd = TIOReader(r1_rnd_path, max_events=10000)
# Generate Pedestal
pedestal_tc = PedestalTargetCalib(
reader_r0.n_pixels, reader_r0.n_samples, reader_r0.n_cells
)
pedestal_tc.load_tcal(tcal_path)
l_int = []
l_rnd = []
# Subtract Pedestals
desc = "Subtracting pedestal"
z = zip(reader_r0, reader_r1_int, reader_r1_rnd)
it = tqdm(z, total=reader_r0.n_events, desc=desc)
for wfs_r0, wfs_r1_int, wfs_r1_rnd in it:
if wfs_r0.missing_packets:
continue
wfs_r1_flt = pedestal_tc.subtract_pedestal(wfs_r0, wfs_r0.first_cell_id)
# offset = 700
# scale = 13.6
# wfs_r1_flt = (wfs_r1_flt + offset) * scale
# wfs_r1_int = (wfs_r1_int + offset) * scale
# wfs_r1_rnd = (wfs_r1_rnd + offset) * scale
l_int.append(wfs_r1_flt - wfs_r1_int)
l_rnd.append(wfs_r1_flt - wfs_r1_rnd)
l_int = np.array(l_int).ravel()
l_rnd = np.array(l_rnd).ravel()
plt.hist(l_int, bins=20, histtype='step', label='int')
plt.hist(l_rnd, bins=20, histtype='step', label='rnd')
plt.legend(loc='best')
plt.xlabel("Difference to float ped-sub ADC")
plt.ylabel("N")
plt.show()
if __name__ == '__main__':
main()
| 1,625 | 0 | 23 |
67a94b4b3f1dac14bbe7faad1d2c29e676a6739b | 3,213 | py | Python | yuki/avito/src/oof_elasticnet.py | RandLive/Avito-Demand-Prediction-Challenge | eb2955c6cb799907071d8bbf7b31b73b163c604f | [
"MIT"
] | null | null | null | yuki/avito/src/oof_elasticnet.py | RandLive/Avito-Demand-Prediction-Challenge | eb2955c6cb799907071d8bbf7b31b73b163c604f | [
"MIT"
] | null | null | null | yuki/avito/src/oof_elasticnet.py | RandLive/Avito-Demand-Prediction-Challenge | eb2955c6cb799907071d8bbf7b31b73b163c604f | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgb
import pyarrow as pa
import pyarrow.parquet as pq
from sklearn.model_selection import train_test_split
import argparse
from scipy.special import erfinv
from scipy.sparse import csr_matrix
from sklearn.preprocessing import StandardScaler
from utils import *
X, X_test, _ = read_train_test_data()
# drop label encoding
lbl_cols = [col for col in X.columns if "_labelencod" in col]
X.drop(lbl_cols, axis=1, inplace=True)
X_test.drop(lbl_cols, axis=1, inplace=True)
# nan index
train_nan_idx = csr_matrix((np.isnan(X)).astype(int))
test_nan_idx = csr_matrix((np.isnan(X_test)).astype(int))
X = X.fillna(X.median())#X.fillna(X.median()) # X.fillna(0)
X = X.replace(np.inf, 99999.999)
X = X.replace(-np.inf, -99999.999)
X = X.values
X_test = X_test.fillna(X_test.median())#X_test.fillna(X_test.median())
X_test = X_test.replace(np.inf, 9999.999)
X_test = X_test.replace(-np.inf, -9999.999)
X_test = X_test.values
train_size = X.shape[0]
print("scale data")
scaler = StandardScaler()#StandardScaler() # GaussRankScaler()
X_all = scaler.fit_transform(np.r_[X, X_test])
del X, X_test; gc.collect()
# X = pd.DataFrame(X_all[:train_size,:])
X = pd.DataFrame(X_all[:train_size,:] * np.array((train_nan_idx.todense()==0).astype(int)))
del train_nan_idx
print("Done scaling train data...")
# X_test = pd.DataFrame(X_all[train_size:,:])
X_test = pd.DataFrame(X_all[train_size:,:] * np.array((test_nan_idx.todense()==0).astype(int)))
print("Done scaling test data...")
# del X_all; gc.collect()
del X_all, test_nan_idx;gc.collect()
X_test = X_test.values
X = X.values
y = pd.read_csv("../input/train.csv")["deal_probability"].values
oof_sgd(X, X_test, y, "stacking_elasticnet")
### no oof features
X, X_test, _ = read_train_test_data_all()
# drop label encoding
lbl_cols = [col for col in X.columns if "_labelencod" in col or "oof_" in col]
X.drop(lbl_cols, axis=1, inplace=True)
X_test.drop(lbl_cols, axis=1, inplace=True)
# nan index
train_nan_idx = csr_matrix((np.isnan(X)).astype(int))
test_nan_idx = csr_matrix((np.isnan(X_test)).astype(int))
X = X.fillna(X.median())#X.fillna(X.median()) # X.fillna(0)
X = X.replace(np.inf, 99999.999)
X = X.replace(-np.inf, -99999.999)
X = X.values
X_test = X_test.fillna(X_test.median())#X_test.fillna(X_test.median())
X_test = X_test.replace(np.inf, 9999.999)
X_test = X_test.replace(-np.inf, -9999.999)
X_test = X_test.values
train_size = X.shape[0]
print("scale data")
scaler = StandardScaler()#StandardScaler() # GaussRankScaler()
X_all = scaler.fit_transform(np.r_[X, X_test])
del X, X_test; gc.collect()
# X = pd.DataFrame(X_all[:train_size,:])
X = pd.DataFrame(X_all[:train_size,:] * np.array((train_nan_idx.todense()==0).astype(int)))
del train_nan_idx
print("Done scaling train data...")
# X_test = pd.DataFrame(X_all[train_size:,:])
X_test = pd.DataFrame(X_all[train_size:,:] * np.array((test_nan_idx.todense()==0).astype(int)))
print("Done scaling test data...")
# del X_all; gc.collect()
del X_all, test_nan_idx;gc.collect()
X_test = X_test.values
X = X.values
y = pd.read_csv("../input/train.csv")["deal_probability"].values
oof_sgd(X, X_test, y, "stacking_elasticnet_nooof")
| 33.123711 | 95 | 0.729225 | import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgb
import pyarrow as pa
import pyarrow.parquet as pq
from sklearn.model_selection import train_test_split
import argparse
from scipy.special import erfinv
from scipy.sparse import csr_matrix
from sklearn.preprocessing import StandardScaler
from utils import *
X, X_test, _ = read_train_test_data()
# drop label encoding
lbl_cols = [col for col in X.columns if "_labelencod" in col]
X.drop(lbl_cols, axis=1, inplace=True)
X_test.drop(lbl_cols, axis=1, inplace=True)
# nan index
train_nan_idx = csr_matrix((np.isnan(X)).astype(int))
test_nan_idx = csr_matrix((np.isnan(X_test)).astype(int))
X = X.fillna(X.median())#X.fillna(X.median()) # X.fillna(0)
X = X.replace(np.inf, 99999.999)
X = X.replace(-np.inf, -99999.999)
X = X.values
X_test = X_test.fillna(X_test.median())#X_test.fillna(X_test.median())
X_test = X_test.replace(np.inf, 9999.999)
X_test = X_test.replace(-np.inf, -9999.999)
X_test = X_test.values
train_size = X.shape[0]
print("scale data")
scaler = StandardScaler()#StandardScaler() # GaussRankScaler()
X_all = scaler.fit_transform(np.r_[X, X_test])
del X, X_test; gc.collect()
# X = pd.DataFrame(X_all[:train_size,:])
X = pd.DataFrame(X_all[:train_size,:] * np.array((train_nan_idx.todense()==0).astype(int)))
del train_nan_idx
print("Done scaling train data...")
# X_test = pd.DataFrame(X_all[train_size:,:])
X_test = pd.DataFrame(X_all[train_size:,:] * np.array((test_nan_idx.todense()==0).astype(int)))
print("Done scaling test data...")
# del X_all; gc.collect()
del X_all, test_nan_idx;gc.collect()
X_test = X_test.values
X = X.values
y = pd.read_csv("../input/train.csv")["deal_probability"].values
oof_sgd(X, X_test, y, "stacking_elasticnet")
### no oof features
X, X_test, _ = read_train_test_data_all()
# drop label encoding
lbl_cols = [col for col in X.columns if "_labelencod" in col or "oof_" in col]
X.drop(lbl_cols, axis=1, inplace=True)
X_test.drop(lbl_cols, axis=1, inplace=True)
# nan index
train_nan_idx = csr_matrix((np.isnan(X)).astype(int))
test_nan_idx = csr_matrix((np.isnan(X_test)).astype(int))
X = X.fillna(X.median())#X.fillna(X.median()) # X.fillna(0)
X = X.replace(np.inf, 99999.999)
X = X.replace(-np.inf, -99999.999)
X = X.values
X_test = X_test.fillna(X_test.median())#X_test.fillna(X_test.median())
X_test = X_test.replace(np.inf, 9999.999)
X_test = X_test.replace(-np.inf, -9999.999)
X_test = X_test.values
train_size = X.shape[0]
print("scale data")
scaler = StandardScaler()#StandardScaler() # GaussRankScaler()
X_all = scaler.fit_transform(np.r_[X, X_test])
del X, X_test; gc.collect()
# X = pd.DataFrame(X_all[:train_size,:])
X = pd.DataFrame(X_all[:train_size,:] * np.array((train_nan_idx.todense()==0).astype(int)))
del train_nan_idx
print("Done scaling train data...")
# X_test = pd.DataFrame(X_all[train_size:,:])
X_test = pd.DataFrame(X_all[train_size:,:] * np.array((test_nan_idx.todense()==0).astype(int)))
print("Done scaling test data...")
# del X_all; gc.collect()
del X_all, test_nan_idx;gc.collect()
X_test = X_test.values
X = X.values
y = pd.read_csv("../input/train.csv")["deal_probability"].values
oof_sgd(X, X_test, y, "stacking_elasticnet_nooof")
| 0 | 0 | 0 |
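# Minimal sketch of the fill -> scale -> re-zero pattern applied twice above:
# record where the NaNs were, median-fill so StandardScaler can fit, then
# multiply by the inverted NaN mask so originally-missing cells end up exactly 0.
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.preprocessing import StandardScaler

X_demo = np.array([[1.0, np.nan], [3.0, 4.0], [5.0, 6.0]])
nan_idx = csr_matrix(np.isnan(X_demo).astype(int))
X_filled = np.where(np.isnan(X_demo), np.nanmedian(X_demo, axis=0), X_demo)
X_scaled = StandardScaler().fit_transform(X_filled)
X_final = X_scaled * np.array((nan_idx.todense() == 0).astype(int))
print(X_final)  # the (0, 1) cell is forced back to 0.0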
aac94c469cb1b80c80c88bda8fea8c141c4875cb | 42 | py | Python | notebooks/_solutions/case-argo-sea-floats6.py | jorisvandenbossche/DS-python-geospatial | 893a12edc5c203a75815f6dcb5f1e18c577c8cd5 | [
"BSD-3-Clause"
] | 58 | 2020-10-09T10:10:59.000Z | 2022-03-07T14:58:07.000Z | notebooks/_solutions/13-xarray6.py | amitkb3/DS-python-geospatial | 5f156ebff67e06d59b2a7ef446d1fed746ce0650 | [
"BSD-3-Clause"
] | 24 | 2020-09-30T19:57:14.000Z | 2021-10-05T07:21:09.000Z | notebooks/_solutions/13-xarray6.py | amitkb3/DS-python-geospatial | 5f156ebff67e06d59b2a7ef446d1fed746ce0650 | [
"BSD-3-Clause"
] | 19 | 2020-10-05T09:32:18.000Z | 2022-03-20T00:09:14.000Z | argo["salinity"].sel(level=10).plot.line() | 42 | 42 | 0.714286 | argo["salinity"].sel(level=10).plot.line() | 0 | 0 | 0 |
85422467599040ba9797e0b437b58fb79132e2a8 | 1,243 | py | Python | answers/question4.py | Yamahitsuji/Gasyori100knock | 62b3f776124c25dfb36e45a647d573b36b45d2b1 | [
"MIT"
] | null | null | null | answers/question4.py | Yamahitsuji/Gasyori100knock | 62b3f776124c25dfb36e45a647d573b36b45d2b1 | [
"MIT"
] | null | null | null | answers/question4.py | Yamahitsuji/Gasyori100knock | 62b3f776124c25dfb36e45a647d573b36b45d2b1 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from skimage import io
import cv2
# Reference article: https://qiita.com/haru1843/items/00de955790d3a22a217b
img = io.imread("./dataset/images/imori_256x256.png")
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
th = getThreshold(gray_img)
binary_img = gray2binary(gray_img, th)
plt.figure(figsize=(12, 4))
plt.subplot(1, 3, 1)
plt.title("input")
plt.imshow(img)
plt.subplot(1, 3, 2)
plt.title("gray")
plt.imshow(gray_img, cmap='gray')
plt.subplot(1, 3, 3)
plt.title("binary")
plt.imshow(binary_img, cmap="gray")
plt.show()
| 23.45283 | 60 | 0.612228 | import numpy as np
import matplotlib.pyplot as plt
from skimage import io
import cv2
# Reference article: https://qiita.com/haru1843/items/00de955790d3a22a217b
def getThreshold(img):
max_variance = 0
max_th = 0
for th in range(1, 254):
        # pixel arrays for the two classes produced by thresholding at th
c0 = img[img <= th]
c1 = img[img > th]
        # weights of the two classes (their pixel fractions)
r0 = len(c0) / (len(c0) + len(c1))
r1 = len(c1) / (len(c0) + len(c1))
if len(c0) == 0 or len(c1) == 0:
continue
c0_avg = c0.mean()
c1_avg = c1.mean()
variance = r0 * r1 * ((c0_avg - c1_avg) ** 2)
if variance > max_variance:
max_variance = variance
max_th = th
return max_th
def gray2binary(gray_img, th):
img = np.minimum(gray_img // th, 1) * 255
return img.astype(np.uint8)
img = io.imread("./dataset/images/imori_256x256.png")
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
th = getThreshold(gray_img)
binary_img = gray2binary(gray_img, th)
plt.figure(figsize=(12, 4))
plt.subplot(1, 3, 1)
plt.title("input")
plt.imshow(img)
plt.subplot(1, 3, 2)
plt.title("gray")
plt.imshow(gray_img, cmap='gray')
plt.subplot(1, 3, 3)
plt.title("binary")
plt.imshow(binary_img, cmap="gray")
plt.show()
| 673 | 0 | 45 |
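# Cross-check sketch: OpenCV ships Otsu's method, so the hand-rolled
# getThreshold() above can be sanity-checked against cv2.threshold with
# THRESH_OTSU (the stand-in image, and any off-by-one difference in threshold
# convention, are assumptions).
import cv2
import numpy as np

gray = np.random.randint(0, 256, (64, 64), dtype=np.uint8)  # stand-in image
th_cv, binary_cv = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print(th_cv)  # threshold OpenCV's Otsu picked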
fc8be575967873f995eef9243b3dd551953be224 | 281 | py | Python | lnbits/extensions/lnurlpayout/models.py | pseudozach/lnbits-legend | b90eb0a3ba403d60cc151af12ffabb74fd529db0 | [
"MIT"
] | 76 | 2021-11-02T22:19:59.000Z | 2022-03-30T18:01:33.000Z | lnbits/extensions/lnurlpayout/models.py | pseudozach/lnbits-legend | b90eb0a3ba403d60cc151af12ffabb74fd529db0 | [
"MIT"
] | 100 | 2021-11-04T16:33:28.000Z | 2022-03-30T15:03:52.000Z | lnbits/extensions/lnurlpayout/models.py | pseudozach/lnbits-legend | b90eb0a3ba403d60cc151af12ffabb74fd529db0 | [
"MIT"
] | 57 | 2021-11-08T06:43:59.000Z | 2022-03-31T08:53:16.000Z | from sqlite3 import Row
from pydantic import BaseModel
| 14.789474 | 39 | 0.69395 | from sqlite3 import Row
from pydantic import BaseModel
class CreateLnurlPayoutData(BaseModel):
title: str
lnurlpay: str
threshold: int
class lnurlpayout(BaseModel):
id: str
title: str
wallet: str
admin_key: str
lnurlpay: str
threshold: int
| 0 | 177 | 46 |
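# Usage sketch (field values are illustrative): pydantic validates and coerces
# the typed fields when a model is constructed, e.g. threshold "10000" -> 10000
# under pydantic v1 coercion rules.
demo = CreateLnurlPayoutData(title="Forward earnings",
                             lnurlpay="LNURL1DP68GURN...",
                             threshold="10000")
print(demo.threshold, type(demo.threshold))  # 10000 <class 'int'>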
3412f3dd8b367573c7003283d49c0fb980a7c41b | 77 | py | Python | loss/__init__.py | TheCodez/pytorch-box2pix | 6c498384f56b303b3c3783bf0eec895159729cd3 | [
"MIT"
] | 22 | 2019-03-14T05:54:56.000Z | 2021-09-17T00:27:30.000Z | loss/__init__.py | geekfeiw/pytorch-box2pix | 6c498384f56b303b3c3783bf0eec895159729cd3 | [
"MIT"
] | 2 | 2019-06-19T18:10:08.000Z | 2020-05-20T12:57:41.000Z | loss/__init__.py | geekfeiw/pytorch-box2pix | 6c498384f56b303b3c3783bf0eec895159729cd3 | [
"MIT"
] | 8 | 2019-03-18T06:16:23.000Z | 2020-08-16T16:54:33.000Z | from .boxloss import *
from .focalloss import *
from .multitaskloss import *
| 19.25 | 28 | 0.766234 | from .boxloss import *
from .focalloss import *
from .multitaskloss import *
| 0 | 0 | 0 |
631c1c55253f5587db7e1f0120050b0460363810 | 1,806 | py | Python | training/linear_regression.py | vanshc98/CE4032 | c57c0f84254484c4fcc91ae817c9d650d26ec64b | [
"CNRI-Python"
] | null | null | null | training/linear_regression.py | vanshc98/CE4032 | c57c0f84254484c4fcc91ae817c9d650d26ec64b | [
"CNRI-Python"
] | 7 | 2019-11-14T15:06:11.000Z | 2022-02-10T01:27:41.000Z | training/linear_regression.py | vanshc98/CE4032 | c57c0f84254484c4fcc91ae817c9d650d26ec64b | [
"CNRI-Python"
] | null | null | null | import os
import time
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
import sys
sys.path.append('..')
from utils import calHarDist
DATA_DIR = '../datasets'
for filename in ['modified_train.csv']:
print('reading training data from %s ...' % filename)
df = pd.read_csv(os.path.join(DATA_DIR, filename))
c = df.corr().abs()
print(c)
y = df['DURATION']
# df['displacement'] = df.apply(lambda row : calHarDist(row['ORIGIN_LAT'], row['ORIGIN_LNG'], row['CUT_OFF_LAT'], row['CUT_OFF_LNG']), axis = 1)
df.drop(['TRIP_ID','TIMESTAMP','DATE', 'END_TIME', 'ORIGIN_CALL', 'ORIGIN_STAND', 'DURATION'], axis=1, inplace=True)
# values = {'ORIGIN_CALL': -1, 'ORIGIN_STAND': -1}
# df = df.fillna(value=values)
X = np.array(df, dtype=np.float)
# th1 = np.percentile(df['displacement'], [99.9])[0]
# relevant_rows = (df['displacement'] < th1)
# df.drop(['displacement'], axis=1, inplace=True)
# X = df.loc[relevant_rows]
# y = y.loc[relevant_rows]
t0 = time.time()
reg = LinearRegression().fit(X, y)
print(reg.score(X, y))
print('Done in %.1f sec.' % (time.time() - t0))
df = pd.read_csv(os.path.join(DATA_DIR, filename.replace('train', 'test')))
ids = df['TRIP_ID']
df.drop(['TRIP_ID','TIMESTAMP','DATE', 'END_TIME', 'ORIGIN_CALL', 'ORIGIN_STAND'], axis=1, inplace=True)
# values = {'ORIGIN_CALL': -1, 'ORIGIN_STAND': -1}
# df = df.fillna(value=values)
X_tst = np.array(df, dtype=np.float)
y_pred = reg.predict(X_tst)
submission = pd.DataFrame(ids, columns=['TRIP_ID'])
submission['TRAVEL_TIME'] = y_pred
submission.to_csv('../datasets/my_submission.csv', index=False)
| 35.411765 | 148 | 0.637874 | import os
import time
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
import sys
sys.path.append('..')
from utils import calHarDist
DATA_DIR = '../datasets'
for filename in ['modified_train.csv']:
print('reading training data from %s ...' % filename)
df = pd.read_csv(os.path.join(DATA_DIR, filename))
c = df.corr().abs()
print(c)
y = df['DURATION']
# df['displacement'] = df.apply(lambda row : calHarDist(row['ORIGIN_LAT'], row['ORIGIN_LNG'], row['CUT_OFF_LAT'], row['CUT_OFF_LNG']), axis = 1)
df.drop(['TRIP_ID','TIMESTAMP','DATE', 'END_TIME', 'ORIGIN_CALL', 'ORIGIN_STAND', 'DURATION'], axis=1, inplace=True)
# values = {'ORIGIN_CALL': -1, 'ORIGIN_STAND': -1}
# df = df.fillna(value=values)
X = np.array(df, dtype=np.float)
# th1 = np.percentile(df['displacement'], [99.9])[0]
# relevant_rows = (df['displacement'] < th1)
# df.drop(['displacement'], axis=1, inplace=True)
# X = df.loc[relevant_rows]
# y = y.loc[relevant_rows]
t0 = time.time()
reg = LinearRegression().fit(X, y)
print(reg.score(X, y))
print('Done in %.1f sec.' % (time.time() - t0))
df = pd.read_csv(os.path.join(DATA_DIR, filename.replace('train', 'test')))
ids = df['TRIP_ID']
df.drop(['TRIP_ID','TIMESTAMP','DATE', 'END_TIME', 'ORIGIN_CALL', 'ORIGIN_STAND'], axis=1, inplace=True)
# values = {'ORIGIN_CALL': -1, 'ORIGIN_STAND': -1}
# df = df.fillna(value=values)
X_tst = np.array(df, dtype=np.float)
y_pred = reg.predict(X_tst)
submission = pd.DataFrame(ids, columns=['TRIP_ID'])
submission['TRAVEL_TIME'] = y_pred
submission.to_csv('../datasets/my_submission.csv', index=False)
| 0 | 0 | 0 |
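# Note: cross_val_score is imported above but never used. A minimal sketch with
# synthetic data (the scoring name assumes scikit-learn >= 0.22):
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score

X_demo = np.random.rand(100, 3)
y_demo = X_demo @ np.array([1.0, 2.0, 3.0]) + 0.1 * np.random.randn(100)
scores = cross_val_score(LinearRegression(), X_demo, y_demo, cv=5,
                         scoring="neg_root_mean_squared_error")
print(-scores.mean())  # average RMSE across the 5 folds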
5563251a885ab49a498ca1cf6b5ef95de6493110 | 1,213 | py | Python | Rec_Server/personalize_predict.py | mishidemudong/deep_recommendation | 6100cfd4037b45b167227ed76acbc536500bc119 | [
"Apache-2.0"
] | null | null | null | Rec_Server/personalize_predict.py | mishidemudong/deep_recommendation | 6100cfd4037b45b167227ed76acbc536500bc119 | [
"Apache-2.0"
] | null | null | null | Rec_Server/personalize_predict.py | mishidemudong/deep_recommendation | 6100cfd4037b45b167227ed76acbc536500bc119 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 15 14:01:24 2021
@author: liang
"""
'''
AWS personalize model
'''
import boto3
import json
import os
class PersonalizePredictHandler():
"""
""" | 26.955556 | 131 | 0.600989 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 15 14:01:24 2021
@author: liang
"""
'''
AWS personalize model
'''
import boto3
import json
import os
class PersonalizePredictHandler():
"""
"""
def __init__(self, config_path):
if os.path.exists(config_path):
self.config = json.load(open(config_path, 'r'))
self.campaignArn = self.config['campaignArn'] #'arn:aws:personalize:us-east-2:005527976057:campaign/movielens_campaign'
self.personalizeRt = boto3.client('personalize-runtime')
else:
print('Build Personalize Handler Faild!!')
def predict(self, user_list, item_list):
res = []
for user_id in user_list:
result = {}
result['user_id'] = user_id
response = self.personalizeRt.get_personalized_ranking(
campaignArn=self.campaignArn,
userId=user_id,
inputList=item_list,
)
result['item_score_list'] = [(it_id, index) for index, it_id in enumerate(response['personalizedRanking'])]
result['model_type'] = 'personalize'
res.append(result)
return res | 935 | 0 | 54 |
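# Hypothetical usage sketch: the config path, user ID, and item IDs below are
# made up, and a real call additionally needs AWS credentials plus a deployed
# Personalize campaign, so this is left commented out.
# handler = PersonalizePredictHandler("config/personalize.json")
# results = handler.predict(user_list=["user-1"], item_list=["item-1", "item-2"])
# results[0]["item_score_list"]  # [(item_id, rank), ...] as built in predict()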
f6bcca11ddf785c31107de65837bbb705eae7a8a | 1,461 | py | Python | security_monkey/alerters/custom_alerter.py | boladmin/security_monkey | c28592ffd518fa399527d26262683fc860c30eef | [
"Apache-2.0"
] | 4,258 | 2015-01-04T22:06:10.000Z | 2022-03-31T23:40:27.000Z | security_monkey/alerters/custom_alerter.py | boladmin/security_monkey | c28592ffd518fa399527d26262683fc860c30eef | [
"Apache-2.0"
] | 1,013 | 2015-01-12T02:31:03.000Z | 2021-09-16T19:09:03.000Z | security_monkey/alerters/custom_alerter.py | boladmin/security_monkey | c28592ffd518fa399527d26262683fc860c30eef | [
"Apache-2.0"
] | 965 | 2015-01-11T21:06:07.000Z | 2022-03-17T16:53:57.000Z | # Copyright 2016 Bridgewater Associates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.alerters.custom_alerter
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Bridgewater OSS <opensource@bwater.com>
"""
from security_monkey import app
alerter_registry = []
| 31.76087 | 105 | 0.715948 | # Copyright 2016 Bridgewater Associates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.alerters.custom_alerter
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Bridgewater OSS <opensource@bwater.com>
"""
from security_monkey import app
alerter_registry = []
class AlerterType(type):
def __init__(cls, name, bases, attrs):
if getattr(cls, "report_auditor_changes", None) and getattr(cls, "report_watcher_changes", None):
app.logger.debug("Registering alerter %s", cls.__name__)
alerter_registry.append(cls)
def report_auditor_changes(auditor):
for alerter_class in alerter_registry:
alerter = alerter_class()
alerter.report_auditor_changes(auditor)
def report_watcher_changes(watcher):
for alerter_class in alerter_registry:
alerter = alerter_class()
alerter.report_watcher_changes(watcher)
| 513 | 3 | 96 |
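# Sketch (assumption: Python 2-era metaclass syntax, matching this module's
# vintage): a class registers itself with alerter_registry simply by using
# AlerterType as its metaclass and defining both report_* hooks.
class DemoAlerter(object):
    __metaclass__ = AlerterType  # Python 3 would use: class DemoAlerter(metaclass=AlerterType)

    def report_auditor_changes(self, auditor):
        print("auditor issues: %s" % auditor)

    def report_watcher_changes(self, watcher):
        print("watcher changes: %s" % watcher)

# Once the class body executes, AlerterType.__init__ appends DemoAlerter to
# alerter_registry, so the module-level report_* helpers will fan out to it.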
3b387205d9aa446a4fbbf921b448e8f29def0a5d | 68 | py | Python | yhirose_cloudmesh_ex1.py | futuresystems/465-yujinhirose | 75e7400282af2a34141480c34501c4aecd3a8ac6 | [
"Apache-2.0"
] | null | null | null | yhirose_cloudmesh_ex1.py | futuresystems/465-yujinhirose | 75e7400282af2a34141480c34501c4aecd3a8ac6 | [
"Apache-2.0"
] | null | null | null | yhirose_cloudmesh_ex1.py | futuresystems/465-yujinhirose | 75e7400282af2a34141480c34501c4aecd3a8ac6 | [
"Apache-2.0"
] | null | null | null | import cloudmesh
cloudmesh.shell("help")
print cloudmesh.version()
| 13.6 | 25 | 0.794118 | import cloudmesh
cloudmesh.shell("help")
print cloudmesh.version()
| 0 | 0 | 0 |
123f070341e37fb26e8e67b1fc81ff95b2f95455 | 4,401 | py | Python | src/embeddings.py | radhe2205/summar | 2e2e63efd06c14acf275faf49a1eb69648a761e4 | [
"Apache-2.0"
] | null | null | null | src/embeddings.py | radhe2205/summar | 2e2e63efd06c14acf275faf49a1eb69648a761e4 | [
"Apache-2.0"
] | null | null | null | src/embeddings.py | radhe2205/summar | 2e2e63efd06c14acf275faf49a1eb69648a761e4 | [
"Apache-2.0"
] | null | null | null | import json
import numpy as np
import torch
from torch import nn
| 41.914286 | 122 | 0.662349 | import json
import numpy as np
import torch
from torch import nn
class GloveLimitedEmbedding(nn.Module):
def __init__(self, total_embeddings, embedding_w, embedding_dim=300): # total_embedding includes start and end marker.
super(GloveLimitedEmbedding, self).__init__()
if embedding_w is not None:
self.embeddings = nn.Embedding(*embedding_w.shape, padding_idx=-1).requires_grad_(False)
with torch.no_grad():
self.embeddings.weight.data = embedding_w
elif total_embeddings is not None:
self.embeddings = nn.Embedding(total_embeddings - 2, embedding_dim, padding_idx=-1).requires_grad_(False)
else:
raise Exception("Bad Initialisation")
self.beg_end_emb = nn.Embedding.from_pretrained(torch.randn(2, embedding_dim)).requires_grad_(True)
self.start_idx = self.embeddings.weight.shape[0] # After padding
self.end_idx = self.start_idx + 1 # after start
def get_embeddings(self, idxes):
# idxes[idxes == -1] = self.embeddings.padding_idx
idxes = idxes.clone()
start_idxes = idxes == self.start_idx
end_idxes = idxes == self.end_idx
idxes[start_idxes] = self.embeddings.padding_idx
idxes[end_idxes] = self.embeddings.padding_idx
embedding_vec = self.embeddings(idxes)
embedding_vec[start_idxes] = self.beg_end_emb(torch.tensor(0).to(idxes.device))
embedding_vec[end_idxes] = self.beg_end_emb(torch.tensor(1).to(idxes.device))
return embedding_vec
class GloveEmbedding(nn.Module):
def __init__(self, embedding_dim, embedding_path = None, reload = False):
super(GloveEmbedding, self).__init__()
self.total_words = 400003
self.embedding_dim = embedding_dim
self.embeddings = nn.Embedding(400004, embedding_dim, padding_idx=-1).requires_grad_(False)
self.beg_end_marker = nn.Embedding(2, embedding_dim).requires_grad_(True)
self.start_idx = 400001
self.end_idx = 400002
# self.start_emb = nn.Parameter(torch.randn(embedding_dim))
# self.end_emb = nn.Parameter(torch.randn(embedding_dim))
# Whether the embedding be loaded from file OR model.
if reload:
if embedding_path is not None:
embedding_vec, wordtoidx = load_embeddings(embedding_path, embedding_dim)
self.wordtoidx = wordtoidx
with torch.no_grad():
self.embeddings.weight.data = embedding_vec
def get_embeddings(self, idxes):
idxes[idxes == -1] = self.embeddings.padding_idx
embedding_vec = self.embeddings(idxes)
embedding_vec[idxes == self.start_idx] = self.beg_end_marker(torch.tensor(0).to(idxes.device))
embedding_vec[idxes == self.end_idx] = self.beg_end_marker(torch.tensor(1).to(idxes.device))
return embedding_vec
def load_limited_embeddings(wordtoidx, embedding_path, embedding_dim = 300):
rem = 2 if "<start>" in wordtoidx else 0
embedding_vec = torch.zeros((len(wordtoidx.keys()) - rem, embedding_dim)) # Padding index automatically assigned 0
with open(embedding_path, "r", encoding="utf-8") as f:
for line in f:
vals = line.split()
word = vals[0]
if word not in wordtoidx or word in ("<start>", "<end>"):
continue
embedding_vec[wordtoidx[word]] = torch.from_numpy(np.asarray(vals[1:], "float32"))
print("Finished loading embedding.")
return embedding_vec
def load_embeddings(embedding_path, embedding_dim = 300):
embedding_vec = torch.zeros((400004, embedding_dim))
wordtoidx = {}
word_count = 0
with open(embedding_path, "r", encoding="utf-8") as f:
for line in f:
vals = line.split()
word = vals[0]
embedding_vec[word_count] = torch.from_numpy(np.asarray(vals[1:], "float32"))
wordtoidx[word] = word_count
word_count += 1
print("Finished loading embeddings")
wordtoidx["<start>"] = 400001
wordtoidx["<end>"] = 400002
return embedding_vec, wordtoidx
def save_vocab(wordtoidx, vocab_path):
with open(vocab_path, "w") as f:
f.write(json.dumps(wordtoidx))
def load_vocab(vocab_path):
with open(vocab_path, "r") as f:
vocab = f.read()
vocab = json.loads(vocab)
return vocab
| 4,062 | 29 | 244 |
c44f47bb27e67ba7814900202569dea6d29752f4 | 25,790 | py | Python | pysnmp/Juniper-IKE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/Juniper-IKE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/Juniper-IKE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module Juniper-IKE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-IKE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:52:12 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
juniMibs, = mibBuilder.importSymbols("Juniper-MIBs", "juniMibs")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
ObjectIdentity, Counter32, Bits, Integer32, ModuleIdentity, NotificationType, MibIdentifier, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, iso, Counter64, Unsigned32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Counter32", "Bits", "Integer32", "ModuleIdentity", "NotificationType", "MibIdentifier", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "iso", "Counter64", "Unsigned32", "TimeTicks")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
juniIkeMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71))
juniIkeMIB.setRevisions(('2005-11-22 16:15', '2004-01-23 15:12', '2004-04-06 22:26',))
if mibBuilder.loadTexts: juniIkeMIB.setLastUpdated('200404062226Z')
if mibBuilder.loadTexts: juniIkeMIB.setOrganization('Juniper Networks, Inc.')
juniIkeObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1))
juniIke = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1))
juniIkePolicyRuleTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1), )
if mibBuilder.loadTexts: juniIkePolicyRuleTable.setStatus('obsolete')
juniIkePolicyRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkePolicyRulePriority"))
if mibBuilder.loadTexts: juniIkePolicyRuleEntry.setStatus('obsolete')
juniIkePolicyRulePriority = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10000)))
if mibBuilder.loadTexts: juniIkePolicyRulePriority.setStatus('obsolete')
juniIkePolicyRuleAuthMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 2), JuniIkeAuthenticationMethod().clone('preSharedKeys')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleAuthMethod.setStatus('obsolete')
juniIkePolicyRuleEncryptMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 3), JuniIkeEncryptionMethod().clone('tripleDes')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleEncryptMethod.setStatus('obsolete')
juniIkePolicyRulePfsGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 4), JuniIkeGroup().clone('group2')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRulePfsGroup.setStatus('obsolete')
juniIkePolicyRuleHashMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 5), JuniIkeHashMethod().clone('sha')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleHashMethod.setStatus('obsolete')
juniIkePolicyRuleLifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(60, 86400)).clone(28800)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleLifetime.setStatus('obsolete')
juniIkePolicyRuleNegotiationMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 7), JuniIkeNegotiationMode().clone('aggressive')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleNegotiationMode.setStatus('obsolete')
juniIkePolicyRuleRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleRowStatus.setStatus('obsolete')
juniIkePolicyRuleV2Table = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6), )
if mibBuilder.loadTexts: juniIkePolicyRuleV2Table.setStatus('current')
juniIkePolicyRuleV2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkePolicyRuleV2Priority"))
if mibBuilder.loadTexts: juniIkePolicyRuleV2Entry.setStatus('current')
juniIkePolicyRuleV2Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10000)))
if mibBuilder.loadTexts: juniIkePolicyRuleV2Priority.setStatus('current')
juniIkePolicyRuleV2AuthMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 2), JuniIkeAuthenticationMethod().clone('preSharedKeys')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2AuthMethod.setStatus('current')
juniIkePolicyRuleV2EncryptMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 3), JuniIkeEncryptionMethod().clone('tripleDes')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2EncryptMethod.setStatus('current')
juniIkePolicyRuleV2PfsGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 4), JuniIkeGroup().clone('group2')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2PfsGroup.setStatus('current')
juniIkePolicyRuleV2HashMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 5), JuniIkeHashMethod().clone('sha')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2HashMethod.setStatus('current')
juniIkePolicyRuleV2Lifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(60, 86400)).clone(28800)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2Lifetime.setStatus('current')
juniIkePolicyRuleV2NegotiationMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 7), JuniIkeNegotiationV2Mode().clone('aggressiveNotAllowed')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2NegotiationMode.setStatus('current')
juniIkePolicyRuleV2IpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 8), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2IpAddress.setStatus('current')
juniIkePolicyRuleV2RouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 9), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2RouterIndex.setStatus('current')
juniIkePolicyRuleV2RowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 10), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2RowStatus.setStatus('current')
juniIkeIpv4PresharedKeyTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2), )
if mibBuilder.loadTexts: juniIkeIpv4PresharedKeyTable.setStatus('current')
juniIkeIpv4PresharedKeyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkeIpv4PresharedRemoteIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeIpv4PresharedRouterIdx"))
if mibBuilder.loadTexts: juniIkeIpv4PresharedKeyEntry.setStatus('current')
juniIkeIpv4PresharedRemoteIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 1), IpAddress())
if mibBuilder.loadTexts: juniIkeIpv4PresharedRemoteIpAddr.setStatus('current')
juniIkeIpv4PresharedRouterIdx = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 2), Unsigned32())
if mibBuilder.loadTexts: juniIkeIpv4PresharedRouterIdx.setStatus('current')
juniIkeIpv4PresharedKeyStr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 200))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeIpv4PresharedKeyStr.setStatus('current')
juniIkeIpv4PresharedMaskedKeyStr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 300))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeIpv4PresharedMaskedKeyStr.setStatus('current')
juniIkeIpv4PresharedKeyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeIpv4PresharedKeyRowStatus.setStatus('current')
juniIkeFqdnPresharedKeyTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3), )
if mibBuilder.loadTexts: juniIkeFqdnPresharedKeyTable.setStatus('current')
juniIkeFqdnPresharedKeyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkeFqdnPresharedRemote"), (0, "Juniper-IKE-MIB", "juniIkeFqdnPresharedRouterIndex"))
if mibBuilder.loadTexts: juniIkeFqdnPresharedKeyEntry.setStatus('current')
juniIkeFqdnPresharedRemote = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80)))
if mibBuilder.loadTexts: juniIkeFqdnPresharedRemote.setStatus('current')
juniIkeFqdnPresharedRouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 2), Unsigned32())
if mibBuilder.loadTexts: juniIkeFqdnPresharedRouterIndex.setStatus('current')
juniIkeFqdnPresharedKeyStr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 200))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeFqdnPresharedKeyStr.setStatus('current')
juniIkeFqdnPresharedMaskedKeyStr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 300))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeFqdnPresharedMaskedKeyStr.setStatus('current')
juniIkeFqdnPresharedKeyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeFqdnPresharedKeyRowStatus.setStatus('current')
juniIkeSaTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4), )
if mibBuilder.loadTexts: juniIkeSaTable.setStatus('obsolete')
juniIkeSaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkeSaRemoteIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeSaLocalIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeSaRouterIndex"), (0, "Juniper-IKE-MIB", "juniIkeSaDirection"))
if mibBuilder.loadTexts: juniIkeSaEntry.setStatus('obsolete')
juniIkeSaRemoteIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 1), IpAddress())
if mibBuilder.loadTexts: juniIkeSaRemoteIpAddr.setStatus('obsolete')
juniIkeSaLocalIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 2), IpAddress())
if mibBuilder.loadTexts: juniIkeSaLocalIpAddr.setStatus('obsolete')
juniIkeSaRouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 3), Unsigned32())
if mibBuilder.loadTexts: juniIkeSaRouterIndex.setStatus('obsolete')
juniIkeSaDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 4), JuniIpsecPhase1SaDirection())
if mibBuilder.loadTexts: juniIkeSaDirection.setStatus('obsolete')
juniIkeSaState = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 5), JuniIpsecPhase1SaState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniIkeSaState.setStatus('obsolete')
juniIkeSaRemaining = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: juniIkeSaRemaining.setStatus('obsolete')
juniIkeSa2Table = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5), )
if mibBuilder.loadTexts: juniIkeSa2Table.setStatus('current')
juniIkeSa2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkeSa2RemoteIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeSaRemotePort"), (0, "Juniper-IKE-MIB", "juniIkeSa2LocalIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeSaLocalPort"), (0, "Juniper-IKE-MIB", "juniIkeSa2RouterIndex"), (0, "Juniper-IKE-MIB", "juniIkeSa2Direction"), (0, "Juniper-IKE-MIB", "juniIkeSaNegotiationDone"))
if mibBuilder.loadTexts: juniIkeSa2Entry.setStatus('current')
juniIkeSa2RemoteIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 1), IpAddress())
if mibBuilder.loadTexts: juniIkeSa2RemoteIpAddr.setStatus('current')
juniIkeSaRemotePort = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 2), Unsigned32())
if mibBuilder.loadTexts: juniIkeSaRemotePort.setStatus('current')
juniIkeSa2LocalIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 3), IpAddress())
if mibBuilder.loadTexts: juniIkeSa2LocalIpAddr.setStatus('current')
juniIkeSaLocalPort = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 4), Unsigned32())
if mibBuilder.loadTexts: juniIkeSaLocalPort.setStatus('current')
juniIkeSa2RouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 5), Unsigned32())
if mibBuilder.loadTexts: juniIkeSa2RouterIndex.setStatus('current')
juniIkeSa2Direction = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("responder", 0), ("initiator", 1))))
if mibBuilder.loadTexts: juniIkeSa2Direction.setStatus('current')
juniIkeSaNegotiationDone = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("negotiationNotDone", 0), ("negotiationDone", 1))))
if mibBuilder.loadTexts: juniIkeSaNegotiationDone.setStatus('current')
juniIkeSa2State = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 8), JuniIpsecPhase1SaState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniIkeSa2State.setStatus('current')
juniIkeSa2Remaining = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: juniIkeSa2Remaining.setStatus('current')
juniRemoteCookie = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniRemoteCookie.setStatus('current')
juniLocalCookie = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniLocalCookie.setStatus('current')
juniIkeMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2))
juniIkeMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 1))
juniIkeMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2))
juniIkeCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 1, 1)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleGroup"), ("Juniper-IKE-MIB", "juniIkeIpv4PreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeFqdnPreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeSaGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeCompliance = juniIkeCompliance.setStatus('obsolete')
juniIkeCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 1, 2)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleGroup"), ("Juniper-IKE-MIB", "juniIkeIpv4PreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeFqdnPreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeSa2Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeCompliance2 = juniIkeCompliance2.setStatus('obsolete')
juniIkeCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 1, 3)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleV2Group"), ("Juniper-IKE-MIB", "juniIkeIpv4PreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeFqdnPreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeSa2Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeCompliance3 = juniIkeCompliance3.setStatus('current')
juniIkePolicyRuleGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 1)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleAuthMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleEncryptMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRulePfsGroup"), ("Juniper-IKE-MIB", "juniIkePolicyRuleHashMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleLifetime"), ("Juniper-IKE-MIB", "juniIkePolicyRuleNegotiationMode"), ("Juniper-IKE-MIB", "juniIkePolicyRuleRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkePolicyRuleGroup = juniIkePolicyRuleGroup.setStatus('obsolete')
juniIkeIpv4PreSharedKeyGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 2)).setObjects(("Juniper-IKE-MIB", "juniIkeIpv4PresharedKeyStr"), ("Juniper-IKE-MIB", "juniIkeIpv4PresharedMaskedKeyStr"), ("Juniper-IKE-MIB", "juniIkeIpv4PresharedKeyRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeIpv4PreSharedKeyGroup = juniIkeIpv4PreSharedKeyGroup.setStatus('current')
juniIkeFqdnPreSharedKeyGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 3)).setObjects(("Juniper-IKE-MIB", "juniIkeFqdnPresharedKeyStr"), ("Juniper-IKE-MIB", "juniIkeFqdnPresharedMaskedKeyStr"), ("Juniper-IKE-MIB", "juniIkeFqdnPresharedKeyRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeFqdnPreSharedKeyGroup = juniIkeFqdnPreSharedKeyGroup.setStatus('current')
juniIkeSaGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 4)).setObjects(("Juniper-IKE-MIB", "juniIkeSaState"), ("Juniper-IKE-MIB", "juniIkeSaRemaining"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeSaGroup = juniIkeSaGroup.setStatus('obsolete')
juniIkeSa2Group = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 5)).setObjects(("Juniper-IKE-MIB", "juniIkeSa2State"), ("Juniper-IKE-MIB", "juniIkeSa2Remaining"), ("Juniper-IKE-MIB", "juniRemoteCookie"), ("Juniper-IKE-MIB", "juniLocalCookie"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeSa2Group = juniIkeSa2Group.setStatus('current')
juniIkePolicyRuleV2Group = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 6)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleV2AuthMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2EncryptMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2PfsGroup"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2HashMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2Lifetime"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2NegotiationMode"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2IpAddress"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2RouterIndex"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2RowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkePolicyRuleV2Group = juniIkePolicyRuleV2Group.setStatus('current')
mibBuilder.exportSymbols("Juniper-IKE-MIB", juniIkeSaRemaining=juniIkeSaRemaining, juniIkePolicyRuleV2Priority=juniIkePolicyRuleV2Priority, juniIkePolicyRuleV2AuthMethod=juniIkePolicyRuleV2AuthMethod, juniIkeSaRemotePort=juniIkeSaRemotePort, juniIkeSa2Group=juniIkeSa2Group, juniIkeSaNegotiationDone=juniIkeSaNegotiationDone, juniIkeSa2RemoteIpAddr=juniIkeSa2RemoteIpAddr, juniIkeSa2Entry=juniIkeSa2Entry, juniIkeFqdnPresharedKeyEntry=juniIkeFqdnPresharedKeyEntry, juniIkeSaState=juniIkeSaState, juniIkeSa2RouterIndex=juniIkeSa2RouterIndex, juniIkeIpv4PresharedRemoteIpAddr=juniIkeIpv4PresharedRemoteIpAddr, juniIkeSaLocalPort=juniIkeSaLocalPort, juniIkePolicyRuleLifetime=juniIkePolicyRuleLifetime, juniIkeMIB=juniIkeMIB, juniIkeSaEntry=juniIkeSaEntry, juniIkePolicyRuleEntry=juniIkePolicyRuleEntry, juniIkePolicyRuleTable=juniIkePolicyRuleTable, juniIkeSa2State=juniIkeSa2State, juniIkePolicyRuleV2PfsGroup=juniIkePolicyRuleV2PfsGroup, juniIkeCompliance3=juniIkeCompliance3, juniIkePolicyRuleV2IpAddress=juniIkePolicyRuleV2IpAddress, juniIkePolicyRuleAuthMethod=juniIkePolicyRuleAuthMethod, juniIkeSaGroup=juniIkeSaGroup, JuniIkeGroup=JuniIkeGroup, juniIkeSaLocalIpAddr=juniIkeSaLocalIpAddr, juniIkeSaRemoteIpAddr=juniIkeSaRemoteIpAddr, juniIkePolicyRuleHashMethod=juniIkePolicyRuleHashMethod, juniIkeIpv4PreSharedKeyGroup=juniIkeIpv4PreSharedKeyGroup, juniIkePolicyRuleV2HashMethod=juniIkePolicyRuleV2HashMethod, juniIke=juniIke, juniIkePolicyRuleV2RouterIndex=juniIkePolicyRuleV2RouterIndex, juniIkeFqdnPresharedKeyRowStatus=juniIkeFqdnPresharedKeyRowStatus, juniIkeSa2Table=juniIkeSa2Table, juniIkeSa2Direction=juniIkeSa2Direction, JuniIkeEncryptionMethod=JuniIkeEncryptionMethod, juniIkeIpv4PresharedMaskedKeyStr=juniIkeIpv4PresharedMaskedKeyStr, juniIkeSaTable=juniIkeSaTable, JuniIkeHashMethod=JuniIkeHashMethod, JuniIpsecPhase1SaState=JuniIpsecPhase1SaState, juniIkePolicyRuleV2NegotiationMode=juniIkePolicyRuleV2NegotiationMode, juniIkeFqdnPresharedRemote=juniIkeFqdnPresharedRemote, juniIkeMIBCompliances=juniIkeMIBCompliances, juniIkeMIBConformance=juniIkeMIBConformance, juniIkeSa2Remaining=juniIkeSa2Remaining, juniLocalCookie=juniLocalCookie, juniIkeSaRouterIndex=juniIkeSaRouterIndex, juniIkeFqdnPreSharedKeyGroup=juniIkeFqdnPreSharedKeyGroup, juniIkeIpv4PresharedKeyRowStatus=juniIkeIpv4PresharedKeyRowStatus, juniIkePolicyRulePriority=juniIkePolicyRulePriority, JuniIkeNegotiationV2Mode=JuniIkeNegotiationV2Mode, juniIkeSaDirection=juniIkeSaDirection, juniIkePolicyRuleV2Lifetime=juniIkePolicyRuleV2Lifetime, juniIkePolicyRuleGroup=juniIkePolicyRuleGroup, juniIkePolicyRuleV2Entry=juniIkePolicyRuleV2Entry, juniIkeFqdnPresharedKeyStr=juniIkeFqdnPresharedKeyStr, juniIkeFqdnPresharedMaskedKeyStr=juniIkeFqdnPresharedMaskedKeyStr, juniIkeCompliance=juniIkeCompliance, JuniIpsecPhase1SaDirection=JuniIpsecPhase1SaDirection, juniIkeSa2LocalIpAddr=juniIkeSa2LocalIpAddr, juniIkePolicyRuleV2Group=juniIkePolicyRuleV2Group, juniIkeIpv4PresharedRouterIdx=juniIkeIpv4PresharedRouterIdx, juniIkePolicyRuleV2RowStatus=juniIkePolicyRuleV2RowStatus, juniRemoteCookie=juniRemoteCookie, PYSNMP_MODULE_ID=juniIkeMIB, juniIkePolicyRuleNegotiationMode=juniIkePolicyRuleNegotiationMode, juniIkePolicyRuleEncryptMethod=juniIkePolicyRuleEncryptMethod, juniIkeIpv4PresharedKeyEntry=juniIkeIpv4PresharedKeyEntry, JuniIkeAuthenticationMethod=JuniIkeAuthenticationMethod, juniIkePolicyRuleV2Table=juniIkePolicyRuleV2Table, juniIkeFqdnPresharedKeyTable=juniIkeFqdnPresharedKeyTable, juniIkeMIBGroups=juniIkeMIBGroups, 
juniIkeIpv4PresharedKeyStr=juniIkeIpv4PresharedKeyStr, juniIkeFqdnPresharedRouterIndex=juniIkeFqdnPresharedRouterIndex, juniIkeIpv4PresharedKeyTable=juniIkeIpv4PresharedKeyTable, juniIkeCompliance2=juniIkeCompliance2, juniIkePolicyRulePfsGroup=juniIkePolicyRulePfsGroup, juniIkeObjects=juniIkeObjects, JuniIkeNegotiationMode=JuniIkeNegotiationMode, juniIkePolicyRuleRowStatus=juniIkePolicyRuleRowStatus, juniIkePolicyRuleV2EncryptMethod=juniIkePolicyRuleV2EncryptMethod)
| 123.397129 | 3,980 | 0.762815 | #
# PySNMP MIB module Juniper-IKE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-IKE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:52:12 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
juniMibs, = mibBuilder.importSymbols("Juniper-MIBs", "juniMibs")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
ObjectIdentity, Counter32, Bits, Integer32, ModuleIdentity, NotificationType, MibIdentifier, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, iso, Counter64, Unsigned32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Counter32", "Bits", "Integer32", "ModuleIdentity", "NotificationType", "MibIdentifier", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "iso", "Counter64", "Unsigned32", "TimeTicks")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
juniIkeMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71))
juniIkeMIB.setRevisions(('2005-11-22 16:15', '2004-01-23 15:12', '2004-04-06 22:26',))
if mibBuilder.loadTexts: juniIkeMIB.setLastUpdated('200404062226Z')
if mibBuilder.loadTexts: juniIkeMIB.setOrganization('Juniper Networks, Inc.')
class JuniIkeAuthenticationMethod(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 3))
namedValues = NamedValues(("rsaSignature", 0), ("preSharedKeys", 3))
class JuniIkeEncryptionMethod(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("des", 0), ("tripleDes", 1))
class JuniIkeGroup(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 4))
namedValues = NamedValues(("group1", 0), ("group2", 1), ("group5", 4))
class JuniIkeHashMethod(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("md5", 0), ("sha", 1))
class JuniIkeNegotiationMode(TextualConvention, Integer32):
status = 'obsolete'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("aggressive", 0), ("main", 1))
class JuniIkeNegotiationV2Mode(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))
namedValues = NamedValues(("aggressiveAccepted", 0), ("aggressiveRequested", 1), ("aggressiveRequired", 2), ("aggressiveNotAllowed", 3))
class JuniIpsecPhase1SaState(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26))
namedValues = NamedValues(("reserved", 0), ("startSaNegotiationI", 1), ("startSaNegotiationR", 2), ("mmSaI", 3), ("mmSaR", 4), ("mmKeI", 5), ("mmKeR", 6), ("mmFinalI", 7), ("mmFinalR", 8), ("mmDoneI", 9), ("amSaI", 10), ("amSaR", 11), ("amFinalI", 12), ("amDoneR", 13), ("startQmI", 14), ("startQmR", 15), ("qmHashSaI", 16), ("qmHashSaR", 17), ("qmHashI", 18), ("qmDoneR", 19), ("startNgmI", 20), ("startNgmR", 21), ("ngmHashSaI", 22), ("ngmHashSaR", 23), ("ngmDoneI", 24), ("done", 25), ("deleted", 26))
class JuniIpsecPhase1SaDirection(TextualConvention, Integer32):
status = 'obsolete'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("initiator", 0), ("responder", 1))
juniIkeObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1))
juniIke = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1))
juniIkePolicyRuleTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1), )
if mibBuilder.loadTexts: juniIkePolicyRuleTable.setStatus('obsolete')
juniIkePolicyRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkePolicyRulePriority"))
if mibBuilder.loadTexts: juniIkePolicyRuleEntry.setStatus('obsolete')
juniIkePolicyRulePriority = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10000)))
if mibBuilder.loadTexts: juniIkePolicyRulePriority.setStatus('obsolete')
juniIkePolicyRuleAuthMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 2), JuniIkeAuthenticationMethod().clone('preSharedKeys')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleAuthMethod.setStatus('obsolete')
juniIkePolicyRuleEncryptMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 3), JuniIkeEncryptionMethod().clone('tripleDes')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleEncryptMethod.setStatus('obsolete')
juniIkePolicyRulePfsGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 4), JuniIkeGroup().clone('group2')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRulePfsGroup.setStatus('obsolete')
juniIkePolicyRuleHashMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 5), JuniIkeHashMethod().clone('sha')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleHashMethod.setStatus('obsolete')
juniIkePolicyRuleLifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(60, 86400)).clone(28800)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleLifetime.setStatus('obsolete')
juniIkePolicyRuleNegotiationMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 7), JuniIkeNegotiationMode().clone('aggressive')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleNegotiationMode.setStatus('obsolete')
juniIkePolicyRuleRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleRowStatus.setStatus('obsolete')
juniIkePolicyRuleV2Table = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6), )
if mibBuilder.loadTexts: juniIkePolicyRuleV2Table.setStatus('current')
juniIkePolicyRuleV2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkePolicyRuleV2Priority"))
if mibBuilder.loadTexts: juniIkePolicyRuleV2Entry.setStatus('current')
juniIkePolicyRuleV2Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10000)))
if mibBuilder.loadTexts: juniIkePolicyRuleV2Priority.setStatus('current')
juniIkePolicyRuleV2AuthMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 2), JuniIkeAuthenticationMethod().clone('preSharedKeys')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2AuthMethod.setStatus('current')
juniIkePolicyRuleV2EncryptMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 3), JuniIkeEncryptionMethod().clone('tripleDes')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2EncryptMethod.setStatus('current')
juniIkePolicyRuleV2PfsGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 4), JuniIkeGroup().clone('group2')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2PfsGroup.setStatus('current')
juniIkePolicyRuleV2HashMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 5), JuniIkeHashMethod().clone('sha')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2HashMethod.setStatus('current')
juniIkePolicyRuleV2Lifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(60, 86400)).clone(28800)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2Lifetime.setStatus('current')
juniIkePolicyRuleV2NegotiationMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 7), JuniIkeNegotiationV2Mode().clone('aggressiveNotAllowed')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2NegotiationMode.setStatus('current')
juniIkePolicyRuleV2IpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 8), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2IpAddress.setStatus('current')
juniIkePolicyRuleV2RouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 9), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2RouterIndex.setStatus('current')
juniIkePolicyRuleV2RowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 10), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2RowStatus.setStatus('current')
juniIkeIpv4PresharedKeyTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2), )
if mibBuilder.loadTexts: juniIkeIpv4PresharedKeyTable.setStatus('current')
juniIkeIpv4PresharedKeyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkeIpv4PresharedRemoteIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeIpv4PresharedRouterIdx"))
if mibBuilder.loadTexts: juniIkeIpv4PresharedKeyEntry.setStatus('current')
juniIkeIpv4PresharedRemoteIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 1), IpAddress())
if mibBuilder.loadTexts: juniIkeIpv4PresharedRemoteIpAddr.setStatus('current')
juniIkeIpv4PresharedRouterIdx = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 2), Unsigned32())
if mibBuilder.loadTexts: juniIkeIpv4PresharedRouterIdx.setStatus('current')
juniIkeIpv4PresharedKeyStr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 200))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeIpv4PresharedKeyStr.setStatus('current')
juniIkeIpv4PresharedMaskedKeyStr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 300))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeIpv4PresharedMaskedKeyStr.setStatus('current')
juniIkeIpv4PresharedKeyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeIpv4PresharedKeyRowStatus.setStatus('current')
juniIkeFqdnPresharedKeyTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3), )
if mibBuilder.loadTexts: juniIkeFqdnPresharedKeyTable.setStatus('current')
juniIkeFqdnPresharedKeyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkeFqdnPresharedRemote"), (0, "Juniper-IKE-MIB", "juniIkeFqdnPresharedRouterIndex"))
if mibBuilder.loadTexts: juniIkeFqdnPresharedKeyEntry.setStatus('current')
juniIkeFqdnPresharedRemote = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80)))
if mibBuilder.loadTexts: juniIkeFqdnPresharedRemote.setStatus('current')
juniIkeFqdnPresharedRouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 2), Unsigned32())
if mibBuilder.loadTexts: juniIkeFqdnPresharedRouterIndex.setStatus('current')
juniIkeFqdnPresharedKeyStr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 200))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeFqdnPresharedKeyStr.setStatus('current')
juniIkeFqdnPresharedMaskedKeyStr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 300))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeFqdnPresharedMaskedKeyStr.setStatus('current')
juniIkeFqdnPresharedKeyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeFqdnPresharedKeyRowStatus.setStatus('current')
juniIkeSaTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4), )
if mibBuilder.loadTexts: juniIkeSaTable.setStatus('obsolete')
juniIkeSaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkeSaRemoteIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeSaLocalIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeSaRouterIndex"), (0, "Juniper-IKE-MIB", "juniIkeSaDirection"))
if mibBuilder.loadTexts: juniIkeSaEntry.setStatus('obsolete')
juniIkeSaRemoteIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 1), IpAddress())
if mibBuilder.loadTexts: juniIkeSaRemoteIpAddr.setStatus('obsolete')
juniIkeSaLocalIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 2), IpAddress())
if mibBuilder.loadTexts: juniIkeSaLocalIpAddr.setStatus('obsolete')
juniIkeSaRouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 3), Unsigned32())
if mibBuilder.loadTexts: juniIkeSaRouterIndex.setStatus('obsolete')
juniIkeSaDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 4), JuniIpsecPhase1SaDirection())
if mibBuilder.loadTexts: juniIkeSaDirection.setStatus('obsolete')
juniIkeSaState = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 5), JuniIpsecPhase1SaState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniIkeSaState.setStatus('obsolete')
juniIkeSaRemaining = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: juniIkeSaRemaining.setStatus('obsolete')
juniIkeSa2Table = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5), )
if mibBuilder.loadTexts: juniIkeSa2Table.setStatus('current')
juniIkeSa2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkeSa2RemoteIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeSaRemotePort"), (0, "Juniper-IKE-MIB", "juniIkeSa2LocalIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeSaLocalPort"), (0, "Juniper-IKE-MIB", "juniIkeSa2RouterIndex"), (0, "Juniper-IKE-MIB", "juniIkeSa2Direction"), (0, "Juniper-IKE-MIB", "juniIkeSaNegotiationDone"))
if mibBuilder.loadTexts: juniIkeSa2Entry.setStatus('current')
juniIkeSa2RemoteIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 1), IpAddress())
if mibBuilder.loadTexts: juniIkeSa2RemoteIpAddr.setStatus('current')
juniIkeSaRemotePort = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 2), Unsigned32())
if mibBuilder.loadTexts: juniIkeSaRemotePort.setStatus('current')
juniIkeSa2LocalIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 3), IpAddress())
if mibBuilder.loadTexts: juniIkeSa2LocalIpAddr.setStatus('current')
juniIkeSaLocalPort = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 4), Unsigned32())
if mibBuilder.loadTexts: juniIkeSaLocalPort.setStatus('current')
juniIkeSa2RouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 5), Unsigned32())
if mibBuilder.loadTexts: juniIkeSa2RouterIndex.setStatus('current')
juniIkeSa2Direction = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("responder", 0), ("initiator", 1))))
if mibBuilder.loadTexts: juniIkeSa2Direction.setStatus('current')
juniIkeSaNegotiationDone = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("negotiationNotDone", 0), ("negotiationDone", 1))))
if mibBuilder.loadTexts: juniIkeSaNegotiationDone.setStatus('current')
juniIkeSa2State = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 8), JuniIpsecPhase1SaState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniIkeSa2State.setStatus('current')
juniIkeSa2Remaining = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: juniIkeSa2Remaining.setStatus('current')
juniRemoteCookie = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniRemoteCookie.setStatus('current')
juniLocalCookie = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniLocalCookie.setStatus('current')
juniIkeMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2))
juniIkeMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 1))
juniIkeMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2))
juniIkeCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 1, 1)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleGroup"), ("Juniper-IKE-MIB", "juniIkeIpv4PreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeFqdnPreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeSaGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeCompliance = juniIkeCompliance.setStatus('obsolete')
juniIkeCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 1, 2)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleGroup"), ("Juniper-IKE-MIB", "juniIkeIpv4PreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeFqdnPreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeSa2Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeCompliance2 = juniIkeCompliance2.setStatus('obsolete')
juniIkeCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 1, 3)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleV2Group"), ("Juniper-IKE-MIB", "juniIkeIpv4PreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeFqdnPreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeSa2Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeCompliance3 = juniIkeCompliance3.setStatus('current')
juniIkePolicyRuleGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 1)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleAuthMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleEncryptMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRulePfsGroup"), ("Juniper-IKE-MIB", "juniIkePolicyRuleHashMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleLifetime"), ("Juniper-IKE-MIB", "juniIkePolicyRuleNegotiationMode"), ("Juniper-IKE-MIB", "juniIkePolicyRuleRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkePolicyRuleGroup = juniIkePolicyRuleGroup.setStatus('obsolete')
juniIkeIpv4PreSharedKeyGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 2)).setObjects(("Juniper-IKE-MIB", "juniIkeIpv4PresharedKeyStr"), ("Juniper-IKE-MIB", "juniIkeIpv4PresharedMaskedKeyStr"), ("Juniper-IKE-MIB", "juniIkeIpv4PresharedKeyRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeIpv4PreSharedKeyGroup = juniIkeIpv4PreSharedKeyGroup.setStatus('current')
juniIkeFqdnPreSharedKeyGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 3)).setObjects(("Juniper-IKE-MIB", "juniIkeFqdnPresharedKeyStr"), ("Juniper-IKE-MIB", "juniIkeFqdnPresharedMaskedKeyStr"), ("Juniper-IKE-MIB", "juniIkeFqdnPresharedKeyRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeFqdnPreSharedKeyGroup = juniIkeFqdnPreSharedKeyGroup.setStatus('current')
juniIkeSaGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 4)).setObjects(("Juniper-IKE-MIB", "juniIkeSaState"), ("Juniper-IKE-MIB", "juniIkeSaRemaining"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeSaGroup = juniIkeSaGroup.setStatus('obsolete')
juniIkeSa2Group = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 5)).setObjects(("Juniper-IKE-MIB", "juniIkeSa2State"), ("Juniper-IKE-MIB", "juniIkeSa2Remaining"), ("Juniper-IKE-MIB", "juniRemoteCookie"), ("Juniper-IKE-MIB", "juniLocalCookie"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeSa2Group = juniIkeSa2Group.setStatus('current')
juniIkePolicyRuleV2Group = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 6)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleV2AuthMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2EncryptMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2PfsGroup"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2HashMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2Lifetime"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2NegotiationMode"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2IpAddress"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2RouterIndex"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2RowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkePolicyRuleV2Group = juniIkePolicyRuleV2Group.setStatus('current')
mibBuilder.exportSymbols("Juniper-IKE-MIB", juniIkeSaRemaining=juniIkeSaRemaining, juniIkePolicyRuleV2Priority=juniIkePolicyRuleV2Priority, juniIkePolicyRuleV2AuthMethod=juniIkePolicyRuleV2AuthMethod, juniIkeSaRemotePort=juniIkeSaRemotePort, juniIkeSa2Group=juniIkeSa2Group, juniIkeSaNegotiationDone=juniIkeSaNegotiationDone, juniIkeSa2RemoteIpAddr=juniIkeSa2RemoteIpAddr, juniIkeSa2Entry=juniIkeSa2Entry, juniIkeFqdnPresharedKeyEntry=juniIkeFqdnPresharedKeyEntry, juniIkeSaState=juniIkeSaState, juniIkeSa2RouterIndex=juniIkeSa2RouterIndex, juniIkeIpv4PresharedRemoteIpAddr=juniIkeIpv4PresharedRemoteIpAddr, juniIkeSaLocalPort=juniIkeSaLocalPort, juniIkePolicyRuleLifetime=juniIkePolicyRuleLifetime, juniIkeMIB=juniIkeMIB, juniIkeSaEntry=juniIkeSaEntry, juniIkePolicyRuleEntry=juniIkePolicyRuleEntry, juniIkePolicyRuleTable=juniIkePolicyRuleTable, juniIkeSa2State=juniIkeSa2State, juniIkePolicyRuleV2PfsGroup=juniIkePolicyRuleV2PfsGroup, juniIkeCompliance3=juniIkeCompliance3, juniIkePolicyRuleV2IpAddress=juniIkePolicyRuleV2IpAddress, juniIkePolicyRuleAuthMethod=juniIkePolicyRuleAuthMethod, juniIkeSaGroup=juniIkeSaGroup, JuniIkeGroup=JuniIkeGroup, juniIkeSaLocalIpAddr=juniIkeSaLocalIpAddr, juniIkeSaRemoteIpAddr=juniIkeSaRemoteIpAddr, juniIkePolicyRuleHashMethod=juniIkePolicyRuleHashMethod, juniIkeIpv4PreSharedKeyGroup=juniIkeIpv4PreSharedKeyGroup, juniIkePolicyRuleV2HashMethod=juniIkePolicyRuleV2HashMethod, juniIke=juniIke, juniIkePolicyRuleV2RouterIndex=juniIkePolicyRuleV2RouterIndex, juniIkeFqdnPresharedKeyRowStatus=juniIkeFqdnPresharedKeyRowStatus, juniIkeSa2Table=juniIkeSa2Table, juniIkeSa2Direction=juniIkeSa2Direction, JuniIkeEncryptionMethod=JuniIkeEncryptionMethod, juniIkeIpv4PresharedMaskedKeyStr=juniIkeIpv4PresharedMaskedKeyStr, juniIkeSaTable=juniIkeSaTable, JuniIkeHashMethod=JuniIkeHashMethod, JuniIpsecPhase1SaState=JuniIpsecPhase1SaState, juniIkePolicyRuleV2NegotiationMode=juniIkePolicyRuleV2NegotiationMode, juniIkeFqdnPresharedRemote=juniIkeFqdnPresharedRemote, juniIkeMIBCompliances=juniIkeMIBCompliances, juniIkeMIBConformance=juniIkeMIBConformance, juniIkeSa2Remaining=juniIkeSa2Remaining, juniLocalCookie=juniLocalCookie, juniIkeSaRouterIndex=juniIkeSaRouterIndex, juniIkeFqdnPreSharedKeyGroup=juniIkeFqdnPreSharedKeyGroup, juniIkeIpv4PresharedKeyRowStatus=juniIkeIpv4PresharedKeyRowStatus, juniIkePolicyRulePriority=juniIkePolicyRulePriority, JuniIkeNegotiationV2Mode=JuniIkeNegotiationV2Mode, juniIkeSaDirection=juniIkeSaDirection, juniIkePolicyRuleV2Lifetime=juniIkePolicyRuleV2Lifetime, juniIkePolicyRuleGroup=juniIkePolicyRuleGroup, juniIkePolicyRuleV2Entry=juniIkePolicyRuleV2Entry, juniIkeFqdnPresharedKeyStr=juniIkeFqdnPresharedKeyStr, juniIkeFqdnPresharedMaskedKeyStr=juniIkeFqdnPresharedMaskedKeyStr, juniIkeCompliance=juniIkeCompliance, JuniIpsecPhase1SaDirection=JuniIpsecPhase1SaDirection, juniIkeSa2LocalIpAddr=juniIkeSa2LocalIpAddr, juniIkePolicyRuleV2Group=juniIkePolicyRuleV2Group, juniIkeIpv4PresharedRouterIdx=juniIkeIpv4PresharedRouterIdx, juniIkePolicyRuleV2RowStatus=juniIkePolicyRuleV2RowStatus, juniRemoteCookie=juniRemoteCookie, PYSNMP_MODULE_ID=juniIkeMIB, juniIkePolicyRuleNegotiationMode=juniIkePolicyRuleNegotiationMode, juniIkePolicyRuleEncryptMethod=juniIkePolicyRuleEncryptMethod, juniIkeIpv4PresharedKeyEntry=juniIkeIpv4PresharedKeyEntry, JuniIkeAuthenticationMethod=JuniIkeAuthenticationMethod, juniIkePolicyRuleV2Table=juniIkePolicyRuleV2Table, juniIkeFqdnPresharedKeyTable=juniIkeFqdnPresharedKeyTable, juniIkeMIBGroups=juniIkeMIBGroups, 
juniIkeIpv4PresharedKeyStr=juniIkeIpv4PresharedKeyStr, juniIkeFqdnPresharedRouterIndex=juniIkeFqdnPresharedRouterIndex, juniIkeIpv4PresharedKeyTable=juniIkeIpv4PresharedKeyTable, juniIkeCompliance2=juniIkeCompliance2, juniIkePolicyRulePfsGroup=juniIkePolicyRulePfsGroup, juniIkeObjects=juniIkeObjects, JuniIkeNegotiationMode=JuniIkeNegotiationMode, juniIkePolicyRuleRowStatus=juniIkePolicyRuleRowStatus, juniIkePolicyRuleV2EncryptMethod=juniIkePolicyRuleV2EncryptMethod)
| 0 | 2,332 | 183 |
05afe74a9edff3bb79d4147c7b62c775c037193f | 2,034 | py | Python | Wrapping/Python/Testing/Create_Quad_Geometry.py | mhitzem/SIMPL | cd8a58f8d955d232ea039121cc5286cc9545c7a6 | [
"NRL"
] | 3 | 2018-01-18T18:27:02.000Z | 2021-06-13T06:10:52.000Z | Wrapping/Python/Testing/Create_Quad_Geometry.py | mhitzem/SIMPL | cd8a58f8d955d232ea039121cc5286cc9545c7a6 | [
"NRL"
] | 211 | 2016-07-27T12:18:16.000Z | 2021-11-02T13:42:11.000Z | Wrapping/Python/Testing/Create_Quad_Geometry.py | mhitzem/SIMPL | cd8a58f8d955d232ea039121cc5286cc9545c7a6 | [
"NRL"
] | 23 | 2016-02-15T21:23:47.000Z | 2021-08-11T15:35:24.000Z |
| 47.302326 | 196 | 0.748279 | # Based on CreateQuadGeometry example
import simpl
import simplpy as d3d
import simpl_helpers as sh
import simpl_test_dirs as sd
def CreateQuadGeometryTest():
# Create Data Container Array
dca = simpl.DataContainerArray()
# Create the Data Container
err = d3d.create_data_container(dca, 'DataContainer')
assert err == 0, f'DataContainer ErrorCondition: {err}'
# Read vertices
err = d3d.create_attribute_matrix(dca, simpl.DataArrayPath('DataContainer', 'Bounds', ''), simpl.AttributeMatrix.Type.Cell, sh.CreateDynamicTableData([[144]]))
assert err == 0, f'CreateAttributeMatrix - Error: {err}'
err = d3d.import_asci_data_array(dca, simpl.DataArrayPath('DataContainer', 'Bounds', 'Vertices'), simpl.NumericTypes.Float, 3, 1, sd.GetBuildDirectory() + '/Data/SIMPL/VertexCoordinates.csv', 0)
assert err == 0, f'ImportAsciDataArray - Error {err}'
# Read quads
err = d3d.create_attribute_matrix(dca, simpl.DataArrayPath('DataContainer', 'QuadList', ''), simpl.AttributeMatrix.Type.Cell, sh.CreateDynamicTableData([[121]]))
assert err == 0, f'CreateAttributeMatrix - Error: {err}'
err = d3d.import_asci_data_array(dca, simpl.DataArrayPath('DataContainer', 'QuadList', 'Quads'), simpl.NumericTypes.SizeT, 4, 1, sd.GetBuildDirectory() + '/Data/SIMPL/QuadConnectivity.csv', 0)
assert err == 0, f'ImportAsciDataArray - Error {err}'
# Create Geometry
err = sh.CreateGeometry(dca, 0, simpl.IGeometry.Type.Quad, 'DataContainer', False,
shared_vertex_list_array_path = simpl.DataArrayPath('DataContainer', 'Bounds', 'Vertices'),
shared_quad_list_array_path = simpl.DataArrayPath('DataContainer', 'QuadList', 'Quads'),
vertex_attribute_matrix_name = 'VertexData',
face_attribute_matrix_name = 'FaceData')
assert err == 0, f'Create Geometry - ErrorCondition: {err}'
err = d3d.data_container_writer(dca, sd.GetTestTempDirectory() + '/CreateQuadGeometry.dream3d', True, False)
assert err == 0, f'DataContainerWriter ErrorCondition: {err}'
if __name__ == '__main__':
CreateQuadGeometryTest()
| 1,826 | 0 | 23 |
577a1fe1ddbd642935fc1a15b38681f3628284de | 439 | py | Python | pdf_enlarge.py | meklenbpo/pdf_enlarge | 412cef12e479560ddf28a987acfc0094a8ec41e7 | [
"MIT"
] | null | null | null | pdf_enlarge.py | meklenbpo/pdf_enlarge | 412cef12e479560ddf28a987acfc0094a8ec41e7 | [
"MIT"
] | 2 | 2021-01-13T10:02:51.000Z | 2021-01-13T10:03:05.000Z | pdf_enlarge.py | meklenbpo/pdf_enlarge | 412cef12e479560ddf28a987acfc0094a8ec41e7 | [
"MIT"
] | null | null | null | """
PDF Enlarge
===========
Scale the contents of a PDF document 2x.
- Split each page into two parts along the perpendicular axis (e.g. one
A4 page into two A5 pages).
- Scale each split part 2 times (e.g. A5 -> A4)
- Merge scaled parts into a new PDF document.
"""
import sys
def main() -> int:
"""Console script entry point function."""
print('PDF Enlarge')
return 0
if __name__ == '__main__':
sys.exit(main())
| 18.291667 | 71 | 0.644647 | """
PDF Enlarge
===========
Scale the contents of a PDF document 2x.
- Split each page into two parts along the perpendicular axis (e.g. one
A4 page into two A5 pages).
- Scale each split part 2x (e.g. A5 -> A4).
- Merge scaled parts into a new PDF document.
"""
import sys
def main() -> int:
"""Console script entry point function."""
print('PDF Enlarge')
return 0
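# --- Editor's hedged sketch (not in the original file): one way the
# --- split/scale/merge steps from the module docstring could look, assuming
# --- the third-party `pypdf` package (PdfReader, PdfWriter, Transformation).
# --- Untested illustration, not this project's actual implementation.
def enlarge(src_path: str, dst_path: str) -> None:
    """Split each page in half along its height and scale each half 2x."""
    from copy import deepcopy
    from pypdf import PdfReader, PdfWriter, Transformation  # assumed dependency
    reader = PdfReader(src_path)
    writer = PdfWriter()
    for page in reader.pages:
        width = float(page.mediabox.width)
        height = float(page.mediabox.height)
        for half in range(2):  # 0 = bottom half, 1 = top half
            part = deepcopy(page)
            # Crop to one half of the source page (e.g. A4 -> A5)...
            part.mediabox.lower_left = (0, half * height / 2)
            part.mediabox.upper_right = (width, (half + 1) * height / 2)
            # ...then scale the cropped content 2x (A5 -> A4).
            part.add_transformation(Transformation().scale(2, 2))
            writer.add_page(part)
    with open(dst_path, 'wb') as fh:
        writer.write(fh)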
if __name__ == '__main__':
sys.exit(main())
| 0 | 0 | 0 |
f76bf215b059f317c9ad61f90d151ac3137850cc | 3,349 | py | Python | txstatsd/server/protocol.py | drawks/txstatsd | da674d7a86e0e5ec40eaa53fe81310ef19d1ed9e | [
"MIT"
] | null | null | null | txstatsd/server/protocol.py | drawks/txstatsd | da674d7a86e0e5ec40eaa53fe81310ef19d1ed9e | [
"MIT"
] | 1 | 2020-07-10T23:35:49.000Z | 2020-07-10T23:35:49.000Z | txstatsd/server/protocol.py | drawks/txstatsd | da674d7a86e0e5ec40eaa53fe81310ef19d1ed9e | [
"MIT"
] | 1 | 2020-07-13T05:31:58.000Z | 2020-07-13T05:31:58.000Z |
| 38.94186 | 78 | 0.707973 | # Copyright (C) 2011-2012 Canonical Services Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from twisted.internet.protocol import (
DatagramProtocol, Factory)
from twisted.protocols.basic import LineReceiver
class StatsDServerProtocol(DatagramProtocol):
"""A Twisted-based implementation of the StatsD server.
Data is received via UDP for local aggregation and then sent to a Graphite
server via TCP.
"""
def __init__(self, processor, monitor_message=None,
monitor_response=None):
self.processor = processor
self.monitor_message = monitor_message
self.monitor_response = monitor_response
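    # NB: the signature below uses Python 2-only tuple parameter unpacking,
    # "(host, port)"; under Python 3 the pair would have to be unpacked
    # inside the method body.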
def datagramReceived(self, data, (host, port)):
"""Process received data and store it locally."""
if data == self.monitor_message:
# Send the expected response to the
# monitoring agent.
return self.transport.write(
self.monitor_response, (host, port))
return self.transport.reactor.callLater(
0, self.processor.process, data)
class StatsDTCPServerProtocol(LineReceiver):
"""A Twisted-based implementation of the StatsD server over TCP.
Data is received via TCP for local aggregation and then sent to a Graphite
server via TCP.
"""
def __init__(self, processor, monitor_message=None,
monitor_response=None):
self.processor = processor
self.monitor_message = monitor_message
self.monitor_response = monitor_response
def lineReceived(self, data):
"""Process received data and store it locally."""
if data == self.monitor_message:
# Send the expected response to the
# monitoring agent.
return self.transport.write(self.monitor_response)
return self.transport.reactor.callLater(
0, self.processor.process, data)
class StatsDTCPServerFactory(Factory):
def __init__(self, processor, monitor_message=None,
monitor_response=None):
self.processor = processor
self.monitor_message = monitor_message
self.monitor_response = monitor_response
def buildProtocol(self, addr):
return StatsDTCPServerProtocol(
self.processor, self.monitor_message,
self.monitor_response)
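# --- Editor's hedged usage sketch (not in the original module): one way these
# --- protocols might be bound to a reactor. `EchoProcessor` is a stand-in
# --- invented here; the real processor comes from txstatsd's service wiring.
if __name__ == '__main__':
    from twisted.internet import reactor
    class EchoProcessor(object):
        def process(self, data):
            print(data)
    reactor.listenUDP(8125, StatsDServerProtocol(EchoProcessor()))
    reactor.listenTCP(8125, StatsDTCPServerFactory(EchoProcessor()))
    reactor.run()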
| 740 | 17 | 131 |
17c615cbc1defe72246e6cb25c6b08f1f512c901 | 4,641 | py | Python | acq4/analysis/modules/Photostim/ScatterPlotter.py | pbmanis/acq4-2019 | 2a7d66eb4f1258d2dbdce27637d8a66de518c312 | [
"MIT"
] | null | null | null | acq4/analysis/modules/Photostim/ScatterPlotter.py | pbmanis/acq4-2019 | 2a7d66eb4f1258d2dbdce27637d8a66de518c312 | [
"MIT"
] | null | null | null | acq4/analysis/modules/Photostim/ScatterPlotter.py | pbmanis/acq4-2019 | 2a7d66eb4f1258d2dbdce27637d8a66de518c312 | [
"MIT"
] | null | null | null |
| 35.427481 | 188 | 0.579401 | from __future__ import print_function
# -*- coding: utf-8 -*-
from six.moves import range
from acq4.util import Qt
import acq4.pyqtgraph as pg
#import acq4.pyqtgraph.TreeWidget as TreeWidget
import acq4.util.flowchart.EventDetection as FCEventDetection
import acq4.util.debug as debug
class ScatterPlotter(Qt.QSplitter):
    ### Draws scatter plots and lets the user pick which data fields supply the x and y axes.
sigClicked = Qt.Signal(object, object)
def __init__(self):
Qt.QSplitter.__init__(self)
self.setOrientation(Qt.Qt.Horizontal)
self.plot = pg.PlotWidget()
self.addWidget(self.plot)
self.ctrl = Qt.QWidget()
self.addWidget(self.ctrl)
self.layout = Qt.QVBoxLayout()
self.layout.setSpacing(0)
self.layout.setContentsMargins(0, 0, 0, 0)
self.ctrl.setLayout(self.layout)
self.scanList = pg.TreeWidget()
self.layout.addWidget(self.scanList)
self.filter = FCEventDetection.EventFilter('eventFilter')
self.layout.addWidget(self.filter.ctrlWidget())
self.xCombo = Qt.QComboBox()
self.yCombo = Qt.QComboBox()
self.layout.addWidget(self.xCombo)
self.layout.addWidget(self.yCombo)
self.columns = []
self.scans = {} ## maps scan: (scatterPlotItem, treeItem, valid)
self.xCombo.currentIndexChanged.connect(self.invalidate)
self.yCombo.currentIndexChanged.connect(self.invalidate)
self.filter.sigStateChanged.connect(self.invalidate)
self.scanList.itemChanged.connect(self.itemChanged)
def itemChanged(self, item, col):
gi = self.scans[item.scan][0]
if item.checkState(0) == Qt.Qt.Checked:
gi.show()
else:
gi.hide()
self.updateAll()
def invalidate(self): ## mark all scans as invalid and update
for s in self.scans:
self.scans[s][2] = False
self.updateAll()
def addScan(self, scanDict):
plot = pg.ScatterPlotItem(pen=Qt.QPen(Qt.Qt.NoPen), brush=pg.mkBrush((255, 255, 255, 100)))
self.plot.addItem(plot)
plot.sigClicked.connect(self.plotClicked)
if not isinstance(scanDict, dict):
scanDict = {'key':scanDict}
#print "Adding:", scan.name
for scan in scanDict.values():
item = Qt.QTreeWidgetItem([scan.name()])
item.setCheckState(0, Qt.Qt.Checked)
item.scan = scan
self.scanList.addTopLevelItem(item)
self.scans[scan] = [plot, item, False]
self.updateScan(scan)
scan.sigEventsChanged.connect(self.invalidateScan)
def invalidateScan(self, scan):
self.scans[scan][2] = False
self.updateScan(scan)
def updateScan(self, scan):
try:
            if self.scans[scan][2] is True:  ## check the 'valid' flag; the original compared the whole [plot, item, valid] list to True
return
self.updateColumns(scan)
x, y = self.getAxes()
plot = self.scans[scan][0]
data = scan.getAllEvents()
if data is None:
plot.setPoints([])
return
data = self.filter.process(data, {})
#print "scatter plot:", len(data['output']), "pts"
### TODO: if 'fitTime' is not available, we should fall back to 'index'
pts = [{'pos': (data['output'][i][x], data['output'][i][y]), 'data': (scan, data['output'][i]['SourceFile'], data['output'][i]['fitTime'])} for i in range(len(data['output']))]
plot.setPoints(pts)
#print pts
self.scans[scan][2] = True ## plot is valid
except:
pass
#debug.printExc("Error updating scatter plot:")
def plotClicked(self, plot, points):
self.sigClicked.emit(self, points)
def updateAll(self):
for s in self.scans:
if self.scans[s][1].checkState(0) == Qt.Qt.Checked:
self.updateScan(s)
def updateColumns(self, scan):
ev = scan.getAllEvents()
if ev is None:
return
cols = ev.dtype.names
for c in cols:
if c not in self.columns:
self.xCombo.addItem(c)
self.yCombo.addItem(c)
for c in self.columns:
if c not in cols:
ind = self.xCombo.findText(c)
self.xCombo.removeItem(ind)
self.yCombo.removeItem(ind)
self.columns = cols
def getAxes(self):
return str(self.xCombo.currentText()), str(self.yCombo.currentText())
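# --- Editor's note (not in the original module): ScatterPlotter is GUI glue.
# --- A "scan" object must provide name(), getAllEvents() (a numpy record
# --- array of detected events), and a sigEventsChanged signal; addScan()
# --- accepts either a single scan or a dict of scans. Hedged wiring sketch:
# ---     app = Qt.QApplication([])
# ---     plotter = ScatterPlotter()
# ---     plotter.addScan({'scan1': my_scan})
# ---     plotter.show()
# ---     app.exec_()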
| 3,894 | 437 | 23 |
7a009eb6bfd0a0a41612fd04b1dc563bb1dadf4c | 3,538 | py | Python | shakenfist_ci/tests/test_snapshots.py | Aceofspies/deploy | e4de8185ed84b1f051c462d92ed9d88890f0405d | [
"Apache-2.0"
] | null | null | null | shakenfist_ci/tests/test_snapshots.py | Aceofspies/deploy | e4de8185ed84b1f051c462d92ed9d88890f0405d | [
"Apache-2.0"
] | null | null | null | shakenfist_ci/tests/test_snapshots.py | Aceofspies/deploy | e4de8185ed84b1f051c462d92ed9d88890f0405d | [
"Apache-2.0"
] | null | null | null |
| 33.065421 | 74 | 0.541266 | from shakenfist.client import apiclient
from shakenfist_ci import base
class TestSnapshots(base.BaseTestCase):
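    # Suite outline: each test provisions an instance on the private test
    # network, waits for the login prompt, takes a single-disk snapshot and
    # then an all-disk snapshot, and verifies the snapshot records (device
    # name and owning instance UUID) before deleting the instance.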
def setUp(self):
super(TestSnapshots, self).setUp()
self.namespace = 'ci-snapshots-%s' % self._uniquifier()
self.namespace_key = self._uniquifier()
self.test_client = self._make_namespace(
self.namespace, self.namespace_key)
self.net = self.test_client.allocate_network(
'192.168.242.0/24', True, True, '%s-net' % self.namespace)
def tearDown(self):
super(TestSnapshots, self).tearDown()
for inst in self.test_client.get_instances():
self.test_client.delete_instance(inst['uuid'])
self.test_client.delete_network(self.net['uuid'])
self._remove_namespace(self.namespace)
def test_single_disk_snapshots(self):
inst = self.test_client.create_instance(
'cirros', 1, 1,
[
{
'network_uuid': self.net['uuid']
}
],
[
{
'size': 8,
'base': 'cirros',
'type': 'disk'
}
], None, None)
self.assertIsNotNone(inst['uuid'])
self.assertIsNotNone(inst['node'])
self._await_login_prompt(inst['uuid'])
snap1 = self.test_client.snapshot_instance(inst['uuid'])
self.assertIsNotNone(snap1)
snapshots = self.test_client.get_instance_snapshots(inst['uuid'])
self.assertEqual(1, len(snapshots))
snap2 = self.test_client.snapshot_instance(inst['uuid'], all=True)
self.assertIsNotNone(snap2)
snapshots = self.test_client.get_instance_snapshots(inst['uuid'])
self.assertEqual(2, len(snapshots))
for snap in snapshots:
self.assertEqual('vda', snap['device'])
self.assertEqual(inst['uuid'], snap['instance_uuid'])
self.test_client.delete_instance(inst['uuid'])
def test_multiple_disk_snapshots(self):
inst = self.test_client.create_instance(
'cirros', 1, 1,
[
{
'network_uuid': self.net['uuid']
}
],
[
{
'size': 8,
'base': 'cirros',
'type': 'disk'
},
{
'size': 8,
'type': 'disk'
},
{
'size': 8,
'base': 'cirros',
'type': 'cdrom'
}
], None, None)
self.assertIsNotNone(inst['uuid'])
self.assertIsNotNone(inst['node'])
console = base.LoggingSocket(inst['node'], inst['console_port'])
console.await_login_prompt()
snap1 = self.test_client.snapshot_instance(inst['uuid'])
self.assertIsNotNone(snap1)
snapshots = self.test_client.get_instance_snapshots(inst['uuid'])
self.assertEqual(1, len(snapshots))
snap2 = self.test_client.snapshot_instance(inst['uuid'], all=True)
self.assertIsNotNone(snap2)
snapshots = self.test_client.get_instance_snapshots(inst['uuid'])
self.assertEqual(3, len(snapshots))
for snap in snapshots:
self.assertIn(snap['device'], ['vda', 'vdc'])
self.assertEqual(inst['uuid'], snap['instance_uuid'])
self.test_client.delete_instance(inst['uuid'])
| 3,317 | 18 | 130 |
cb0e788d33dbe9e1c1e90d718f368e794a35e6be | 1,156 | py | Python | quizzes/migrations/0005_auto_20150813_0645.py | ikedumancas/ikequizgen | 86cb4c183ef927be063eb9ad0e7855a0c23ba0d2 | [
"MIT"
] | null | null | null | quizzes/migrations/0005_auto_20150813_0645.py | ikedumancas/ikequizgen | 86cb4c183ef927be063eb9ad0e7855a0c23ba0d2 | [
"MIT"
] | null | null | null | quizzes/migrations/0005_auto_20150813_0645.py | ikedumancas/ikequizgen | 86cb4c183ef927be063eb9ad0e7855a0c23ba0d2 | [
"MIT"
] | null | null | null |
| 27.52381 | 107 | 0.547578 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
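    # Adds an integer 'order' field to Choice and Question (with matching
    # Meta ordering), orders quizzes newest-first, and relabels Quiz.is_active.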
dependencies = [
('quizzes', '0004_auto_20150811_1354'),
]
operations = [
migrations.AlterModelOptions(
name='choice',
options={'ordering': ['order']},
),
migrations.AlterModelOptions(
name='question',
options={'ordering': ['order']},
),
migrations.AlterModelOptions(
name='quiz',
options={'ordering': ['-timestamp'], 'verbose_name': 'Quiz', 'verbose_name_plural': 'Quizzes'},
),
migrations.AddField(
model_name='choice',
name='order',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='question',
name='order',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='quiz',
name='is_active',
field=models.BooleanField(default=False, verbose_name=b'active'),
),
]
| 0 | 1,026 | 23 |
c54f76638dceb534a31b87621c4b854ace380526 | 1,695 | py | Python | lib/surface/apigee/deployments/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/apigee/deployments/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/apigee/deployments/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z |
| 33.9 | 89 | 0.698525 | # -*- coding: utf-8 -*- # Lint as: python3
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The deployments command group for the Apigee CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Deployments(base.Group):
"""Manage deployments of Apigee API proxies in runtime environments."""
detailed_help = {
"DESCRIPTION": """
{description}
`{command}` contains commands for enumerating and checking the status
of deployments of proxies to runtime environments.
""",
"EXAMPLES": """
To list all deployments for the active Cloud Platform project, run:
$ {command} list
To list all deployments in a particular environment of a particular
Apigee organization, run:
$ {command} list --environment=ENVIRONMENT --organization=ORG_NAME
To get the status of a specific deployment as a JSON object, run:
$ {command} describe --api=API_NAME --environment=ENVIRONMENT --format=json
""",
}
| 0 | 0 | 0 |
ba212c198138bfba78a51a27c08670c39088122e | 498 | py | Python | src/pygcode_modules/regular_polygon.py | kazetkazet/cnc | 8e207a71616a9a13bac57df85631714235589891 | [
"MIT"
] | null | null | null | src/pygcode_modules/regular_polygon.py | kazetkazet/cnc | 8e207a71616a9a13bac57df85631714235589891 | [
"MIT"
] | null | null | null | src/pygcode_modules/regular_polygon.py | kazetkazet/cnc | 8e207a71616a9a13bac57df85631714235589891 | [
"MIT"
] | null | null | null |
| 22.636364 | 67 | 0.53012 | import math
def code(radius: int = 15, num_sides: int = 7) -> str:
"""
    Example G-code module: traces a regular polygon toolpath.
    Please simulate the output before milling.
"""
result = ["G90"]
for i in range(num_sides):
x = radius - radius * math.cos(2 * math.pi * i / num_sides)
y = radius * math.sin(2 * math.pi * i / num_sides)
result.append("G0 X%0.2f Y%0.2f" % (
x,
y
))
result.append("G0 X0 Y0")
return '\n'.join(result)
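# --- Editor's hedged usage example (not in the original file): emit the
# --- program for a hexagon of radius 20. The first vertex lands on the
# --- origin because x = radius - radius*cos(0) = 0 when i == 0.
if __name__ == '__main__':
    print(code(radius=20, num_sides=6))
    # The emitted program begins:
    #   G90
    #   G0 X0.00 Y0.00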
| 0 | 0 | 0 |
eb046dcade542f39b272b58c7cd33de1809e7456 | 2,148 | py | Python | mihaelanistor/data/sihks.py | s-shailja/challenge-iclr-2021 | 28ad9d126597166bc41715f77c8cf366b8fba975 | [
"MIT"
] | 39 | 2021-03-12T07:30:14.000Z | 2022-03-24T06:37:02.000Z | mihaelanistor/data/sihks.py | s-shailja/challenge-iclr-2021 | 28ad9d126597166bc41715f77c8cf366b8fba975 | [
"MIT"
] | 34 | 2021-03-09T03:19:55.000Z | 2021-09-07T18:30:59.000Z | mihaelanistor/data/sihks.py | s-shailja/challenge-iclr-2021 | 28ad9d126597166bc41715f77c8cf366b8fba975 | [
"MIT"
] | 29 | 2021-03-13T21:21:14.000Z | 2022-02-02T05:52:44.000Z |
| 33.046154 | 125 | 0.633613 | import numpy as np
from scipy.io import loadmat
from scipy.fft import fft
def SIHKS(evecs, evals, t0=0.01, time_scale=15, alpha1=2, n_functions=17):
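    # Scale-invariant heat kernel signature (cf. Bronstein & Kokkinos, CVPR
    # 2010): sample the HKS log-derivative at times t = t0 * alpha1**tau,
    # difference it across consecutive tau samples, then keep FFT magnitudes
    # so that a global shape scaling (a shift along tau) drops out. Expects
    # evecs of shape (n_vertices, n_evals) and evals as a column vector of
    # Laplace-Beltrami eigenvalues.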
n_vertices = evecs.shape[0]
tau = np.linspace(start=0, stop=time_scale, num=int(time_scale/(1/16)+1))
t = t0 * alpha1 ** tau
hks = np.zeros((n_vertices, len(tau))) # (1002, 241)
for i in range(len(tau)):
sum1 = np.multiply(-np.log(alpha1) * evecs ** 2,
np.tile(np.multiply(t[i] * evals.T, np.exp(-(t[i] * evals.T))), reps=(n_vertices, 1))).sum(axis=1)
sum2 = np.multiply(evecs ** 2, np.tile(np.exp(-t[i] * evals.T), reps=(n_vertices, 1))).sum(axis=1)
hks[:, i] = np.divide(sum1, sum2)
shks = np.zeros((n_vertices, len(tau) - 1))
for i in range(len(tau) - 1):
shks[:, i] = hks[:, i + 1] - hks[:, i]
sihks = np.zeros((n_vertices, len(tau) - 1))
for i in range(n_vertices):
sihks[i, :] = np.abs(fft(shks[i, :]))
return sihks[:, :n_functions]
def make_point_clouds(vertices, temperature):
"""[summary]
Parameters
----------
vertices : ndarray of shape (n_vertices, 3)
Vertices of the mesh as 3D points.
temperature : ndarray of shape (n_vertices, n_functions)
A collection of functions defined on the vertices of the mesh, such as SIHKS or other spectral descriptor.
Returns
-------
point_clouds : ndarray of shape (n_functions, n_vertices, 4)
Collection of point clouds formed by concatenating the vertex coordinates and the corresponding
temperature for each given function.
"""
n_vertices = vertices.shape[0]
n_functions = temperature.shape[1]
# Repeat points n_function times [n_functions, n_vertices, 3]
vertices = np.tile(vertices, reps=(n_functions, 1))
vertices = vertices.reshape(n_functions, n_vertices, 3)
# Reshape temperature [n_functions, n_vertices, 1]
temperature = np.expand_dims(temperature.T, axis=-1)
# Concatenate coordinates and temperature
point_clouds = np.concatenate([vertices, temperature], axis=-1)
return point_clouds
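# --- Editor's hedged usage sketch (not in the original file): wire SIHKS into
# --- make_point_clouds with synthetic eigenpairs. Real inputs would be
# --- Laplace-Beltrami eigenvectors/eigenvalues of a mesh (e.g. via loadmat).
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    n_vertices, n_evals = 100, 30
    vertices = rng.normal(size=(n_vertices, 3))         # mesh vertex positions
    evecs = rng.normal(size=(n_vertices, n_evals))      # eigenvector columns
    evals = rng.uniform(0.01, 10.0, size=(n_evals, 1))  # eigenvalues (column)
    descriptor = SIHKS(evecs, evals)                    # (n_vertices, 17)
    clouds = make_point_clouds(vertices, descriptor)    # (17, n_vertices, 4)
    print(clouds.shape)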
| 928 | 0 | 23 |
dbd4a11a4f56abf62c7b56880621f5da540c055c | 445 | py | Python | jsonargparse/__init__.py | carmocca/jsonargparse | dde80fb00f48e3a922001542c47f16c986df7fd2 | [
"MIT"
] | null | null | null | jsonargparse/__init__.py | carmocca/jsonargparse | dde80fb00f48e3a922001542c47f16c986df7fd2 | [
"MIT"
] | null | null | null | jsonargparse/__init__.py | carmocca/jsonargparse | dde80fb00f48e3a922001542c47f16c986df7fd2 | [
"MIT"
] | null | null | null |
| 17.115385 | 25 | 0.692135 | from argparse import (
Action,
Namespace,
HelpFormatter,
OPTIONAL,
REMAINDER,
SUPPRESS,
PARSER,
ONE_OR_MORE,
ZERO_OR_MORE,
)
from .actions import *
from .cli import *
from .core import *
from .deprecated import *
from .formatters import *
from .jsonnet import *
from .jsonschema import *
from .optionals import *
from .signatures import *
from .typehints import *
from .util import *
__version__ = '3.19.2'
| 0 | 0 | 0 |
3bc7dca6af68bed2eb5dd91180da70f783fdec45 | 9,476 | py | Python | sprokit/tests/bindings/python/sprokit/pipeline/test-config.py | dstoup/kwiver | a3a36317b446baf0feb6274235ab1ac6b4329ead | [
"BSD-3-Clause"
] | 14 | 2015-05-02T17:06:34.000Z | 2019-06-20T10:10:20.000Z | sprokit/tests/bindings/python/sprokit/pipeline/test-config.py | dstoup/kwiver | a3a36317b446baf0feb6274235ab1ac6b4329ead | [
"BSD-3-Clause"
] | 3 | 2021-03-19T15:39:43.000Z | 2021-09-08T02:47:15.000Z | sprokit/tests/bindings/python/sprokit/pipeline/test-config.py | acidburn0zzz/kwiver | 6e4205f1c46df04759c57c040f01cc804b27e00d | [
"BSD-3-Clause"
] | 9 | 2015-08-19T04:48:16.000Z | 2021-02-15T14:28:06.000Z |
| 22.669856 | 85 | 0.657873 | #!@PYTHON_EXECUTABLE@
#ckwg +28
# Copyright 2011-2013 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def test_import():
try:
import sprokit.pipeline.config
except:
test_error("Failed to import the config module")
def test_create():
from sprokit.pipeline import config
try:
config.empty_config()
except:
test_error("Failed to create an empty configuration")
config.ConfigKey()
config.ConfigKeys()
config.ConfigDescription()
config.ConfigValue()
def test_api_calls():
from sprokit.pipeline import config
config.Config.block_sep
config.Config.global_value
def test_has_value():
from sprokit.pipeline import config
c = config.empty_config()
keya = 'keya'
keyb = 'keyb'
valuea = 'valuea'
c.set_value(keya, valuea)
if not c.has_value(keya):
test_error("Block does not have value which was set")
if c.has_value(keyb):
test_error("Block has value which was not set")
def test_get_value():
from sprokit.pipeline import config
c = config.empty_config()
keya = 'keya'
valuea = 'valuea'
c.set_value(keya, valuea)
get_valuea = c.get_value(keya)
if not valuea == get_valuea:
test_error("Did not retrieve value that was set")
def test_get_value_nested():
from sprokit.pipeline import config
c = config.empty_config()
keya = 'keya'
keyb = 'keyb'
valuea = 'valuea'
c.set_value(keya + config.Config.block_sep + keyb, valuea)
nc = c.subblock(keya)
get_valuea = nc.get_value(keyb)
if not valuea == get_valuea:
test_error("Did not retrieve value that was set")
def test_get_value_no_exist():
from sprokit.pipeline import config
c = config.empty_config()
keya = 'keya'
keyb = 'keyb'
valueb = 'valueb'
expect_exception('retrieving an unset value', BaseException,
c.get_value, keya)
get_valueb = c.get_value(keyb, valueb)
if not valueb == get_valueb:
test_error("Did not retrieve default when requesting unset value")
def test_unset_value():
from sprokit.pipeline import config
c = config.empty_config()
keya = 'keya'
keyb = 'keyb'
valuea = 'valuea'
valueb = 'valueb'
c.set_value(keya, valuea)
c.set_value(keyb, valueb)
c.unset_value(keya)
expect_exception('retrieving an unset value', BaseException,
c.get_value, keya)
get_valueb = c.get_value(keyb)
if not valueb == get_valueb:
test_error("Did not retrieve value when requesting after an unrelated unset")
def test_available_values():
from sprokit.pipeline import config
c = config.empty_config()
keya = 'keya'
keyb = 'keyb'
valuea = 'valuea'
valueb = 'valueb'
c.set_value(keya, valuea)
c.set_value(keyb, valueb)
avail = c.available_values()
if not len(avail) == 2:
test_error("Did not retrieve correct number of keys")
try:
for val in avail:
pass
except:
test_error("Available values is not iterable")
def test_read_only():
from sprokit.pipeline import config
c = config.empty_config()
keya = 'keya'
valuea = 'valuea'
valueb = 'valueb'
c.set_value(keya, valuea)
c.mark_read_only(keya)
expect_exception('setting a read only value', BaseException,
c.set_value, keya, valueb)
get_valuea = c.get_value(keya)
if not valuea == get_valuea:
test_error("Read only value changed")
def test_read_only_unset():
from sprokit.pipeline import config
c = config.empty_config()
keya = 'keya'
valuea = 'valuea'
c.set_value(keya, valuea)
c.mark_read_only(keya)
expect_exception('unsetting a read only value', BaseException,
c.unset_value, keya)
get_valuea = c.get_value(keya)
if not valuea == get_valuea:
test_error("Read only value was unset")
def test_subblock():
from sprokit.pipeline import config
c = config.empty_config()
block1 = 'block1'
block2 = 'block2'
keya = 'keya'
keyb = 'keyb'
keyc = 'keyc'
valuea = 'valuea'
valueb = 'valueb'
valuec = 'valuec'
c.set_value(block1 + config.Config.block_sep + keya, valuea)
c.set_value(block1 + config.Config.block_sep + keyb, valueb)
c.set_value(block2 + config.Config.block_sep + keyc, valuec)
d = c.subblock(block1)
get_valuea = d.get_value(keya)
if not valuea == get_valuea:
test_error("Subblock does not inherit expected keys")
get_valueb = d.get_value(keyb)
if not valueb == get_valueb:
test_error("Subblock does not inherit expected keys")
if d.has_value(keyc):
test_error("Subblock inherited unrelated key")
def test_subblock_view():
from sprokit.pipeline import config
c = config.empty_config()
block1 = 'block1'
block2 = 'block2'
keya = 'keya'
keyb = 'keyb'
keyc = 'keyc'
valuea = 'valuea'
valueb = 'valueb'
valuec = 'valuec'
c.set_value(block1 + config.Config.block_sep + keya, valuea)
c.set_value(block2 + config.Config.block_sep + keyb, valueb)
d = c.subblock_view(block1)
if not d.has_value(keya):
test_error("Subblock does not inherit expected keys")
if d.has_value(keyb):
test_error("Subblock inherited unrelated key")
c.set_value(block1 + config.Config.block_sep + keya, valueb)
get_valuea1 = d.get_value(keya)
if not valueb == get_valuea1:
test_error("Subblock view persisted a changed value")
d.set_value(keya, valuea)
get_valuea2 = d.get_value(keya)
if not valuea == get_valuea2:
test_error("Subblock view set value was not changed in parent")
def test_merge_config():
from sprokit.pipeline import config
c = config.empty_config()
d = config.empty_config()
keya = 'keya'
keyb = 'keyb'
keyc = 'keyc'
valuea = 'valuea'
valueb = 'valueb'
valuec = 'valuec'
c.set_value(keya, valuea)
c.set_value(keyb, valuea)
d.set_value(keyb, valueb)
d.set_value(keyc, valuec)
c.merge_config(d)
get_valuea = c.get_value(keya)
if not valuea == get_valuea:
test_error("Unmerged key changed")
get_valueb = c.get_value(keyb)
if not valueb == get_valueb:
test_error("Conflicting key was not overwritten")
get_valuec = c.get_value(keyc)
if not valuec == get_valuec:
test_error("New key did not appear")
def test_dict():
from sprokit.pipeline import config
c = config.empty_config()
key = 'key'
value = 'oldvalue'
if key in c:
test_error("'%s' is in an empty config" % key)
if c:
test_error("An empty config is not falsy")
c[key] = value
if not c[key] == value:
test_error("Value was not set")
if key not in c:
test_error("'%s' is not in config after insertion" % key)
if not len(c) == 1:
test_error("The len() operator is incorrect")
if not c:
test_error("A non-empty config is not truthy")
value = 'newvalue'
origvalue = 'newvalue'
c[key] = value
value = 'replacedvalue'
if not c[key] == origvalue:
test_error("Value was overwritten")
del c[key]
expect_exception('getting an unset value', BaseException,
c.__getitem__, key)
expect_exception('deleting an unset value', BaseException,
c.__delitem__, key)
value = 10
c[key] = value
if not c[key] == str(value):
test_error("Value was not converted to a string")
if __name__ == '__main__':
import os
import sys
if not len(sys.argv) == 4:
test_error("Expected three arguments")
sys.exit(1)
testname = sys.argv[1]
os.chdir(sys.argv[2])
sys.path.append(sys.argv[3])
from sprokit.test.test import *
run_test(testname, find_tests(locals()))
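# --- Editor's sketch (not part of the original file) ---
# Given the argv handling above, one test function is run by name with a
# working directory and an extra sys.path entry, roughly:
#
#     python test-config.py test_has_value /tmp/work /path/to/python/bindings
#
# where the first argument must match one of the test_* functions collected
# by find_tests(locals()).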
| 7,222 | 0 | 345 |
f77f84a38d963e84a788a588afba020c07d38f30 | 1,564 | py | Python | corehq/apps/integration/util.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/apps/integration/util.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/apps/integration/util.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | import hashlib
from corehq.apps.integration.models import (
DialerSettings,
GaenOtpServerSettings,
HmacCalloutSettings,
)
| 25.639344 | 72 | 0.716752 | import hashlib
from corehq.apps.integration.models import (
DialerSettings,
GaenOtpServerSettings,
HmacCalloutSettings,
)
def domain_uses_dialer(domain):
try:
settings = DialerSettings.objects.get(domain=domain)
return settings.is_enabled
except DialerSettings.DoesNotExist:
return False
def get_hmac_callout_settings(domain):
try:
settings = HmacCalloutSettings.objects.get(domain=domain)
return settings if settings.is_enabled else None
except HmacCalloutSettings.DoesNotExist:
pass
def get_gaen_otp_server_settings(domain):
try:
settings = GaenOtpServerSettings.objects.get(domain=domain)
return settings if settings.is_enabled else None
except GaenOtpServerSettings.DoesNotExist:
pass
def get_dialer_settings(domain):
return DialerSettings.objects.get(domain=domain)
def integration_contexts(domain):
context = {'dialer_enabled': domain_uses_dialer(domain)}
gaen_otp_server_settings = get_gaen_otp_server_settings(domain)
if gaen_otp_server_settings:
context.update({
'gaen_otp_enabled': True
})
hmac_settings = get_hmac_callout_settings(domain)
if hmac_settings:
context.update({
'hmac_root_url': hmac_settings.destination_url,
'hmac_api_key': hmac_settings.api_key,
'hmac_hashed_secret': hash_secret(hmac_settings.api_secret),
})
return context
def hash_secret(secret):
return hashlib.sha512(secret.encode()).hexdigest()
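# --- Editor's sketch (not part of the original module) ---
# hash_secret() returns the 128-character SHA-512 hex digest of its input:
#
#     hash_secret('s3cret')   # -> 128 hex characters
#
# integration_contexts('my-domain') then returns {'dialer_enabled': ...} plus
# the GAEN/HMAC keys only for domains whose settings exist and are enabled;
# 'my-domain' is a made-up example name.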
| 1,284 | 0 | 138 |
7171a7dd253f1b36af369cd08354fa4ed94e53f5 | 484 | py | Python | manage.py | lsloan/myla | f288ad3c77c3911f1831bf484a2ff275194e768b | [
"Apache-2.0"
] | 2 | 2019-01-28T17:56:45.000Z | 2019-10-23T17:35:54.000Z | manage.py | lsloan/myla | f288ad3c77c3911f1831bf484a2ff275194e768b | [
"Apache-2.0"
] | 1 | 2020-01-23T18:18:49.000Z | 2020-01-23T18:18:49.000Z | manage.py | lsloan/myla | f288ad3c77c3911f1831bf484a2ff275194e768b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os, sys, warnings, logging
logger = logging.getLogger(__name__)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dashboard.settings")
from django.core.management import execute_from_command_line
# Regex for which modules to ignore warnings from
IGNORE_MODULES = 'djangosaml2'
warnings.filterwarnings("ignore", module=IGNORE_MODULES, category=DeprecationWarning)
execute_from_command_line(sys.argv)
| 28.470588 | 89 | 0.770661 | #!/usr/bin/env python
import os, sys, warnings, logging
logger = logging.getLogger(__name__)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dashboard.settings")
from django.core.management import execute_from_command_line
# Regex for which modules to ignore warnings from
IGNORE_MODULES = 'djangosaml2'
warnings.filterwarnings("ignore", module=IGNORE_MODULES, category=DeprecationWarning)
execute_from_command_line(sys.argv)
| 0 | 0 | 0 |
5a40381b43a7fa949e47343d5c7edae0bf2e3a78 | 2,017 | py | Python | code/layers.py | forest-snow/adan | 8e954f228c38191fccc06827f19b686aad34852d | [
"MIT"
] | 54 | 2018-07-17T04:16:28.000Z | 2022-02-10T04:25:22.000Z | code/layers.py | forest-snow/adan | 8e954f228c38191fccc06827f19b686aad34852d | [
"MIT"
] | 3 | 2019-01-02T02:15:13.000Z | 2020-10-09T22:49:35.000Z | code/layers.py | forest-snow/adan | 8e954f228c38191fccc06827f19b686aad34852d | [
"MIT"
] | 12 | 2018-10-23T18:43:37.000Z | 2022-01-22T07:30:44.000Z | import torch
from torch import autograd, nn
import torch.nn.functional as functional
import utils
| 31.030769 | 102 | 0.622707 | import torch
from torch import autograd, nn
import torch.nn.functional as functional
import utils
class AveragingLayer(nn.Module):
def __init__(self, word_emb):
super(AveragingLayer, self).__init__()
self.word_emb = word_emb
def forward(self, input):
"""
input: (data, lengths): (IntTensor(batch_size, max_sent_len), IntTensor(batch_size))
"""
data, lengths = input
embeds = self.word_emb(data)
X = embeds.sum(1).squeeze(1)
lengths = lengths.view(-1, 1).expand_as(X)
return X / lengths.float()
class SummingLayer(nn.Module):
def __init__(self, word_emb):
super(SummingLayer, self).__init__()
self.word_emb = word_emb
def forward(self, input):
"""
input: (data, lengths): (IntTensor(batch_size, max_sent_len), IntTensor(batch_size))
"""
data, _ = input
embeds = self.word_emb(data)
X = embeds.sum(1).squeeze()
return X
class DotAttentionLayer(nn.Module):
def __init__(self, hidden_size):
super(DotAttentionLayer, self).__init__()
self.hidden_size = hidden_size
self.W = nn.Linear(hidden_size, 1, bias=False)
def forward(self, input):
"""
input: (unpacked_padded_output: batch_size x seq_len x hidden_size, lengths: batch_size)
"""
inputs, lengths = input
batch_size, max_len, _ = inputs.size()
flat_input = inputs.contiguous().view(-1, self.hidden_size)
logits = self.W(flat_input).view(batch_size, max_len)
alphas = functional.softmax(logits, dim=-1)
# computing mask
idxes = torch.arange(0, max_len, out=torch.LongTensor(max_len)).unsqueeze(0).to(inputs.device)
mask = (idxes<lengths.unsqueeze(1)).float()
alphas = alphas * mask
# renormalize
alphas = alphas / torch.sum(alphas, 1).view(-1, 1)
output = torch.bmm(alphas.unsqueeze(1), inputs).squeeze(1)
return output
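# --- Editor's shape-check sketch (not part of the original file) ---
# Assuming a batch of 2 padded sequences, max length 5, hidden size 8:
#
#     attn = DotAttentionLayer(hidden_size=8)
#     inputs = torch.randn(2, 5, 8)
#     lengths = torch.LongTensor([5, 3])
#     out = attn((inputs, lengths))   # shape (2, 8); padded steps are masked out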
| 329 | 1,508 | 69 |
40eb99acbc7e96a316af0c3e934f5b0ee1191eac | 499 | py | Python | manager/config.py | ofurman/feedbot | c88e86bb3cccd16393639b1868268aea0b4ca677 | [
"MIT"
] | null | null | null | manager/config.py | ofurman/feedbot | c88e86bb3cccd16393639b1868268aea0b4ca677 | [
"MIT"
] | null | null | null | manager/config.py | ofurman/feedbot | c88e86bb3cccd16393639b1868268aea0b4ca677 | [
"MIT"
] | null | null | null | import os
basedir = os.path.abspath(os.path.dirname(__file__)) | 35.642857 | 114 | 0.711423 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
 POSTGRESQL_DATABASE_URI = "postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@postgres:5432/{POSTGRES_DB}".format(
POSTGRES_USER=os.environ.get('POSTGRES_USER'),
POSTGRES_PASSWORD=os.environ.get('POSTGRES_PASSWORD'),
POSTGRES_DB=os.environ.get('POSTGRES_DB')
)
API_ID = os.environ.get('API_ID')
API_HASH = os.environ.get('API_HASH')
BOT_ID = int(os.environ.get('BOT_ID')) | 0 | 414 | 23 |
b2a752fe495a2c7d923532741bb3916f84407372 | 418 | py | Python | jingtai/transformers/util.py | feihong/jingtai | 637ed42dcc20ed011ff93993fddc6d187b084c1d | [
"Apache-2.0"
] | null | null | null | jingtai/transformers/util.py | feihong/jingtai | 637ed42dcc20ed011ff93993fddc6d187b084c1d | [
"Apache-2.0"
] | null | null | null | jingtai/transformers/util.py | feihong/jingtai | 637ed42dcc20ed011ff93993fddc6d187b084c1d | [
"Apache-2.0"
] | null | null | null | import re
import yaml
def split_markup(markup):
"""
Given some markup, return a tuple containing the decoded data and the
template code.
"""
match = re.search(r'\n={3,}\n', markup)
if match:
start, end = match.span()
ctx = yaml.load(markup[:start])
template_code = markup[end:]
else:
ctx = {}
template_code = markup
return ctx, template_code
| 19.904762 | 73 | 0.590909 | import re
import yaml
def split_markup(markup):
"""
Given some markup, return a tuple containing the decoded data and the
template code.
"""
match = re.search(r'\n={3,}\n', markup)
if match:
start, end = match.span()
ctx = yaml.load(markup[:start])
template_code = markup[end:]
else:
ctx = {}
template_code = markup
return ctx, template_code
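# --- Editor's sketch (not part of the original module) ---
# A separator line of three or more '=' splits a YAML header from the body:
#
#     ctx, body = split_markup('title: Home\n===\n<h1>content</h1>')
#     # ctx == {'title': 'Home'}; body == '<h1>content</h1>'
#
# Without a separator, ctx is {} and the whole string is returned as the body.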
| 0 | 0 | 0 |
35f4f70ace5b6bc3f2793839fc7d7f3801a7594a | 1,134 | py | Python | Filtro/search_by_id.py | jolivaresc/AyPIdeT-Proyecto-Sismo | f9ace8292e662f926f9f53007cf57b3b79b76fd7 | [
"MIT"
] | 1 | 2017-10-18T14:40:40.000Z | 2017-10-18T14:40:40.000Z | Filtro/search_by_id.py | jolivaresc/AyPIdeT-Proyecto-Sismo | f9ace8292e662f926f9f53007cf57b3b79b76fd7 | [
"MIT"
] | null | null | null | Filtro/search_by_id.py | jolivaresc/AyPIdeT-Proyecto-Sismo | f9ace8292e662f926f9f53007cf57b3b79b76fd7 | [
"MIT"
] | 1 | 2017-10-23T14:12:47.000Z | 2017-10-23T14:12:47.000Z | # TEST CODE
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pymongo  # needed so the except clause below can reference pymongo.errors
from pymongo import MongoClient
from bson.objectid import ObjectId
from nltk.tokenize import TweetTokenizer
from datetime import datetime as dt
try:
client = MongoClient()
print("Connected to MongoDB\n")
except pymongo.errors.ConnectionFailure as e:
print("Could not connect to MongoDB",e)
db = client.sept2017_db
tweets = db.sept2017_collection
fecha1 = "Tue Sep 19 00:00:01 +0000 2017"
fecha_inicio = dt.strptime(fecha1,'%a %b %d %H:%M:%S +0000 %Y')
print(fecha_inicio)
fecha2 = "Tue Sep 26 23:59:59 +0000 2017"
fecha_fin = dt.strptime(fecha2,'%a %b %d %H:%M:%S +0000 %Y')
print(fecha_fin)
tknzr = TweetTokenizer(preserve_case=False, # Convert to lowercase
 reduce_len=True, # Reduce repeated characters
 strip_handles=False) # Show @users
id_tweet = tknzr.tokenize(tweets.find_one({'_id': ObjectId('59e55c370e0bab1d26640d94') }).get('text'))
#fecha_tweet = tknzr.tokenize(tweets.find_one({"created_at": {"$gte":dt(2017,9,19),"$lt":dt(2017,9,26)}}).get('text'))
print(id_tweet)
 | 30.648649 | 118 | 0.689594 | # TEST CODE
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pymongo  # needed so the except clause below can reference pymongo.errors
from pymongo import MongoClient
from bson.objectid import ObjectId
from nltk.tokenize import TweetTokenizer
from datetime import datetime as dt
try:
client = MongoClient()
print("Connected to MongoDB\n")
except pymongo.errors.ConnectionFailure as e:
print("Could not connect to MongoDB",e)
db = client.sept2017_db
tweets = db.sept2017_collection
fecha1 = "Tue Sep 19 00:00:01 +0000 2017"
fecha_inicio = dt.strptime(fecha1,'%a %b %d %H:%M:%S +0000 %Y')
print(fecha_inicio)
fecha2 = "Tue Sep 26 23:59:59 +0000 2017"
fecha_fin = dt.strptime(fecha2,'%a %b %d %H:%M:%S +0000 %Y')
print(fecha_fin)
tknzr = TweetTokenizer(preserve_case=False, # Convert to lowercase
 reduce_len=True, # Reduce repeated characters
 strip_handles=False) # Show @users
id_tweet = tknzr.tokenize(tweets.find_one({'_id': ObjectId('59e55c370e0bab1d26640d94') }).get('text'))
#fecha_tweet = tknzr.tokenize(tweets.find_one({"created_at": {"$gte":dt(2017,9,19),"$lt":dt(2017,9,26)}}).get('text'))
print(id_tweet)
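# --- Editor's sketch (not part of the original script) ---
# The commented-out date-range query above would look roughly like this,
# assuming 'created_at' is stored as a BSON date:
#
#     doc = tweets.find_one({"created_at": {"$gte": fecha_inicio, "$lt": fecha_fin}})
#     if doc is not None:
#         print(tknzr.tokenize(doc.get('text')))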
| 0 | 0 | 0 |
b47435aa6518eae61a73ba675399b74e273ec70c | 2,822 | py | Python | Acquire/Accounting/_pairednote.py | openghg/acquire | 8af8701b092f7304c02fea1ee6360e53502dfd64 | [
"Apache-2.0"
] | 1 | 2021-10-18T17:11:47.000Z | 2021-10-18T17:11:47.000Z | Acquire/Accounting/_pairednote.py | openghg/acquire | 8af8701b092f7304c02fea1ee6360e53502dfd64 | [
"Apache-2.0"
] | null | null | null | Acquire/Accounting/_pairednote.py | openghg/acquire | 8af8701b092f7304c02fea1ee6360e53502dfd64 | [
"Apache-2.0"
] | null | null | null | __all__ = ["PairedNote"]
class PairedNote:
"""This class holds a DebitNote together with its matching
CreditNote(s)
"""
def __init__(self, debit_note, credit_note):
"""Construct from the matching pair of notes"""
from Acquire.Accounting import CreditNote as _CreditNote
from Acquire.Accounting import DebitNote as _DebitNote
if not isinstance(debit_note, _DebitNote):
raise TypeError("The debit_note must be of type DebitNote!")
if not isinstance(credit_note, _CreditNote):
raise TypeError("The credit_note must be of type CreditNote!")
if credit_note.debit_note_uid() != debit_note.uid():
raise ValueError(
"You must pair up DebitNote (%s) with a "
"matching CreditNote (%s)" % (debit_note.uid(), credit_note.debit_note_uid())
)
self._debit_note = debit_note
self._credit_note = credit_note
def debit_note(self):
"""Return the debit note"""
return self._debit_note
def credit_note(self):
"""Return the credit note"""
return self._credit_note
@staticmethod
def create(debit_notes, credit_notes):
"""Return a list of PairedNotes that pair together the passed
debit notes and credit notes
"""
try:
debit_note = debit_notes[0]
except:
debit_notes = [debit_notes]
if not isinstance(credit_notes, dict):
try:
credit_notes[0]
except:
credit_notes = [credit_notes]
d = {}
for credit_note in credit_notes:
d[credit_note.debit_note_uid()] = credit_note
credit_notes = d
pairs = []
missing = []
for debit_note in debit_notes:
if debit_note.uid() in credit_notes:
pairs.append(PairedNote(debit_note, credit_notes[debit_note.uid()]))
else:
missing.append(debit_note)
if len(missing) > 0 or len(credit_notes) != len(debit_notes):
from Acquire.Accounting import UnbalancedLedgerError
raise UnbalancedLedgerError(
"Cannot balance the ledger as the debit do not match the "
"credits %s versus %s" % (str(debit_notes), str(credit_notes))
)
return pairs
| 32.068182 | 108 | 0.601701 | __all__ = ["PairedNote"]
class PairedNote:
"""This class holds a DebitNote together with its matching
CreditNote(s)
"""
def __init__(self, debit_note, credit_note):
"""Construct from the matching pair of notes"""
from Acquire.Accounting import CreditNote as _CreditNote
from Acquire.Accounting import DebitNote as _DebitNote
if not isinstance(debit_note, _DebitNote):
raise TypeError("The debit_note must be of type DebitNote!")
if not isinstance(credit_note, _CreditNote):
raise TypeError("The credit_note must be of type CreditNote!")
if credit_note.debit_note_uid() != debit_note.uid():
raise ValueError(
"You must pair up DebitNote (%s) with a "
"matching CreditNote (%s)" % (debit_note.uid(), credit_note.debit_note_uid())
)
self._debit_note = debit_note
self._credit_note = credit_note
def __str__(self):
return "PairedNote(debit_note=%s, credit_note=%s)" % (str(self._debit_note), str(self._credit_note))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._debit_note == other._debit_note and self._credit_note == other._credit_note
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def debit_note(self):
"""Return the debit note"""
return self._debit_note
def credit_note(self):
"""Return the credit note"""
return self._credit_note
@staticmethod
def create(debit_notes, credit_notes):
"""Return a list of PairedNotes that pair together the passed
debit notes and credit notes
"""
try:
debit_note = debit_notes[0]
except:
debit_notes = [debit_notes]
if not isinstance(credit_notes, dict):
try:
credit_notes[0]
except:
credit_notes = [credit_notes]
d = {}
for credit_note in credit_notes:
d[credit_note.debit_note_uid()] = credit_note
credit_notes = d
pairs = []
missing = []
for debit_note in debit_notes:
if debit_note.uid() in credit_notes:
pairs.append(PairedNote(debit_note, credit_notes[debit_note.uid()]))
else:
missing.append(debit_note)
if len(missing) > 0 or len(credit_notes) != len(debit_notes):
from Acquire.Accounting import UnbalancedLedgerError
raise UnbalancedLedgerError(
"Cannot balance the ledger as the debit do not match the "
"credits %s versus %s" % (str(debit_notes), str(credit_notes))
)
return pairs
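# --- Editor's sketch (not part of the original module) ---
# With hypothetical DebitNote/CreditNote instances where each credit note's
# debit_note_uid() equals a debit note's uid():
#
#     pairs = PairedNote.create([debit1, debit2], [credit1, credit2])
#     for pair in pairs:
#         print(pair.debit_note().uid(), pair.credit_note().debit_note_uid())
#
# Any unmatched note raises UnbalancedLedgerError, per the check above.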
| 336 | 0 | 81 |
d0d41a9bfe70b530497ec5247845294a52a9f15d | 782 | py | Python | python project.py | 1-kane/Internship-Project | 72405d55713037c7882c4ed6511dc747f85bc1cb | [
"MIT"
] | null | null | null | python project.py | 1-kane/Internship-Project | 72405d55713037c7882c4ed6511dc747f85bc1cb | [
"MIT"
] | null | null | null | python project.py | 1-kane/Internship-Project | 72405d55713037c7882c4ed6511dc747f85bc1cb | [
"MIT"
] | null | null | null | from nltk import *
from nltk.corpus import *
ans = 'Y'
while((ans=='y')|(ans=='Y')):
input1 = input("Write a scentence")
lang = detect_language(input1)
print(input1+"\t Langauge: "+ lang)
ans = input("to do this again enter (y/Y)")
| 31.28 | 64 | 0.644501 | from nltk import *
from nltk.corpus import *
def lang_ratio(text):
 # count per-language stopword overlap; locals renamed so they no longer
 # shadow the function name or the input() builtin
 ratios = {}
 tokens = wordpunct_tokenize(text)
 words = [word.lower() for word in tokens]
 for language in stopwords.fileids():
 stopwords_set = set(stopwords.words(language))
 words_set = set(words)
 common_elements = words_set.intersection(stopwords_set)
 ratios[language] = len(common_elements)
 return ratios
def detect_language(text):
 ratios = lang_ratio(text)
 lang = max(ratios, key=ratios.get)
 return lang
ans = 'Y'
while((ans=='y')|(ans=='Y')):
input1 = input("Write a scentence")
lang = detect_language(input1)
print(input1+"\t Langauge: "+ lang)
ans = input("to do this again enter (y/Y)")
| 475 | 0 | 51 |
1c79fccc7802262dd9cd397daf2da80899d6d7ea | 8,310 | py | Python | minibenchmarks/pyxl_bench.py | lameiro/pyston | 838e0ac98d5926ba942224951cd1e8bad5483b5e | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | minibenchmarks/pyxl_bench.py | lameiro/pyston | 838e0ac98d5926ba942224951cd1e8bad5483b5e | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | minibenchmarks/pyxl_bench.py | lameiro/pyston | 838e0ac98d5926ba942224951cd1e8bad5483b5e | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../test/integration/pyxl/"))
from pyxl.codec.register import pyxl_transform_string
for i in xrange(100):
# pyxl/tests/test_if_1.py
pyxl_transform_string(
'''
from pyxl import html
def test():
assert str(<frag><if cond="{True}">true</if><else>false</else></frag>) == "true"
assert str(<frag><if cond="{False}">true</if><else>false</else></frag>) == "false"
''')
for i in xrange(100):
# pyxl/tests/test_curlies_in_attrs_1.py
pyxl_transform_string(
'''
from pyxl import html
def test():
# kannan thinks this should be different
assert str(<frag><img src="{'foo'}" /></frag>) == """<img src="foo" />"""
''')
for i in xrange(100):
# pyxl/tests/test_rss.py
pyxl_transform_string(
'''
import datetime
from unittest2 import TestCase
from pyxl import html
from pyxl import rss
class RssTests(TestCase):
def test_decl(self):
decl = <rss.rss_decl_standalone />.to_string()
self.assertEqual(decl, u'<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>')
def test_rss(self):
r = <rss.rss version="2.0" />.to_string()
self.assertEqual(r, u'<rss version="2.0"></rss>')
def test_channel(self):
c = (
<rss.rss version="2.0">
<rss.channel />
</rss.rss>
).to_string()
self.assertEqual(c, u'<rss version="2.0"><channel></channel></rss>')
def test_channel_with_required_elements(self):
channel = (
<frag>
<rss.rss_decl_standalone />
<rss.rss version="2.0">
<rss.channel>
<rss.title>A Title</rss.title>
<rss.link>https://www.dropbox.com</rss.link>
<rss.description>A detailed description</rss.description>
</rss.channel>
</rss.rss>
</frag>
)
expected = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<rss version="2.0">
<channel>
<title>A Title</title>
<link>https://www.dropbox.com</link>
<description>A detailed description</description>
</channel>
</rss>
"""
expected = u''.join(l.strip() for l in expected.splitlines())
self.assertEqual(channel.to_string(), expected)
def test_channel_with_optional_elements(self):
channel = (
<frag>
<rss.rss_decl_standalone />
<rss.rss version="2.0">
<rss.channel>
<rss.title>A Title</rss.title>
<rss.link>https://www.dropbox.com</rss.link>
<rss.description>A detailed description</rss.description>
<rss.ttl>60</rss.ttl>
<rss.language>en-us</rss.language>
</rss.channel>
</rss.rss>
</frag>
)
expected = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<rss version="2.0">
<channel>
<title>A Title</title>
<link>https://www.dropbox.com</link>
<description>A detailed description</description>
<ttl>60</ttl>
<language>en-us</language>
</channel>
</rss>
"""
expected = u''.join(l.strip() for l in expected.splitlines())
self.assertEqual(channel.to_string(), expected)
def test_item_with_common_elements(self):
item = (
<rss.item>
<rss.title>Item Title</rss.title>
<rss.description>
{html.rawhtml('<![CDATA[ ')}
This is a really interesting description
{html.rawhtml(']]>')}
</rss.description>
<rss.link>https://www.dropbox.com/somewhere</rss.link>
</rss.item>
)
expected = """
<item>
<title>Item Title</title>
<description><![CDATA[ This is a really interesting description ]]></description>
<link>https://www.dropbox.com/somewhere</link>
</item>
"""
expected = u''.join(l.strip() for l in expected.splitlines())
self.assertEqual(item.to_string(), expected)
def test_guid(self):
self.assertEqual(<rss.guid>foo</rss.guid>.to_string(), u'<guid>foo</guid>')
self.assertEqual(<rss.guid is-perma-link="{False}">foo</rss.guid>.to_string(),
u'<guid isPermaLink="false">foo</guid>')
self.assertEqual(<rss.guid is-perma-link="{True}">foo</rss.guid>.to_string(),
u'<guid isPermaLink="true">foo</guid>')
def test_date_elements(self):
dt = datetime.datetime(2013, 12, 17, 23, 54, 14)
self.assertEqual(<rss.pubDate date="{dt}" />.to_string(),
u'<pubDate>Tue, 17 Dec 2013 23:54:14 GMT</pubDate>')
self.assertEqual(<rss.lastBuildDate date="{dt}" />.to_string(),
u'<lastBuildDate>Tue, 17 Dec 2013 23:54:14 GMT</lastBuildDate>')
def test_rss_document(self):
dt = datetime.datetime(2013, 12, 17, 23, 54, 14)
dt2 = datetime.datetime(2013, 12, 18, 11, 54, 14)
doc = (
<frag>
<rss.rss_decl_standalone />
<rss.rss version="2.0">
<rss.channel>
<rss.title>A Title</rss.title>
<rss.link>https://www.dropbox.com</rss.link>
<rss.description>A detailed description</rss.description>
<rss.ttl>60</rss.ttl>
<rss.language>en-us</rss.language>
<rss.lastBuildDate date="{dt}" />
<rss.item>
<rss.title>Item Title</rss.title>
<rss.description>
{html.rawhtml('<![CDATA[ ')}
This is a really interesting description
{html.rawhtml(']]>')}
</rss.description>
<rss.link>https://www.dropbox.com/somewhere</rss.link>
<rss.pubDate date="{dt}" />
<rss.guid is-perma-link="{False}">123456789</rss.guid>
</rss.item>
<rss.item>
<rss.title>Another Item</rss.title>
<rss.description>
{html.rawhtml('<![CDATA[ ')}
This is another really interesting description
{html.rawhtml(']]>')}
</rss.description>
<rss.link>https://www.dropbox.com/nowhere</rss.link>
<rss.pubDate date="{dt2}" />
<rss.guid is-perma-link="{False}">ABCDEFGHIJ</rss.guid>
</rss.item>
</rss.channel>
</rss.rss>
</frag>
)
expected = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<rss version="2.0">
<channel>
<title>A Title</title>
<link>https://www.dropbox.com</link>
<description>A detailed description</description>
<ttl>60</ttl>
<language>en-us</language>
<lastBuildDate>Tue, 17 Dec 2013 23:54:14 GMT</lastBuildDate>
<item>
<title>Item Title</title>
<description><![CDATA[ This is a really interesting description ]]></description>
<link>https://www.dropbox.com/somewhere</link>
<pubDate>Tue, 17 Dec 2013 23:54:14 GMT</pubDate>
<guid isPermaLink="false">123456789</guid>
</item>
<item>
<title>Another Item</title>
<description><![CDATA[ This is another really interesting description ]]></description>
<link>https://www.dropbox.com/nowhere</link>
<pubDate>Wed, 18 Dec 2013 11:54:14 GMT</pubDate>
<guid isPermaLink="false">ABCDEFGHIJ</guid>
</item>
</channel>
</rss>
"""
expected = ''.join(l.strip() for l in expected.splitlines())
self.assertEqual(doc.to_string(), expected)
''')
| 36.130435 | 100 | 0.514561 | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../test/integration/pyxl/"))
from pyxl.codec.register import pyxl_transform_string
for i in xrange(100):
# pyxl/tests/test_if_1.py
pyxl_transform_string(
'''
from pyxl import html
def test():
assert str(<frag><if cond="{True}">true</if><else>false</else></frag>) == "true"
assert str(<frag><if cond="{False}">true</if><else>false</else></frag>) == "false"
''')
for i in xrange(100):
# pyxl/tests/test_curlies_in_attrs_1.py
pyxl_transform_string(
'''
from pyxl import html
def test():
# kannan thinks this should be different
assert str(<frag><img src="{'foo'}" /></frag>) == """<img src="foo" />"""
''')
for i in xrange(100):
# pyxl/tests/test_rss.py
pyxl_transform_string(
'''
import datetime
from unittest2 import TestCase
from pyxl import html
from pyxl import rss
class RssTests(TestCase):
def test_decl(self):
decl = <rss.rss_decl_standalone />.to_string()
self.assertEqual(decl, u'<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>')
def test_rss(self):
r = <rss.rss version="2.0" />.to_string()
self.assertEqual(r, u'<rss version="2.0"></rss>')
def test_channel(self):
c = (
<rss.rss version="2.0">
<rss.channel />
</rss.rss>
).to_string()
self.assertEqual(c, u'<rss version="2.0"><channel></channel></rss>')
def test_channel_with_required_elements(self):
channel = (
<frag>
<rss.rss_decl_standalone />
<rss.rss version="2.0">
<rss.channel>
<rss.title>A Title</rss.title>
<rss.link>https://www.dropbox.com</rss.link>
<rss.description>A detailed description</rss.description>
</rss.channel>
</rss.rss>
</frag>
)
expected = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<rss version="2.0">
<channel>
<title>A Title</title>
<link>https://www.dropbox.com</link>
<description>A detailed description</description>
</channel>
</rss>
"""
expected = u''.join(l.strip() for l in expected.splitlines())
self.assertEqual(channel.to_string(), expected)
def test_channel_with_optional_elements(self):
channel = (
<frag>
<rss.rss_decl_standalone />
<rss.rss version="2.0">
<rss.channel>
<rss.title>A Title</rss.title>
<rss.link>https://www.dropbox.com</rss.link>
<rss.description>A detailed description</rss.description>
<rss.ttl>60</rss.ttl>
<rss.language>en-us</rss.language>
</rss.channel>
</rss.rss>
</frag>
)
expected = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<rss version="2.0">
<channel>
<title>A Title</title>
<link>https://www.dropbox.com</link>
<description>A detailed description</description>
<ttl>60</ttl>
<language>en-us</language>
</channel>
</rss>
"""
expected = u''.join(l.strip() for l in expected.splitlines())
self.assertEqual(channel.to_string(), expected)
def test_item_with_common_elements(self):
item = (
<rss.item>
<rss.title>Item Title</rss.title>
<rss.description>
{html.rawhtml('<![CDATA[ ')}
This is a really interesting description
{html.rawhtml(']]>')}
</rss.description>
<rss.link>https://www.dropbox.com/somewhere</rss.link>
</rss.item>
)
expected = """
<item>
<title>Item Title</title>
<description><![CDATA[ This is a really interesting description ]]></description>
<link>https://www.dropbox.com/somewhere</link>
</item>
"""
expected = u''.join(l.strip() for l in expected.splitlines())
self.assertEqual(item.to_string(), expected)
def test_guid(self):
self.assertEqual(<rss.guid>foo</rss.guid>.to_string(), u'<guid>foo</guid>')
self.assertEqual(<rss.guid is-perma-link="{False}">foo</rss.guid>.to_string(),
u'<guid isPermaLink="false">foo</guid>')
self.assertEqual(<rss.guid is-perma-link="{True}">foo</rss.guid>.to_string(),
u'<guid isPermaLink="true">foo</guid>')
def test_date_elements(self):
dt = datetime.datetime(2013, 12, 17, 23, 54, 14)
self.assertEqual(<rss.pubDate date="{dt}" />.to_string(),
u'<pubDate>Tue, 17 Dec 2013 23:54:14 GMT</pubDate>')
self.assertEqual(<rss.lastBuildDate date="{dt}" />.to_string(),
u'<lastBuildDate>Tue, 17 Dec 2013 23:54:14 GMT</lastBuildDate>')
def test_rss_document(self):
dt = datetime.datetime(2013, 12, 17, 23, 54, 14)
dt2 = datetime.datetime(2013, 12, 18, 11, 54, 14)
doc = (
<frag>
<rss.rss_decl_standalone />
<rss.rss version="2.0">
<rss.channel>
<rss.title>A Title</rss.title>
<rss.link>https://www.dropbox.com</rss.link>
<rss.description>A detailed description</rss.description>
<rss.ttl>60</rss.ttl>
<rss.language>en-us</rss.language>
<rss.lastBuildDate date="{dt}" />
<rss.item>
<rss.title>Item Title</rss.title>
<rss.description>
{html.rawhtml('<![CDATA[ ')}
This is a really interesting description
{html.rawhtml(']]>')}
</rss.description>
<rss.link>https://www.dropbox.com/somewhere</rss.link>
<rss.pubDate date="{dt}" />
<rss.guid is-perma-link="{False}">123456789</rss.guid>
</rss.item>
<rss.item>
<rss.title>Another Item</rss.title>
<rss.description>
{html.rawhtml('<![CDATA[ ')}
This is another really interesting description
{html.rawhtml(']]>')}
</rss.description>
<rss.link>https://www.dropbox.com/nowhere</rss.link>
<rss.pubDate date="{dt2}" />
<rss.guid is-perma-link="{False}">ABCDEFGHIJ</rss.guid>
</rss.item>
</rss.channel>
</rss.rss>
</frag>
)
expected = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<rss version="2.0">
<channel>
<title>A Title</title>
<link>https://www.dropbox.com</link>
<description>A detailed description</description>
<ttl>60</ttl>
<language>en-us</language>
<lastBuildDate>Tue, 17 Dec 2013 23:54:14 GMT</lastBuildDate>
<item>
<title>Item Title</title>
<description><![CDATA[ This is a really interesting description ]]></description>
<link>https://www.dropbox.com/somewhere</link>
<pubDate>Tue, 17 Dec 2013 23:54:14 GMT</pubDate>
<guid isPermaLink="false">123456789</guid>
</item>
<item>
<title>Another Item</title>
<description><![CDATA[ This is another really interesting description ]]></description>
<link>https://www.dropbox.com/nowhere</link>
<pubDate>Wed, 18 Dec 2013 11:54:14 GMT</pubDate>
<guid isPermaLink="false">ABCDEFGHIJ</guid>
</item>
</channel>
</rss>
"""
expected = ''.join(l.strip() for l in expected.splitlines())
self.assertEqual(doc.to_string(), expected)
''')
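# --- Editor's note (not part of the original benchmark) ---
# pyxl_transform_string() only translates the inline-HTML syntax into ordinary
# Python source (e.g. <br /> becomes, roughly, a call like html.br()); the
# benchmark measures that translation step and never executes the result.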
| 0 | 0 | 0 |
18cb76eea749b02096a7baf7a16afb18bf50e409 | 695 | py | Python | chainerrl/env.py | takeratta/chainerrl | 51a2762246db2964fe97e596fd98a1a1cbfea19a | [
"MIT"
] | 4 | 2019-05-05T21:37:09.000Z | 2020-06-04T11:40:51.000Z | chainerrl/env.py | takeratta/chainerrl | 51a2762246db2964fe97e596fd98a1a1cbfea19a | [
"MIT"
] | 1 | 2018-05-29T06:38:22.000Z | 2018-05-29T06:38:22.000Z | chainerrl/env.py | takeratta/chainerrl | 51a2762246db2964fe97e596fd98a1a1cbfea19a | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
from future.utils import with_metaclass
standard_library.install_aliases()
from abc import ABCMeta
from abc import abstractmethod
class Env(with_metaclass(ABCMeta, object)):
"""RL learning environment.
 This serves as a minimal interface for RL agents.
"""
| 23.166667 | 50 | 0.758273 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
from future.utils import with_metaclass
standard_library.install_aliases()
from abc import ABCMeta
from abc import abstractmethod
class Env(with_metaclass(ABCMeta, object)):
"""RL learning environment.
 This serves as a minimal interface for RL agents.
"""
@abstractmethod
def step(self, action):
raise NotImplementedError()
@abstractmethod
def reset(self):
raise NotImplementedError()
@abstractmethod
def close(self):
raise NotImplementedError()
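# --- Editor's sketch (not part of the original module) ---
# A minimal concrete subclass; the (observation, reward, done, info) return
# shape for step() is an assumption, since this interface does not fix it:
#
#     class EchoEnv(Env):
#         def step(self, action):
#             return action, 0.0, True, {}
#         def reset(self):
#             return 0
#         def close(self):
#             pass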
| 100 | 0 | 78 |
9d82cea7f4206e305827c34264fa931545630061 | 6,392 | py | Python | download_bars2.py | rolangom/rx_ibapi_fetch | e1259fe1105ef7b152b3db25ea13c7232d0acdab | [
"MIT"
] | null | null | null | download_bars2.py | rolangom/rx_ibapi_fetch | e1259fe1105ef7b152b3db25ea13c7232d0acdab | [
"MIT"
] | null | null | null | download_bars2.py | rolangom/rx_ibapi_fetch | e1259fe1105ef7b152b3db25ea13c7232d0acdab | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
import argparse
import logging
from typing import List, Optional, Union, Dict, Tuple
from datetime import datetime, timedelta
from sqlalchemy import create_engine
import rx
import rx.operators as ops
from rx.subject import AsyncSubject, Subject, BehaviorSubject, ReplaySubject
from rx.core.observable import Observable
from typing import List, Optional, NoReturn
from collections import defaultdict
from dateutil.parser import parse
import numpy as np
import pandas as pd
from ibapi import wrapper
from ibapi.common import TickerId, BarData
from ibapi.client import EClient
from ibapi.contract import Contract
from ibapi.utils import iswrapper
ContractList = List[Contract]
BarDataList = List[BarData]
OptionalDate = Optional[datetime]
def make_download_path(args: argparse.Namespace, contract: Contract) -> str:
"""Make path for saving csv files.
Files to be stored in base_directory/<security_type>/<size>/<symbol>/
"""
path = os.path.sep.join([
args.base_directory,
args.security_type,
args.size.replace(" ", "_"),
contract.symbol,
])
return path
if __name__ == "__main__":
main()
# download_bars.py --size "5 min" --start-date 20110804 --end-date 20110904 AAPL
# download_bars.py --size "1 day" --duration "1 Y" --end-date 20210808 ABNB
# started @ 2021-08-04 23:35:45.267262
# end @ 2021-08-04 23:35:46.107792 | 32.779487 | 105 | 0.635638 | #!/usr/bin/env python3
import os
import sys
import argparse
import logging
from typing import List, Optional, Union, Dict, Tuple
from datetime import datetime, timedelta
from sqlalchemy import create_engine
import rx
import rx.operators as ops
from rx.subject import AsyncSubject, Subject, BehaviorSubject, ReplaySubject
from rx.core.observable import Observable
from typing import List, Optional, NoReturn
from collections import defaultdict
from dateutil.parser import parse
import numpy as np
import pandas as pd
from ibapi import wrapper
from ibapi.common import TickerId, BarData
from ibapi.client import EClient
from ibapi.contract import Contract
from ibapi.utils import iswrapper
ContractList = List[Contract]
BarDataList = List[BarData]
OptionalDate = Optional[datetime]
def make_download_path(args: argparse.Namespace, contract: Contract) -> str:
"""Make path for saving csv files.
Files to be stored in base_directory/<security_type>/<size>/<symbol>/
"""
path = os.path.sep.join([
args.base_directory,
args.security_type,
args.size.replace(" ", "_"),
contract.symbol,
])
return path
class DownloadApp(EClient, wrapper.EWrapper):
def __init__(self):
EClient.__init__(self, wrapper=self)
wrapper.EWrapper.__init__(self)
self.request_id = 0
self.started = False
self.next_valid_order_id = None
# self.contracts = contracts
# self.requests = {}
# self.bar_data = defaultdict(list)
# self.pending_ends = set()
# # self.args = args
# self.current = self.args.end_date
# self.duration = self.args.duration
# self.useRTH = 0
self.requests: Dict[int, Contract] = {}
 self._subjects: Dict[int, Subject] = {}  # each Subject emits (Contract, BarData) tuples; plain Subject, since subscripting rx 3's Subject fails at runtime
self.connected: BehaviorSubject[bool] = BehaviorSubject(False)
# self.engine = create_engine(self.args.db_url)
def next_request_id(self, contract: Contract) -> int:
self.request_id += 1
self.requests[self.request_id] = contract
return self.request_id
def historicalDataRequest(self, contract: Contract, endDateTime:str,
durationStr:str, barSizeSetting:str, whatToShow:str = "TRADES",
useRTH:int = 0, formatDate:int = 1, keepUpToDate:bool = False) -> Observable:
cid = self.next_request_id(contract)
# self.pending_ends.add(cid)
subject = Subject()
self._subjects[cid] = subject
self.reqHistoricalData(
cid, # tickerId, used to identify incoming data
contract,
endDateTime, # always go to midnight
durationStr, # amount of time to go back
barSizeSetting, # bar size
whatToShow, # historical data type
useRTH, # useRTH (regular trading hours)
formatDate, # format the date in yyyyMMdd HH:mm:ss
keepUpToDate, # keep up to date after snapshot
[], # chart options
)
return self._subjects[cid]
@iswrapper
def historicalData(self, reqId: int, bar) -> None:
logging.info('historicalData %s, %s' % (reqId, bar))
print('historicalData %s, %s' % (reqId, bar))
contract = self.requests[reqId]
subject = self._subjects[reqId]
if contract and subject:
subject.on_next((contract, bar))
@iswrapper
def historicalDataEnd(self, reqId: int, start: str, end: str) -> None:
super().historicalDataEnd(reqId, start, end)
logging.info('historicalDataEnd %s, %s, %s' % (reqId, start, end))
print('historicalDataEnd %s, %s, %s' % (reqId, start, end))
subject = self._subjects[reqId]
subject.on_completed()
@iswrapper
def connectAck(self):
logging.info("Connected")
self.connected.on_next(True)
@iswrapper
def connectionClosed(self):
logging.info("Disconnected")
self.connected.on_next(False)
self.connected.on_completed()
@iswrapper
def nextValidId(self, order_id: int):
super().nextValidId(order_id)
self.next_valid_order_id = order_id
logging.info(f"nextValidId: {order_id}")
# we can start now
# self.start()
@iswrapper
def error(self, req_id: TickerId, error_code: int, error: str):
super().error(req_id, error_code, error)
err = Exception("Error. Id: %s Code %s Msg: %s" % req_id, error_code, error)
if req_id < 0:
logging.debug("Error. Id: %s Code %s Msg: %s", req_id, error_code, error)
self.connected.on_error(err)
else:
logging.error("Error. Id: %s Code %s Msg: %s", req_id, error_code, error)
# we will always exit on error since data will need to be validated
subject = self._subjects[req_id]
if (subject is not None):
subject.on_error(err)
# self.done = True
def do_connect(self, host: str = "127.0.0.1", port: int = 4001, clientId: int = 0) -> rx.Observable:
self.connect(host, port, clientId)
return self.connected
def say_bye(self):
print('bye!')
self.disconnect()
def make_contract(symbol: str, sec_type: str, currency: str, exchange: str,
primaryExchange: str, localsymbol: str) -> Contract:
contract = Contract()
contract.symbol = symbol
contract.secType = sec_type
contract.currency = currency
contract.exchange = exchange
contract.primaryExchange = primaryExchange
if localsymbol:
contract.localSymbol = localsymbol
return contract
def read_file(observer: rx.core.Observer, scheduler= None) -> None:
# subject = ReplaySubject()
with open('symbols.txt', 'r') as f:
lines = f.readlines()
for line in lines:
observer.on_next(line)
# subject.on_next(line)
# return subject
observer.on_completed()
def main():
app = DownloadApp()
app.connect("127.0.0.1", clientId=1)
app.run()
if __name__ == "__main__":
main()
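# --- Editor's sketch (not part of the original script) ---
# A hedged end-to-end use of the pieces above; it assumes a reachable
# TWS/IB Gateway on port 4001 and RxPY 3-style subscribe kwargs:
#
#     app = DownloadApp()
#     app.do_connect(port=4001, clientId=1)
#     contract = make_contract('AAPL', 'STK', 'USD', 'SMART', 'NASDAQ', '')
#     bars = app.historicalDataRequest(contract, '', '1 M', '1 day')
#     bars.subscribe(on_next=print, on_completed=app.say_bye)
#     app.run()   # blocks; EWrapper callbacks feed the Subject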
# download_bars.py --size "5 min" --start-date 20110804 --end-date 20110904 AAPL
# download_bars.py --size "1 day" --duration "1 Y" --end-date 20210808 ABNB
# started @ 2021-08-04 23:35:45.267262
# end @ 2021-08-04 23:35:46.107792 | 4,458 | 410 | 92 |
a94311eed972e6d1cea65423c9ac00e2d6a91fc3 | 1,533 | py | Python | tweet.py | LineG/ADA_472_P2 | 5a72ed0ae0894f910e8d72e2bf7caf7fbe92a0f3 | [
"MIT"
] | null | null | null | tweet.py | LineG/ADA_472_P2 | 5a72ed0ae0894f910e8d72e2bf7caf7fbe92a0f3 | [
"MIT"
] | null | null | null | tweet.py | LineG/ADA_472_P2 | 5a72ed0ae0894f910e8d72e2bf7caf7fbe92a0f3 | [
"MIT"
] | null | null | null | # Line Ghanem 27280076
# Anthony Iatropoulos 40028246
# Mikael Samvelian 40003178
import re
# V = 0
# V = 1
# V = 2
# helper function
| 25.131148 | 64 | 0.500326 | # Line Ghanem 27280076
# Anthony Iatropoulos 40028246
# Mikael Samvelian 40003178
import re
class Tweet:
def __init__(self, tweet_id, user_name, language, text):
self.tweet_id = tweet_id
self.user_name = user_name
self.language = language
self.strip_text = ''
self.text = text
 # dividing the sentence into 1-, 2-, or 3-character strings
self.uni = {}
self.bi = {}
self.tri = {}
# V = 0
def lower_case(self):
self.strip_text = re.sub(r'[^A-Za-z]', '*', self.text)
self.strip_text = self.strip_text.lower()
# V = 1
def case_sensitive(self):
self.strip_text = re.sub(r'[^A-Za-z]', '*', self.text)
# V = 2
def is_alpha(self):
self.strip_text = ''
for char in self.text:
if char.isalpha():
self.strip_text += char
else:
self.strip_text += '*'
# helper function
def add_key(self, k, count):
if k in count:
count[k] += 1
else:
count[k] = 1
def counter(self):
l = len(self.strip_text)
s = self.strip_text
a = self.strip_text.replace('*', '')
for k in a:
self.add_key(k, self.uni)
for n in range(l-1):
k = s[n]+s[n+1]
if not '*' in k:
self.add_key(k, self.bi)
for n in range(l-2):
k = s[n]+s[n+1]+s[n+2]
if not '*' in k:
self.add_key(k, self.tri)
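# --- Editor's sketch (not part of the original file) ---
#     t = Tweet(1, 'user', 'en', 'Go go!')
#     t.lower_case()    # strip_text == 'go*go*'
#     t.counter()
#     # t.uni == {'g': 2, 'o': 2}; t.bi == {'go': 2}; t.tri == {} ('*' breaks runs)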
| 1,207 | -9 | 180 |
897ab95dd3ffb1e7780a7e18487313a46dcbc716 | 2,157 | py | Python | pacemaker/pacemaker.py | mkeshav/pace-maker | ad6f420b8f9edcaa6143e0aabd153fab7da1c99c | [
"MIT"
] | null | null | null | pacemaker/pacemaker.py | mkeshav/pace-maker | ad6f420b8f9edcaa6143e0aabd153fab7da1c99c | [
"MIT"
] | null | null | null | pacemaker/pacemaker.py | mkeshav/pace-maker | ad6f420b8f9edcaa6143e0aabd153fab7da1c99c | [
"MIT"
] | null | null | null | from time import time
from threading import Lock
class PaceMaker(object):
'''
Implementation of https://en.wikipedia.org/wiki/Token_bucket#Algorithm
Args:
no_token_sleep_in_seconds: Seconds to nap when there are no tokens to spend.
Defaults to 1.
'''
def set_rate_per_second(self, rate_per_second):
'''
Sets the rate/sec
Args:
rate_per_second: rate/sec
'''
with self.lock:
self.rate_per_second = rate_per_second
self.tokens = self.rate_per_second
def consume(self, tokens=1):
'''
Consumes the tokens and returns sleep time
Args:
tokens: Number of tokens to consume. Defaults to 1
'''
with self.lock:
# if the rate_per_second is set to 0, throw exception
if self.rate_per_second == 0:
raise Exception('Cannot use the pace maker without setting the heart rate_per_second!!!')
now = self._epoch_in_seconds()
time_lapsed = now - self.last
self.last = now
# Add rate_per_second x seconds lapsed
self.tokens += time_lapsed * self.rate_per_second
# If the bucket is full, discard
if self.tokens > self.rate_per_second:
self.tokens = self.rate_per_second
# subtract the number of tokens being consumed
self.tokens -= tokens
if self.tokens > 0:
# Calculate the pace based on the tokens left
return round(self.tokens/self.rate_per_second, 3)
else:
return self.no_token_sleep_in_seconds
| 33.184615 | 105 | 0.586463 | from time import time
from threading import Lock
class PaceMaker(object):
'''
Implementation of https://en.wikipedia.org/wiki/Token_bucket#Algorithm
Args:
no_token_sleep_in_seconds: Seconds to nap when there are no tokens to spend.
Defaults to 1.
'''
@classmethod
 def _epoch_in_seconds(cls):
return round(time())
def __init__(self, no_token_sleep_in_seconds=1):
self.tokens = 0
self.rate_per_second = 0
self.last = self._epoch_in_seconds() #Granularity of seconds is good enough
self.lock = Lock() # for thread safety
self.no_token_sleep_in_seconds = no_token_sleep_in_seconds
def set_rate_per_second(self, rate_per_second):
'''
Sets the rate/sec
Args:
rate_per_second: rate/sec
'''
with self.lock:
self.rate_per_second = rate_per_second
self.tokens = self.rate_per_second
def consume(self, tokens=1):
'''
Consumes the tokens and returns sleep time
Args:
tokens: Number of tokens to consume. Defaults to 1
'''
with self.lock:
# if the rate_per_second is set to 0, throw exception
if self.rate_per_second == 0:
raise Exception('Cannot use the pace maker without setting the heart rate_per_second!!!')
now = self._epoch_in_seconds()
time_lapsed = now - self.last
self.last = now
# Add rate_per_second x seconds lapsed
self.tokens += time_lapsed * self.rate_per_second
# If the bucket is full, discard
if self.tokens > self.rate_per_second:
self.tokens = self.rate_per_second
# subtract the number of tokens being consumed
self.tokens -= tokens
if self.tokens > 0:
# Calculate the pace based on the tokens left
return round(self.tokens/self.rate_per_second, 3)
else:
return self.no_token_sleep_in_seconds
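# --- Editor's sketch (not part of the original module) ---
#     from time import sleep
#     pace = PaceMaker()
#     pace.set_rate_per_second(5)    # required before consume()
#     for _ in range(20):
#         sleep(pace.consume())      # sleep time that holds ~5 ops/second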
| 318 | 0 | 53 |
4dd5daf97104f73025eaad10909f2f63ea7c003d | 2,804 | py | Python | tests/test_policy_measures.py | davidpetra/covid19-sir | 3ee3e3c3fe35acfd90781d82c259a9d3a1cb5ac0 | [
"Apache-2.0"
] | null | null | null | tests/test_policy_measures.py | davidpetra/covid19-sir | 3ee3e3c3fe35acfd90781d82c259a9d3a1cb5ac0 | [
"Apache-2.0"
] | 100 | 2021-02-06T17:50:15.000Z | 2022-03-31T20:30:49.000Z | tests/test_policy_measures.py | ardhani31/Covid19-SIRV | 813bc66f668a3d2945dc97474ea1149bbc6e40c2 | [
"Apache-2.0"
] | 1 | 2021-09-22T14:27:02.000Z | 2021-09-22T14:27:02.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
import pandas as pd
import pytest
from covsirphy import PolicyMeasures
from covsirphy import SIRF, Scenario
# Skip this test at this time
| 38.944444 | 82 | 0.656205 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
import pandas as pd
import pytest
from covsirphy import PolicyMeasures
from covsirphy import SIRF, Scenario
class UnTestPolicyMeasures(object):
# Skip this test at this time
def test_start(self, jhu_data, population_data, oxcgrt_data):
warnings.simplefilter("ignore", category=UserWarning)
# Create instance
analyser = PolicyMeasures(jhu_data, population_data, oxcgrt_data, tau=360)
# List of countries
assert isinstance(analyser.countries, list)
# Return Scenario class
assert isinstance(analyser.scenario("Japan"), Scenario)
with pytest.raises(KeyError):
analyser.scenario("Moon")
def test_analysis(self, jhu_data, population_data, oxcgrt_data):
warnings.simplefilter("ignore", category=UserWarning)
warnings.simplefilter("error", category=RuntimeWarning)
# Create instance
analyser = PolicyMeasures(
jhu_data, population_data, oxcgrt_data, tau=360)
# S-R trend analysis
analyser.trend()
# Phase length
phase_len_dict = analyser.phase_len()
assert isinstance(phase_len_dict, dict)
assert isinstance(
max(phase_len_dict.items(), key=lambda x: x[0])[1], list)
# Select two countries
phase_len_dict = analyser.phase_len()
countries_all = [
country
for (_, countries) in sorted(phase_len_dict.items())
for country in countries
]
analyser.countries = countries_all[:2]
# Parameter estimation
with pytest.raises(ValueError):
analyser.track()
analyser.estimate(SIRF, timeout=1, timeout_iteration=1)
assert isinstance(analyser.summary(), pd.DataFrame)
# Parameter history of Rt
with pytest.raises(KeyError):
df = analyser.history("Temperature", roll_window=None)
df = analyser.history("Rt", roll_window=None)
assert isinstance(df, pd.DataFrame)
# Parameter history of rho
df = analyser.history("rho", roll_window=14, show_figure=False)
assert isinstance(df, pd.DataFrame)
# Summarize
assert isinstance(analyser.summary(), pd.DataFrame)
with pytest.raises(TypeError):
analyser.summary(countries="Poland")
def test_error(self, jhu_data, population_data, oxcgrt_data):
warnings.simplefilter("ignore", category=UserWarning)
warnings.simplefilter("error", category=RuntimeWarning)
# Create instance
analyser = PolicyMeasures(
jhu_data, population_data, oxcgrt_data, tau=360)
# Register countries
with pytest.raises(KeyError):
analyser.countries = ["Moon"]
| 2,481 | 14 | 103 |
f85836d16da99b8333a921b39c91ccaab484bc93 | 263 | py | Python | DjangoNationalGeographic/Animal/admin.py | JGabriel-AbreuM/DjangoNationalGeograph | 0b095e61a1793d84f21b41bb3c4f77653e6ca19b | [
"MIT"
] | null | null | null | DjangoNationalGeographic/Animal/admin.py | JGabriel-AbreuM/DjangoNationalGeograph | 0b095e61a1793d84f21b41bb3c4f77653e6ca19b | [
"MIT"
] | null | null | null | DjangoNationalGeographic/Animal/admin.py | JGabriel-AbreuM/DjangoNationalGeograph | 0b095e61a1793d84f21b41bb3c4f77653e6ca19b | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Animal, Raca, NomeCientifico, Origem, NomePopular
admin.site.register(Animal)
admin.site.register(Raca)
admin.site.register(Origem)
admin.site.register(NomeCientifico)
admin.site.register(NomePopular)
| 29.222222 | 70 | 0.802281 | from django.contrib import admin
from .models import Animal, Raca, NomeCientifico, Origem, NomePopular
admin.site.register(Animal)
admin.site.register(Raca)
admin.site.register(Origem)
admin.site.register(NomeCientifico)
admin.site.register(NomePopular)
| 0 | 0 | 0 |
d985847b11148b37ebd1f0e1449a7bb915bd77c4 | 92 | py | Python | parameters_80.py | brenosora/tortuguita | 1f954dc307e6ebac070dbe7e2d8fe8d02b3ddf23 | [
"BSD-3-Clause"
] | null | null | null | parameters_80.py | brenosora/tortuguita | 1f954dc307e6ebac070dbe7e2d8fe8d02b3ddf23 | [
"BSD-3-Clause"
] | null | null | null | parameters_80.py | brenosora/tortuguita | 1f954dc307e6ebac070dbe7e2d8fe8d02b3ddf23 | [
"BSD-3-Clause"
] | null | null | null | password="pbkdf2(1000,20,sha512)$a4e3c0f67fc691f5$70638e315cc3ad9d7aed2f01edba2f36a5cbe486"
| 46 | 91 | 0.891304 | password="pbkdf2(1000,20,sha512)$a4e3c0f67fc691f5$70638e315cc3ad9d7aed2f01edba2f36a5cbe486"
| 0 | 0 | 0 |
8d815f0166dca1d847bcc835a11ca2d6a8cc2e5b | 12,518 | py | Python | pysnmp-with-texts/Wellfleet-GRE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/Wellfleet-GRE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/Wellfleet-GRE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module Wellfleet-GRE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Wellfleet-GRE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:40:19 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, Unsigned32, Bits, IpAddress, ObjectIdentity, Counter32, iso, NotificationType, Gauge32, Counter64, MibIdentifier, ModuleIdentity, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "Bits", "IpAddress", "ObjectIdentity", "Counter32", "iso", "NotificationType", "Gauge32", "Counter64", "MibIdentifier", "ModuleIdentity", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
wfGreGroup, = mibBuilder.importSymbols("Wellfleet-COMMON-MIB", "wfGreGroup")
wfGreInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1), )
if mibBuilder.loadTexts: wfGreInterfaceTable.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreInterfaceTable.setDescription('Parameters in wfGreInterfaceTable')
wfGreInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1), ).setIndexNames((0, "Wellfleet-GRE-MIB", "wfGreIntfIpAddr"), (0, "Wellfleet-GRE-MIB", "wfGreIntfCct"))
if mibBuilder.loadTexts: wfGreInterfaceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreInterfaceEntry.setDescription('An entry in wfGreTable.')
wfGreIntfCreate = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("create", 1), ("delete", 2))).clone('create')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfGreIntfCreate.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfCreate.setDescription('Create/Delete parameter. Default is created. Users perform a set operation on this object in order to create/delete an wfGreEntry instance.')
wfGreIntfEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfGreIntfEnable.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfEnable.setDescription('Enable/Disable parameter. Default is enabled. Users perform a set operation on this object in order to enable/disable GRE .')
wfGreIntfState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("init", 3), ("notpres", 4))).clone('notpres')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreIntfState.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfState.setDescription('The current state of GRE interface.')
wfGreIntfIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreIntfIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfIpAddr.setDescription('The IP interface to run GRE on.')
wfGreIntfCct = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreIntfCct.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfCct.setDescription('Circuit number of the GRE interface')
wfGreIntfStatsEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfGreIntfStatsEnable.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfStatsEnable.setDescription('Enable/Disable parameter. Default is enabled. Users perform a set operation on this object in order to enable/disable mib statistics for GRE interface.')
wfGreIntfDebugLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfGreIntfDebugLevel.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfDebugLevel.setDescription('A parameter to specify which messages to be printed in to the log.')
wfGreTunnelTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2), )
if mibBuilder.loadTexts: wfGreTunnelTable.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelTable.setDescription('Parameters in wfGreTunnelTable')
wfGreTunnelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1), ).setIndexNames((0, "Wellfleet-GRE-MIB", "wfGreTunnelLocalAddr"), (0, "Wellfleet-GRE-MIB", "wfGreTunnelPeerAddress"), (0, "Wellfleet-GRE-MIB", "wfGreTunnelLocalIndex"))
if mibBuilder.loadTexts: wfGreTunnelEntry.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelEntry.setDescription('An entry in wfGreTunnelTable.')
wfGreTunnelLocalAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelLocalAddr.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelLocalAddr.setDescription('IP Address of local interface.')
wfGreTunnelLocalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelLocalIndex.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelLocalIndex.setDescription('This tunnel index is assigned by the GRE process. It is used to index into the GRE mapping table.')
wfGreTunnelType = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("generic", 1), ("udas", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelType.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelType.setDescription('Indicate whether a tunnel peer has assigned a tunnel ID.')
wfGreTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelId.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelId.setDescription('This tunnel ID is assigned by the tunnel peer.')
wfGreTunnelPeerAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPeerAddress.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPeerAddress.setDescription('Address of the tunnel peer.')
wfGreRemotePayloadAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(10, 10)).setFixedLength(10)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreRemotePayloadAddress.setStatus('deprecated')
if mibBuilder.loadTexts: wfGreRemotePayloadAddress.setDescription('The address of the remote node.')
wfGreTunnelState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelState.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelState.setDescription('The state of the GRE tunnel.')
wfGreVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreVersion.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreVersion.setDescription('Reserved for future use')
wfGreProtoMap = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 9), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreProtoMap.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreProtoMap.setDescription('This will be set to the protocol type of the payload. GRE_PROTO_IP 1 GRE_PROTO_IPX 2')
wfGreTunnelPktsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPktsTx.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPktsTx.setDescription('Number of packets transmitted ')
wfGreTunnelPktsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPktsRx.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPktsRx.setDescription('Number of packets received ')
wfGreTunnelBytesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelBytesTx.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelBytesTx.setDescription('Number of bytes transmitted ')
wfGreTunnelBytesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelBytesRx.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelBytesRx.setDescription('Number of bytes received')
wfGreTunnelPktsTxDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPktsTxDropped.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPktsTxDropped.setDescription('Number of outgoing packets dropped')
wfGreTunnelPktsRxDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPktsRxDropped.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPktsRxDropped.setDescription('Number of incoming packets dropped')
wfGreTunnelXsumErr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelXsumErr.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelXsumErr.setDescription('Number of inbound checksum errors')
wfGreTunnelSeqNumErr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelSeqNumErr.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelSeqNumErr.setDescription('Number of sequence errors')
wfGreTunnelMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4500))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelMtu.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelMtu.setDescription('The MTU of the GRE tunnel')
mibBuilder.exportSymbols("Wellfleet-GRE-MIB", wfGreTunnelPktsTxDropped=wfGreTunnelPktsTxDropped, wfGreTunnelLocalIndex=wfGreTunnelLocalIndex, wfGreTunnelEntry=wfGreTunnelEntry, wfGreTunnelState=wfGreTunnelState, wfGreTunnelType=wfGreTunnelType, wfGreTunnelBytesRx=wfGreTunnelBytesRx, wfGreTunnelPktsRxDropped=wfGreTunnelPktsRxDropped, wfGreIntfCreate=wfGreIntfCreate, wfGreIntfState=wfGreIntfState, wfGreIntfIpAddr=wfGreIntfIpAddr, wfGreTunnelPeerAddress=wfGreTunnelPeerAddress, wfGreTunnelLocalAddr=wfGreTunnelLocalAddr, wfGreIntfEnable=wfGreIntfEnable, wfGreRemotePayloadAddress=wfGreRemotePayloadAddress, wfGreIntfStatsEnable=wfGreIntfStatsEnable, wfGreTunnelBytesTx=wfGreTunnelBytesTx, wfGreProtoMap=wfGreProtoMap, wfGreTunnelXsumErr=wfGreTunnelXsumErr, wfGreTunnelId=wfGreTunnelId, wfGreTunnelTable=wfGreTunnelTable, wfGreInterfaceTable=wfGreInterfaceTable, wfGreVersion=wfGreVersion, wfGreTunnelPktsTx=wfGreTunnelPktsTx, wfGreInterfaceEntry=wfGreInterfaceEntry, wfGreIntfDebugLevel=wfGreIntfDebugLevel, wfGreTunnelPktsRx=wfGreTunnelPktsRx, wfGreIntfCct=wfGreIntfCct, wfGreTunnelMtu=wfGreTunnelMtu, wfGreTunnelSeqNumErr=wfGreTunnelSeqNumErr)
| 121.533981 | 1,146 | 0.778319 | #
# PySNMP MIB module Wellfleet-GRE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Wellfleet-GRE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:40:19 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, Unsigned32, Bits, IpAddress, ObjectIdentity, Counter32, iso, NotificationType, Gauge32, Counter64, MibIdentifier, ModuleIdentity, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "Bits", "IpAddress", "ObjectIdentity", "Counter32", "iso", "NotificationType", "Gauge32", "Counter64", "MibIdentifier", "ModuleIdentity", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
wfGreGroup, = mibBuilder.importSymbols("Wellfleet-COMMON-MIB", "wfGreGroup")
wfGreInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1), )
if mibBuilder.loadTexts: wfGreInterfaceTable.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreInterfaceTable.setDescription('Parameters in wfGreInterfaceTable')
wfGreInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1), ).setIndexNames((0, "Wellfleet-GRE-MIB", "wfGreIntfIpAddr"), (0, "Wellfleet-GRE-MIB", "wfGreIntfCct"))
if mibBuilder.loadTexts: wfGreInterfaceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreInterfaceEntry.setDescription('An entry in wfGreTable.')
wfGreIntfCreate = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("create", 1), ("delete", 2))).clone('create')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfGreIntfCreate.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfCreate.setDescription('Create/Delete parameter. Default is created. Users perform a set operation on this object in order to create/delete an wfGreEntry instance.')
wfGreIntfEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfGreIntfEnable.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfEnable.setDescription('Enable/Disable parameter. Default is enabled. Users perform a set operation on this object in order to enable/disable GRE .')
wfGreIntfState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("init", 3), ("notpres", 4))).clone('notpres')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreIntfState.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfState.setDescription('The current state of GRE interface.')
wfGreIntfIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreIntfIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfIpAddr.setDescription('The IP interface to run GRE on.')
wfGreIntfCct = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreIntfCct.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfCct.setDescription('Circuit number of the GRE interface')
wfGreIntfStatsEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfGreIntfStatsEnable.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfStatsEnable.setDescription('Enable/Disable parameter. Default is enabled. Users perform a set operation on this object in order to enable/disable mib statistics for GRE interface.')
wfGreIntfDebugLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfGreIntfDebugLevel.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfDebugLevel.setDescription('A parameter to specify which messages to be printed in to the log.')
wfGreTunnelTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2), )
if mibBuilder.loadTexts: wfGreTunnelTable.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelTable.setDescription('Parameters in wfGreTunnelTable')
wfGreTunnelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1), ).setIndexNames((0, "Wellfleet-GRE-MIB", "wfGreTunnelLocalAddr"), (0, "Wellfleet-GRE-MIB", "wfGreTunnelPeerAddress"), (0, "Wellfleet-GRE-MIB", "wfGreTunnelLocalIndex"))
if mibBuilder.loadTexts: wfGreTunnelEntry.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelEntry.setDescription('An entry in wfGreTunnelTable.')
wfGreTunnelLocalAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelLocalAddr.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelLocalAddr.setDescription('IP Address of local interface.')
wfGreTunnelLocalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelLocalIndex.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelLocalIndex.setDescription('This tunnel index is assigned by the GRE process. It is used to index into the GRE mapping table.')
wfGreTunnelType = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("generic", 1), ("udas", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelType.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelType.setDescription('Indicate whether a tunnel peer has assigned a tunnel ID.')
wfGreTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelId.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelId.setDescription('This tunnel ID is assigned by the tunnel peer.')
wfGreTunnelPeerAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPeerAddress.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPeerAddress.setDescription('Address of the tunnel peer.')
wfGreRemotePayloadAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(10, 10)).setFixedLength(10)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreRemotePayloadAddress.setStatus('deprecated')
if mibBuilder.loadTexts: wfGreRemotePayloadAddress.setDescription('The address of the remote node.')
wfGreTunnelState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelState.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelState.setDescription('The state of the GRE tunnel.')
wfGreVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreVersion.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreVersion.setDescription('Reserved for future use')
wfGreProtoMap = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 9), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreProtoMap.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreProtoMap.setDescription('This will be set to the protocol type of the payload. GRE_PROTO_IP 1 GRE_PROTO_IPX 2')
wfGreTunnelPktsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPktsTx.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPktsTx.setDescription('Number of packets transmitted ')
wfGreTunnelPktsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPktsRx.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPktsRx.setDescription('Number of packets received ')
wfGreTunnelBytesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelBytesTx.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelBytesTx.setDescription('Number of bytes transmitted ')
wfGreTunnelBytesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelBytesRx.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelBytesRx.setDescription('Number of bytes received')
wfGreTunnelPktsTxDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPktsTxDropped.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPktsTxDropped.setDescription('Number of outgoing packets dropped')
wfGreTunnelPktsRxDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPktsRxDropped.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPktsRxDropped.setDescription('Number of incoming packets dropped')
wfGreTunnelXsumErr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelXsumErr.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelXsumErr.setDescription('Number of inbound checksum errors')
wfGreTunnelSeqNumErr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelSeqNumErr.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelSeqNumErr.setDescription('Number of sequence errors')
wfGreTunnelMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4500))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelMtu.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelMtu.setDescription('The MTU of the GRE tunnel')
mibBuilder.exportSymbols("Wellfleet-GRE-MIB", wfGreTunnelPktsTxDropped=wfGreTunnelPktsTxDropped, wfGreTunnelLocalIndex=wfGreTunnelLocalIndex, wfGreTunnelEntry=wfGreTunnelEntry, wfGreTunnelState=wfGreTunnelState, wfGreTunnelType=wfGreTunnelType, wfGreTunnelBytesRx=wfGreTunnelBytesRx, wfGreTunnelPktsRxDropped=wfGreTunnelPktsRxDropped, wfGreIntfCreate=wfGreIntfCreate, wfGreIntfState=wfGreIntfState, wfGreIntfIpAddr=wfGreIntfIpAddr, wfGreTunnelPeerAddress=wfGreTunnelPeerAddress, wfGreTunnelLocalAddr=wfGreTunnelLocalAddr, wfGreIntfEnable=wfGreIntfEnable, wfGreRemotePayloadAddress=wfGreRemotePayloadAddress, wfGreIntfStatsEnable=wfGreIntfStatsEnable, wfGreTunnelBytesTx=wfGreTunnelBytesTx, wfGreProtoMap=wfGreProtoMap, wfGreTunnelXsumErr=wfGreTunnelXsumErr, wfGreTunnelId=wfGreTunnelId, wfGreTunnelTable=wfGreTunnelTable, wfGreInterfaceTable=wfGreInterfaceTable, wfGreVersion=wfGreVersion, wfGreTunnelPktsTx=wfGreTunnelPktsTx, wfGreInterfaceEntry=wfGreInterfaceEntry, wfGreIntfDebugLevel=wfGreIntfDebugLevel, wfGreTunnelPktsRx=wfGreTunnelPktsRx, wfGreIntfCct=wfGreIntfCct, wfGreTunnelMtu=wfGreTunnelMtu, wfGreTunnelSeqNumErr=wfGreTunnelSeqNumErr)
| 0 | 0 | 0 |
e9165bdc89fd749b3ea93ba6c4a1ac473de7bcab | 2,116 | py | Python | learning/paramInfo.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | 1 | 2021-09-28T12:56:56.000Z | 2021-09-28T12:56:56.000Z | learning/paramInfo.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | null | null | null | learning/paramInfo.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | 1 | 2022-03-31T16:30:39.000Z | 2022-03-31T16:30:39.000Z |
nFeatures = {
"EE": 182, #22,
"SD": 126, #24,
"SR": 330, #23,
"OF": 0,
"CR": 97, #22, #91
}
params = {
'SD': {
'MoveThroughDoorway_Method2':
{'r2': {'nOutputs': 4, 'nInputs': 150, 'pos': -1, 'decoder': DecodeRobot_SD}},
'Recover_Method1':
{'r2': {'nOutputs': 4, 'nInputs': 128, 'pos': -1, 'decoder': DecodeRobot_SD}},
},
'OF': {
'Order_Method1': {
'm': {'nOutputs': 5, 'nInputs': 613, 'pos': -2, 'decoder': DecodeMachine_OF},
'objList': {'nOutputs': 10, 'nInputs': 613, 'pos': -1, 'decoder': DecodeObjList_OF},
},
'Order_Method2': {
'm': {'nOutputs': 5, 'nInputs': 613, 'pos': -3, 'decoder': DecodeMachine_OF},
'objList': {'nOutputs': 10, 'nInputs': 613, 'pos': -2, 'decoder': DecodeObjList_OF},
'p': {'nOutputs': 4, 'nInputs': 613, 'pos': -1, 'decoder': DecodePalate_OF},
},
'PickupAndLoad_Method1':
{'r': {'nOutputs': 7, 'nInputs': 637, 'pos': -1, 'decoder': DecodeRobot_OF}},
'UnloadAndDeliver_Method1':
{'r': {'nOutputs': 7, 'nInputs': 625, 'pos': -1, 'decoder': DecodeRobot_OF}},
'MoveToPallet_Method1':
{'r': {'nOutputs': 7, 'nInputs': 633, 'pos': -1, 'decoder': DecodeRobot_OF}},
},
} | 25.493976 | 97 | 0.414934 | def DecodeRobot_SD(r):
return {
0: "r1",
1: "r2",
2: "r3",
3: "r4",
}[r]
def DecodeRobot_OF(r):
return {
0: "r0",
1: "r1",
2: "r2",
3: "r3",
4: "r4",
5: "r5",
6: "r6",
}[r]
def DecodeMachine_OF(m):
return {
0: "m0",
1: "m1",
2: "m2",
3: "m3",
4: "m4",
}[m]
def DecodePalate_OF(p):
return {
0: "p0",
1: "p1",
2: "p2",
3: "p3",
}[p]
def DecodeObjList_OF(o):
return {
0: ('o1',),
1: ('o1',),
2: ('o2',),
3: ('o3',),
4: ('o4',),
5: ('o5',),
6: ('o6',),
7: ('o7',),
8: ('o8',),
9: ('o9',),
}[o]
nFeatures = {
"EE": 182, #22,
"SD": 126, #24,
"SR": 330, #23,
"OF": 0,
"CR": 97, #22, #91
}
params = {
'SD': {
'MoveThroughDoorway_Method2':
{'r2': {'nOutputs': 4, 'nInputs': 150, 'pos': -1, 'decoder': DecodeRobot_SD}},
'Recover_Method1':
{'r2': {'nOutputs': 4, 'nInputs': 128, 'pos': -1, 'decoder': DecodeRobot_SD}},
},
'OF': {
'Order_Method1': {
'm': {'nOutputs': 5, 'nInputs': 613, 'pos': -2, 'decoder': DecodeMachine_OF},
'objList': {'nOutputs': 10, 'nInputs': 613, 'pos': -1, 'decoder': DecodeObjList_OF},
},
'Order_Method2': {
'm': {'nOutputs': 5, 'nInputs': 613, 'pos': -3, 'decoder': DecodeMachine_OF},
'objList': {'nOutputs': 10, 'nInputs': 613, 'pos': -2, 'decoder': DecodeObjList_OF},
'p': {'nOutputs': 4, 'nInputs': 613, 'pos': -1, 'decoder': DecodePalate_OF},
},
'PickupAndLoad_Method1':
{'r': {'nOutputs': 7, 'nInputs': 637, 'pos': -1, 'decoder': DecodeRobot_OF}},
'UnloadAndDeliver_Method1':
{'r': {'nOutputs': 7, 'nInputs': 625, 'pos': -1, 'decoder': DecodeRobot_OF}},
'MoveToPallet_Method1':
{'r': {'nOutputs': 7, 'nInputs': 633, 'pos': -1, 'decoder': DecodeRobot_OF}},
},
} | 660 | 0 | 114 |
180eceb9c234223bdd4873839b6ba7d2999aefa2 | 35 | py | Python | pandas_scripts/script1.py | gustavofsantos/PandasPresentation | 457745d2be737891d439f1b6deb9bfddbf20baed | [
"MIT"
] | null | null | null | pandas_scripts/script1.py | gustavofsantos/PandasPresentation | 457745d2be737891d439f1b6deb9bfddbf20baed | [
"MIT"
] | null | null | null | pandas_scripts/script1.py | gustavofsantos/PandasPresentation | 457745d2be737891d439f1b6deb9bfddbf20baed | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import pandas
| 7 | 18 | 0.714286 | #!/usr/bin/python3
import pandas
| 0 | 0 | 0 |
849a311f39e26d846b879d9b57b4943e1da7e878 | 2,251 | py | Python | scripts/1507834892/pythonsv_icx_handler.py | ZSslience/MKTME-Auto | faaea6a1a23a0c6b2672427e7ab1a09fc844d722 | [
"Apache-2.0"
] | null | null | null | scripts/1507834892/pythonsv_icx_handler.py | ZSslience/MKTME-Auto | faaea6a1a23a0c6b2672427e7ab1a09fc844d722 | [
"Apache-2.0"
] | null | null | null | scripts/1507834892/pythonsv_icx_handler.py | ZSslience/MKTME-Auto | faaea6a1a23a0c6b2672427e7ab1a09fc844d722 | [
"Apache-2.0"
] | 1 | 2020-12-11T09:07:19.000Z | 2020-12-11T09:07:19.000Z | import sys
print("----------------------- pythonsv project init -----------------------")
sys.path.append(".")
sys.path.append(r'C:\PythonSV\icelakex')
from icelakex.starticx import *
from icelakex.toolext import pysv_config
from svtools.common.pysv_config import CFG
if __name__ == '__main__':
itp, sv = pythonsv_init()
# x = cpuid(0x7,0)
# print(x)
# print("ECX data: %s" % (hex(x['ecx'])))
# ECX_BIN = "{0:08b}".format(x['ecx'])
# print(ECX_BIN[-14] == "1")
# ECX_DEC = x['ecx']
# MASK_14 = 1 << 14
# print(ECX_DEC, MASK_14)
# EXPECT_MASK_14 = 0b1 << 14
# print((ECX_DEC & MASK_14) == EXPECT_MASK_14)
# x = cpuid(0x80000008,0)
# print(x)
# post_80 = itp.threads[0].port(0x80)
# post_81 = itp.threads[0].port(0x81)
# print("POST CODE: %s%s" % (post_80, post_81))
x = itp.threads[0].msr(0x981)
print("MSR 0x981: %s" % x)
# pythonsv_exit()
| 26.482353 | 78 | 0.500666 | import sys
print("----------------------- pythonsv project init -----------------------")
sys.path.append(".")
sys.path.append(r'C:\PythonSV\icelakex')
from icelakex.starticx import *
from icelakex.toolext import pysv_config
from svtools.common.pysv_config import CFG
def pythonsv_init(try_times=5):
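# Descriptive note (added, not in the upstream script): probe attach via
# OpenIPC/PythonSV can be flaky, so this halts the target and refreshes the
# sv tree up to `try_times` times. If every attempt fails it implicitly
# returns None, so the caller's `itp, sv = pythonsv_init()` would raise a
# TypeError in that case.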
for i in range(try_times):
try:
start_openipc(CFG)
start_general(CFG)
print(">>>>>>>> itp.halt()")
itp = get_itp()
itp.halt()
print(">>>>>>>> itp.halt() success")
print(">>>>>>>> sv.fresh()")
sv = get_sv()
sv.refresh()
print(">>>>>>>> sv.refresh() success")
return itp, sv
except Exception as e:
print("exception occurred")
print(e)
continue
def get_cpuid(try_times=5):
for i in range(try_times):
try:
start_openipc(CFG)
start_general(CFG)
print(">>>>>>>> cupid(0x7,0)")
halt()
result = cpuid(0x7,0)
return result
except Exception as e:
print("ipc_init except")
print(e)
continue
def pythonsv_exit(try_times=5):
for i in range(try_times):
try:
start_openipc(CFG)
start_general(CFG)
print(">>>>>>>> itp.go")
itp = get_itp()
itp.go()
# exit()
return True
except Exception as e:
print("ipc_init except")
print(e)
continue
return False
if __name__ == '__main__':
itp, sv = pythonsv_init()
# x = cpuid(0x7,0)
# print(x)
# print("ECX data: %s" % (hex(x['ecx'])))
# ECX_BIN = "{0:08b}".format(x['ecx'])
# print(ECX_BIN[-14] == "1")
# ECX_DEC = x['ecx']
# MASK_14 = 1 << 14
# print(ECX_DEC, MASK_14)
# EXPECT_MASK_14 = 0b1 << 14
# print((ECX_DEC & MASK_14) == EXPECT_MASK_14)
# x = cpuid(0x80000008,0)
# print(x)
# post_80 = itp.threads[0].port(0x80)
# post_81 = itp.threads[0].port(0x81)
# print("POST CODE: %s%s" % (post_80, post_81))
x = itp.threads[0].msr(0x981)
print("MSR 0x981: %s" % x)
# pythonsv_exit()
| 1,259 | 0 | 69 |
859bce5d0bf17d2e9e437775716e886e5c77cf78 | 466 | py | Python | pydebug.py | innovationgarage/pydebug | eb087e0d18d825f6ea8c966a287d287d31969512 | [
"MIT"
] | null | null | null | pydebug.py | innovationgarage/pydebug | eb087e0d18d825f6ea8c966a287d287d31969512 | [
"MIT"
] | null | null | null | pydebug.py | innovationgarage/pydebug | eb087e0d18d825f6ea8c966a287d287d31969512 | [
"MIT"
] | null | null | null | import sys
import threading
import code
import socket
import debugthread
import io
foo="xxx"
s = socket.socket(socket.AF_INET)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('127.0.0.1', 4711))
s.listen(0)
while True:
ss, addr = s.accept()
stdin = io.TextIOWrapper(ss.makefile('rb', 0), encoding='utf8')
stdout = io.TextIOWrapper(ss.makefile('wb', 0), encoding='utf8')
debugthread.shell(stdin, stdout, stdout, locals())
| 23.3 | 68 | 0.716738 | import sys
import threading
import code
import socket
import debugthread
import io
foo="xxx"
s = socket.socket(socket.AF_INET)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('127.0.0.1', 4711))
s.listen(0)
while True:
ss, addr = s.accept()
stdin = io.TextIOWrapper(ss.makefile('rb', 0), encoding='utf8')
stdout = io.TextIOWrapper(ss.makefile('wb', 0), encoding='utf8')
debugthread.shell(stdin, stdout, stdout, locals())
| 0 | 0 | 0 |
02189948a2265f78106f09edb1573530dd3b896b | 7,131 | py | Python | tests/unit/language/ast/test_interface_type_definition.py | matt-koevort/tartiflette | 5777866b133d846ce4f8aa03f735fa81832896cd | [
"MIT"
] | 530 | 2019-06-04T11:45:36.000Z | 2022-03-31T09:29:56.000Z | tests/unit/language/ast/test_interface_type_definition.py | matt-koevort/tartiflette | 5777866b133d846ce4f8aa03f735fa81832896cd | [
"MIT"
] | 242 | 2019-06-04T11:53:08.000Z | 2022-03-28T07:06:27.000Z | tests/unit/language/ast/test_interface_type_definition.py | matt-koevort/tartiflette | 5777866b133d846ce4f8aa03f735fa81832896cd | [
"MIT"
] | 36 | 2019-06-21T06:40:27.000Z | 2021-11-04T13:11:16.000Z | import pytest
from tartiflette.language.ast import InterfaceTypeDefinitionNode
@pytest.mark.parametrize(
"interface_type_definition_node,other,expected",
[
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
Ellipsis,
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionNameBis",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescriptionBis",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectivesBis",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFieldsBis",
location="interfaceTypeDefinitionLocation",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocationBis",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
True,
),
],
)
@pytest.mark.parametrize(
"interface_type_definition_node,expected",
[
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
"InterfaceTypeDefinitionNode("
"description='interfaceTypeDefinitionDescription', "
"name='interfaceTypeDefinitionName', "
"directives='interfaceTypeDefinitionDirectives', "
"fields='interfaceTypeDefinitionFields', "
"location='interfaceTypeDefinitionLocation')",
)
],
)
| 39.39779 | 79 | 0.628944 | import pytest
from tartiflette.language.ast import InterfaceTypeDefinitionNode
def test_interfacetypedefinitionnode__init__():
interface_type_definition_node = InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
)
assert interface_type_definition_node.name == "interfaceTypeDefinitionName"
assert (
interface_type_definition_node.description
== "interfaceTypeDefinitionDescription"
)
assert (
interface_type_definition_node.directives
== "interfaceTypeDefinitionDirectives"
)
assert (
interface_type_definition_node.fields
== "interfaceTypeDefinitionFields"
)
assert (
interface_type_definition_node.location
== "interfaceTypeDefinitionLocation"
)
@pytest.mark.parametrize(
"interface_type_definition_node,other,expected",
[
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
Ellipsis,
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionNameBis",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescriptionBis",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectivesBis",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFieldsBis",
location="interfaceTypeDefinitionLocation",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocationBis",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
True,
),
],
)
def test_interfacetypedefinitionnode__eq__(
interface_type_definition_node, other, expected
):
assert (interface_type_definition_node == other) is expected
@pytest.mark.parametrize(
"interface_type_definition_node,expected",
[
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
"InterfaceTypeDefinitionNode("
"description='interfaceTypeDefinitionDescription', "
"name='interfaceTypeDefinitionName', "
"directives='interfaceTypeDefinitionDirectives', "
"fields='interfaceTypeDefinitionFields', "
"location='interfaceTypeDefinitionLocation')",
)
],
)
def test_interfacetypedefinitionnode__repr__(
interface_type_definition_node, expected
):
assert interface_type_definition_node.__repr__() == expected
| 1,169 | 0 | 67 |
2c4d38bcb02b28a838bd5916b8131ef4b36201b8 | 137,768 | py | Python | src/gofra.py | GofraLang/core | 397c02f48a1484713c663d2abbb582641e195078 | [
"MIT"
] | 5 | 2022-02-20T21:00:11.000Z | 2022-02-23T11:01:31.000Z | src/gofra.py | GofraLang/core | 397c02f48a1484713c663d2abbb582641e195078 | [
"MIT"
] | 4 | 2021-11-02T18:32:26.000Z | 2021-12-01T19:36:27.000Z | src/gofra.py | gofra-lang/core | 397c02f48a1484713c663d2abbb582641e195078 | [
"MIT"
] | 1 | 2022-02-23T12:41:31.000Z | 2022-02-23T12:41:31.000Z | """
Main Gofra programming language source code.
"""
__author__ = "Kirill Zhosul @kirillzhosul"
__license__ = "MIT"
from typing import Generator
from os.path import basename
from sys import argv
import gofra
from gofra.core.danger import *
from gofra.core.stack import Stack
# MAJOR WARNING FOR ALL READERS.
# This code is not refactored;
# I am currently working on refactoring it and splitting it into the gofra module,
# so there is a lot of stuff that will be reworked.
# Also, note that the bytecode is not finished, and interpretation will be
# moved into gofra.core.vm, which will run the bytecode on its own
# as the internal interpretation method (for the C++ VM, which may also not be finished yet,
# see https://github.com/gofralang/vm/)
# Lexer.
def lexer_tokenize(lines: List[str], file_parent: str) -> Generator[Token, None, None]:
""" Tokenizes lines into list of the Tokens. """
# Check that there is no changes in token type.
assert len(TokenType) == 6, "Please update implementation after adding new TokenType!"
# Get the basename.
file_parent = basename(file_parent)
# Current line index.
current_line_index = 0
# Get lines count.
lines_count = len(lines)
# Check that there is more than zero lines.
if lines_count == 0:
# If there is no lines.
# Error.
gofra.core.errors.message_verbosed(Stage.LEXER, (file_parent, 1, 1), "Error",
"There is no lines found in the given file "
"are you given empty file?", True)
while current_line_index < lines_count:
# Loop over lines.
# Get line.
current_line = lines[current_line_index]
# Find first non-space char.
current_collumn_index = gofra.core.lexer.find_collumn(current_line, 0, lambda char: not char.isspace())
# Get current line length.
current_line_length = len(current_line)
# Column index at which the current token ends (updated per token).
current_collumn_end_index = 0
while current_collumn_index < current_line_length:
# Iterate over line.
# Get the location.
current_location = (file_parent, current_line_index + 1, current_collumn_index + 1)
if current_line[current_collumn_index] == EXTRA_CHAR:
# If we got a character-literal quote.
# Index of the column end
# (trying to find the closing quote).
current_collumn_end_index = gofra.core.lexer.find_collumn(current_line, current_collumn_index + 1,
lambda char: char == EXTRA_CHAR)
if current_collumn_end_index >= len(current_line) or \
current_line[current_collumn_end_index] != EXTRA_CHAR:
# If the closing quote is missing or we ran past the end of the line.
# Error.
gofra.core.errors.message_verbosed(Stage.LEXER, current_location, "Error",
"There is unclosed character literal. "
f"Do you forgot to place `{EXTRA_CHAR}`?", True)
# Get current token text.
current_token_text = current_line[current_collumn_index + 1: current_collumn_end_index]
# Get current char value.
current_char_value = gofra.core.lexer.unescape(current_token_text).encode("UTF-8")
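# Illustrative note (an assumption: gofra.core.lexer.unescape resolves
# standard escape sequences): the literal text \n unescapes to a newline,
# which UTF-8-encodes to the single byte 0x0A, so the token value below
# becomes 10. Multi-byte results (e.g. "é" is 2 bytes in UTF-8) fail the
# length check below and are rejected.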
if len(current_char_value) != 1:
# If the literal encodes zero bytes or more than one byte.
# Error.
gofra.core.errors.message_verbosed(Stage.LEXER, current_location, "Error",
"Unexpected number of characters in the character literal. "
"Only one character is allowed in a character literal", True)
# Return character token.
yield Token(
type=TokenType.CHARACTER,
text=current_token_text,
location=current_location,
value=current_char_value[0]
)
# Find first non-space char.
current_collumn_index = gofra.core.lexer.find_collumn(current_line, current_collumn_end_index + 1,
lambda char: not char.isspace())
elif current_line[current_collumn_index] == EXTRA_STRING:
# If this is string.
# String buffer for strings.
current_string_buffer = ""
while current_line_index < len(lines):
# While we have not reached the end of the lines.
# Get string start.
string_start_collumn_index = current_collumn_index
if current_string_buffer == "":
# If we have not started filling the string buffer yet.
# Increment by one for quote.
string_start_collumn_index += len(EXTRA_STRING)
else:
# If we started.
# Just grab line.
current_line = lines[current_line_index]
# Get string end.
string_end_collumn_index = gofra.core.lexer.find_string_end(current_line, string_start_collumn_index)
if string_end_collumn_index >= len(current_line) or \
current_line[string_end_collumn_index] != EXTRA_STRING:
# If we hit the end of the current line without finding the closing quote.
# Add current line.
current_string_buffer += current_line[string_start_collumn_index:]
# Reset and move next line.
current_line_index += 1
current_collumn_index = 0
else:
# If the closing quote is on the current line.
# Add final buffer.
current_string_buffer += current_line[string_start_collumn_index:string_end_collumn_index]
current_collumn_end_index = string_end_collumn_index
# End lexing string.
break
if current_line_index >= len(lines):
# If we exceed current lines length.
# Error.
gofra.core.errors.message_verbosed(Stage.LEXER, current_location, "Error",
"There is unclosed string literal. "
f"Do you forgot to place `{EXTRA_STRING}`?", True)
# Sanity check: we must be positioned on the closing quote.
assert current_line[current_collumn_index] == EXTRA_STRING, "Got non string closing character!"
# Increase end index.
current_collumn_end_index += 1
# Get current token text.
current_token_text = current_string_buffer
# Return string token.
yield Token(
type=TokenType.STRING,
text=current_token_text,
location=current_location,
value=gofra.core.lexer.unescape(current_token_text)
)
# Find first non-space char.
current_collumn_index = gofra.core.lexer.find_collumn(current_line, current_collumn_end_index,
lambda char: not char.isspace())
else:
# Index of the column end.
current_collumn_end_index = gofra.core.lexer.find_collumn(current_line, current_collumn_index,
lambda char: char.isspace())
# Get current token text.
current_token_text = current_line[current_collumn_index: current_collumn_end_index]
try:
# Try convert token integer.
current_token_integer = int(current_token_text)
except ValueError:
# If there is invalid value for integer.
if current_token_text in KEYWORD_NAMES_TO_TYPE:
# If this is keyword.
# Return keyword token.
yield Token(
type=TokenType.KEYWORD,
text=current_token_text,
location=current_location,
value=KEYWORD_NAMES_TO_TYPE[current_token_text]
)
else:
# Not keyword.
# If this is comment - break.
# TODO: tokens like 0//0 (comment marker not at the start of a word) are not lexed as comments as they should be.
if current_token_text.startswith(EXTRA_COMMENT):
break
# Return word token.
yield Token(
type=TokenType.WORD,
text=current_token_text,
location=current_location,
value=current_token_text
)
else:
# If all ok.
# Return token.
yield Token(
type=TokenType.INTEGER,
text=current_token_text,
location=current_location,
value=current_token_integer
)
# Find first non-space char.
current_collumn_index = gofra.core.lexer.find_collumn(current_line, current_collumn_end_index,
lambda char: not char.isspace())
# Increment current line.
current_line_index += 1
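# A minimal usage sketch for the lexer (illustrative only; "example.gof" is a
# hypothetical file name, and the token summary relies on the TokenType values
# imported from gofra.core.danger):
#
#   tokens = list(lexer_tokenize(['2 2 + "sum:"'], "example.gof"))
#   # -> INTEGER(2), INTEGER(2), WORD("+"), STRING("sum:")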
# Parser.
def parser_parse(tokens: List[Token], context: ParserContext, path: str):
""" Parses token from lexer* (lexer_tokenize()) """
# Check that there is no changes in operator type.
assert len(OperatorType) == 10, "Please update implementation after adding new OperatorType!"
# Check that there is no changes in keyword type.
assert len(Keyword) == 8, "Please update implementation after adding new Keyword!"
# Check that there is no changes in token type.
assert len(TokenType) == 6, "Please update implementation after adding new TokenType!"
# Reverse tokens.
reversed_tokens: List[Token] = list(reversed(tokens))
# Definitions.
definitions: Dict[str, Definition] = dict()
memories: Dict[str, Memory] = dict()
variables: Dict[str, Variable] = dict()
variables_offset = 0
memories_offset = 0
if len(reversed_tokens) == 0:
gofra.core.errors.message_verbosed(Stage.PARSER, (basename(path), 1, 1), "Error",
"There is no tokens found, are you given empty file?", True)
while len(reversed_tokens) > 0:
# While there is any token.
# Get current token.
current_token: Token = reversed_tokens.pop()
if current_token.type == TokenType.WORD:
assert isinstance(current_token.value, str), "Type error, lexer level error?"
if current_token.value in INTRINSIC_NAMES_TO_TYPE:
context.operators.append(Operator(
type=OperatorType.INTRINSIC,
token=current_token,
operand=INTRINSIC_NAMES_TO_TYPE[current_token.value]
))
context.operator_index += 1
continue
if current_token.text in definitions:
# Expand definition tokens.
reversed_tokens += reversed(definitions[current_token.text].tokens)
continue
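# Illustrative note: definition expansion is plain token substitution. If some
# earlier construct (not shown in this excerpt) bound PI to the single token
# INTEGER(3), the word PI re-queues that token list here, so `PI print`
# parses exactly like `3 print` (PI is a hypothetical name).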
if current_token.text in memories:
memory = memories[current_token.text]
context.operators.append(Operator(
type=OperatorType.PUSH_INTEGER,
token=current_token,
operand=memory.ptr_offset
))
context.operator_index += 1
continue
if current_token.text in variables:
variable = variables[current_token.text]
context.operators.append(Operator(
type=OperatorType.PUSH_INTEGER,
token=current_token,
operand=variable.ptr_offset
))
context.operator_index += 1
continue
if current_token.text.startswith(EXTRA_DIRECTIVE):
directive = current_token.text[len(EXTRA_DIRECTIVE):]
if directive == "LINTER_SKIP":
if context.directive_linter_skip:
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
f"Directive `{EXTRA_DIRECTIVE}{directive}` defined twice!", True)
context.directive_linter_skip = True
elif directive == "PYTHON_COMMENTS_SKIP":
if context.directive_python_comments_skip:
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
f"Directive `{EXTRA_DIRECTIVE}{directive}` defined twice!",
True)
context.directive_python_comments_skip = True
else:
if directive.startswith("MEM_BUF_BYTE_SIZE="):
# If this is starts with memory buffer byte size definition name.
# Get directive value from all directive text.
directive_value = directive[len("MEM_BUF_BYTE_SIZE="):]
# Get new memory size
try:
new_memory_bytearray_size = int(directive_value)
except ValueError:
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
f"Directive `{EXTRA_DIRECTIVE}{directive}` "
f"passed invalid size `{directive_value}`!", True)
else:
# Change size of the bytearray.
context.memory_bytearray_size = new_memory_bytearray_size
else:
# Message.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
f"Unknown directive `{EXTRA_DIRECTIVE}{directive}`", True)
continue
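# Illustrative note on the directive handling above: assuming EXTRA_DIRECTIVE
# is `#` (its real value lives in gofra.core.danger), a word like
# `#MEM_BUF_BYTE_SIZE=1024` resizes the interpreter memory buffer to 1024
# bytes, and `#LINTER_SKIP` sets the flag telling later stages to skip
# linting this file.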
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
f"Unknown WORD `{current_token.text}`, "
f"are you misspelled something?", True)
elif current_token.type == TokenType.INTEGER:
# If we got an integer.
# Type check.
assert isinstance(current_token.value, int), "Type error, lexer level error?"
# Add operator to the context.
context.operators.append(Operator(
type=OperatorType.PUSH_INTEGER,
token=current_token,
operand=current_token.value
))
# Increment operator index.
context.operator_index += 1
elif current_token.type == TokenType.STRING:
# If we got a string.
# Type check.
assert isinstance(current_token.value, str), "Type error, lexer level error?"
# Add operator to the context.
context.operators.append(Operator(
type=OperatorType.PUSH_STRING,
token=current_token,
operand=current_token.value
))
# Increment operator index.
context.operator_index += 1
elif current_token.type == TokenType.CHARACTER:
# If we got a character.
# Type check.
assert isinstance(current_token.value, int), "Type error, lexer level error?"
# Add operator to the context.
context.operators.append(Operator(
type=OperatorType.PUSH_INTEGER,
token=current_token,
operand=current_token.value
))
# Increment operator index.
context.operator_index += 1
elif current_token.type == TokenType.KEYWORD:
# If we got a keyword.
if current_token.value == Keyword.IF:
# This is IF keyword.
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.IF,
token=current_token
))
# Push current operator index to the context memory stack.
context.memory_stack.append(context.operator_index)
# Increment operator index.
context.operator_index += 1
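# Illustrative note: the IF operand is intentionally left unset here and is
# back-patched once the matching ELSE/END pops this index off memory_stack.
# For `if ... else ... end` the final wiring (per the handlers below) is:
#   IF.operand   -> index of ELSE + 1  (jump into the false branch)
#   ELSE.operand -> index of END       (skip the false branch)
#   END.operand  -> index of END + 1   (fall through)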
elif current_token.value == Keyword.WHILE:
# This is WHILE keyword.
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.WHILE,
token=current_token
))
# Push current operator index to the context memory stack.
context.memory_stack.append(context.operator_index)
# Increment operator index.
context.operator_index += 1
elif current_token.value == Keyword.DO:
# This is `DO` keyword.
if len(context.memory_stack) == 0:
# If there is nothing on the memory stack.
# Error.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`do` should used after the `while` block!", True)
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.DO,
token=current_token
))
# Get `WHILE` operator from the memory stack.
block_operator_index = context.memory_stack.pop()
block_operator = context.operators[block_operator_index]
if block_operator.type != OperatorType.WHILE:
# If this is not while.
# Error.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`do` should used after the `while` block!", True)
                # Say that we cross-reference the WHILE block.
context.operators[context.operator_index].operand = block_operator_index
# Push current operator index to the context memory stack.
context.memory_stack.append(context.operator_index)
# Increment operator index.
context.operator_index += 1
elif current_token.value == Keyword.ELSE:
# If this is else keyword.
if len(context.memory_stack) == 0:
# If there is nothing on the memory stack.
# Error.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`else` should used after the `if` block!", True)
# Get `IF` operator from the memory stack.
block_operator_index = context.memory_stack.pop()
block_operator = context.operators[block_operator_index]
if block_operator.type == OperatorType.IF:
# If we use else after the IF.
                    # Say that the previous IF should jump to the operator after this one.
context.operators[block_operator_index].operand = context.operator_index + 1
# Push current operator index to the stack.
context.memory_stack.append(context.operator_index)
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.ELSE,
token=current_token
))
# Increment operator index.
context.operator_index += 1
else:
# If not `IF`.
# Get error location.
error_location = block_operator.token.location
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, error_location, "Error",
"`else` can only used after `if` block!", True)
            elif current_token.value == Keyword.END:
                # If this is end keyword.
                if len(context.memory_stack) == 0:
                    # If there is nothing on the memory stack.
                    # Error.
                    gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                       "`end` should be used after an `if`, `else` or `do` block!",
                                                       True)
                # Get block operator from the stack.
                block_operator_index = context.memory_stack.pop()
block_operator = context.operators[block_operator_index]
if block_operator.type == OperatorType.IF:
# If this is IF block.
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.END,
token=current_token
))
# Say that start IF block refers to this END block.
context.operators[block_operator_index].operand = context.operator_index
# Say that this END block refers to next operator index.
context.operators[context.operator_index].operand = context.operator_index + 1
elif block_operator.type == OperatorType.ELSE:
# If this is ELSE block.
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.END,
token=current_token
))
# Say that owner block (If/Else) should jump to us.
context.operators[block_operator_index].operand = context.operator_index
# Say that we should jump to the next position.
context.operators[context.operator_index].operand = context.operator_index + 1
elif block_operator.type == OperatorType.DO:
# If this is DO block.
# Type check.
assert block_operator.operand is not None, "DO operator has unset operand! Parser level error?"
assert isinstance(block_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.END,
token=current_token
))
                    # Say that this END jumps back to the WHILE block.
context.operators[context.operator_index].operand = block_operator.operand
                    # Store the loop exit target (one past END) on the WHILE operator.
context.operators[block_operator.operand].operand = context.operator_index + 1
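                    # Resulting wiring for `while <cond> do <body> end` (indices illustrative):
                    # DO gets operand = WHILE index, END gets operand = WHILE index (jump back),
                    # and WHILE gets operand = END index + 1, which DO uses to exit on false.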
else:
                    # `end` was used not after an `if`, `else` or `do` block.
# Get error location.
error_location = block_operator.token.location
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, error_location, "Error",
"`end` can only close `if`, `else` or `do` block!", True)
# Increment operator index.
context.operator_index += 1
elif current_token.value == Keyword.DEFINE:
# This is DEFINE keyword.
if len(reversed_tokens) == 0:
# No name for definition is given.
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`define` should have name after the keyword, "
"do you has unfinished definition?", True)
# Get name for definition.
definition_name = reversed_tokens.pop()
if definition_name.type != TokenType.WORD:
# If name is not word.
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, definition_name.location, "Error",
"`define` name, should be of type WORD, sorry, but you can`t use something that you give as name for the definition!", True)
if definition_name.text in definitions:
# If already defined.
# Error messages.
gofra.core.errors.message_verbosed(Stage.PARSER, definition_name.location, "Error",
"Definition with name {} was already defined!", False)
gofra.core.errors.message_verbosed(Stage.PARSER, definitions[definition_name.text].location, "Error",
"Original definition was here...", True)
if definition_name.text in INTRINSIC_NAMES_TO_TYPE or definition_name.text in KEYWORD_NAMES_TO_TYPE:
# If default item.
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, definition_name.location, "Error",
"Can`t define definition with language defined name!", True)
# Create blank new definition.
definition = Definition(current_token.location, [])
# Add definition.
definitions[definition_name.text] = definition
                # How many `end`s we still require.
required_end_count = 0
while len(reversed_tokens) > 0:
                    # While there are still tokens.
# Get new token.
current_token = reversed_tokens.pop()
if current_token.type == TokenType.KEYWORD:
# If got keyword.
if current_token.text in KEYWORD_NAMES_TO_TYPE:
# If this is correct keyword.
if current_token.text == KEYWORD_TYPES_TO_NAME[Keyword.END]:
# If this is end.
if required_end_count <= 0:
                                # If we require no more `end`s.
# Stop definition.
break
# Decrease required end counter.
required_end_count -= 1
if KEYWORD_NAMES_TO_TYPE[current_token.text] in \
(Keyword.IF, Keyword.DEFINE, Keyword.DO):
                                # If this keyword requires a closing `end`.
# Increase required end count.
required_end_count += 1
if KEYWORD_NAMES_TO_TYPE[current_token.text] == Keyword.ELSE:
# If got else.
                                # Just pass, as `else` does not require an `end`.
pass
else:
# Invalid keyword.
assert False, "Got invalid keyword!"
# Append token.
definition.tokens.append(current_token)
if required_end_count != 0:
                    # If there are still required `end`s.
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
f"There is {required_end_count} unclosed blocks, "
"that requires cloing `end` keyword inside `define` definition. ",
True)
if not (current_token.type == TokenType.KEYWORD and
current_token.text == KEYWORD_TYPES_TO_NAME[Keyword.END]):
                    # If the definition did not finish with `end`.
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`define` should have `end` at the end of definition, "
"but it was not founded!", True)
elif current_token.value == Keyword.MEMORY:
if len(reversed_tokens) == 0:
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`memory` should have name after the keyword, "
"do you has unfinished memory definition?", True)
name_token = reversed_tokens.pop()
if name_token.type != TokenType.WORD:
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
"`memory` name, should be of type WORD, sorry, but "
"you can`t use something that you give as name "
"for the memory!", True)
if name_token.text in memories or name_token.text in definitions:
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
f"Definition or memory with name {name_token.text} "
f"was already defined!", False)
if name_token.text in definitions:
gofra.core.errors.message_verbosed(Stage.PARSER, definitions[name_token.text].location,
"Error", "Original definition was here...", True)
# TODO: Memory location report.
if name_token.text in INTRINSIC_NAMES_TO_TYPE or name_token.text in KEYWORD_NAMES_TO_TYPE:
# If default item.
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
"Can`t define memories with language defined name!", True)
if len(reversed_tokens) <= 0:
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
"`memory` requires size for memory definition, "
"which was not given!", True)
memory_size_token = reversed_tokens.pop()
if memory_size_token.type != TokenType.INTEGER:
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
"`var` size, should be of type INTEGER, sorry, but "
"you can`t use something that you give as size "
"for the memory!", True)
# TODO: Proper evaluation.
# Create blank new memory.
memory_name = name_token.text
memories[memory_name] = Memory(memory_name, memory_size_token.value, memories_offset)
memories_offset += memory_size_token.value
                if len(reversed_tokens) > 0:
end_token = reversed_tokens.pop()
if end_token.type == TokenType.KEYWORD and \
end_token.text == KEYWORD_TYPES_TO_NAME[Keyword.END]:
continue
                # If the memory definition did not finish with `end`.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`memory` should have `end` at the end of memory definition, "
"but it was not founded!", True)
elif current_token.value == Keyword.VARIABLE:
if len(reversed_tokens) == 0:
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`var` should have name after the keyword, "
"do you has unfinished variable definition?", True)
name_token = reversed_tokens.pop()
if name_token.type != TokenType.WORD:
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
"`var` name, should be of type WORD, sorry, but "
"you can`t use something that you give as name "
"for the variable!", True)
if name_token.text in variables or name_token.text in definitions or name_token.text in memories:
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
f"Definition or variable with name {name_token.text} "
f"was already defined!", False)
if name_token.text in definitions:
gofra.core.errors.message_verbosed(Stage.PARSER, definitions[name_token.text].location,
"Error", "Original definition was here...", True)
# TODO: Memory / variable location report.
if name_token.text in INTRINSIC_NAMES_TO_TYPE or name_token.text in KEYWORD_NAMES_TO_TYPE:
# If default item.
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
"Can`t define variable with language defined name!", True)
                # Create new variable.
variable_name = name_token.text
variables[variable_name] = Variable(variable_name, variables_offset)
variables_offset += VARIABLE_SIZE
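                # Illustrative variable definition: `var counter` reserves
                # VARIABLE_SIZE bytes for `counter` at the current variables_offset.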
else:
# If unknown keyword type.
assert False, "Unknown keyword type! (How?)"
else:
# If unknown operator type.
assert False, "Unknown operator type! (How?)"
if len(context.memory_stack) > 0:
# If there is any in the stack.
# Get error operator.
error_operator = context.operators[context.memory_stack.pop()]
# Get error location.
error_location = error_operator.token.location
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, error_location, "Error",
f"Unclosed block \"{error_operator.token.text}\"!", True)
if context.directive_linter_skip:
# If skip linter.
# Warning message.
gofra.core.errors.message_verbosed(Stage.PARSER, (basename(path), 1, 1), "Warning",
"#LINTER_SKIP DIRECTIVE! THIS IS UNSAFE, PLEASE DISABLE IT!")
# Interpreter.
def interpretator_run(source: Source,
bytearray_size: int = MEMORY_BYTEARRAY_SIZE):
""" Interpretates the source. """
assert len(OperatorType) == 10, "Please update implementation after adding new OperatorType!"
assert len(Intrinsic) == 30, "Please update implementation after adding new Intrinsic!"
# Create empty stack.
memory_execution_stack = Stack()
# String pointers.
memory_string_pointers: Dict[OPERATOR_ADDRESS, TYPE_POINTER] = dict()
memory_string_size = bytearray_size
    memory_string_size_pointer = 0
# Allocate sized bytearray.
memory_bytearray = bytearray(bytearray_size + memory_string_size + MEMORY_MEMORIES_SIZE + MEMORY_VARIABLES_SIZE)
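    # Layout of the single allocation above (offsets illustrative):
    #   [0, bytearray_size)                  -- general memory buffer
    #   [bytearray_size, 2 * bytearray_size) -- string buffer (memory_string_size bytes)
    #   then MEMORY_MEMORIES_SIZE bytes for `memory` blocks,
    #   and MEMORY_VARIABLES_SIZE bytes for variables.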
# Get source operators count.
operators_count = len(source.operators)
current_operator_index = 0
if operators_count == 0:
gofra.core.errors.message_verbosed(Stage.RUNNER, ("__RUNNER__", 1, 1), "Error",
"There is no operators found in given file after parsing, "
"are you given empty file or file without resulting operators?", True)
while current_operator_index < operators_count:
        # While we have not run out of the source operators list.
# Get current operator from the source.
current_operator: Operator = source.operators[current_operator_index]
try:
# Try / Catch to get unexpected Python errors.
if current_operator.type == OperatorType.PUSH_INTEGER:
# Push integer operator.
# Type check.
assert isinstance(current_operator.operand, int), "Type error, parser level error?"
# Push operand to the stack.
memory_execution_stack.push(current_operator.operand)
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.PUSH_STRING:
# Push string operator.
# Type check.
assert isinstance(current_operator.operand, str), "Type error, parser level error?"
# Get string data.
string_value = current_operator.operand.encode("UTF-8")
string_length = len(string_value)
if current_operator_index not in memory_string_pointers:
                    # If the string is not yet in the allocated string pointers.
# Get pointer, and push in to the pointers.
                    string_pointer: TYPE_POINTER = memory_string_size + 1 + memory_string_size_pointer
memory_string_pointers[current_operator_index] = string_pointer
# Write string right into the bytearray memory.
memory_bytearray[string_pointer: string_pointer + string_length] = string_value
# Increase next pointer by current string length.
                    memory_string_size_pointer += string_length
# Check that there is no overflow.
if string_length > memory_string_size:
# If overflowed.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
"Trying to push string, when there is memory string buffer overflow!"
" Try use memory size directive, to increase size!", True)
# Push found string pointer to the stack.
found_string_pointer = memory_string_pointers[current_operator_index]
memory_execution_stack.push(found_string_pointer)
# Push string length to the stack.
memory_execution_stack.push(string_length)
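                # Stack effect of PUSH_STRING: [...] -> [..., pointer, length],
                # so the string length ends up on top of the stack.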
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.INTRINSIC:
# Intrinsic operator.
if current_operator.operand == Intrinsic.PLUS:
                    # Intrinsic plus operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push sum to the stack.
memory_execution_stack.push(operand_b + operand_a)
elif current_operator.operand == Intrinsic.DIVIDE:
                    # Intrinsic divide operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
                    # Push the quotient to the stack.
memory_execution_stack.push(operand_b // operand_a)
elif current_operator.operand == Intrinsic.MODULUS:
                    # Intrinsic modulus operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
                    # Push the modulus to the stack.
memory_execution_stack.push(int(operand_b % operand_a))
elif current_operator.operand == Intrinsic.MINUS:
                    # Intrinsic minus operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push difference to the stack.
memory_execution_stack.push(operand_b - operand_a)
elif current_operator.operand == Intrinsic.MULTIPLY:
                    # Intrinsic multiply operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
                    # Push the product to the stack.
memory_execution_stack.push(operand_b * operand_a)
elif current_operator.operand == Intrinsic.EQUAL:
                    # Intrinsic equal operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push equal to the stack.
memory_execution_stack.push(int(operand_b == operand_a))
elif current_operator.operand == Intrinsic.NOT_EQUAL:
                    # Intrinsic not equal operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push not equal to the stack.
memory_execution_stack.push(int(operand_b != operand_a))
elif current_operator.operand == Intrinsic.LESS_THAN:
                    # Intrinsic less than operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push less than to the stack.
memory_execution_stack.push(int(operand_b < operand_a))
elif current_operator.operand == Intrinsic.GREATER_THAN:
                    # Intrinsic greater than operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push greater than to the stack.
memory_execution_stack.push(int(operand_b > operand_a))
elif current_operator.operand == Intrinsic.LESS_EQUAL_THAN:
                    # Intrinsic less equal than operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push less equal than to the stack.
memory_execution_stack.push(int(operand_b <= operand_a))
elif current_operator.operand == Intrinsic.GREATER_EQUAL_THAN:
                    # Intrinsic greater equal than operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push greater equal than to the stack.
memory_execution_stack.push(int(operand_b >= operand_a))
elif current_operator.operand == Intrinsic.SWAP:
                    # Intrinsic swap operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push swapped to the stack.
memory_execution_stack.push(operand_a)
memory_execution_stack.push(operand_b)
elif current_operator.operand == Intrinsic.COPY:
                    # Intrinsic copy operator.
# Get operand.
operand_a = memory_execution_stack.pop()
# Push copy to the stack.
memory_execution_stack.push(operand_a)
memory_execution_stack.push(operand_a)
elif current_operator.operand == Intrinsic.COPY2:
                    # Intrinsic copy2 operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push copy to the stack.
memory_execution_stack.push(operand_b)
memory_execution_stack.push(operand_a)
memory_execution_stack.push(operand_b)
memory_execution_stack.push(operand_a)
elif current_operator.operand == Intrinsic.COPY_OVER:
                    # Intrinsic copy over operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push copy to the stack.
memory_execution_stack.push(operand_b)
memory_execution_stack.push(operand_a)
memory_execution_stack.push(operand_b)
elif current_operator.operand == Intrinsic.DECREMENT:
                    # Intrinsic decrement operator.
# Get operand.
operand_a = memory_execution_stack.pop()
# Push decrement to the stack.
memory_execution_stack.push(operand_a - 1)
elif current_operator.operand == Intrinsic.INCREMENT:
                    # Intrinsic increment operator.
# Get operand.
operand_a = memory_execution_stack.pop()
# Push increment to the stack.
memory_execution_stack.push(operand_a + 1)
elif current_operator.operand == Intrinsic.FREE:
                    # Intrinsic free operator.
# Pop and left.
memory_execution_stack.pop()
elif current_operator.operand == Intrinsic.SHOW:
                    # Intrinsic show operator.
# Get operand.
operand_a = memory_execution_stack.pop()
# Show operand.
print(operand_a)
elif current_operator.operand == Intrinsic.MEMORY_WRITE:
                    # Intrinsic memory write operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
if operand_b > len(memory_bytearray):
# If this is going to be memory overflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to write at memory address {operand_b} "
f"that overflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferOverflow)", True)
elif operand_b < 0:
                        # If this is going to be a memory underflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to write at memory address {operand_b} "
f"that underflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferUnderflow)", True)
# Write memory.
try:
memory_bytearray[operand_b] = operand_a
except IndexError:
# Memory error.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer (over|under)flow "
f"(Write to pointer {operand_b} when there is memory buffer "
f"with size {len(memory_bytearray)} bytes)!", True)
except ValueError:
# If this is 8bit (1byte) range (number) overflow.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer cell can only contain 1 byte (8 bit) "
f"that must be in range (0, 256),\nbut you passed number "
f"{operand_a} which is not fits in the 1 byte cell! (ByteOverflow)",
True)
elif current_operator.operand == Intrinsic.MEMORY_WRITE4BYTES:
                    # Intrinsic memory write 4 bytes operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Convert value to 4 bytes.
try:
operand_a = operand_a.to_bytes(length=4, byteorder="little", signed=(operand_a < 0))
except OverflowError:
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer cell can only contain 4 byte (32 bit) "
f"that must be in range (0, 4294967295),\nbut you passed number "
f"{operand_a} which is not fits in the 4 byte cell! (ByteOverflow)",
True)
if operand_b + 4 - 1 > len(memory_bytearray):
# If this is going to be memory overflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to write 4 bytes to memory address from {operand_b} to "
f"{operand_b + 4 - 1} "
f"that overflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferOverflow)", True)
elif operand_b < 0:
                        # If this is going to be a memory underflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to write at memory address "
f"from {operand_b} to {operand_b + 2} "
f"that underflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferUnderflow)", True)
# Write memory.
try:
memory_bytearray[operand_b:operand_b + 4] = operand_a
except IndexError:
                        # Memory error.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer (over|under)flow "
f"(Write to pointer from "
f"{operand_b} to {operand_b + 4 - 1} "
f"when there is memory buffer with size "
f"{len(memory_bytearray)} bytes)!", True)
except ValueError:
# If this is 32bit (4byte) range (number) overflow.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer cell can only contain 4 byte (32 bit) "
f"that must be in range (0, 4294967295),\nbut you passed number "
f"{operand_a} which is not fits in the 4 byte cell! (ByteOverflow)",
True)
elif current_operator.operand == Intrinsic.MEMORY_READ4BYTES:
                    # Intrinsic memory read 4 bytes operator.
# Get operand.
operand_a = memory_execution_stack.pop()
if operand_a + 4 - 1 > len(memory_bytearray):
# If this is going to be memory overflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to read from memory address "
f"{operand_a} to {operand_a + 4 - 1} "
f"that overflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferOverflow)", True)
elif operand_a < 0:
                        # If this is going to be a memory underflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to read from memory address "
f"{operand_a} to {operand_a + 4 - 1}"
f"that underflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferUnderflow)", True)
# Read memory at the pointer.
try:
memory_bytes = int.from_bytes(memory_bytearray[operand_a:operand_a + 4], byteorder="little")
except IndexError:
                        # Memory error.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer (over|under)flow "
f"(Read from pointer {operand_a} to {operand_a + 4 - 1} "
f"when there is memory buffer with size "
f"{len(memory_bytearray)} bytes)!", True)
else:
# Push memory to the stack.
memory_execution_stack.push(memory_bytes)
elif current_operator.operand == Intrinsic.MEMORY_READ:
                    # Intrinsic memory read operator.
# Get operand.
operand_a = memory_execution_stack.pop()
if operand_a > len(memory_bytearray):
# If this is going to be memory overflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to read from memory address {operand_a} "
f"that overflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferOverflow)", True)
elif operand_a < 0:
                        # If this is going to be a memory underflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to read from memory address {operand_a} "
f"that underflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferUnderflow)", True)
# Read memory at the pointer.
try:
memory_byte = memory_bytearray[operand_a]
except IndexError:
                        # Memory error.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer (over|under)flow "
f"(Read from pointer {operand_a} when there is memory buffer "
f"with size {len(memory_bytearray)} bytes)!", True)
else:
# Push memory to the stack.
memory_execution_stack.push(memory_byte)
elif current_operator.operand == Intrinsic.MEMORY_SHOW_CHARACTERS:
                    # Intrinsic memory show as chars operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# String to show.
memory_string: bytes = b""
if operand_b + operand_a > len(memory_bytearray):
# If this is going to be memory overflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to read from memory address "
f"from {operand_b} to {operand_b + operand_a} "
f"that overflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferOverflow)", True)
elif operand_a < 0:
                        # If this is going to be a memory underflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to read from memory address"
f"from {operand_b} to {operand_b + operand_a} "
f"that underflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferUnderflow)", True)
# Read memory string.
try:
memory_string = memory_bytearray[operand_b: operand_b + operand_a]
except IndexError:
                        # Memory error.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer (over|under)flow "
f"(Read from {operand_b} to {operand_b + operand_a} "
f"when there is memory "
f"buffer with size {len(memory_bytearray)} bytes)!", True)
# Print decoded memory bytes.
print(memory_string.decode("UTF-8"), end="")
elif current_operator.operand == Intrinsic.MEMORY_POINTER:
                    # Intrinsic memory pointer operator.
# Push pointer to the stack.
memory_execution_stack.push(MEMORY_BYTEARRAY_NULL_POINTER)
elif current_operator.operand == Intrinsic.NULL:
                    # Intrinsic null operator.
                    # Push zero to the stack.
                    memory_execution_stack.push(0)
elif current_operator.operand == Intrinsic.IO_READ_STRING:
# Intrinsic I/O read string operator.
# Get string data.
string_value = input().encode("UTF-8")
string_length = len(string_value)
if current_operator_index not in memory_string_pointers:
                        # If the string is not yet in the allocated string pointers.
# Get pointer, and push in to the pointers.
                        string_pointer: TYPE_POINTER = 1 + memory_string_size_pointer
memory_string_pointers[current_operator_index] = string_pointer
# Write string right into the bytearray memory.
memory_bytearray[string_pointer: string_pointer + string_length] = string_value
# Increase next pointer by current string length.
                        memory_string_size_pointer += string_length
# Check that there is no overflow.
if string_length > memory_string_size:
# If overflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
"Trying to push I/O string, "
"when there is memory string buffer overflow! "
"Try use memory size directive, to increase size!", True)
# Push found string pointer to the stack.
found_string_pointer = memory_string_pointers[current_operator_index]
memory_execution_stack.push(found_string_pointer)
# Push string length to the stack.
memory_execution_stack.push(string_length)
elif current_operator.operand == Intrinsic.IO_READ_INTEGER:
# Intrinsic I/O read integer operator.
# Get integer data.
try:
integer_value = int(input())
except ValueError:
integer_value = -1
# Push integer to the stack.
memory_execution_stack.push(integer_value)
else:
                    # If unknown intrinsic type.
                    assert False, "Unknown intrinsic! (How?)"
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.IF:
# IF operator.
# Get operand.
operand_a = memory_execution_stack.pop()
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
if operand_a == 0:
# If this is false.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), \
"Type error, parser level error?"
# Jump to the operator operand.
                    # As this is IF with a false condition, jump to its operand (the ELSE branch or END).
current_operator_index = current_operator.operand
else:
# If this is true.
# Increment operator index.
                    # This makes the jump into the if branch.
current_operator_index += 1
elif current_operator.type == OperatorType.ELSE:
# ELSE operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the operator operand.
                # As this is an ELSE operator, jump to its operand (the matching END).
current_operator_index = current_operator.operand
elif current_operator.type == OperatorType.DO:
# DO operator.
# Get operand.
operand_a = memory_execution_stack.pop()
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
if operand_a == 0:
# If this is false.
                    # End-of-loop jump target stored on the WHILE operator.
end_jump_operator_index = source.operators[current_operator.operand].operand
# Type check.
assert isinstance(end_jump_operator_index, OPERATOR_ADDRESS), \
"Type error, parser level error?"
# Jump to the operator operand.
                    # As this is DO with a false condition, jump past the END.
current_operator_index = int(end_jump_operator_index)
else:
# If this is true.
# Increment operator index.
                    # This makes the jump into the loop body.
current_operator_index += 1
elif current_operator.type == OperatorType.WHILE:
# WHILE operator.
# Increment operator index.
                # This makes the jump into the loop condition (expression).
current_operator_index += 1
elif current_operator.type == OperatorType.END:
# END operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the operator operand.
                # As this is an END operator, jump to its operand (the next operator, or back to WHILE for loops).
current_operator_index = current_operator.operand
elif current_operator.type == OperatorType.DEFINE:
# DEFINE Operator.
# Error.
assert False, "Got definition operator at runner stage, parser level error?"
elif current_operator.type == OperatorType.MEMORY:
assert False, "Got memory operator at runner stage, parser level error?"
else:
# If unknown operator type.
assert False, "Unknown operator type! (How?)"
except IndexError:
# Should be stack error.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Stack error! This is may caused by popping from empty stack!"
f"Do you used {EXTRA_DIRECTIVE}LINTER_SKIP directive? IndexError, (From: "
f"{current_operator.token.text})", True)
except KeyboardInterrupt:
# If stopped.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
"Interpretation was stopped by keyboard interrupt!", True)
if len(memory_execution_stack) > 0:
# If there is any in the stack.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, ("__runner__", 1, 1), "Warning",
"Stack is not empty after running the interpretation!")
# Linter.
def linter_type_check(source: Source):
""" Linter static type check. """
    # TODO: IF/WHILE analyse fixes.
# Check that there is no new operator type.
assert len(OperatorType) == 10, "Please update implementation after adding new OperatorType!"
# Check that there is no new instrinsic type.
assert len(Intrinsic) == 30, "Please update implementation after adding new Intrinsic!"
# Create empty linter stack.
memory_linter_stack = Stack()
# Get source operators count.
operators_count = len(source.operators)
# Current operator index from the source.
current_operator_index = 0
# Check that there is more than zero operators in context.
if operators_count == 0:
# If there is no operators in the final parser context.
# Error.
gofra.core.errors.message_verbosed(Stage.LINTER, ("__linter__", 1, 1), "Error",
"There is no operators found in given file after parsing, "
"are you given empty file or file without resulting operators?", True)
while current_operator_index < operators_count:
        # While we have not run out of the source operators list.
# Get current operator from the source.
current_operator: Operator = source.operators[current_operator_index]
# Grab our operator
if current_operator.type == OperatorType.PUSH_INTEGER:
# PUSH INTEGER operator.
# Type check.
assert isinstance(current_operator.operand, int), "Type error, lexer level error?"
# Push operand type to the stack.
memory_linter_stack.push(int)
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.PUSH_STRING:
# PUSH STRING operator.
# Type check.
assert isinstance(current_operator.operand, str), "Type error, lexer level error?"
            # Push operand types to the stack (pointer below, length on top, matching the interpreter).
            memory_linter_stack.push(TYPE_POINTER)  # String pointer.
            memory_linter_stack.push(int)  # String length.
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.MEMORY:
assert False, "Got memory operator at linter stage, parser level error?"
elif current_operator.type == OperatorType.INTRINSIC:
# Intrinsic operator.
if current_operator.operand == Intrinsic.PLUS:
                # Intrinsic plus operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.DIVIDE:
                # Intrinsic divide operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MODULUS:
                # Intrinsic modulus operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MINUS:
                # Intrinsic minus operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MULTIPLY:
                # Intrinsic multiply operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.EQUAL:
                # Intrinsic equal operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.NOT_EQUAL:
                # Intrinsic not equal operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.LESS_THAN:
                # Intrinsic less than operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.GREATER_THAN:
                # Intrinsic greater than operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.LESS_EQUAL_THAN:
                # Intrinsic less equal than operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.GREATER_EQUAL_THAN:
                # Intrinsic greater equal than operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.SWAP:
                # Intrinsic swap operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Push swapped to the stack.
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_b)
elif current_operator.operand == Intrinsic.COPY:
                # Intrinsic copy operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Push copy to the stack.
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_a)
elif current_operator.operand == Intrinsic.COPY2:
                # Intrinsic copy2 operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Push copy to the stack.
memory_linter_stack.push(operand_b)
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_b)
memory_linter_stack.push(operand_a)
elif current_operator.operand == Intrinsic.COPY_OVER:
                # Intrinsic copy over operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
                # Push copy to the stack (same order as the interpreter: b, a, b).
                memory_linter_stack.push(operand_b)
                memory_linter_stack.push(operand_a)
                memory_linter_stack.push(operand_b)
elif current_operator.operand == Intrinsic.DECREMENT:
                # Intrinsic decrement operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.INCREMENT:
                # Intrinsic increment operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.FREE:
                # Intrinsic free operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Free operand.
memory_linter_stack.pop()
elif current_operator.operand == Intrinsic.SHOW:
                # Intrinsic show operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
elif current_operator.operand == Intrinsic.MEMORY_WRITE:
                # Intrinsic memory write operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
                # Check type (top of the stack is the value, below it the pointer, as at runtime).
                if operand_a != TYPE_INTEGER:
                    cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
                # Check type.
                if operand_b != TYPE_POINTER:
                    cli_argument_type_error_message(current_operator, 2, operand_b, TYPE_POINTER, True)
elif current_operator.operand == Intrinsic.MEMORY_WRITE4BYTES:
                # Intrinsic memory write 4 bytes operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
                # Check type (top of the stack is the value, below it the pointer, as at runtime).
                if operand_a != TYPE_INTEGER:
                    cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
                # Check type.
                if operand_b != TYPE_POINTER:
                    cli_argument_type_error_message(current_operator, 2, operand_b, TYPE_POINTER, True)
elif current_operator.operand == Intrinsic.MEMORY_READ4BYTES:
                # Intrinsic memory read 4 bytes operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_POINTER:
                    cli_argument_type_error_message(current_operator, 1, operand_a, TYPE_POINTER, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MEMORY_READ:
                # Intrinsic memory read operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_POINTER:
                    cli_argument_type_error_message(current_operator, 1, operand_a, TYPE_POINTER, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MEMORY_SHOW_CHARACTERS:
                # Intrinsic memory show bytes as chars operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
                # Check type (top of the stack is the length, below it the pointer, as at runtime).
                if operand_a != TYPE_INTEGER:
                    cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
                if operand_b != TYPE_POINTER:
                    cli_argument_type_error_message(current_operator, 2, operand_b, TYPE_POINTER, True)
elif current_operator.operand == Intrinsic.MEMORY_POINTER:
                # Intrinsic memory pointer operator.
                # Push pointer type to the stack.
                memory_linter_stack.push(TYPE_POINTER)
elif current_operator.operand == Intrinsic.NULL:
                # Intrinsic null operator.
                # Push integer type to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.IO_READ_STRING:
# I/O read string operator.
                # Push operand types to the stack (pointer below, length on top, matching the interpreter).
                memory_linter_stack.push(TYPE_POINTER)  # String pointer.
                memory_linter_stack.push(int)  # String length.
elif current_operator.operand == Intrinsic.IO_READ_INTEGER:
# I/O read integer operator.
# Push operand types to the stack.
memory_linter_stack.push(int) # Integer.
else:
                # If unknown intrinsic type.
                assert False, "Got unexpected / unknown intrinsic type! (How?)"
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.IF:
# IF operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Increment operator index.
            # This makes the jump into the if branch.
current_operator_index += 1
elif current_operator.type == OperatorType.ELSE:
# ELSE operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the operator operand.
            # As this is an ELSE operator, jump to its operand (the matching END).
current_operator_index = current_operator.operand
elif current_operator.type == OperatorType.WHILE:
# WHILE operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.DO:
# DO operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
            # End-of-loop jump target stored on the WHILE operator.
end_jump_operator_index = source.operators[current_operator.operand].operand
# Type check.
assert isinstance(end_jump_operator_index, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the END from WHILE.
current_operator_index = int(end_jump_operator_index)
elif current_operator.type == OperatorType.END:
# END operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the operator operand.
            # As this is an END operator, jump to its operand (the next operator, or back to WHILE for loops).
current_operator_index = current_operator.operand
elif current_operator.type == OperatorType.DEFINE:
assert False, "Got definition operator at linter stage, parser level error?"
else:
assert False, "Got unexpected / unknon operator type! (How?)"
if len(memory_linter_stack) != 0:
# If there is any in the stack.
# Get last operator token location.
location: LOCATION = source.operators[current_operator_index - 1].token.location
# Error message.
gofra.core.errors.message_verbosed(Stage.LINTER, location, "Error",
f"Stack is not empty at the type checking stage! "
f"(There is {len(memory_linter_stack)} elements when should be 0)", True)
# Source.
def load_source_from_file(file_path: str) -> tuple[Source, ParserContext]:
""" Load file, then return ready source and context for it. (Tokenized, Parsed, Linted). """
# Read source lines.
source_file, _ = gofra.core.other.try_open_file(file_path, "r", True, encoding="UTF-8")
source_lines = source_file.readlines()
source_file.close()
parser_context = ParserContext()
# Tokenize.
lexer_tokens = list(lexer_tokenize(source_lines, file_path))
if len(lexer_tokens) == 0:
gofra.core.errors.message_verbosed(Stage.LEXER, (basename(file_path), 1, 1), "Error",
"There is no tokens found in given file, are you given empty file?", True)
# Parse.
parser_parse(lexer_tokens, parser_context, file_path)
# Create source from context.
parser_context_source = Source(parser_context.operators)
# Type check.
assert isinstance(parser_context.directive_linter_skip, bool), "Expected linter skip directive to be boolean."
if not parser_context.directive_linter_skip:
linter_type_check(parser_context_source)
return parser_context_source, parser_context
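# Minimal usage sketch (the file name is illustrative):
#   source, context = load_source_from_file("examples/program.gof")
#   interpretator_run(source, context.memory_bytearray_size)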
# Python.
def python_generate(source: Source, context: ParserContext, path: str):
""" Generates graph from the source. """
# Check that there is no changes in operator type or intrinsic.
assert len(OperatorType) == 10, "Please update implementation for python generation after adding new OperatorType!"
    assert len(Intrinsic) == 30, "Please update implementation for python generation after adding new Intrinsic!"
def __update_indent(value: int):
""" Updates indent by given value. """
# Update level.
nonlocal current_indent_level # type: ignore
current_indent_level += value
# Update indent string.
nonlocal current_indent # type: ignore
current_indent = "\t" * current_indent_level
def __write_footer():
""" Write footer. """
        # Strings are stored in the memory bytearray, so either block requires both.
nonlocal current_bytearray_should_written, current_string_buffer_should_written
if current_bytearray_should_written or current_string_buffer_should_written:
current_string_buffer_should_written = True
current_bytearray_should_written = True
if current_bytearray_should_written:
# If we should write bytearray block.
# Allocate bytearray.
current_lines.insert(current_bytearray_insert_position,
f"memory = bytearray("
f"{context.memory_bytearray_size} + strings_size"
f")")
# Comment allocation.
if not directive_skip_comments:
current_lines.insert(current_bytearray_insert_position,
"# Allocate memory buffer (memory + strings)"
"(As you called memory operators): \n")
# Warn user about using byte operations in python compilation.
gofra.core.errors.message("Warning", "YOU ARE USING MEMORY OPERATIONS, THAT MAY HAVE EXPLICIT BEHAVIOUR! "
"IT IS MAY HARDER TO CATCH ERROR IF YOU RUN COMPILED VERSION "
"(NOT INTERPRETATED)")
if current_string_buffer_should_written:
# If we should write string buffer block.
# Push string function.
current_lines.insert(current_string_buffer_insert_position,
"\ndef stack_push_string(stack_str, op_index): \n"
"\tstr_len = len(stack_str)\n"
"\tif op_index not in strings_pointers:\n"
"\t\tglobal strings_size_pointer\n"
"\t\tptr = strings_size + 1 + strings_size_pointer\n"
"\t\tstrings_pointers[op_index] = ptr\n"
"\t\tmemory[ptr: ptr + str_len] = stack_str\n"
"\t\tstrings_size_pointer += str_len\n"
"\t\tif str_len > strings_size:\n"
"\t\t\tprint(\""
"ERROR! Trying to push string, "
"when there is memory string buffer overflow! "
"Try use memory size directive, to increase size!"
"\")\n"
"\t\t\texit(1)\n"
"\tfsp = strings_pointers[op_index]\n"
"\treturn fsp, str_len\n"
)
# Allocate string buffer.
current_lines.insert(current_string_buffer_insert_position,
f"strings_pointers = dict()\n"
f"strings_size = {context.memory_bytearray_size}\n"
f"strings_size_pointer = 0")
# Comment allocation.
if not directive_skip_comments:
current_lines.insert(current_string_buffer_insert_position,
"# Allocate strings buffer "
"(As you used strings): \n")
def __write_header():
""" Writes header. """
# Write auto-generated mention.
if not directive_skip_comments:
current_lines.append("# This file is auto-generated by Gofra-Language python subcommand! \n\n")
# Write stack initialization element.
if not directive_skip_comments:
current_lines.append("# Allocate stack (As is Gofra is Stack-Based Language): \n")
current_lines.append("stack = []\n")
# Update bytearray insert position.
nonlocal current_bytearray_insert_position
current_bytearray_insert_position = len(current_lines)
# Update string buffer insert position.
nonlocal current_string_buffer_insert_position
current_string_buffer_insert_position = len(current_lines)
# Write file and expression comments.
if not directive_skip_comments:
current_lines.append("\n\n")
current_lines.append(f"# File ({basename(path)}): \n")
current_lines.append(f"# Expressions: \n")
# Update while insert position.
nonlocal current_while_insert_position
current_while_insert_position = len(current_lines)
# Write source header.
if not directive_skip_comments:
current_lines.append("# Source:\n")
def __write_operator_intrinsic(operator: Operator):
""" Writes default operator (non-intrinsic). """
# Check that this is intrinsic operator.
assert operator.type == OperatorType.INTRINSIC, "Non-INTRINSIC operators " \
"should be written using __write_operator()!"
# Type check.
        assert isinstance(current_operator.operand, Intrinsic), "Type error, parser level error?"
nonlocal current_bytearray_should_written # type: ignore
if current_operator.operand == Intrinsic.PLUS:
            # Intrinsic plus operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_b + operand_a)")
elif current_operator.operand == Intrinsic.MINUS:
            # Intrinsic minus operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_b - operand_a)")
elif current_operator.operand == Intrinsic.INCREMENT:
            # Intrinsic increment operator.
# Write operator data.
write("stack.append(stack.pop() + 1)")
elif current_operator.operand == Intrinsic.DECREMENT:
            # Intrinsic decrement operator.
# Write operator data.
write("stack.append(stack.pop() - 1)")
elif current_operator.operand == Intrinsic.MULTIPLY:
            # Intrinsic multiply operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_b * operand_a)")
elif current_operator.operand == Intrinsic.DIVIDE:
            # Intrinsic divide operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_b // operand_a)")
elif current_operator.operand == Intrinsic.MODULUS:
            # Intrinsic modulus operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write(f"stack.append(int(operand_b % operand_a))") # TODO: Check %, remove or left int()
elif current_operator.operand == Intrinsic.EQUAL:
            # Intrinsic equal operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(int(operand_b == operand_a))")
elif current_operator.operand == Intrinsic.GREATER_EQUAL_THAN:
# Intrinsic greater equal than operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(int(operand_b >= operand_a))")
elif current_operator.operand == Intrinsic.GREATER_THAN:
# Intrinsic greater than operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(int(operand_b > operand_a))")
elif current_operator.operand == Intrinsic.LESS_THAN:
# Intrinsic less than operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(int(operand_b < operand_a))")
elif current_operator.operand == Intrinsic.LESS_EQUAL_THAN:
# Intrinsic less equal than operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(int(operand_b <= operand_a))")
elif current_operator.operand == Intrinsic.SWAP:
# Intrinsic swap operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_a)")
write("stack.append(operand_b)")
elif current_operator.operand == Intrinsic.COPY:
# Intrinsic copy operator.
# Write operator data.
write("operand_a = stack.pop()")
write("stack.append(operand_a)")
write("stack.append(operand_a)")
elif current_operator.operand == Intrinsic.SHOW:
# Intrinsic show operator.
# Write operator data.
write("print(stack.pop())")
elif current_operator.operand == Intrinsic.FREE:
# Intrinsic free operator.
# Write operator data.
write("stack.pop()")
elif current_operator.operand == Intrinsic.NOT_EQUAL:
# Intrinsic not equal operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(int(operand_b != operand_a))")
elif current_operator.operand == Intrinsic.COPY2:
# Intrinsic copy2 operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_b)")
write("stack.append(operand_a)")
write("stack.append(operand_b)")
write("stack.append(operand_a)")
elif current_operator.operand == Intrinsic.COPY_OVER:
# Intrinsic copy over operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_b)")
write("stack.append(operand_a)")
write("stack.append(operand_b)")
elif current_operator.operand == Intrinsic.MEMORY_POINTER:
# Intrinsic null pointer operator.
# Write bytearray block.
# TODO: May be removable later, but fine for now.
current_bytearray_should_written = True
# Write operator data.
write(f"stack.append({MEMORY_BYTEARRAY_NULL_POINTER})")
elif current_operator.operand == Intrinsic.NULL:
# Intrinsic null operator.
# Write operator data.
write(f"stack.append(0)")
elif current_operator.operand == Intrinsic.MEMORY_WRITE:
# Intrinsic memory write operator.
# Write bytearray block.
current_bytearray_should_written = True
# Write operator data.
# TODO: More checks at compiled script.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("memory[operand_b] = operand_a")
elif current_operator.operand == Intrinsic.MEMORY_READ:
# Intrinsic memory read operator.
# Write bytearray block.
current_bytearray_should_written = True
# Write operator data.
# TODO: More checks at compiled script.
write("operand_a = stack.pop()")
write("memory_byte = memory[operand_a]")
write("stack.append(memory_byte)")
elif current_operator.operand == Intrinsic.MEMORY_WRITE4BYTES:
# Intrinsic memory write 4 bytes operator.
# Write bytearray block.
current_bytearray_should_written = True
# Write operator data.
# TODO: More checks at compiled script.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("memory_bytes = operand_a.to_bytes(length=4, byteorder=\"little\", signed=(operand_a < 0))")
write("memory[operand_b:operand_b + 4] = memory_bytes")
elif current_operator.operand == Intrinsic.MEMORY_READ4BYTES:
# Intrinsic memory read 4 bytes operator.
# Write bytearray block.
current_bytearray_should_written = True
# Write operator data.
# TODO: More checks at compiled script.
write("operand_a = stack.pop()")
write("memory_bytes = int.from_bytes(memory[operand_a:operand_a + 4], byteorder=\"little\")")
write("stack.append(memory_bytes)")
elif current_operator.operand == Intrinsic.MEMORY_SHOW_CHARACTERS:
# Intrinsic memory show as characters operator.
# Write bytearray block.
current_bytearray_should_written = True
# Write operator data.
write("memory_length = stack.pop()")
write("memory_pointer = stack.pop()")
write("memory_index = 0")
write("while memory_index < memory_length:")
write("\tmemory_byte = memory[memory_pointer + memory_index]")
write("\tprint(chr(memory_byte), end=\"\")")
write("\tmemory_index += 1")
else:
# If unknown intrinsic type.
# Error.
gofra.core.errors.message_verbosed(Stage.COMPILATOR, current_operator.token.location, "Error",
f"Intrinsic `{INTRINSIC_TYPES_TO_NAME[current_operator.operand]}` "
f"is not implemented for python generation!", True)
def __write_operator(operator: Operator):
""" Writes default operator (non-intrinsic). """
# Nonlocalise while data.
nonlocal current_while_block # type: ignore
nonlocal current_while_defined_name # type: ignore
nonlocal current_while_comment # type: ignore
# Grab our operator
if operator.type == OperatorType.INTRINSIC:
# Intrinsic operator.
# Error.
assert False, "Intrinsic operators should be written using __write_operator_intrinsic()!"
elif operator.type == OperatorType.PUSH_INTEGER:
# PUSH INTEGER operator.
# Type check.
assert isinstance(operator.operand, int), "Type error, parser level error?"
# Write operator data.
write(f"stack.append({operator.operand})")
elif operator.type == OperatorType.PUSH_STRING:
# PUSH STRING operator.
# Type check.
assert isinstance(operator.operand, str), "Type error, parser level error?"
# Write operator data.
# TODO: Warn using `current_operator_index`
write(f"s_str, s_len = stack_push_string({operator.operand.encode('UTF-8')}, {current_operator_index})")
write(f"stack.append(s_str)")
write(f"stack.append(s_len)")
# Write strings buffer block.
nonlocal current_string_buffer_should_written
current_string_buffer_should_written = True
# And memory.
nonlocal current_bytearray_should_written
current_bytearray_should_written = True
elif operator.type == OperatorType.IF:
# IF operator.
# Type check.
assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
# Write operator data.
write("if stack.pop() != 0:")
# Increase indent level.
__update_indent(1)
elif operator.type == OperatorType.WHILE:
# WHILE operator.
# Type check.
assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
# Remember name, so we can write "def" at the top of the source in current_while_insert_position.
current_while_defined_name = f"while_expression_ip{current_operator_index}"
# Remember comment for while function block.
current_while_comment = comment
# Write operator data.
current_lines.append(f"{current_indent}{comment[2:]}\n"
f"{current_indent}while {current_while_defined_name}()")
# Mark that we are in a while expression.
current_while_block = True
elif operator.type == OperatorType.DO:
# DO operator.
# Type check.
assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
if current_while_block:
# If we close while.
# Current insert position for lines.
# (As we don't want to reset current_while_insert_position)
while_block_insert_position = current_while_insert_position
# Insert header.
function_comment = "" if directive_skip_comments else f"\t# -- Should be called from WHILE.\n"
current_lines.insert(while_block_insert_position,
f"def {current_while_defined_name}():{current_while_comment}\n" + function_comment)
for while_stack_line in current_while_lines:
# Iterate over while stack lines.
# Increment.
while_block_insert_position += 1
# Insert.
current_lines.insert(while_block_insert_position, f"\t{while_stack_line}")
# Insert return.
return_comment = "" if directive_skip_comments else f" # -- Return for calling from WHILE ."
current_lines.insert(while_block_insert_position + 1,
f"\treturn stack.pop()" + return_comment + "\n")
else:
# If this is not while.
# Error.
gofra.core.errors.message_verbosed(Stage.COMPILATOR, operator.token.location, "Error",
"Got `do`, when there is no `while` block started! "
"(Parsing error?)", True)
# Write operator.
current_lines.append(f":{comment}\n")
# Leave the while block expression.
current_while_block = False
# Reset current while lines list (stack).
current_while_lines.clear()
# Increase indent level.
__update_indent(1)
elif operator.type == OperatorType.ELSE:
# ELSE operator.
# Type check.
assert isinstance(operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Write operator data.
pass_comment = "" if directive_skip_comments else f" # -- Be sure that there is no empty body."
current_lines.append(current_indent + f"pass{pass_comment}\n")
# Decrease indent level.
__update_indent(-1)
# Write operator data.
write("else:")
# Increase indent level.
__update_indent(1)
elif operator.type == OperatorType.END:
# END operator.
# Actually, there is no END in Python.
# Type check.
assert isinstance(operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Write operator data.
pass_comment = "" if directive_skip_comments else f" # -- Be sure that there is no empty body."
current_lines.append(current_indent + f"pass{pass_comment}\n")
# Decrease indent level.
__update_indent(-1)
elif operator.type == OperatorType.DEFINE:
# DEFINE Operator.
# Error.
assert False, "Got definition operator at runner stage, parser level error?"
else:
# If unknown operator type.
assert False, f"Got unexpected / unknon operator type! (How?)"
def write(text: str):
""" Writes text to file. """
if current_while_block:
# If we are in loop.
# Add text without indent.
current_while_lines.append(text + comment + "\n")
else:
# Write default text.
current_lines.append(current_indent + text + comment + "\n")
# Indentation level.
current_indent_level = 0 # Indent level for calculating.
current_indent = "" # Indent string for writing.
# While.
current_while_block = False # If true, we are in while loop.
current_while_comment = "" # While block comment to place in final expression function.
current_while_defined_name = "" # While defined name for naming expression function.
current_while_lines: List[str] = [] # List of while lines to write in expression function.
current_while_insert_position = 0 # Position to insert while expressions blocks.
# Bytearray.
current_bytearray_insert_position = 0 # Position to insert bytearray block if bytearray_should_written is true.
current_bytearray_should_written = False # If true, will warn about memory usage and write bytearray block.
# TODO: Remove as redundant; the bytearray insert position above is the same.
# Strings.
# Position to insert string buffer allocation block,
# if current_string_buffer_should_written is true.
current_string_buffer_insert_position = 0
current_string_buffer_should_written = False # If true, will write string buffer allocation block.
# Should we skip comments.
directive_skip_comments = context.directive_python_comments_skip
# Get source operators count.
operators_count = len(source.operators)
# Current operator index from the source.
current_operator_index = 0
# Lines.
current_lines: List[str] = []
# Check that there is more than zero operators in context.
if operators_count == 0:
# If there is no operators in the final parser context.
# Error.
gofra.core.errors.message_verbosed(Stage.COMPILATOR, (basename(path), 1, 1), "Error",
"There is no operators found in given file after parsing, "
"are you given empty file or file without resulting operators?", True)
# Open file.
file, _ = gofra.core.other.try_open_file(path + ".py", "w", True)
# Write header.
__write_header()
while current_operator_index < operators_count:
# While we have not run out of source operators.
# Get current operator from the source.
current_operator: Operator = source.operators[current_operator_index]
# Make comment string.
location: LOCATION = current_operator.token.location
location_string: str = f"Line {location[1]}, Row {location[2]}"
comment = "" if directive_skip_comments else f" # Token: {current_operator.token.text} [{location_string}]"
if current_operator.type == OperatorType.INTRINSIC:
# If this is intrinsic.
# Write intrinsic operator.
__write_operator_intrinsic(current_operator)
else:
# If this is other operator.
# Write default operator.
__write_operator(current_operator)
# Increment current index.
current_operator_index += 1
# Write footer.
__write_footer()
if len(current_while_lines) != 0:
# If we have something at the while lines stack.
# Error.
gofra.core.errors.message_verbosed(Stage.COMPILATOR, source.operators[-1].token.location, "Error",
"While lines stack is not empty after running python generation! "
"(Compilation error?)", True)
# Write lines in final file.
for current_stack_line in current_lines:
file.write(current_stack_line)
# Close file.
file.close()
# Bytecode.
def compile_bytecode(source: Source, _, path: str):
""" Compiles operators to bytecode. """
# Check that there is no changes in operator type or intrinsic.
assert len(OperatorType) == 10, \
"Please update implementation for bytecode compilation after adding new OperatorType!"
assert len(Intrinsic) == 28, "Please update implementation for bytecode compilation after adding new Intrinsic!"
def __write_operator_intrinsic(operator: Operator):
""" Writes default operator (non-intrinsic). """
# Check that this is intrinsic operator.
assert operator.type == OperatorType.INTRINSIC, "Non-INTRINSIC operators " \
"should be written using __write_operator()!"
# Type check.
assert isinstance(current_operator.operand, Intrinsic), f"Type error, parser level error?"
if current_operator.operand in INTRINSIC_TO_BYTECODE_OPERATOR:
# Intrinsic operator.
# Write operator data.
write(INTRINSIC_TO_BYTECODE_OPERATOR[current_operator.operand])
else:
gofra.core.errors.message_verbosed(Stage.COMPILATOR, current_operator.token.location, "Error",
f"Intrinsic `{INTRINSIC_TYPES_TO_NAME[current_operator.operand]}` "
f"is not implemented for bytecode compilation!", True)
def __write_operator(operator: Operator):
""" Writes default operator (non-intrinsic). """
# Grab our operator
if operator.type == OperatorType.INTRINSIC:
assert False, "Intrinsic operators should be written using __write_operator_intrinsic()!"
elif operator.type == OperatorType.PUSH_INTEGER:
assert isinstance(operator.operand, int), "Type error, parser level error?"
# Write operator data.
write(OPERATOR_TYPE_TO_BYTECODE_OPERATOR[OperatorType.PUSH_INTEGER])
write(f"{operator.operand}")
elif operator.type == OperatorType.PUSH_STRING:
assert isinstance(operator.operand, str), "Type error, parser level error?"
gofra.core.errors.message("Error", "Strings is not implemented yet in the bytecode!", True)
elif operator.type == OperatorType.IF:
assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
elif operator.type == OperatorType.WHILE:
assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
elif operator.type == OperatorType.DO:
assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
elif operator.type == OperatorType.ELSE:
assert isinstance(operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
elif operator.type == OperatorType.END:
assert isinstance(operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
elif operator.type == OperatorType.DEFINE:
assert False, "Got definition operator at runner stage, parser level error?"
else:
# If unknown operator type.
assert False, f"Got unexpected / unknon operator type! (How?)"
# WIP.
current_lines.append("\n")
def write(text: str):
""" Writes text to file. """
current_lines.append(text + " ")
# Get source operators count.
operators_count = len(source.operators)
# Current operator index from the source.
current_operator_index = 0
# Lines.
current_lines: List[str] = []
# Check that there is more than zero operators in context.
if operators_count == 0:
# If there is no operators in the final parser context.
gofra.core.errors.message_verbosed(Stage.COMPILATOR, (basename(path), 1, 1), "Error",
"There is no operators found in given file after parsing, "
"are you given empty file or file without resulting operators?", True)
# Open file.
bytecode_path = path + ".gofbc"
file, _ = gofra.core.other.try_open_file(bytecode_path, "w", True)
while current_operator_index < operators_count:
# While we have not run out of source operators.
# Get current operator from the source.
current_operator: Operator = source.operators[current_operator_index]
if current_operator.type == OperatorType.INTRINSIC:
# If this is intrinsic.
# Write intrinsic operator.
__write_operator_intrinsic(current_operator)
else:
# If this is other operator.
# Write default operator.
__write_operator(current_operator)
# Increment current index.
current_operator_index += 1
# Write lines in final file.
for current_stack_line in current_lines:
file.write(current_stack_line)
# Close file.
file.close()
return bytecode_path
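# A sketch of the emitted .gofbc stream for `2 3 + show`; the mnemonics are
# placeholders here, the real ones come from OPERATOR_TYPE_TO_BYTECODE_OPERATOR
# and INTRINSIC_TO_BYTECODE_OPERATOR in gofra.core.danger:
#   <push-int-op> 2 <push-int-op> 3 <plus-op> <show-op>
# i.e. a flat, whitespace-separated token stream read back by execute_bytecode().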
def execute_bytecode(path: str):
""" Executes bytecode file. """
# Check that there is no changes in operator type or intrinsic.
assert len(OperatorType) == 10, "Please update implementation for bytecode execution after adding new OperatorType!"
assert len(Intrinsic) == 28, "Please update implementation for bytecode execution after adding new Intrinsic!"
if not path.endswith(".gofbc"):
gofra.core.errors.message("Error", f"File \"{path}\" should have extension `.gofbc` for being executed!", True)
return
# Open file.
file, _ = gofra.core.other.try_open_file(path, "r", True)
# Tokenize operator tokens.
bc_op_tokens = []
for line in file.readlines():
op_tokens = line.split(" ")
for op_token in op_tokens:
if op_token == "\n" or op_token.replace(" ", "") == "":
continue
bc_op_tokens.append(op_token)
# New context.
parser_context = ParserContext()
# Convert OPs to interpreter operators.
current_bc_operator_index = 0
while current_bc_operator_index < len(bc_op_tokens):
bc_operator = bc_op_tokens[current_bc_operator_index]
if bc_operator == OPERATOR_TYPE_TO_BYTECODE_OPERATOR[OperatorType.PUSH_INTEGER]:
parser_context.operators.append(Operator(
OperatorType.PUSH_INTEGER,
Token(TokenType.BYTECODE, bc_operator, (path, -1, -1), bc_operator),
int(bc_op_tokens[current_bc_operator_index + 1])
))
current_bc_operator_index += 2
continue
else:
if bc_operator in BYTECODE_OPERATOR_NAMES_TO_INTRINSIC:
parser_context.operators.append(Operator(
OperatorType.INTRINSIC,
Token(TokenType.BYTECODE, bc_operator, (path, -1, -1), bc_operator),
BYTECODE_OPERATOR_NAMES_TO_INTRINSIC[bc_operator]
))
else:
gofra.core.errors.message_verbosed(Stage.PARSER, ("Bytecode", -1, -1), "Error",
f"Got unexpected bytecode instruction - {repr(bc_operator)}!", True)
current_bc_operator_index += 1
continue
# Run.
parser_context_source = Source(parser_context.operators)
interpretator_run(parser_context_source)
# Close file.
file.close()
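# Example (sketch) of the bytecode round trip, assuming `source` and `context`
# came from load_source_from_file(); the file name is illustrative:
#   bytecode_path = compile_bytecode(source, context, "program.gof")
#   execute_bytecode(bytecode_path)  # re-tokenizes the .gofbc and interprets it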
# CLI.
def cli_no_arguments_error_message(operator: Operator, force_exit: bool = False):
""" Shows no arguments passed error message to the CLI. """
if operator.type == OperatorType.INTRINSIC:
# Intrinsic Operator.
# Type check.
assert isinstance(operator.operand, Intrinsic), "Type error, parser level error?"
# Error
gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
f"`{INTRINSIC_TYPES_TO_NAME[operator.operand]}` "
f"intrinsic should have more arguments at the stack, but it was not founded!")
elif operator.type == OperatorType.IF:
# IF Operator.
# Error
gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
"`IF` operator should have 1 argument at the stack, but it was not found!")
elif operator.type == OperatorType.DO:
# DO Operator.
# Error
gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
"`DO` operator should have 1 argument at the stack, but it was not found!")
else:
# Unknown operator.
assert False, "Tried to call no_arguments_error_message() " \
"for operator that does not need arguments! (Type checker error?)"
# If we should force exit.
if force_exit:
exit(1)
def cli_argument_type_error_message(operator: Operator, argument_index: int,
actual_type: type, expected_type: type, force_exit: bool = False):
""" Shows unexpected argument type passed error message to the CLI. """
if operator.type == OperatorType.INTRINSIC:
assert isinstance(operator.operand, Intrinsic), "Type error, parser level error?"
gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
f"`{INTRINSIC_TYPES_TO_NAME[operator.operand]}` "
f"intrinsic expected {argument_index} argument "
f"to be with type {expected_type}, but it has type {actual_type}!")
elif operator.type == OperatorType.IF:
gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
f"`IF` operator expected type {expected_type} but got {actual_type}!")
elif operator.type == OperatorType.DO:
gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
f"`DO` operator expected type {expected_type} but got {actual_type}!")
else:
assert False, "Tried to call cli_argument_type_error_message() " \
"for unknown operator! (Type checker error?)"
if force_exit:
exit(1)
def cli_validate_argument_vector(argument_vector: List[str]) -> List[str]:
""" Validates CLI argv (argument vector) """
# Check that there is anything in the ARGV.
assert len(argument_vector) > 0, "There is no source script path in the ARGV"
# Get argument vector without the source script path.
argument_runner_filename: str = argument_vector[0]
argument_vector = argument_vector[1:]
# Validate ARGV.
if len(argument_vector) == 0:
# If there is no arguments.
# Message.
gofra.systems.cli.usage_message(argument_runner_filename)
gofra.core.errors.message("Error", "Please pass file path to work with (.gof or .gofbc ~)", True)
elif len(argument_vector) == 1:
# Just one argument.
if argument_vector[0] != "help":
# If this is not help.
# Message.
gofra.systems.cli.usage_message(argument_runner_filename)
gofra.core.errors.message("Error", "Please pass subcommand after the file path!", True)
# Show usage.
gofra.systems.cli.usage_message(argument_runner_filename)
# Exit.
exit(0)
# Return empty source path and the subcommand (never reached, as we exit above).
return ["", argument_vector[0], ""]
elif len(argument_vector) == 2:
# Expected ARGV length.
# All ok.
return [*argument_vector, ""]
elif len(argument_vector) == 3:
# If this may be the silent argument.
if argument_vector[2] != "-silent":
# If the third argument is not the silent flag.
# Message.
gofra.systems.cli.usage_message(argument_runner_filename)
gofra.core.errors.message("Error", "Unexpected arguments!", True)
# Return final ARGV.
return argument_vector
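# A sketch of the normalization performed above (the runner file name is
# stripped; "gofra.py" and "file.gof" here are illustrative):
#   ["gofra.py", "file.gof", "run"]             -> ["file.gof", "run", ""]
#   ["gofra.py", "file.gof", "run", "-silent"]  -> ["file.gof", "run", "-silent"]
#   ["gofra.py", "help"]                        -> usage message, then exit(0)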
def cli_entry_point():
""" Entry point for the CLI. """
# Get and check size of cli argument vector.
cli_argument_vector = cli_validate_argument_vector(argv)
assert len(cli_argument_vector) == 3, "Got unexpected size of argument vector."
# CLI Options.
cli_source_path, cli_subcommand, cli_silent = cli_argument_vector
cli_silent: bool = bool(cli_silent == "-silent")
# Welcome message.
if not cli_silent:
gofra.systems.cli.welcome_message()
# Load source and check size of it.
loaded_file = None
if cli_subcommand in ("run", "graph", "python", "dump", "compile"):
loaded_file = load_source_from_file(cli_source_path)
assert len(loaded_file) == 2, "Got unexpected data from loaded file."
if cli_subcommand == "run":
# If this is the run (interpretation) subcommand.
cli_source, cli_context = loaded_file
interpretator_run(cli_source, cli_context.memory_bytearray_size)
# Message.
if not cli_silent:
print(f"[Info] File \"{basename(cli_source_path)}\" was interpreted!")
elif cli_subcommand == "graph":
# If this is graph subcommand.
# Get source from loaded file.
cli_source, _ = loaded_file
# Generate graph file.
gofra.systems.graph.write(cli_source, cli_source_path)
# Message.
if not cli_silent:
print(f"[Info] .dot file \"{basename(cli_source_path)}.dot\" generated!")
elif cli_subcommand == "python":
# If this is python subcommand.
# Get source and context from loaded file.
cli_source, cli_context = loaded_file
# Generate python file.
python_generate(cli_source, cli_context, cli_source_path)
# Message.
if not cli_silent:
print(f"[Info] .py file \"{basename(cli_source_path)}.py\" generated!")
elif cli_subcommand == "dump":
# If this is dump subcommand.
# Get source from loaded file.
cli_source, _ = loaded_file
# Dump print.
gofra.systems.dump.dump(cli_source.operators)
# Message.
if not cli_silent:
print(f"[Info] File \"{basename(cli_source_path)}\" was dump printed!")
elif cli_subcommand == "compile":
# If this is compile subcommand.
# Get source from loaded file.
cli_source, cli_context = loaded_file
# Compile.
bytecode_path = compile_bytecode(cli_source, cli_context, cli_source_path)
# Message.
if not cli_silent:
print(f"[Info] File \"{basename(cli_source_path)}\" was compiled to \"{basename(bytecode_path)}\"!")
elif cli_subcommand == "execute":
# If this is execute subcommand.
# Execute.
execute_bytecode(cli_source_path)
# Message.
if not cli_silent:
print(f"[Info] File \"{basename(cli_source_path)}\" was executed!")
else:
# If unknown subcommand.
# Message.
gofra.systems.cli.usage_message(__file__)
gofra.core.errors.message("Error", f"Unknown subcommand `{cli_subcommand}`!")
if __name__ == "__main__":
cli_entry_point()
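# Example CLI invocations (a sketch; the runner and file names are illustrative):
#   python gofra.py program.gof run           # interpret the source
#   python gofra.py program.gof python        # generate program.gof.py
#   python gofra.py program.gof compile       # emit program.gof.gofbc
#   python gofra.py program.gof.gofbc execute # run the compiled bytecode
#   python gofra.py program.gof run -silent   # suppress the welcome/info messages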
| 44.570689 | 174 | 0.548081 | """
Main Gofra programming language source code.
"""
__author__ = "Kirill Zhosul @kirillzhosul"
__license__ = "MIT"
from typing import Generator
from os.path import basename
from sys import argv
import gofra
from gofra.core.danger import *
from gofra.core.stack import Stack
# MAJOR WARNING FOR ALL READERS.
# This code is not refactored;
# currently I am working on refactoring and splitting it into the gofra module,
# and there is a lot of stuff that will be reworked.
# Also, note that the bytecode is not finished, and interpretation will be
# converted to gofra.core.vm, which will run the bytecode on its own
# as the internal interpretation method (if you want to use the C++ VM,
# which may also not be finished yet, see https://github.com/gofralang/vm/)
# Lexer.
def lexer_tokenize(lines: List[str], file_parent: str) -> Generator[Token, None, None]:
""" Tokenizes lines into list of the Tokens. """
# Check that there is no changes in token type.
assert len(TokenType) == 6, "Please update implementation after adding new TokenType!"
# Get the basename.
file_parent = basename(file_parent)
# Current line index.
current_line_index = 0
# Get lines count.
lines_count = len(lines)
# Check that there is more than zero lines.
if lines_count == 0:
# If there is no lines.
# Error.
gofra.core.errors.message_verbosed(Stage.LEXER, (file_parent, 1, 1), "Error",
"There is no lines found in the given file "
"are you given empty file?", True)
while current_line_index < lines_count:
# Loop over lines.
# Get line.
current_line = lines[current_line_index]
# Find first non-space char.
current_collumn_index = gofra.core.lexer.find_collumn(current_line, 0, lambda char: not char.isspace())
# Get current line length.
current_line_length = len(current_line)
# Column end index, updated while scanning tokens on the line.
current_collumn_end_index = 0
while current_collumn_index < current_line_length:
# Iterate over line.
# Get the location.
current_location = (file_parent, current_line_index + 1, current_collumn_index + 1)
if current_line[current_collumn_index] == EXTRA_CHAR:
# If we got a character quote.
# Index of the column end.
# (Trying to find the closing quote.)
current_collumn_end_index = gofra.core.lexer.find_collumn(current_line, current_collumn_index + 1,
lambda char: char == EXTRA_CHAR)
if current_collumn_end_index >= len(current_line) or \
current_line[current_collumn_end_index] != EXTRA_CHAR:
# If we got not EXTRA_CHAR or exceed current line length.
# Error.
gofra.core.errors.message_verbosed(Stage.LEXER, current_location, "Error",
"There is unclosed character literal. "
f"Do you forgot to place `{EXTRA_CHAR}`?", True)
# Get current token text.
current_token_text = current_line[current_collumn_index + 1: current_collumn_end_index]
# Get current char value.
current_char_value = gofra.core.lexer.unescape(current_token_text).encode("UTF-8")
if len(current_char_value) != 1:
# If there are 0 or more than 1 characters.
# Error.
gofra.core.errors.message_verbosed(Stage.LEXER, current_location, "Error",
"Unexpected number of characters in the character literal."
"Only one character is allowed in character literal", True)
# Return character token.
yield Token(
type=TokenType.CHARACTER,
text=current_token_text,
location=current_location,
value=current_char_value[0]
)
# Find first non-space char.
current_collumn_index = gofra.core.lexer.find_collumn(current_line, current_collumn_end_index + 1,
lambda char: not char.isspace())
elif current_line[current_collumn_index] == EXTRA_STRING:
# If this is string.
# String buffer for strings.
current_string_buffer = ""
while current_line_index < len(lines):
# While we haven't reached the end of the lines.
# Get string start.
string_start_collumn_index = current_collumn_index
if current_string_buffer == "":
# If we have not started writing the string buffer yet.
# Increment by one for quote.
string_start_collumn_index += len(EXTRA_STRING)
else:
# If we started.
# Just grab line.
current_line = lines[current_line_index]
# Get string end.
string_end_collumn_index = gofra.core.lexer.find_string_end(current_line, string_start_collumn_index)
if string_end_collumn_index >= len(current_line) or \
current_line[string_end_collumn_index] != EXTRA_STRING:
# If we reached the end of the current line without finding the closing quote.
# Add current line.
current_string_buffer += current_line[string_start_collumn_index:]
# Reset and move next line.
current_line_index += 1
current_collumn_index = 0
else:
# If the closing quote is on the current line.
# Add final buffer.
current_string_buffer += current_line[string_start_collumn_index:string_end_collumn_index]
current_collumn_end_index = string_end_collumn_index
# End lexing string.
break
if current_line_index >= len(lines):
# If we exceed current lines length.
# Error.
gofra.core.errors.message_verbosed(Stage.LEXER, current_location, "Error",
"There is unclosed string literal. "
f"Do you forgot to place `{EXTRA_STRING}`?", True)
# Sanity check.
assert current_line[current_collumn_index] == EXTRA_STRING, "Got a non-string-closing character!"
# Increase end index.
current_collumn_end_index += 1
# Get current token text.
current_token_text = current_string_buffer
# Return string token.
yield Token(
type=TokenType.STRING,
text=current_token_text,
location=current_location,
value=gofra.core.lexer.unescape(current_token_text)
)
# Find first non-space char.
current_collumn_index = gofra.core.lexer.find_collumn(current_line, current_collumn_end_index,
lambda char: not char.isspace())
else:
# Index of the column end.
current_collumn_end_index = gofra.core.lexer.find_collumn(current_line, current_collumn_index,
lambda char: char.isspace())
# Get current token text.
current_token_text = current_line[current_collumn_index: current_collumn_end_index]
try:
# Try convert token integer.
current_token_integer = int(current_token_text)
except ValueError:
# If the value is not a valid integer.
if current_token_text in KEYWORD_NAMES_TO_TYPE:
# If this is keyword.
# Return keyword token.
yield Token(
type=TokenType.KEYWORD,
text=current_token_text,
location=current_location,
value=KEYWORD_NAMES_TO_TYPE[current_token_text]
)
else:
# Not keyword.
# If this is comment - break.
# TODO: Fix lexing of something like 0//0 (comment not at the start), which does not lex as it should.
if current_token_text.startswith(EXTRA_COMMENT):
break
# Return word token.
yield Token(
type=TokenType.WORD,
text=current_token_text,
location=current_location,
value=current_token_text
)
else:
# If all ok.
# Return token.
yield Token(
type=TokenType.INTEGER,
text=current_token_text,
location=current_location,
value=current_token_integer
)
# Find first non-space char.
current_collumn_index = gofra.core.lexer.find_collumn(current_line, current_collumn_end_index,
lambda char: not char.isspace())
# Increment current line.
current_line_index += 1
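# A sketch of the lexer output (assuming EXTRA_STRING is the double quote):
# the line `10 two "hi"` yields roughly:
#   Token(type=TokenType.INTEGER, text="10", value=10)
#   Token(type=TokenType.WORD, text="two", value="two")
#   Token(type=TokenType.STRING, text="hi", value="hi")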
# Parser.
def parser_parse(tokens: List[Token], context: ParserContext, path: str):
""" Parses token from lexer* (lexer_tokenize()) """
# Check that there is no changes in operator type.
assert len(OperatorType) == 10, "Please update implementation after adding new OperatorType!"
# Check that there is no changes in keyword type.
assert len(Keyword) == 8, "Please update implementation after adding new Keyword!"
# Check that there is no changes in token type.
assert len(TokenType) == 6, "Please update implementation after adding new TokenType!"
# Reverse tokens.
reversed_tokens: List[Token] = list(reversed(tokens))
# Definitions.
definitions: Dict[str, Definition] = dict()
memories: Dict[str, Memory] = dict()
variables: Dict[str, Variable] = dict()
variables_offset = 0
memories_offset = 0
if len(reversed_tokens) == 0:
gofra.core.errors.message_verbosed(Stage.PARSER, (basename(path), 1, 1), "Error",
"There is no tokens found, are you given empty file?", True)
while len(reversed_tokens) > 0:
# While there is any token.
# Get current token.
current_token: Token = reversed_tokens.pop()
if current_token.type == TokenType.WORD:
assert isinstance(current_token.value, str), "Type error, lexer level error?"
if current_token.value in INTRINSIC_NAMES_TO_TYPE:
context.operators.append(Operator(
type=OperatorType.INTRINSIC,
token=current_token,
operand=INTRINSIC_NAMES_TO_TYPE[current_token.value]
))
context.operator_index += 1
continue
if current_token.text in definitions:
# Expand definition tokens.
reversed_tokens += reversed(definitions[current_token.text].tokens)
continue
if current_token.text in memories:
memory = memories[current_token.text]
context.operators.append(Operator(
type=OperatorType.PUSH_INTEGER,
token=current_token,
operand=memory.ptr_offset
))
context.operator_index += 1
continue
if current_token.text in variables:
variable = variables[current_token.text]
context.operators.append(Operator(
type=OperatorType.PUSH_INTEGER,
token=current_token,
operand=variable.ptr_offset
))
context.operator_index += 1
continue
if current_token.text.startswith(EXTRA_DIRECTIVE):
directive = current_token.text[len(EXTRA_DIRECTIVE):]
if directive == "LINTER_SKIP":
if context.directive_linter_skip:
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
f"Directive `{EXTRA_DIRECTIVE}{directive}` defined twice!", True)
context.directive_linter_skip = True
elif directive == "PYTHON_COMMENTS_SKIP":
if context.directive_python_comments_skip:
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
f"Directive `{EXTRA_DIRECTIVE}{directive}` defined twice!",
True)
context.directive_python_comments_skip = True
else:
if directive.startswith("MEM_BUF_BYTE_SIZE="):
# If this starts with the memory buffer byte size definition name.
# Get directive value from all directive text.
directive_value = directive[len("MEM_BUF_BYTE_SIZE="):]
# Get new memory size
try:
new_memory_bytearray_size = int(directive_value)
except ValueError:
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
f"Directive `{EXTRA_DIRECTIVE}{directive}` "
f"passed invalid size `{directive_value}`!", True)
else:
# Change size of the bytearray.
context.memory_bytearray_size = new_memory_bytearray_size
else:
# Message.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
f"Unknown directive `{EXTRA_DIRECTIVE}{directive}`", True)
continue
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
f"Unknown WORD `{current_token.text}`, "
f"are you misspelled something?", True)
elif current_token.type == TokenType.INTEGER:
# If we got an integer.
# Type check.
assert isinstance(current_token.value, int), "Type error, lexer level error?"
# Add operator to the context.
context.operators.append(Operator(
type=OperatorType.PUSH_INTEGER,
token=current_token,
operand=current_token.value
))
# Increment operator index.
context.operator_index += 1
elif current_token.type == TokenType.STRING:
# If we got a string.
# Type check.
assert isinstance(current_token.value, str), "Type error, lexer level error?"
# Add operator to the context.
context.operators.append(Operator(
type=OperatorType.PUSH_STRING,
token=current_token,
operand=current_token.value
))
# Increment operator index.
context.operator_index += 1
elif current_token.type == TokenType.CHARACTER:
# If we got a character.
# Type check.
assert isinstance(current_token.value, int), "Type error, lexer level error?"
# Add operator to the context.
context.operators.append(Operator(
type=OperatorType.PUSH_INTEGER,
token=current_token,
operand=current_token.value
))
# Increment operator index.
context.operator_index += 1
elif current_token.type == TokenType.KEYWORD:
# If we got a keyword.
if current_token.value == Keyword.IF:
# This is IF keyword.
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.IF,
token=current_token
))
# Push current operator index to the context memory stack.
context.memory_stack.append(context.operator_index)
# Increment operator index.
context.operator_index += 1
elif current_token.value == Keyword.WHILE:
# This is WHILE keyword.
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.WHILE,
token=current_token
))
# Push current operator index to the context memory stack.
context.memory_stack.append(context.operator_index)
# Increment operator index.
context.operator_index += 1
elif current_token.value == Keyword.DO:
# This is `DO` keyword.
if len(context.memory_stack) == 0:
# If there is nothing on the memory stack.
# Error.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`do` should used after the `while` block!", True)
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.DO,
token=current_token
))
# Get `WHILE` operator from the memory stack.
block_operator_index = context.memory_stack.pop()
block_operator = context.operators[block_operator_index]
if block_operator.type != OperatorType.WHILE:
# If this is not while.
# Error.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`do` should used after the `while` block!", True)
# Say that we cross-reference the WHILE block.
context.operators[context.operator_index].operand = block_operator_index
# Push current operator index to the context memory stack.
context.memory_stack.append(context.operator_index)
# Increment operator index.
context.operator_index += 1
elif current_token.value == Keyword.ELSE:
# If this is else keyword.
if len(context.memory_stack) == 0:
# If there is nothing on the memory stack.
# Error.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`else` should used after the `if` block!", True)
# Get `IF` operator from the memory stack.
block_operator_index = context.memory_stack.pop()
block_operator = context.operators[block_operator_index]
if block_operator.type == OperatorType.IF:
# If we use else after the IF.
# Say that the previous IF should jump to the operator right after this ELSE.
context.operators[block_operator_index].operand = context.operator_index + 1
# Push current operator index to the stack.
context.memory_stack.append(context.operator_index)
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.ELSE,
token=current_token
))
# Increment operator index.
context.operator_index += 1
else:
# If not `IF`.
# Get error location.
error_location = block_operator.token.location
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, error_location, "Error",
"`else` can only used after `if` block!", True)
elif current_token.value == Keyword.END:
# If this is end keyword.
# Get block operator from the stack.
block_operator_index = context.memory_stack.pop()
block_operator = context.operators[block_operator_index]
if block_operator.type == OperatorType.IF:
# If this is IF block.
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.END,
token=current_token
))
# Say that start IF block refers to this END block.
context.operators[block_operator_index].operand = context.operator_index
# Say that this END block refers to next operator index.
context.operators[context.operator_index].operand = context.operator_index + 1
elif block_operator.type == OperatorType.ELSE:
# If this is ELSE block.
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.END,
token=current_token
))
# Say that owner block (If/Else) should jump to us.
context.operators[block_operator_index].operand = context.operator_index
# Say that we should jump to the next position.
context.operators[context.operator_index].operand = context.operator_index + 1
elif block_operator.type == OperatorType.DO:
# If this is DO block.
# Type check.
assert block_operator.operand is not None, "DO operator has unset operand! Parser level error?"
assert isinstance(block_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Push operator to the context.
context.operators.append(Operator(
type=OperatorType.END,
token=current_token
))
# Say that DO cross-references the WHILE block.
context.operators[context.operator_index].operand = block_operator.operand
# Say that WHILE should jump in the DO body.
context.operators[block_operator.operand].operand = context.operator_index + 1
else:
# Invalid: `end` was used not after an if, else, or do block.
# Get error location.
error_location = block_operator.token.location
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, error_location, "Error",
"`end` can only close `if`, `else` or `do` block!", True)
# Increment operator index.
context.operator_index += 1
elif current_token.value == Keyword.DEFINE:
# This is DEFINE keyword.
if len(reversed_tokens) == 0:
# No name for definition is given.
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`define` should have name after the keyword, "
"do you has unfinished definition?", True)
# Get name for definition.
definition_name = reversed_tokens.pop()
if definition_name.type != TokenType.WORD:
# If name is not word.
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, definition_name.location, "Error",
"`define` name, should be of type WORD, sorry, but you can`t use something that you give as name for the definition!", True)
if definition_name.text in definitions:
# If already defined.
# Error messages.
gofra.core.errors.message_verbosed(Stage.PARSER, definition_name.location, "Error",
"Definition with name {} was already defined!", False)
gofra.core.errors.message_verbosed(Stage.PARSER, definitions[definition_name.text].location, "Error",
"Original definition was here...", True)
if definition_name.text in INTRINSIC_NAMES_TO_TYPE or definition_name.text in KEYWORD_NAMES_TO_TYPE:
# If default item.
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, definition_name.location, "Error",
"Can`t define definition with language defined name!", True)
# Create blank new definition.
definition = Definition(current_token.location, [])
# Add definition.
definitions[definition_name.text] = definition
# How much we require ends.
required_end_count = 0
while len(reversed_tokens) > 0:
# If there is still tokens.
# Get new token.
current_token = reversed_tokens.pop()
if current_token.type == TokenType.KEYWORD:
# If got keyword.
if current_token.text in KEYWORD_NAMES_TO_TYPE:
# If this is correct keyword.
if current_token.text == KEYWORD_TYPES_TO_NAME[Keyword.END]:
# If this is end.
if required_end_count <= 0:
# If we no more require end.
# Stop definition.
break
# Decrease required end counter.
required_end_count -= 1
if KEYWORD_NAMES_TO_TYPE[current_token.text] in \
(Keyword.IF, Keyword.DEFINE, Keyword.DO):
# If this is keyword that requires end.
# Increase required end count.
required_end_count += 1
if KEYWORD_NAMES_TO_TYPE[current_token.text] == Keyword.ELSE:
# If got else.
# Just pass, as else does not require an end.
pass
else:
# Invalid keyword.
assert False, "Got invalid keyword!"
# Append token.
definition.tokens.append(current_token)
if required_end_count != 0:
# If there is still required end.
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
f"There is {required_end_count} unclosed blocks, "
"that requires cloing `end` keyword inside `define` definition. ",
True)
if not (current_token.type == TokenType.KEYWORD and
current_token.text == KEYWORD_TYPES_TO_NAME[Keyword.END]):
# If we did not get `end` at the end of the definition.
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`define` should have `end` at the end of definition, "
"but it was not founded!", True)
elif current_token.value == Keyword.MEMORY:
if len(reversed_tokens) == 0:
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`memory` should have name after the keyword, "
"do you has unfinished memory definition?", True)
name_token = reversed_tokens.pop()
if name_token.type != TokenType.WORD:
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
"`memory` name, should be of type WORD, sorry, but "
"you can`t use something that you give as name "
"for the memory!", True)
if name_token.text in memories or name_token.text in definitions:
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
f"Definition or memory with name {name_token.text} "
f"was already defined!", False)
if name_token.text in definitions:
gofra.core.errors.message_verbosed(Stage.PARSER, definitions[name_token.text].location,
"Error", "Original definition was here...", True)
# TODO: Memory location report.
if name_token.text in INTRINSIC_NAMES_TO_TYPE or name_token.text in KEYWORD_NAMES_TO_TYPE:
# If default item.
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
"Can`t define memories with language defined name!", True)
if len(reversed_tokens) <= 0:
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
"`memory` requires size for memory definition, "
"which was not given!", True)
memory_size_token = reversed_tokens.pop()
if memory_size_token.type != TokenType.INTEGER:
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
"`var` size, should be of type INTEGER, sorry, but "
"you can`t use something that you give as size "
"for the memory!", True)
# TODO: Proper evaluation.
# Create blank new memory.
memory_name = name_token.text
memories[memory_name] = Memory(memory_name, memory_size_token.value, memories_offset)
memories_offset += memory_size_token.value
if len(reversed_tokens) > 0:
end_token = reversed_tokens.pop()
if end_token.type == TokenType.KEYWORD and \
end_token.text == KEYWORD_TYPES_TO_NAME[Keyword.END]:
continue
# If we did not get `end` at the end of the definition.
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`memory` should have `end` at the end of the memory definition, "
"but it was not found!", True)
elif current_token.value == Keyword.VARIABLE:
if len(reversed_tokens) == 0:
gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
"`var` should have name after the keyword, "
"do you has unfinished variable definition?", True)
name_token = reversed_tokens.pop()
if name_token.type != TokenType.WORD:
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
"`var` name, should be of type WORD, sorry, but "
"you can`t use something that you give as name "
"for the variable!", True)
if name_token.text in variables or name_token.text in definitions or name_token.text in memories:
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
f"Definition or variable with name {name_token.text} "
f"was already defined!", False)
if name_token.text in definitions:
gofra.core.errors.message_verbosed(Stage.PARSER, definitions[name_token.text].location,
"Error", "Original definition was here...", True)
# TODO: Memory / variable location report.
if name_token.text in INTRINSIC_NAMES_TO_TYPE or name_token.text in KEYWORD_NAMES_TO_TYPE:
# If default item.
gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
"Can`t define variable with language defined name!", True)
# Create blank new variable.
variable_name = name_token.text
variables[variable_name] = Variable(variable_name, variables_offset)
variables_offset += VARIABLE_SIZE
else:
# If unknown keyword type.
assert False, "Unknown keyword type! (How?)"
else:
# If unknown token type.
assert False, "Unknown token type! (How?)"
if len(context.memory_stack) > 0:
# If there is anything left on the stack.
# Get error operator.
error_operator = context.operators[context.memory_stack.pop()]
# Get error location.
error_location = error_operator.token.location
# Error message.
gofra.core.errors.message_verbosed(Stage.PARSER, error_location, "Error",
f"Unclosed block \"{error_operator.token.text}\"!", True)
if context.directive_linter_skip:
# If skip linter.
# Warning message.
gofra.core.errors.message_verbosed(Stage.PARSER, (basename(path), 1, 1), "Warning",
"#LINTER_SKIP DIRECTIVE! THIS IS UNSAFE, PLEASE DISABLE IT!")
# Interpreter.
def interpretator_run(source: Source,
bytearray_size: int = MEMORY_BYTEARRAY_SIZE):
""" Interpretates the source. """
assert len(OperatorType) == 10, "Please update implementation after adding new OperatorType!"
assert len(Intrinsic) == 30, "Please update implementation after adding new Intrinsic!"
# Create empty stack.
memory_execution_stack = Stack()
# String pointers.
memory_string_pointers: Dict[OPERATOR_ADDRESS, TYPE_POINTER] = dict()
memory_string_size = bytearray_size
memory_string_size_ponter = 0
# Allocate sized bytearray.
memory_bytearray = bytearray(bytearray_size + memory_string_size + MEMORY_MEMORIES_SIZE + MEMORY_VARIABLES_SIZE)
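# Rough layout of the buffer allocated above (a sketch; the exact region sizes
# come from the constants in gofra.core.danger):
#   [0 .. bytearray_size)         general memory cells
#   [bytearray_size + 1 ..)       string buffer (filled by PUSH_STRING below)
#   remaining bytes               memories and variables regions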
# Get source operators count.
operators_count = len(source.operators)
current_operator_index = 0
if operators_count == 0:
gofra.core.errors.message_verbosed(Stage.RUNNER, ("__RUNNER__", 1, 1), "Error",
"There is no operators found in given file after parsing, "
"are you given empty file or file without resulting operators?", True)
while current_operator_index < operators_count:
# While we have not run out of source operators.
# Get current operator from the source.
current_operator: Operator = source.operators[current_operator_index]
try:
# Try / Catch to get unexpected Python errors.
if current_operator.type == OperatorType.PUSH_INTEGER:
# Push integer operator.
# Type check.
assert isinstance(current_operator.operand, int), "Type error, parser level error?"
# Push operand to the stack.
memory_execution_stack.push(current_operator.operand)
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.PUSH_STRING:
# Push string operator.
# Type check.
assert isinstance(current_operator.operand, str), "Type error, parser level error?"
# Get string data.
string_value = current_operator.operand.encode("UTF-8")
string_length = len(string_value)
if current_operator_index not in memory_string_pointers:
# If the string is not yet in the allocated string pointers.
# Get a pointer and add it to the pointers.
string_pointer: TYPE_POINTER = memory_string_size + 1 + memory_string_size_ponter
memory_string_pointers[current_operator_index] = string_pointer
# Write string right into the bytearray memory.
memory_bytearray[string_pointer: string_pointer + string_length] = string_value
# Increase next pointer by current string length.
memory_string_size_ponter += string_length
# Check that there is no cumulative string buffer overflow.
if memory_string_size_ponter > memory_string_size:
# If overflowed.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
"Trying to push string, when there is memory string buffer overflow!"
" Try use memory size directive, to increase size!", True)
# Push found string pointer to the stack.
found_string_pointer = memory_string_pointers[current_operator_index]
memory_execution_stack.push(found_string_pointer)
# Push string length to the stack.
memory_execution_stack.push(string_length)
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.INTRINSIC:
# Intrinsic operator.
if current_operator.operand == Intrinsic.PLUS:
# Intrinsic plus operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push sum to the stack.
memory_execution_stack.push(operand_b + operand_a)
elif current_operator.operand == Intrinsic.DIVIDE:
# Intrinsic divide operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push the quotient to the stack.
memory_execution_stack.push(operand_b // operand_a)
elif current_operator.operand == Intrinsic.MODULUS:
# Intrinsic modulus operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push the modulus to the stack.
memory_execution_stack.push(int(operand_b % operand_a))
elif current_operator.operand == Intrinsic.MINUS:
# Intrinsic minus operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push difference to the stack.
memory_execution_stack.push(operand_b - operand_a)
elif current_operator.operand == Intrinsic.MULTIPLY:
# Intrinsic multiply operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push the product to the stack.
memory_execution_stack.push(operand_b * operand_a)
elif current_operator.operand == Intrinsic.EQUAL:
# Intrinsic equal operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push equal to the stack.
memory_execution_stack.push(int(operand_b == operand_a))
elif current_operator.operand == Intrinsic.NOT_EQUAL:
# Intrinsic not equal operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push not equal to the stack.
memory_execution_stack.push(int(operand_b != operand_a))
elif current_operator.operand == Intrinsic.LESS_THAN:
# Intrinsic less than operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push less than to the stack.
memory_execution_stack.push(int(operand_b < operand_a))
elif current_operator.operand == Intrinsic.GREATER_THAN:
# Intrinsic greater than operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push greater than to the stack.
memory_execution_stack.push(int(operand_b > operand_a))
elif current_operator.operand == Intrinsic.LESS_EQUAL_THAN:
# Intrinsic less equal than operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push less equal than to the stack.
memory_execution_stack.push(int(operand_b <= operand_a))
elif current_operator.operand == Intrinsic.GREATER_EQUAL_THAN:
# Intrinsic greater equal than operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push greater equal than to the stack.
memory_execution_stack.push(int(operand_b >= operand_a))
elif current_operator.operand == Intrinsic.SWAP:
# Intrinsic swap operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push swapped to the stack.
memory_execution_stack.push(operand_a)
memory_execution_stack.push(operand_b)
elif current_operator.operand == Intrinsic.COPY:
# Intrinsic copy operator.
# Get operand.
operand_a = memory_execution_stack.pop()
# Push copy to the stack.
memory_execution_stack.push(operand_a)
memory_execution_stack.push(operand_a)
elif current_operator.operand == Intrinsic.COPY2:
# Intrinsic copy2 operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push copy to the stack.
memory_execution_stack.push(operand_b)
memory_execution_stack.push(operand_a)
memory_execution_stack.push(operand_b)
memory_execution_stack.push(operand_a)
elif current_operator.operand == Intrinsic.COPY_OVER:
# Intrinsic copy over operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Push copy to the stack.
memory_execution_stack.push(operand_b)
memory_execution_stack.push(operand_a)
memory_execution_stack.push(operand_b)
elif current_operator.operand == Intrinsic.DECREMENT:
# Intrinsic decrement operator.
# Get operand.
operand_a = memory_execution_stack.pop()
# Push decrement to the stack.
memory_execution_stack.push(operand_a - 1)
elif current_operator.operand == Intrinsic.INCREMENT:
# Intrinsic increment operator.
# Get operand.
operand_a = memory_execution_stack.pop()
# Push increment to the stack.
memory_execution_stack.push(operand_a + 1)
elif current_operator.operand == Intrinsic.FREE:
# Intrinsic free operator.
# Pop and discard.
memory_execution_stack.pop()
elif current_operator.operand == Intrinsic.SHOW:
# Intrinsic show operator.
# Get operand.
operand_a = memory_execution_stack.pop()
# Show operand.
print(operand_a)
elif current_operator.operand == Intrinsic.MEMORY_WRITE:
# Intrinsic memory write operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
if operand_b >= len(memory_bytearray):
# If this is going to be memory overflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to write at memory address {operand_b} "
f"that overflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferOverflow)", True)
elif operand_b < 0:
# If this is going to be memory underflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to write at memory address {operand_b} "
f"that underflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferUnderflow)", True)
# Write memory.
try:
memory_bytearray[operand_b] = operand_a
except IndexError:
# Memory error.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer (over|under)flow "
f"(Write to pointer {operand_b} when there is memory buffer "
f"with size {len(memory_bytearray)} bytes)!", True)
except ValueError:
# If this is 8bit (1byte) range (number) overflow.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer cell can only contain 1 byte (8 bit) "
f"that must be in range (0, 256),\nbut you passed number "
f"{operand_a} which is not fits in the 1 byte cell! (ByteOverflow)",
True)
elif current_operator.operand == Intrinsic.MEMORY_WRITE4BYTES:
# Intrinsic memory write 4 bytes operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# Convert value to 4 bytes.
try:
operand_a = operand_a.to_bytes(length=4, byteorder="little", signed=(operand_a < 0))
except OverflowError:
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer cell can only contain 4 byte (32 bit) "
f"that must be in range (0, 4294967295),\nbut you passed number "
f"{operand_a} which is not fits in the 4 byte cell! (ByteOverflow)",
True)
if operand_b + 4 - 1 >= len(memory_bytearray):
# If this is going to be memory overflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to write 4 bytes to memory address from {operand_b} to "
f"{operand_b + 4 - 1} "
f"that overflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferOverflow)", True)
elif operand_b < 0:
# If this is going to be memory underflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to write at memory address "
f"from {operand_b} to {operand_b + 2} "
f"that underflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferUnderflow)", True)
# Write memory.
try:
memory_bytearray[operand_b:operand_b + 4] = operand_a
except IndexError:
# Memory error.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer (over|under)flow "
f"(Write to pointer from "
f"{operand_b} to {operand_b + 4 - 1} "
f"when there is memory buffer with size "
f"{len(memory_bytearray)} bytes)!", True)
except ValueError:
# If this is 32bit (4byte) range (number) overflow.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer cell can only contain 4 byte (32 bit) "
f"that must be in range (0, 4294967295),\nbut you passed number "
f"{operand_a} which is not fits in the 4 byte cell! (ByteOverflow)",
True)
elif current_operator.operand == Intrinsic.MEMORY_READ4BYTES:
# Intrinsic memory read 4 bytes operator.
# Get operand.
operand_a = memory_execution_stack.pop()
if operand_a + 4 - 1 >= len(memory_bytearray):
# If this is going to be memory overflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to read from memory address "
f"{operand_a} to {operand_a + 4 - 1} "
f"that overflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferOverflow)", True)
elif operand_a < 0:
# If this is going to be memory underflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to read from memory address "
f"{operand_a} to {operand_a + 4 - 1}"
f"that underflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferUnderflow)", True)
# Read memory at the pointer.
try:
memory_bytes = int.from_bytes(memory_bytearray[operand_a:operand_a + 4], byteorder="little")
except IndexError:
# Memory error.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer (over|under)flow "
f"(Read from pointer {operand_a} to {operand_a + 4 - 1} "
f"when there is memory buffer with size "
f"{len(memory_bytearray)} bytes)!", True)
else:
# Push memory to the stack.
memory_execution_stack.push(memory_bytes)
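# A minimal round-trip sketch of the little-endian 4-byte cell encoding used
# by MEMORY_WRITE4BYTES / MEMORY_READ4BYTES above (plain Python; the buffer
# size is illustrative and this is not part of the interpreter itself):
#   buffer = bytearray(16)
#   buffer[0:4] = (1000).to_bytes(length=4, byteorder="little", signed=False)
#   assert int.from_bytes(buffer[0:4], byteorder="little") == 1000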
elif current_operator.operand == Intrinsic.MEMORY_READ:
# Intrinsic memory read operator.
# Get operand.
operand_a = memory_execution_stack.pop()
if operand_a >= len(memory_bytearray):
# If this is going to be memory overflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to read from memory address {operand_a} "
f"that overflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferOverflow)", True)
elif operand_a < 0:
# If this is going to be memory underflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to read from memory address {operand_a} "
f"that underflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferUnderflow)", True)
# Read memory at the pointer.
try:
memory_byte = memory_bytearray[operand_a]
except IndexError:
# Memory error.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer (over|under)flow "
f"(Read from pointer {operand_a} when there is memory buffer "
f"with size {len(memory_bytearray)} bytes)!", True)
else:
# Push memory to the stack.
memory_execution_stack.push(memory_byte)
elif current_operator.operand == Intrinsic.MEMORY_SHOW_CHARACTERS:
# Intrinsic memory show as characters operator.
# Get both operands.
operand_a = memory_execution_stack.pop()
operand_b = memory_execution_stack.pop()
# String to show.
memory_string: bytes = b""
if operand_b + operand_a > len(memory_bytearray):
# If this is going to be memory overflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to read from memory address "
f"from {operand_b} to {operand_b + operand_a} "
f"that overflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferOverflow)", True)
elif operand_b < 0:
# If this is going to be memory underflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Trying to read from memory address"
f"from {operand_b} to {operand_b + operand_a} "
f"that underflows memory buffer size {(len(memory_bytearray))}"
" bytes (MemoryBufferUnderflow)", True)
# Read memory string.
try:
memory_string = memory_bytearray[operand_b: operand_b + operand_a]
except IndexError:
# Memory error.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Memory buffer (over|under)flow "
f"(Read from {operand_b} to {operand_b + operand_a} "
f"when there is memory "
f"buffer with size {len(memory_bytearray)} bytes)!", True)
# Print decoded memory bytes.
print(memory_string.decode("UTF-8"), end="")
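# Strings in memory are just (pointer, length) pairs over the bytearray; a
# hedged sketch of reading one back (`ptr` and `length` are placeholders for
# values produced earlier, e.g. by IO_READ_STRING):
#   text = memory_bytearray[ptr: ptr + length].decode("UTF-8")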
elif current_operator.operand == Intrinsic.MEMORY_POINTER:
# Intrinsic memory pointer operator.
# Push pointer to the stack.
memory_execution_stack.push(MEMORY_BYTEARRAY_NULL_POINTER)
elif current_operator.operand == Intrinsic.NULL:
# Intrinsic null operator.
# Push pointer to the stack.
memory_execution_stack.push(0)
elif current_operator.operand == Intrinsic.IO_READ_STRING:
# Intrinsic I/O read string operator.
# Get string data.
string_value = input().encode("UTF-8")
string_length = len(string_value)
if current_operator_index not in memory_string_pointers:
# If the string was not interned in the allocated string pointers yet.
# Compute a pointer and remember it in the pointers map.
string_pointer: TYPE_POINTER = 1 + memory_string_size_ponter
memory_string_pointers[current_operator_index] = string_pointer
# Write string right into the bytearray memory.
memory_bytearray[string_pointer: string_pointer + string_length] = string_value
# Increase next pointer by current string length.
memory_string_size_ponter += string_length
# Check that there is no overflow.
if string_length > memory_string_size:
# If overflow.
# Error.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
"Trying to push I/O string, "
"when there is memory string buffer overflow! "
"Try use memory size directive, to increase size!", True)
# Push found string pointer to the stack.
found_string_pointer = memory_string_pointers[current_operator_index]
memory_execution_stack.push(found_string_pointer)
# Push string length to the stack.
memory_execution_stack.push(string_length)
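# The branch above interns one string per operator index: the first visit
# allocates a pointer and copies the bytes into the bytearray, later visits
# reuse the stored pointer. A standalone model of that allocator (all names
# here are illustrative, not part of the interpreter):
#   pointers: dict = {}
#   next_free = 0
#   def intern(op_index, data, memory):
#       global next_free
#       if op_index not in pointers:
#           pointers[op_index] = next_free + 1
#           memory[next_free + 1: next_free + 1 + len(data)] = data
#           next_free += len(data)
#       return pointers[op_index], len(data)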
elif current_operator.operand == Intrinsic.IO_READ_INTEGER:
# Intrinsic I/O read integer operator.
# Get integer data.
try:
integer_value = int(input())
except ValueError:
integer_value = -1
# Push integer to the stack.
memory_execution_stack.push(integer_value)
else:
# If unknown intrinsic type.
assert False, "Unknown intrinsic! (How?)"
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.IF:
# IF operator.
# Get operand.
operand_a = memory_execution_stack.pop()
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
if operand_a == 0:
# If this is false.
# Jump to the operator operand.
# As this is IF, we should jump to the END.
current_operator_index = current_operator.operand
else:
# If this is true.
# Increment operator index.
# This makes the jump into the if branch.
current_operator_index += 1
elif current_operator.type == OperatorType.ELSE:
# ELSE operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the operator operand.
# As this is an ELSE operator, the operand already stores the jump target index.
current_operator_index = current_operator.operand
elif current_operator.type == OperatorType.DO:
# DO operator.
# Get operand.
operand_a = memory_execution_stack.pop()
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
if operand_a == 0:
# If this is false.
# Endif jump operator index.
end_jump_operator_index = source.operators[current_operator.operand].operand
# Type check.
assert isinstance(end_jump_operator_index, OPERATOR_ADDRESS), \
"Type error, parser level error?"
# Jump to the operator operand.
# As this is DO, we should jump to the END.
current_operator_index = int(end_jump_operator_index)
else:
# If this is true.
# Increment operator index.
# This makes the jump into the loop body.
current_operator_index += 1
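# Shape of `while <expression> do <body> end` at this stage, as the operator
# comments above describe it: a false condition at DO jumps (through its
# operand chain) past the END, a true one falls into the body, and END jumps
# back so the expression is re-evaluated on every iteration.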
elif current_operator.type == OperatorType.WHILE:
# WHILE operator.
# Increment operator index.
# This makes the jump into the while expression.
current_operator_index += 1
elif current_operator.type == OperatorType.END:
# END operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the operator operand.
# As this is an END operator, the operand already stores the jump target index.
current_operator_index = current_operator.operand
elif current_operator.type == OperatorType.DEFINE:
# DEFINE Operator.
# Error.
assert False, "Got definition operator at runner stage, parser level error?"
elif current_operator.type == OperatorType.MEMORY:
assert False, "Got memory operator at runner stage, parser level error?"
else:
# If unknown operator type.
assert False, "Unknown operator type! (How?)"
except IndexError:
# Should be stack error.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
f"Stack error! This is may caused by popping from empty stack!"
f"Do you used {EXTRA_DIRECTIVE}LINTER_SKIP directive? IndexError, (From: "
f"{current_operator.token.text})", True)
except KeyboardInterrupt:
# If stopped.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
"Interpretation was stopped by keyboard interrupt!", True)
if len(memory_execution_stack) > 0:
# If there is anything left on the stack.
# Error message.
gofra.core.errors.message_verbosed(Stage.RUNNER, ("__runner__", 1, 1), "Warning",
"Stack is not empty after running the interpretation!")
# Linter.
def linter_type_check(source: Source):
""" Linter static type check. """
# TODO: IF/WHILE analyse fixes.
# Check that there is no new operator type.
assert len(OperatorType) == 10, "Please update implementation after adding new OperatorType!"
# Check that there is no new instrinsic type.
assert len(Intrinsic) == 30, "Please update implementation after adding new Intrinsic!"
# Create empty linter stack.
memory_linter_stack = Stack()
# Get source operators count.
operators_count = len(source.operators)
# Current operator index from the source.
current_operator_index = 0
# Check that there is more than zero operators in context.
if operators_count == 0:
# If there are no operators in the final parser context.
# Error.
gofra.core.errors.message_verbosed(Stage.LINTER, ("__linter__", 1, 1), "Error",
"There is no operators found in given file after parsing, "
"are you given empty file or file without resulting operators?", True)
while current_operator_index < operators_count:
# While we have not run out of the source operators list.
# Get current operator from the source.
current_operator: Operator = source.operators[current_operator_index]
# Grab our operator
if current_operator.type == OperatorType.PUSH_INTEGER:
# PUSH INTEGER operator.
# Type check.
assert isinstance(current_operator.operand, int), "Type error, lexer level error?"
# Push operand type to the stack.
memory_linter_stack.push(int)
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.PUSH_STRING:
# PUSH STRING operator.
# Type check.
assert isinstance(current_operator.operand, str), "Type error, lexer level error?"
# Push operand types to the stack.
memory_linter_stack.push(int) # String size.
memory_linter_stack.push(TYPE_POINTER) # String pointer.
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.MEMORY:
assert False, "Got memory operator at linter stage, parser level error?"
elif current_operator.type == OperatorType.INTRINSIC:
# Intrinsic operator.
if current_operator.operand == Intrinsic.PLUS:
# Intrinsic plus operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.DIVIDE:
# Intrinsic divide operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MODULUS:
# Intrinsic modulus operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MINUS:
# Intrinsic minus operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MULTIPLY:
# Intrinsic multiply operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.EQUAL:
# Intrinsic equal operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.NOT_EQUAL:
# Intrinsic not equal operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.LESS_THAN:
# Intrinsic less than operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.GREATER_THAN:
# Intrinsic greater than operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.LESS_EQUAL_THAN:
# Intrinsic less equal than operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.GREATER_EQUAL_THAN:
# Intrinsic greater equal than operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.SWAP:
# Intrinsic swap operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Push swapped to the stack.
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_b)
elif current_operator.operand == Intrinsic.COPY:
# Intrinsic copy operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Push copy to the stack.
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_a)
elif current_operator.operand == Intrinsic.COPY2:
# Intrinsic copy2 operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Push copy to the stack.
memory_linter_stack.push(operand_b)
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_b)
memory_linter_stack.push(operand_a)
elif current_operator.operand == Intrinsic.COPY_OVER:
# Intrinsic copy over operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Push copy to the stack.
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_b)
elif current_operator.operand == Intrinsic.DECREMENT:
# Intrinsic decrement operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.INCREMENT:
# Intrinsic increment operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.FREE:
# Intrinsic free operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Free operand.
memory_linter_stack.pop()
elif current_operator.operand == Intrinsic.SHOW:
# Intrinsic show operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
elif current_operator.operand == Intrinsic.MEMORY_WRITE:
# Intrinsic memory write operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_POINTER:
cli_argument_type_error_message(current_operator, 1, operand_a, TYPE_POINTER, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
elif current_operator.operand == Intrinsic.MEMORY_WRITE4BYTES:
# Intrinsic memory write 4 bytes operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_POINTER:
cli_argument_type_error_message(current_operator, 1, operand_a, TYPE_POINTER, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
elif current_operator.operand == Intrinsic.MEMORY_READ4BYTES:
# Intrinsic memory read 4 bytes operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_POINTER:
cli_argument_type_error_message(current_operator, 1, operand_a, TYPE_POINTER, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MEMORY_READ:
# Intrinsic memory read operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_POINTER:
cli_argument_type_error_message(current_operator, 1, operand_a, TYPE_POINTER, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MEMORY_SHOW_CHARACTERS:
# Intrinsic memory show bytes as characters operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_POINTER:
cli_argument_type_error_message(current_operator, 1, operand_a, TYPE_POINTER, True)
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
elif current_operator.operand == Intrinsic.MEMORY_POINTER:
# Intrinsic memory pointer operator.
# Push pointer to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.NULL:
# Intrinsic null operator.
# Push pointer to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.IO_READ_STRING:
# I/O read string operator.
# Push operand types to the stack.
memory_linter_stack.push(int) # String size.
memory_linter_stack.push(TYPE_POINTER) # String pointer.
elif current_operator.operand == Intrinsic.IO_READ_INTEGER:
# I/O read integer operator.
# Push operand types to the stack.
memory_linter_stack.push(int) # Integer.
else:
# If unknown intrinsic type.
assert False, "Got unexpected / unknown intrinsic type! (How?)"
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.IF:
# IF operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Increment operator index.
# This makes the jump into the if branch.
current_operator_index += 1
elif current_operator.type == OperatorType.ELSE:
# ELSE operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the operator operand.
# As this is an ELSE operator, the operand already stores the jump target index.
current_operator_index = current_operator.operand
elif current_operator.type == OperatorType.WHILE:
# WHILE operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.DO:
# DO operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Endif jump operator index.
end_jump_operator_index = source.operators[current_operator.operand].operand
# Type check.
assert isinstance(end_jump_operator_index, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the END from WHILE.
current_operator_index = int(end_jump_operator_index)
elif current_operator.type == OperatorType.END:
# END operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the operator operand.
# As this is an END operator, the operand already stores the jump target index.
current_operator_index = current_operator.operand
elif current_operator.type == OperatorType.DEFINE:
assert False, "Got definition operator at linter stage, parser level error?"
else:
assert False, "Got unexpected / unknon operator type! (How?)"
if len(memory_linter_stack) != 0:
# If there is anything left on the stack.
# Get last operator token location.
location: LOCATION = source.operators[current_operator_index - 1].token.location
# Error message.
gofra.core.errors.message_verbosed(Stage.LINTER, location, "Error",
f"Stack is not empty at the type checking stage! "
f"(There is {len(memory_linter_stack)} elements when should be 0)", True)
# Source.
def load_source_from_file(file_path: str) -> tuple[Source, ParserContext]:
""" Load file, then return ready source and context for it. (Tokenized, Parsed, Linted). """
# Read source lines.
source_file, _ = gofra.core.other.try_open_file(file_path, "r", True, encoding="UTF-8")
source_lines = source_file.readlines()
source_file.close()
parser_context = ParserContext()
# Tokenize.
lexer_tokens = list(lexer_tokenize(source_lines, file_path))
if len(lexer_tokens) == 0:
gofra.core.errors.message_verbosed(Stage.LEXER, (basename(file_path), 1, 1), "Error",
"There is no tokens found in given file, are you given empty file?", True)
# Parse.
parser_parse(lexer_tokens, parser_context, file_path)
# Create source from context.
parser_context_source = Source(parser_context.operators)
# Type check.
assert isinstance(parser_context.directive_linter_skip, bool), "Expected linter skip directive to be boolean."
if not parser_context.directive_linter_skip:
linter_type_check(parser_context_source)
return parser_context_source, parser_context
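# A minimal usage sketch of the loader (the file path is illustrative):
#   source, context = load_source_from_file("examples/hello.gof")
#   interpretator_run(source)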
# Python.
def python_generate(source: Source, context: ParserContext, path: str):
""" Generates graph from the source. """
# Check that there is no changes in operator type or intrinsic.
assert len(OperatorType) == 10, "Please update implementation for python generation after adding new OperatorType!"
assert len(Intrinsic) == 28, "Please update implementation for python generationg after adding new Intrinsic!"
def __update_indent(value: int):
""" Updates indent by given value. """
# Update level.
nonlocal current_indent_level # type: ignore
current_indent_level += value
# Update indent string.
nonlocal current_indent # type: ignore
current_indent = "\t" * current_indent_level
def __write_footer():
""" Write footer. """
# Strings live inside the memory bytearray, so either flag implies both blocks.
nonlocal current_bytearray_should_written, current_string_buffer_should_written
if current_bytearray_should_written or current_string_buffer_should_written:
current_string_buffer_should_written = True
current_bytearray_should_written = True
if current_bytearray_should_written:
# If we should write bytearray block.
# Allocate bytearray.
current_lines.insert(current_bytearray_insert_position,
f"memory = bytearray("
f"{context.memory_bytearray_size} + strings_size"
f")")
# Comment allocation.
if not directive_skip_comments:
current_lines.insert(current_bytearray_insert_position,
"# Allocate memory buffer (memory + strings)"
"(As you called memory operators): \n")
# Warn user about using byte operations in python compilation.
gofra.core.errors.message("Warning", "YOU ARE USING MEMORY OPERATIONS, THAT MAY HAVE EXPLICIT BEHAVIOUR! "
"IT IS MAY HARDER TO CATCH ERROR IF YOU RUN COMPILED VERSION "
"(NOT INTERPRETATED)")
if current_string_buffer_should_written:
# If we should write string buffer block.
# Push string function.
current_lines.insert(current_string_buffer_insert_position,
"\ndef stack_push_string(stack_str, op_index): \n"
"\tstr_len = len(stack_str)\n"
"\tif op_index not in strings_pointers:\n"
"\t\tglobal strings_size_pointer\n"
"\t\tptr = strings_size + 1 + strings_size_pointer\n"
"\t\tstrings_pointers[op_index] = ptr\n"
"\t\tmemory[ptr: ptr + str_len] = stack_str\n"
"\t\tstrings_size_pointer += str_len\n"
"\t\tif str_len > strings_size:\n"
"\t\t\tprint(\""
"ERROR! Trying to push string, "
"when there is memory string buffer overflow! "
"Try use memory size directive, to increase size!"
"\")\n"
"\t\t\texit(1)\n"
"\tfsp = strings_pointers[op_index]\n"
"\treturn fsp, str_len\n"
)
# Allocate string buffer.
current_lines.insert(current_string_buffer_insert_position,
f"strings_pointers = dict()\n"
f"strings_size = {context.memory_bytearray_size}\n"
f"strings_size_pointer = 0")
# Comment allocation.
if not directive_skip_comments:
current_lines.insert(current_string_buffer_insert_position,
"# Allocate strings buffer "
"(As you used strings): \n")
def __write_header():
""" Writes header. """
# Write auto-generated mention.
if not directive_skip_comments:
current_lines.append("# This file is auto-generated by Gofra-Language python subcommand! \n\n")
# Write stack initialization element.
if not directive_skip_comments:
current_lines.append("# Allocate stack (As is Gofra is Stack-Based Language): \n")
current_lines.append("stack = []\n")
# Update bytearray insert position.
nonlocal current_bytearray_insert_position
current_bytearray_insert_position = len(current_lines)
# Update string buffer insert position.
nonlocal current_string_buffer_insert_position
current_string_buffer_insert_position = len(current_lines)
# Write file and expression comments.
if not directive_skip_comments:
current_lines.append("\n\n")
current_lines.append(f"# File ({basename(path)}): \n")
current_lines.append(f"# Expressions: \n")
# Update while insert position.
nonlocal current_while_insert_position
current_while_insert_position = len(current_lines)
# Write source header.
if not directive_skip_comments:
current_lines.append("# Source:\n")
def __write_operator_intrinsic(operator: Operator):
""" Writes default operator (non-intrinsic). """
# Check that this is intrinsic operator.
assert operator.type == OperatorType.INTRINSIC, "Non-INTRINSIC operators " \
"should be written using __write_operator()!"
# Type check.
assert isinstance(current_operator.operand, Intrinsic), f"Type error, parser level error?"
nonlocal current_bytearray_should_written # type: ignore
if current_operator.operand == Intrinsic.PLUS:
# Intrinsic plus operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_b + operand_a)")
elif current_operator.operand == Intrinsic.MINUS:
# Intrinsic minus operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_b - operand_a)")
elif current_operator.operand == Intrinsic.INCREMENT:
# Intrinsic increment operator.
# Write operator data.
write("stack.append(stack.pop() + 1)")
elif current_operator.operand == Intrinsic.DECREMENT:
# Intrinsic decrement operator.
# Write operator data.
write("stack.append(stack.pop() - 1)")
elif current_operator.operand == Intrinsic.MULTIPLY:
# Intrinsic multiply operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_b * operand_a)")
elif current_operator.operand == Intrinsic.DIVIDE:
# Intrinsic divide operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_b // operand_a)")
elif current_operator.operand == Intrinsic.MODULUS:
# Intrinsic modulus operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write(f"stack.append(int(operand_b % operand_a))") # TODO: Check %, remove or left int()
elif current_operator.operand == Intrinsic.EQUAL:
# Intrinsic equal operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(int(operand_b == operand_a))")
elif current_operator.operand == Intrinsic.GREATER_EQUAL_THAN:
# Intrinsic greater equal than operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(int(operand_b >= operand_a))")
elif current_operator.operand == Intrinsic.GREATER_THAN:
# Intrinsic greater than operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(int(operand_b > operand_a))")
elif current_operator.operand == Intrinsic.LESS_THAN:
# Intrinsic less than operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(int(operand_b < operand_a))")
elif current_operator.operand == Intrinsic.LESS_EQUAL_THAN:
# Intrinsic less equal than operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(int(operand_b<= operand_a))")
elif current_operator.operand == Intrinsic.SWAP:
# Intrinsic swap operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_a)")
write("stack.append(operand_b)")
elif current_operator.operand == Intrinsic.COPY:
# Intrinsic copy operator.
# Write operator data.
write("operand_a = stack.pop()")
write("stack.append(operand_a)")
write("stack.append(operand_a)")
elif current_operator.operand == Intrinsic.SHOW:
# Intrinsic show operator.
# Write operator data.
write("print(stack.pop())")
elif current_operator.operand == Intrinsic.FREE:
# Intrinsic free operator.
# Write operator data.
write("stack.pop()")
elif current_operator.operand == Intrinsic.NOT_EQUAL:
# Intrinsic not equal operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(int(operand_b != operand_a))")
elif current_operator.operand == Intrinsic.COPY2:
# Intrinsic copy2 operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_b)")
write("stack.append(operand_a)")
write("stack.append(operand_b)")
write("stack.append(operand_a)")
elif current_operator.operand == Intrinsic.COPY_OVER:
# Intrinsic copy over operator.
# Write operator data.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("stack.append(operand_b)")
write("stack.append(operand_a)")
write("stack.append(operand_b)")
elif current_operator.operand == Intrinsic.MEMORY_POINTER:
# Intrinsic null pointer operator.
# Write bytearray block.
# TODO: May be removed, but this is fine for now.
current_bytearray_should_written = True
# Write operator data.
write(f"stack.append({MEMORY_BYTEARRAY_NULL_POINTER})")
elif current_operator.operand == Intrinsic.NULL:
# Intrinsic null operator.
# Write operator data.
write(f"stack.append(0)")
elif current_operator.operand == Intrinsic.MEMORY_WRITE:
# Intrinsic memory write operator.
# Write bytearray block.
current_bytearray_should_written = True
# Write operator data.
# TODO: More checks at compiled script.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("memory[operand_b] = operand_a")
elif current_operator.operand == Intrinsic.MEMORY_READ:
# Intrinsic memory read operator.
# Write bytearray block.
current_bytearray_should_written = True
# Write operator data.
# TODO: More checks at compiled script.
write("operand_a = stack.pop()")
write("memory_byte = memory[operand_a]")
write("stack.append(memory_byte)")
elif current_operator.operand == Intrinsic.MEMORY_WRITE4BYTES:
# Intrinsic memory write 4 bytes operator.
# Write bytearray block.
current_bytearray_should_written = True
# Write operator data.
# TODO: More checks at compiled script.
write("operand_a = stack.pop()")
write("operand_b = stack.pop()")
write("memory_bytes = operand_a.to_bytes(length=4, byteorder=\"little\", signed=(operand_a < 0))")
write("memory[operand_b:operand_b + 4] = memory_bytes")
elif current_operator.operand == Intrinsic.MEMORY_READ4BYTES:
# Intrinsic memory read 4 bytes operator.
# Write bytearray block.
current_bytearray_should_written = True
# Write operator data.
# TODO: More checks at compiled script.
write("operand_a = stack.pop()")
write("memory_bytes = int.from_bytes(memory[operand_a:operand_a + 4], byteorder=\"little\")")
write("stack.append(memory_bytes)")
elif current_operator.operand == Intrinsic.MEMORY_SHOW_CHARACTERS:
# Intrinsic memory show as characters operator.
# Write bytearray block.
current_bytearray_should_written = True
# Write operator data.
write("memory_length = stack.pop()")
write("memory_pointer = stack.pop()")
write("memory_index = 0")
write("while memory_index < memory_length:")
write("\tmemory_byte = memory[memory_pointer + memory_index]")
write("\tprint(chr(memory_byte), end=\"\")")
write("\tmemory_index += 1")
else:
# If unknown intrinsic type.
# Error.
gofra.core.errors.message_verbosed(Stage.COMPILATOR, current_operator.token.location, "Error",
f"Intrinsic `{INTRINSIC_TYPES_TO_NAME[current_operator.operand]}` "
f"is not implemented for python generation!", True)
def __write_operator(operator: Operator):
""" Writes default operator (non-intrinsic). """
# Nonlocalise while data.
nonlocal current_while_block # type: ignore
nonlocal current_while_defined_name # type: ignore
nonlocal current_while_comment # type: ignore
# Grab our operator
if operator.type == OperatorType.INTRINSIC:
# Intrinsic operator.
# Error.
assert False, "Intrinsic operators should be written using __write_operator_intrinsic()!"
elif operator.type == OperatorType.PUSH_INTEGER:
# PUSH INTEGER operator.
# Type check.
assert isinstance(operator.operand, int), "Type error, parser level error?"
# Write operator data.
write(f"stack.append({operator.operand})")
elif operator.type == OperatorType.PUSH_STRING:
# PUSH STRING operator.
# Type check.
assert isinstance(operator.operand, str), "Type error, parser level error?"
# Write operator data.
# TODO: Warn using `current_operator_index`
write(f"s_str, s_len = stack_push_string({operator.operand.encode('UTF-8')}, {current_operator_index})")
write(f"stack.append(s_str)")
write(f"stack.append(s_len)")
# Write strings buffer block.
nonlocal current_string_buffer_should_written
current_string_buffer_should_written = True
# And memory.
nonlocal current_bytearray_should_written
current_bytearray_should_written = True
elif operator.type == OperatorType.IF:
# IF operator.
# Type check.
assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
# Write operator data.
write("if stack.pop() != 0:")
# Increase indent level.
__update_indent(1)
elif operator.type == OperatorType.WHILE:
# WHILE operator.
# Type check.
assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
# Remember name, so we can write "def" at the top of the source in current_while_insert_position.
current_while_defined_name = f"while_expression_ip{current_operator_index}"
# Remember comment for while function block.
current_while_comment = comment
# Write operator data.
current_lines.append(f"{current_indent}{comment[2:]}\n"
f"{current_indent}while {current_while_defined_name}()")
# Set that we in while expression.
current_while_block = True
elif operator.type == OperatorType.DO:
# DO operator.
# Type check.
assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
if current_while_block:
# If we close while.
# Current insert position for lines.
# (As we don't want to reset current_while_insert_position)
while_block_insert_position = current_while_insert_position
# Insert header.
function_comment = "" if directive_skip_comments else f"\t# -- Should be called from WHILE.\n"
current_lines.insert(while_block_insert_position,
f"def {current_while_defined_name}():{current_while_comment}\n" + function_comment)
for while_stack_line in current_while_lines:
# Iterate over while stack lines.
# Increment.
while_block_insert_position += 1
# Insert.
current_lines.insert(while_block_insert_position, f"\t{while_stack_line}")
# Insert return.
return_comment = "" if directive_skip_comments else f" # -- Return for calling from WHILE ."
current_lines.insert(while_block_insert_position + 1,
f"\treturn stack.pop()" + return_comment + "\n")
else:
# If this is not while.
# Error.
gofra.core.errors.message_verbosed(Stage.COMPILATOR, operator.token.location, "Error",
"Got `do`, when there is no `while` block started! "
"(Parsing error?)", True)
# Write operator.
current_lines.append(f":{comment}\n")
# Go out the while block expression.
current_while_block = False
# Reset current while lines list (stack).
current_while_lines.clear()
# Increase indent level.
__update_indent(1)
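# Rough shape of what the WHILE/DO pair above emits into the generated file
# (the `ip3` suffix is illustrative; it is the operator index of the WHILE):
#   def while_expression_ip3():
#       ...expression operators...
#       return stack.pop()  # -- Return for calling from WHILE.
#   while while_expression_ip3():
#       ...body operators...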
elif operator.type == OperatorType.ELSE:
# ELSE operator.
# Type check.
assert isinstance(operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Write operator data.
pass_comment = "" if directive_skip_comments else f" # -- Be sure that there is no empty body."
current_lines.append(current_indent + f"pass{pass_comment}\n")
# Decrease indent level.
__update_indent(-1)
# Write operator data.
write("else:")
# Increase indent level.
__update_indent(1)
elif operator.type == OperatorType.END:
# END operator.
# Actually, there is no END in Python.
# Type check.
assert isinstance(operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Write operator data.
pass_comment = "" if directive_skip_comments else f" # -- Be sure that there is no empty body."
current_lines.append(current_indent + f"pass{pass_comment}\n")
# Decrease indent level.
__update_indent(-1)
elif operator.type == OperatorType.DEFINE:
# DEFINE Operator.
# Error.
assert False, "Got definition operator at runner stage, parser level error?"
else:
# If unknown operator type.
assert False, f"Got unexpected / unknon operator type! (How?)"
def write(text: str):
""" Writes text to file. """
if current_while_block:
# If we are in loop.
# Add text without indent.
current_while_lines.append(text + comment + "\n")
else:
# Write default text.
current_lines.append(current_indent + text + comment + "\n")
# Indentation level.
current_indent_level = 0 # Indent level for calculating.
current_indent = "" # Indent string for writing.
# While.
current_while_block = False # If true, we are in while loop.
current_while_comment = "" # While block comment to place in final expression function.
current_while_defined_name = "" # While defined name for naming expression function.
current_while_lines: List[str] = [] # List of while lines to write in expression function.
current_while_insert_position = 0 # Position to insert while expressions blocks.
# Bytearray.
current_bytearray_insert_position = 0 # Position to insert bytearray block if bytearray_should_written is true.
current_bytearray_should_written = False # If true, will warn about memory usage and write bytearray block.
# TODO: Remove as redundant; the bytearray insert position above is the same.
# Strings.
# Position to insert string buffer allocation block,
# if current_string_buffer_should_written is true.
current_string_buffer_insert_position = 0
current_string_buffer_should_written = False # If true, will write string buffer allocation block.
# Should we skip comments.
directive_skip_comments = context.directive_python_comments_skip
# Get source operators count.
operators_count = len(source.operators)
# Current operator index from the source.
current_operator_index = 0
# Lines.
current_lines: List[str] = []
# Check that there is more than zero operators in context.
if operators_count == 0:
# If there are no operators in the final parser context.
# Error.
gofra.core.errors.message_verbosed(Stage.COMPILATOR, (basename(path), 1, 1), "Error",
"There is no operators found in given file after parsing, "
"are you given empty file or file without resulting operators?", True)
# Open file.
file, _ = gofra.core.other.try_open_file(path + ".py", "w", True)
# Write header.
__write_header()
while current_operator_index < operators_count:
# While we have not run out of the source operators list.
# Get current operator from the source.
current_operator: Operator = source.operators[current_operator_index]
# Make comment string.
location: LOCATION = current_operator.token.location
location_string: str = f"Line {location[1]}, Row {location[2]}"
comment = "" if directive_skip_comments else f" # Token: {current_operator.token.text} [{location_string}]"
if current_operator.type == OperatorType.INTRINSIC:
# If this is intrinsic.
# Write intrinsic operator.
__write_operator_intrinsic(current_operator)
else:
# If this is other operator.
# Write default operator.
__write_operator(current_operator)
# Increment current index.
current_operator_index += 1
# Write footer.
__write_footer()
if len(current_while_lines) != 0:
# If we have something at the while lines stack.
# Error.
gofra.core.errors.message_verbosed(Stage.COMPILATOR, source.operators[-1].token.location, "Error",
"While lines stack is not empty after running python generation! "
"(Compilation error?)", True)
# Write lines in final file.
for current_stack_line in current_lines:
file.write(current_stack_line)
# Close file.
file.close()
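# Example invocation (paths are illustrative); this writes `program.gof.py`
# next to the given path:
#   source, context = load_source_from_file("program.gof")
#   python_generate(source, context, "program.gof")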
# Bytecode.
def compile_bytecode(source: Source, _, path: str):
""" Compiles operators to bytecode. """
# Check that there is no changes in operator type or intrinsic.
assert len(OperatorType) == 10, \
"Please update implementation for bytecode compilation after adding new OperatorType!"
assert len(Intrinsic) == 28, "Please update implementation for bytecode compilation after adding new Intrinsic!"
def __write_operator_intrinsic(operator: Operator):
""" Writes default operator (non-intrinsic). """
# Check that this is intrinsic operator.
assert operator.type == OperatorType.INTRINSIC, "Non-INTRINSIC operators " \
"should be written using __write_operator()!"
# Type check.
assert isinstance(current_operator.operand, Intrinsic), f"Type error, parser level error?"
if current_operator.operand in INTRINSIC_TO_BYTECODE_OPERATOR:
# Intrinsic operator.
# Write operator data.
write(INTRINSIC_TO_BYTECODE_OPERATOR[current_operator.operand])
else:
gofra.core.errors.message_verbosed(Stage.COMPILATOR, current_operator.token.location, "Error",
f"Intrinsic `{INTRINSIC_TYPES_TO_NAME[current_operator.operand]}` "
f"is not implemented for bytecode compilation!", True)
def __write_operator(operator: Operator):
""" Writes default operator (non-intrinsic). """
# Grab our operator
if operator.type == OperatorType.INTRINSIC:
assert False, "Intrinsic operators should be written using __write_operator_intrinsic()!"
elif operator.type == OperatorType.PUSH_INTEGER:
assert isinstance(operator.operand, int), "Type error, parser level error?"
# Write operator data.
write(OPERATOR_TYPE_TO_BYTECODE_OPERATOR[OperatorType.PUSH_INTEGER])
write(f"{operator.operand}")
elif operator.type == OperatorType.PUSH_STRING:
assert isinstance(operator.operand, str), "Type error, parser level error?"
gofra.core.errors.message("Error", "Strings is not implemented yet in the bytecode!", True)
elif operator.type == OperatorType.IF:
assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
elif operator.type == OperatorType.WHILE:
assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
elif operator.type == OperatorType.DO:
assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
elif operator.type == OperatorType.ELSE:
assert isinstance(operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
elif operator.type == OperatorType.END:
assert isinstance(operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
elif operator.type == OperatorType.DEFINE:
assert False, "Got definition operator at runner stage, parser level error?"
else:
# If unknown operator type.
assert False, f"Got unexpected / unknon operator type! (How?)"
# WIP.
current_lines.append("\n")
def write(text: str):
""" Writes text to file. """
current_lines.append(text + " ")
# Get source operators count.
operators_count = len(source.operators)
# Current operator index from the source.
current_operator_index = 0
# Lines.
current_lines: List[str] = []
# Check that there is more than zero operators in context.
if operators_count == 0:
# If there are no operators in the final parser context.
gofra.core.errors.message_verbosed(Stage.COMPILATOR, (basename(path), 1, 1), "Error",
"There is no operators found in given file after parsing, "
"are you given empty file or file without resulting operators?", True)
# Open file.
bytecode_path = path + ".gofbc"
file, _ = gofra.core.other.try_open_file(bytecode_path, "w", True)
while current_operator_index < operators_count:
# While we have not run out of the source operators list.
# Get current operator from the source.
current_operator: Operator = source.operators[current_operator_index]
if current_operator.type == OperatorType.INTRINSIC:
# If this is intrinsic.
# Write intrinsic operator.
__write_operator_intrinsic(current_operator)
else:
# If this is other operator.
# Write default operator.
__write_operator(current_operator)
# Increment current index.
current_operator_index += 1
# Write lines in final file.
for current_stack_line in current_lines:
file.write(current_stack_line)
# Close file.
file.close()
return bytecode_path
def execute_bytecode(path: str):
""" Executes bytecode file. """
    # Check that there are no changes in operator types or intrinsics.
assert len(OperatorType) == 10, "Please update implementation for bytecode execution after adding new OperatorType!"
assert len(Intrinsic) == 28, "Please update implementation for bytecode execution after adding new Intrinsic!"
if not path.endswith(".gofbc"):
gofra.core.errors.message("Error", f"File \"{path}\" should have extension `.gofbc` for being executed!", True)
return
# Open file.
file, _ = gofra.core.other.try_open_file(path, "r", True)
# Tokenize operator tokens.
bc_op_tokens = []
for line in file.readlines():
op_tokens = line.split(" ")
for op_token in op_tokens:
if op_token == "\n" or op_token.replace(" ", "") == "":
continue
bc_op_tokens.append(op_token)
# New context.
parser_context = ParserContext()
    # Convert bytecode ops to interpreter operators.
current_bc_operator_index = 0
while current_bc_operator_index < len(bc_op_tokens):
bc_operator = bc_op_tokens[current_bc_operator_index]
if bc_operator == OPERATOR_TYPE_TO_BYTECODE_OPERATOR[OperatorType.PUSH_INTEGER]:
parser_context.operators.append(Operator(
OperatorType.PUSH_INTEGER,
Token(TokenType.BYTECODE, bc_operator, (path, -1, -1), bc_operator),
int(bc_op_tokens[current_bc_operator_index + 1])
))
current_bc_operator_index += 2
continue
else:
if bc_operator in BYTECODE_OPERATOR_NAMES_TO_INTRINSIC:
parser_context.operators.append(Operator(
OperatorType.INTRINSIC,
Token(TokenType.BYTECODE, bc_operator, (path, -1, -1), bc_operator),
BYTECODE_OPERATOR_NAMES_TO_INTRINSIC[bc_operator]
))
else:
gofra.core.errors.message_verbosed(Stage.PARSER, ("Bytecode", -1, -1), "Error",
f"Got unexpected bytecode instruction - {repr(bc_operator)}!", True)
current_bc_operator_index += 1
continue
# Run.
parser_context_source = Source(parser_context.operators)
interpretator_run(parser_context_source)
# Close file.
file.close()
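# Illustrative round trip (a sketch, not part of the original file): `compile_bytecode`
# returns the `.gofbc` path, which `execute_bytecode` can then run. The `source` and
# `context` names are assumed to come from an earlier `load_source_from_file` call.
#
#   bytecode_path = compile_bytecode(source, context, "program.gof")  # -> "program.gof.gofbc"
#   execute_bytecode(bytecode_path)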
# CLI.
def cli_no_arguments_error_message(operator: Operator, force_exit: bool = False):
""" Shows no arguments passed error message to the CLI. """
if operator.type == OperatorType.INTRINSIC:
# Intrinsic Operator.
# Type check.
assert isinstance(operator.operand, Intrinsic), "Type error, parser level error?"
# Error
gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
f"`{INTRINSIC_TYPES_TO_NAME[operator.operand]}` "
f"intrinsic should have more arguments at the stack, but it was not founded!")
elif operator.type == OperatorType.IF:
# IF Operator.
# Error
gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
"`IF` operator should have 1 argument at the stack, but it was not found!")
elif operator.type == OperatorType.DO:
# DO Operator.
# Error
gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
"`DO` operator should have 1 argument at the stack, but it was not found!")
else:
# Unknown operator.
assert False, "Tried to call no_arguments_error_message() " \
"for operator that does not need arguments! (Type checker error?)"
# If we should force exit.
if force_exit:
exit(1)
def cli_argument_type_error_message(operator: Operator, argument_index: int,
actual_type: type, expected_type: type, force_exit: bool = False):
""" Shows unexpected argument type passed error message to the CLI. """
if operator.type == OperatorType.INTRINSIC:
assert isinstance(operator.operand, Intrinsic), "Type error, parser level error?"
gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
f"`{INTRINSIC_TYPES_TO_NAME[operator.operand]}` "
f"intrinsic expected {argument_index} argument "
f"to be with type {expected_type}, but it has type {actual_type}!")
elif operator.type == OperatorType.IF:
gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
f"`IF` operator expected type {expected_type} but got {actual_type}!")
elif operator.type == OperatorType.DO:
gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
f"`DO` operator expected type {expected_type} but got {actual_type}!")
else:
assert False, "Tried to call cli_argument_type_error_message() " \
"for unknown operator! (Type checker error?)"
if force_exit:
exit(1)
def cli_validate_argument_vector(argument_vector: List[str]) -> List[str]:
""" Validates CLI argv (argument vector) """
# Check that ther is any in the ARGV.
assert len(argument_vector) > 0, "There is no source (mspl.py) file path in the ARGV"
# Get argument vector without source(mspl.py) path.
argument_runner_filename: str = argument_vector[0]
argument_vector = argument_vector[1:]
# Validate ARGV.
if len(argument_vector) == 0:
        # If there are no arguments.
# Message.
gofra.systems.cli.usage_message(argument_runner_filename)
gofra.core.errors.message("Error", "Please pass file path to work with (.gof or .gofbc ~)", True)
elif len(argument_vector) == 1:
# Just one argument.
if argument_vector[0] != "help":
# If this is not help.
# Message.
gofra.systems.cli.usage_message(argument_runner_filename)
gofra.core.errors.message("Error", "Please pass subcommand after the file path!", True)
# Show usage.
gofra.systems.cli.usage_message(argument_runner_filename)
# Exit.
exit(0)
        # Return an empty source path with the subcommand (argv[0] == "help").
return ["", argument_vector[0], ""]
elif len(argument_vector) == 2:
# Expected ARGV length.
# All ok.
return [*argument_vector, ""]
elif len(argument_vector) == 3:
        # If this may be the silent argument.
        if argument_vector[2] != "-silent":
            # If the third argument is not the silent flag.
# Message.
gofra.systems.cli.usage_message(argument_runner_filename)
gofra.core.errors.message("Error", "Unexpected arguments!", True)
# Return final ARGV.
return argument_vector
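# A sketch of the normalization above (assumed behaviour, inferred from the branches):
# the returned list always has 3 entries - [source_path, subcommand, maybe "-silent"]:
#   cli_validate_argument_vector(["gofra.py", "app.gof", "run"])             # -> ["app.gof", "run", ""]
#   cli_validate_argument_vector(["gofra.py", "app.gof", "run", "-silent"])  # -> ["app.gof", "run", "-silent"]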
def cli_entry_point():
""" Entry point for the CLI. """
# Get and check size of cli argument vector.
cli_argument_vector = cli_validate_argument_vector(argv)
assert len(cli_argument_vector) == 3, "Got unexpected size of argument vector."
# CLI Options.
cli_source_path, cli_subcommand, cli_silent = cli_argument_vector
cli_silent: bool = bool(cli_silent == "-silent")
# Welcome message.
if not cli_silent:
gofra.systems.cli.welcome_message()
# Load source and check size of it.
loaded_file = None
if cli_subcommand in ("run", "graph", "python", "dump", "compile"):
loaded_file = load_source_from_file(cli_source_path)
assert len(loaded_file) == 2, "Got unexpected data from loaded file."
if cli_subcommand == "run":
        # If this is the run (interpret) subcommand.
cli_source, cli_context = loaded_file
interpretator_run(cli_source, cli_context.memory_bytearray_size)
# Message.
if not cli_silent:
print(f"[Info] File \"{basename(cli_source_path)}\" was interpreted!")
elif cli_subcommand == "graph":
# If this is graph subcommand.
# Get source from loaded file.
cli_source, _ = loaded_file
# Generate graph file.
gofra.systems.graph.write(cli_source, cli_source_path)
# Message.
if not cli_silent:
print(f"[Info] .dot file \"{basename(cli_source_path)}.dot\" generated!")
elif cli_subcommand == "python":
# If this is python subcommand.
# Get source and context from loaded file.
cli_source, cli_context = loaded_file
# Generate python file.
python_generate(cli_source, cli_context, cli_source_path)
# Message.
if not cli_silent:
print(f"[Info] .py file \"{basename(cli_source_path)}.py\" generated!")
elif cli_subcommand == "dump":
# If this is dump subcommand.
# Get source from loaded file.
cli_source, _ = loaded_file
# Dump print.
gofra.systems.dump.dump(cli_source.operators)
# Message.
if not cli_silent:
print(f"[Info] File \"{basename(cli_source_path)}\" was dump printed!")
elif cli_subcommand == "compile":
# If this is compile subcommand.
# Get source from loaded file.
cli_source, cli_context = loaded_file
# Compile.
bytecode_path = compile_bytecode(cli_source, cli_context, cli_source_path)
# Message.
if not cli_silent:
print(f"[Info] File \"{basename(cli_source_path)}\" was compiled to \"{basename(bytecode_path)}\"!")
elif cli_subcommand == "execute":
# If this is execute subcommand.
# Execute.
execute_bytecode(cli_source_path)
# Message.
if not cli_silent:
print(f"[Info] File \"{basename(cli_source_path)}\" was executed!")
else:
# If unknown subcommand.
# Message.
gofra.systems.cli.usage_message(__file__)
gofra.core.errors.message("Error", f"Unknown subcommand `{cli_subcommand}`!")
if __name__ == "__main__":
cli_entry_point()
| 0 | 0 | 0 |
fbcbbf8b1de343666b5bd53e849fab28452d2cb1 | 2,907 | py | Python | example_project/example_project/settings.py | TAMUArch/django-photologue | e153dd84715b1dd0bea3ac869cd9fcd9bf01e057 | ["BSD-3-Clause"] | null | null | null | example_project/example_project/settings.py | TAMUArch/django-photologue | e153dd84715b1dd0bea3ac869cd9fcd9bf01e057 | ["BSD-3-Clause"] | null | null | null | example_project/example_project/settings.py | TAMUArch/django-photologue | e153dd84715b1dd0bea3ac869cd9fcd9bf01e057 | ["BSD-3-Clause"] | null | null | null | # Global settings for photologue example project.
import os
DEBUG = TEMPLATE_DEBUG = True
# Top level folder - the one created by the startproject command.
TOP_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
ADMINS = ()
MANAGERS = ADMINS
# Default dev database is Sqlite. In production I use postgres.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(TOP_FOLDER, 'database.sql3')
}
}
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
# TODO: setting this to True in Django 1.4 causes runtime warnings; when 1.4
# is end-of-lifed in 2014 we can change this setting to True.
USE_TZ = False
MEDIA_ROOT = os.path.join(TOP_FOLDER, 'public', 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(TOP_FOLDER, 'public', 'static')
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(TOP_FOLDER, 'example_project/static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '3p0f5q)l$=gt++#z0inpfh%bm_ujl6(-yogbzw2)(xea48@70d'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
ROOT_URLCONF = 'example_project.urls'
from photologue import PHOTOLOGUE_TEMPLATE_DIR
TEMPLATE_DIRS = (
os.path.join(TOP_FOLDER, 'example_project/templates'),
PHOTOLOGUE_TEMPLATE_DIR
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
)
FIXTURE_DIRS = (
os.path.join(TOP_FOLDER, 'example_project/fixtures'),
)
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'photologue',
'example_project',
'south',
]
SOUTH_TESTS_MIGRATE = False
| 26.916667 | 76 | 0.735466 |
| 0 | 0 | 0 |
4396b65d0afeba59d9fd6311235008de268a0946 | 712 | py | Python | openks/models/pytorch/mmd_modules/spcan/utils/lr_scheduler.py | vivym/OpenKS | ea380782162de2e4c1a413f37ad12b85ccb7048a | ["Apache-2.0"] | null | null | null | openks/models/pytorch/mmd_modules/spcan/utils/lr_scheduler.py | vivym/OpenKS | ea380782162de2e4c1a413f37ad12b85ccb7048a | ["Apache-2.0"] | null | null | null | openks/models/pytorch/mmd_modules/spcan/utils/lr_scheduler.py | vivym/OpenKS | ea380782162de2e4c1a413f37ad12b85ccb7048a | ["Apache-2.0"] | 2 | 2021-11-18T06:55:55.000Z | 2021-12-29T15:21:07.000Z | # Learning rate scheduler
| 32.363636 | 89 | 0.632022 | # Learning rate scheduler
def lr_scheduler(optimizer, lr_mult, args, weight_mult=1):
    for counter, param_group in enumerate(optimizer.param_groups):
        if counter == 0:
            param_group['lr'] = args.base_lr * lr_mult / 10.0
        else:
            param_group['lr'] = args.base_lr * lr_mult
    return optimizer, lr_mult
def dom_w_scheduler(optimizer, lr_mult, args, weight_mult=1):
    for counter, param_group in enumerate(optimizer.param_groups):
        if counter == 0:
            param_group['lr'] = args.base_lr * lr_mult * weight_mult
    return optimizer, lr_mult
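# Illustrative usage (a sketch, not part of the original module): `args` is assumed to be
# an argparse.Namespace carrying a `base_lr` attribute, as implied by the code above.
#
#   optimizer = torch.optim.SGD(model.parameters(), lr=args.base_lr)
#   optimizer, lr_mult = lr_scheduler(optimizer, lr_mult=0.1, args=args)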
| 635 | 0 | 48 |
c11db3f4c3251d40b4093ee2755915bafc3e86d4 | 21,229 | py | Python | computation_graph/run.py | hyroai/computation-graph | 357ea2cfd98c3ea1162b3f373c38738f8ae08b6b | ["MIT"] | 23 | 2020-04-19T14:30:40.000Z | 2021-12-03T14:45:44.000Z | computation_graph/run.py | hyroai/computation-graph | 357ea2cfd98c3ea1162b3f373c38738f8ae08b6b | ["MIT"] | 7 | 2020-07-20T20:21:27.000Z | 2021-12-30T17:24:28.000Z | computation_graph/run.py | hyroai/computation-graph | 357ea2cfd98c3ea1162b3f373c38738f8ae08b6b | ["MIT"] | null | null | null | import asyncio
import dataclasses
import itertools
import logging
import pathlib
import sys
import traceback
import typing
from typing import Any, Callable, Dict, FrozenSet, Iterable, Set, Text, Tuple, Type
import gamla
import immutables
import toposort
import typeguard
from gamla.optimized import async_functions as opt_async_gamla
from gamla.optimized import sync as opt_gamla
from computation_graph import base_types, graph
_toposort_nodes: Callable[
[base_types.GraphType], Tuple[FrozenSet[base_types.ComputationNode], ...]
] = opt_gamla.compose_left(
opt_gamla.groupby_many(_get_edge_sources),
opt_gamla.valmap(
opt_gamla.compose_left(opt_gamla.map(base_types.edge_destination), set)
),
_transpose_graph,
toposort.toposort,
opt_gamla.map(frozenset),
tuple,
)
_incoming_edge_options = opt_gamla.compose_left(
graph.get_incoming_edges_for_node,
gamla.after(
opt_gamla.compose_left(
opt_gamla.groupby(base_types.edge_key),
opt_gamla.valmap(gamla.sort_by(gamla.attrgetter("priority"))),
dict.values,
opt_gamla.star(itertools.product),
opt_gamla.map(tuple),
)
),
)
_get_args_helper = opt_gamla.compose_left(
opt_gamla.keyfilter(gamla.attrgetter("args")),
dict.values,
gamla.head,
opt_gamla.maptuple(gamla.attrgetter("result")),
)
_get_inner_kwargs = opt_gamla.compose_left(
opt_gamla.keyfilter(base_types.edge_key),
dict.items,
opt_gamla.groupby(opt_gamla.compose_left(gamla.head, base_types.edge_key)),
opt_gamla.valmap(
opt_gamla.compose_left(
gamla.head, gamla.second, gamla.head, gamla.attrgetter("result")
)
),
)
_DecisionsType = Dict[base_types.ComputationNode, base_types.ComputationResult]
_ResultToDecisionsType = Dict[base_types.ComputationResult, _DecisionsType]
_IntermediaryResults = Dict[base_types.ComputationNode, _ResultToDecisionsType]
class _NotCoherent(Exception):
"""This exception signals that for a specific set of incoming
node edges not all paths agree on the ComputationResult"""
NodeToResults = Callable[[base_types.ComputationNode], _ResultToDecisionsType]
_get_kwargs_from_edges = opt_gamla.compose_left(
opt_gamla.map(base_types.edge_key), opt_gamla.remove(gamla.equals(None)), tuple
)
_ChoiceOfOutputForNode = Tuple[
Tuple[base_types.ComputationResult, _DecisionsType], base_types.ComputationNode,
]
@gamla.curry
_choice_to_value: Callable[
[_ChoiceOfOutputForNode], base_types.ComputationResult
] = opt_gamla.compose_left(gamla.head, gamla.head)
_decisions_from_value_choices = opt_gamla.compose_left(
gamla.concat,
gamla.bifurcate(
opt_gamla.compose_left(
opt_gamla.map(opt_gamla.compose_left(gamla.head, gamla.second)),
opt_gamla.reduce(
opt_gamla.merge_with_reducer(_check_equal_and_take_one),
immutables.Map(),
),
),
opt_gamla.mapdict(opt_gamla.juxt(gamla.second, _choice_to_value)),
),
opt_gamla.merge,
)
_SingleNodeSideEffect = Callable[[base_types.ComputationNode, Any], None]
def _dag_layer_reduce(
f: Callable[
[_IntermediaryResults, FrozenSet[base_types.ComputationNode]],
_IntermediaryResults,
]
) -> Callable[[base_types.GraphType], _IntermediaryResults]:
"""Directed acyclic graph reduction."""
return gamla.compose_left(
_toposort_nodes, gamla.reduce_curried(f, immutables.Map())
)
_create_node_run_options = opt_gamla.compose_left(
gamla.pack,
gamla.explode(1),
opt_gamla.mapcat(
opt_gamla.compose_left(
gamla.bifurcate(
gamla.head, gamla.second, opt_gamla.star(lambda _, y, z: z(y))
),
gamla.explode(2),
)
),
)
@gamla.curry
_is_graph_async = opt_gamla.compose_left(
opt_gamla.mapcat(lambda edge: (edge.source, *edge.args)),
opt_gamla.remove(gamla.equals(None)),
opt_gamla.map(gamla.attrgetter("func")),
gamla.anymap(asyncio.iscoroutinefunction),
)
to_callable_with_side_effect = gamla.curry(
_to_callable_with_side_effect_for_single_and_multiple
)(_type_check)
# Use the second line if you want to see the winning path in the computation graph (a little slower).
to_callable = to_callable_with_side_effect(gamla.just(gamla.just(None)))
# to_callable = to_callable_with_side_effect(graphviz.computation_trace('utterance_computation.dot'))
| 31.971386 | 196 | 0.63955 | import asyncio
import dataclasses
import itertools
import logging
import pathlib
import sys
import traceback
import typing
from typing import Any, Callable, Dict, FrozenSet, Iterable, Set, Text, Tuple, Type
import gamla
import immutables
import toposort
import typeguard
from gamla.optimized import async_functions as opt_async_gamla
from gamla.optimized import sync as opt_gamla
from computation_graph import base_types, graph
class _ComputationGraphException(Exception):
pass
def _transpose_graph(
graph: Dict[base_types.ComputationNode, Set[base_types.ComputationNode]]
) -> Dict[base_types.ComputationNode, Set[base_types.ComputationNode]]:
return opt_gamla.pipe(
graph.keys(), opt_gamla.groupby_many(graph.get), opt_gamla.valmap(set)
)
def _get_edge_sources(edge: base_types.ComputationEdge):
return edge.args or (edge.source,)
_toposort_nodes: Callable[
[base_types.GraphType], Tuple[FrozenSet[base_types.ComputationNode], ...]
] = opt_gamla.compose_left(
opt_gamla.groupby_many(_get_edge_sources),
opt_gamla.valmap(
opt_gamla.compose_left(opt_gamla.map(base_types.edge_destination), set)
),
_transpose_graph,
toposort.toposort,
opt_gamla.map(frozenset),
tuple,
)
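# A sketch of the shape produced by `_toposort_nodes` (assumed, based on `toposort`):
# for a graph with edges a -> b -> c it yields topological "layers" such as
# (frozenset({a}), frozenset({b}), frozenset({c})), so every layer depends only on earlier ones.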
def _make_outer_computation_input(*args, **kwargs) -> base_types.ComputationInput:
if "state" in kwargs:
return base_types.ComputationInput(
args=args,
kwargs=gamla.remove_key("state")(kwargs),
state=dict(kwargs["state"] or {}),
)
return base_types.ComputationInput(args=args, kwargs=kwargs)
_incoming_edge_options = opt_gamla.compose_left(
graph.get_incoming_edges_for_node,
gamla.after(
opt_gamla.compose_left(
opt_gamla.groupby(base_types.edge_key),
opt_gamla.valmap(gamla.sort_by(gamla.attrgetter("priority"))),
dict.values,
opt_gamla.star(itertools.product),
opt_gamla.map(tuple),
)
),
)
_get_args_helper = opt_gamla.compose_left(
opt_gamla.keyfilter(gamla.attrgetter("args")),
dict.values,
gamla.head,
opt_gamla.maptuple(gamla.attrgetter("result")),
)
def _get_args(
edges_to_results: Dict[
base_types.ComputationEdge, Tuple[base_types.ComputationResult, ...],
],
unbound_signature: base_types.NodeSignature,
bound_signature: base_types.NodeSignature,
unbound_input: base_types.ComputationInput,
) -> Tuple[base_types.ComputationResult, ...]:
if unbound_signature.is_args:
return unbound_input.args
if bound_signature.is_args:
return _get_args_helper(edges_to_results)
return ()
def _get_unary_computation_input(
kwargs: Tuple[Text, ...],
value: base_types.ComputationResult,
unbound_signature: base_types.NodeSignature,
) -> Dict[Text, Any]:
return opt_gamla.pipe(
unbound_signature.kwargs,
opt_gamla.remove(
opt_gamla.anyjuxt(
gamla.contains(unbound_signature.optional_kwargs), gamla.equals("state")
)
),
tuple,
opt_gamla.check(
opt_gamla.anyjuxt(gamla.len_equals(1), gamla.len_equals(0)),
_ComputationGraphException(
f"got a single input function with more than 1 unbound arguments. cannot bind function. {unbound_signature}"
),
),
opt_gamla.ternary(gamla.len_equals(1), gamla.identity, gamla.just(kwargs)),
gamla.head,
lambda first_kwarg: {first_kwarg: value.result},
)
def _get_outer_kwargs(
unbound_signature: base_types.NodeSignature,
unbound_input: base_types.ComputationInput,
) -> Dict[Text, Any]:
    # Optimized because it is called a lot.
d = {}
for kwarg in unbound_signature.kwargs:
if kwarg != "state" and kwarg in unbound_input.kwargs:
d[kwarg] = unbound_input.kwargs[kwarg]
return d
_get_inner_kwargs = opt_gamla.compose_left(
opt_gamla.keyfilter(base_types.edge_key),
dict.items,
opt_gamla.groupby(opt_gamla.compose_left(gamla.head, base_types.edge_key)),
opt_gamla.valmap(
opt_gamla.compose_left(
gamla.head, gamla.second, gamla.head, gamla.attrgetter("result")
)
),
)
_DecisionsType = Dict[base_types.ComputationNode, base_types.ComputationResult]
_ResultToDecisionsType = Dict[base_types.ComputationResult, _DecisionsType]
_IntermediaryResults = Dict[base_types.ComputationNode, _ResultToDecisionsType]
class _NotCoherent(Exception):
"""This exception signals that for a specific set of incoming
node edges not all paths agree on the ComputationResult"""
def _check_equal_and_take_one(x, y):
if x == y:
return x
raise _NotCoherent
NodeToResults = Callable[[base_types.ComputationNode], _ResultToDecisionsType]
def _signature_difference(
sig_a: base_types.NodeSignature, sig_b: base_types.NodeSignature
) -> base_types.NodeSignature:
return base_types.NodeSignature(
is_args=(sig_a.is_args != sig_b.is_args),
        # The difference must preserve the order of the left signature.
kwargs=tuple(filter(lambda x: x not in sig_b.kwargs, sig_a.kwargs)),
optional_kwargs=tuple(
filter(lambda x: x not in sig_b.optional_kwargs, sig_a.optional_kwargs)
),
)
_get_kwargs_from_edges = opt_gamla.compose_left(
opt_gamla.map(base_types.edge_key), opt_gamla.remove(gamla.equals(None)), tuple
)
def _get_bound_signature(
is_args: bool, incoming_edges: base_types.GraphType
) -> base_types.NodeSignature:
return base_types.NodeSignature(
is_args=is_args and any(edge.args for edge in incoming_edges),
kwargs=_get_kwargs_from_edges(incoming_edges),
optional_kwargs=(),
)
_ChoiceOfOutputForNode = Tuple[
Tuple[base_types.ComputationResult, _DecisionsType], base_types.ComputationNode,
]
@gamla.curry
def _get_computation_input(
unbound_input: Callable[[base_types.ComputationNode], base_types.ComputationInput],
node: base_types.ComputationNode,
incoming_edges: base_types.GraphType,
    # For each edge, there are multiple value options, each having its own trace.
values_for_edges_choice: Iterable[Iterable[_ChoiceOfOutputForNode]],
) -> base_types.ComputationInput:
bound_signature = _get_bound_signature(node.signature.is_args, incoming_edges)
unbound_signature = _signature_difference(node.signature, bound_signature)
results = gamla.pipe(
values_for_edges_choice,
opt_gamla.maptuple(opt_gamla.maptuple(_choice_to_value)),
)
if node.signature.is_kwargs:
assert (
len(results) == 1
), f"signature for {base_types.pretty_print_function_name(node.func)} contains `**kwargs`. This is considered unary, meaning one incoming edge, but we got more than one: {incoming_edges}."
return base_types.ComputationInput(
args=gamla.wrap_tuple(gamla.head(gamla.head(results)).result),
kwargs={},
state=None,
)
if (
not (unbound_signature.is_args or bound_signature.is_args)
and sum(
map(
opt_gamla.compose_left(base_types.edge_key, gamla.equals(None)),
incoming_edges,
)
)
== 1
):
return base_types.ComputationInput(
args=(),
kwargs=_get_unary_computation_input(
node.signature.kwargs,
gamla.head(gamla.head(results)),
unbound_signature,
),
state=unbound_input(node).state,
)
edges_to_results = dict(zip(incoming_edges, results))
return base_types.ComputationInput(
args=_get_args(
edges_to_results, unbound_signature, bound_signature, unbound_input(node)
),
kwargs={
**_get_outer_kwargs(unbound_signature, unbound_input(node)),
**_get_inner_kwargs(edges_to_results),
},
state=unbound_input(node).state,
)
def _wrap_in_result_if_needed(node: base_types.ComputationNode, result):
if isinstance(result, base_types.ComputationResult):
return result
if node.is_stateful:
return base_types.ComputationResult(result, result)
return base_types.ComputationResult(result, None)
def _inject_state(unbound_input: base_types.ComputationInput):
def inject_state(node_id: int):
if unbound_input.state is None:
return unbound_input
return dataclasses.replace(
unbound_input,
state=unbound_input.state[node_id]
if node_id in unbound_input.state
else None,
)
return inject_state
_choice_to_value: Callable[
[_ChoiceOfOutputForNode], base_types.ComputationResult
] = opt_gamla.compose_left(gamla.head, gamla.head)
_decisions_from_value_choices = opt_gamla.compose_left(
gamla.concat,
gamla.bifurcate(
opt_gamla.compose_left(
opt_gamla.map(opt_gamla.compose_left(gamla.head, gamla.second)),
opt_gamla.reduce(
opt_gamla.merge_with_reducer(_check_equal_and_take_one),
immutables.Map(),
),
),
opt_gamla.mapdict(opt_gamla.juxt(gamla.second, _choice_to_value)),
),
opt_gamla.merge,
)
def _construct_computation_state(
results: _ResultToDecisionsType, sink_node: base_types.ComputationNode
) -> Dict:
first_result = gamla.head(results)
return {
**{sink_node: first_result.state},
**opt_gamla.pipe(
results,
gamla.itemgetter(first_result),
opt_gamla.valmap(gamla.attrgetter("state")),
),
}
def _merge_with_previous_state(
previous_state: Dict, result: base_types.ComputationResult, state: Dict
) -> base_types.ComputationResult:
return base_types.ComputationResult(
result=result,
        # Convert to tuples of (node id, state) so this will be hashable.
state=tuple({**(previous_state or {}), **state}.items()),
)
def _get_results_from_terminals(
result_to_dependencies: Callable,
) -> Callable[[Iterable[base_types.ComputationNode]], _DecisionsType]:
return opt_gamla.compose_left(
opt_gamla.map(
opt_gamla.pair_right(
opt_gamla.compose_left(
result_to_dependencies,
opt_gamla.ternary(
gamla.identity,
opt_gamla.compose_left(
dict.items,
gamla.head,
gamla.head, # Take results, not dependencies
gamla.attrgetter("result"),
),
gamla.just({}),
),
)
)
),
dict,
opt_gamla.valfilter(gamla.identity), # Only return terminals with results
)
def _get_computation_state_from_terminals(
result_to_dependencies: Callable, edges_to_node_id: Callable
) -> Callable[[Iterable[base_types.ComputationNode]], Dict]:
return opt_gamla.compose_left(
opt_gamla.map(
opt_gamla.compose_left(
opt_gamla.pair_left(result_to_dependencies),
opt_gamla.ternary(
opt_gamla.compose_left(gamla.head, gamla.nonempty),
opt_gamla.compose_left(
opt_gamla.star(_construct_computation_state),
opt_gamla.keymap(edges_to_node_id),
),
gamla.just({}),
),
)
),
opt_gamla.merge,
)
def _construct_computation_result(edges: base_types.GraphType, edges_to_node_id):
def construct_computation_result(
result_to_dependencies: Callable[
[base_types.ComputationNode], _ResultToDecisionsType
]
):
return opt_gamla.pipe(
edges,
graph.get_leaves,
opt_gamla.filter(gamla.attrgetter("is_terminal")),
frozenset,
opt_gamla.juxt(
_get_results_from_terminals(result_to_dependencies),
_get_computation_state_from_terminals(
result_to_dependencies, edges_to_node_id
),
),
)
return construct_computation_result
def _type_check(node: base_types.ComputationNode, result):
try:
return_typing = typing.get_type_hints(node.func).get("return", None)
except TypeError:
# Does not support `functools.partial`.
return
if return_typing:
try:
typeguard.check_type(str(node), result, return_typing)
except TypeError as e:
logging.error([node.func.__code__, e])
def _apply(node: base_types.ComputationNode, node_input: base_types.ComputationInput):
return node.func(
*node_input.args,
**gamla.add_key_value("state", node_input.state)(node_input.kwargs)
if node.is_stateful
else node_input.kwargs,
)
_SingleNodeSideEffect = Callable[[base_types.ComputationNode, Any], None]
def _run_keeping_choices(
is_async: bool, side_effect: _SingleNodeSideEffect
) -> Callable:
def run_keeping_choices(node_to_external_input):
input_maker = opt_gamla.star(_get_computation_input(node_to_external_input))
if is_async:
async def run_keeping_choices(params):
result = _apply(params[0], input_maker(params))
result = await gamla.to_awaitable(result)
side_effect(params[0], result)
return (
_wrap_in_result_if_needed(params[0], result),
_decisions_from_value_choices(params[2]),
)
else:
def run_keeping_choices(params):
result = _apply(params[0], input_maker(params))
side_effect(params[0], result)
return (
_wrap_in_result_if_needed(params[0], result),
_decisions_from_value_choices(params[2]),
)
return run_keeping_choices
return run_keeping_choices
def _merge_immutable(x, y):
return x.update(y)
def _process_layer_in_parallel(
f: Callable[
[_IntermediaryResults, base_types.ComputationNode], _IntermediaryResults
]
) -> Callable[
[_IntermediaryResults, FrozenSet[base_types.ComputationNode]], _IntermediaryResults,
]:
return gamla.compose_left(
gamla.pack,
gamla.explode(1),
gamla.map(f),
opt_gamla.reduce(_merge_immutable, immutables.Map()),
)
def _dag_layer_reduce(
f: Callable[
[_IntermediaryResults, FrozenSet[base_types.ComputationNode]],
_IntermediaryResults,
]
) -> Callable[[base_types.GraphType], _IntermediaryResults]:
"""Directed acyclic graph reduction."""
return gamla.compose_left(
_toposort_nodes, gamla.reduce_curried(f, immutables.Map())
)
def _edge_to_value_options(
accumulated_outputs,
) -> Callable[[Iterable[base_types.ComputationEdge]], Iterable[Any]]:
return opt_gamla.mapduct(
opt_gamla.compose_left(
_get_edge_sources,
opt_gamla.mapduct(
opt_gamla.compose_left(
opt_gamla.pair_left(
opt_gamla.compose_left(
gamla.dict_to_getter_with_default(
immutables.Map(), accumulated_outputs
),
dict.items,
)
),
gamla.explode(0),
)
),
)
)
_create_node_run_options = opt_gamla.compose_left(
gamla.pack,
gamla.explode(1),
opt_gamla.mapcat(
opt_gamla.compose_left(
gamla.bifurcate(
gamla.head, gamla.second, opt_gamla.star(lambda _, y, z: z(y))
),
gamla.explode(2),
)
),
)
def _assoc_immutable(d, k, v):
return d.set(k, v)
@gamla.curry
def _lift_single_runner_to_run_on_many_options(is_async: bool, f):
return (opt_async_gamla.compose_left if is_async else opt_gamla.compose_left)(
_create_node_run_options,
(opt_async_gamla.map if is_async else opt_gamla.map)(f),
opt_gamla.filter(gamla.identity),
dict,
)
def _process_node(
is_async: bool, get_edge_options: Callable[[base_types.ComputationNode], Any]
) -> Callable[[Callable], Callable]:
def process_node(f: Callable):
if is_async:
@opt_async_gamla.star
async def process_node(
accumulated_results: _IntermediaryResults,
node: base_types.ComputationNode,
) -> _IntermediaryResults:
return _assoc_immutable(
accumulated_results,
node,
await f(
node,
get_edge_options(node),
_edge_to_value_options(accumulated_results),
),
)
return process_node
else:
@opt_gamla.star
def process_node(
accumulated_results: _IntermediaryResults,
node: base_types.ComputationNode,
) -> _IntermediaryResults:
return _assoc_immutable(
accumulated_results,
node,
f(
node,
get_edge_options(node),
_edge_to_value_options(accumulated_results),
),
)
return process_node
return process_node
_is_graph_async = opt_gamla.compose_left(
opt_gamla.mapcat(lambda edge: (edge.source, *edge.args)),
opt_gamla.remove(gamla.equals(None)),
opt_gamla.map(gamla.attrgetter("func")),
gamla.anymap(asyncio.iscoroutinefunction),
)
def _make_runner(
single_node_runner,
is_async,
async_decoration,
edges,
handled_exceptions,
edges_to_node_id,
):
return gamla.compose_left(
# Higher order pipeline that constructs a graph runner.
opt_gamla.compose(
_dag_layer_reduce,
_process_layer_in_parallel,
_process_node(is_async, _incoming_edge_options(edges)),
_lift_single_runner_to_run_on_many_options(is_async),
gamla.excepts(
(*handled_exceptions, _NotCoherent),
opt_gamla.compose_left(type, _log_handled_exception, gamla.just(None)),
),
single_node_runner,
gamla.before(edges_to_node_id),
_inject_state,
),
# gamla.profileit, # Enable to get a read on slow functions.
# At this point we move to a regular pipeline of values.
async_decoration(gamla.apply(edges)),
gamla.attrgetter("__getitem__"),
)
def _to_callable_with_side_effect_for_single_and_multiple(
single_node_side_effect: _SingleNodeSideEffect,
all_nodes_side_effect: Callable,
edges: base_types.GraphType,
handled_exceptions: FrozenSet[Type[Exception]],
) -> Callable:
edges = gamla.pipe(edges, gamla.unique, tuple)
edges_to_node_id = graph.edges_to_node_id_map(edges).__getitem__
return gamla.compose_left(
_make_outer_computation_input,
gamla.pair_with(
gamla.compose_left(
_make_runner(
_run_keeping_choices(
_is_graph_async(edges), single_node_side_effect
),
_is_graph_async(edges),
gamla.after(gamla.to_awaitable)
if _is_graph_async(edges)
else gamla.identity,
edges,
handled_exceptions,
edges_to_node_id,
),
gamla.side_effect(all_nodes_side_effect(edges)),
_construct_computation_result(edges, edges_to_node_id),
)
),
opt_gamla.star(
lambda result_and_state, computation_input: _merge_with_previous_state(
computation_input.state, *result_and_state
)
),
)
to_callable_with_side_effect = gamla.curry(
_to_callable_with_side_effect_for_single_and_multiple
)(_type_check)
# Use the second line if you want to see the winning path in the computation graph (a little slower).
to_callable = to_callable_with_side_effect(gamla.just(gamla.just(None)))
# to_callable = to_callable_with_side_effect(graphviz.computation_trace('utterance_computation.dot'))
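# Illustrative usage (a sketch; the graph `edges` and its inputs below are assumptions):
#   f = to_callable(edges, frozenset())           # second argument: exception types to handle
#   computation_result = f(some_arg, state=None)  # returns a base_types.ComputationResult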
def _log_handled_exception(exception_type: Type[Exception]):
_, exception, exception_traceback = sys.exc_info()
filename, line_num, func_name, _ = traceback.extract_tb(exception_traceback)[-1]
reason = ""
if str(exception):
reason = f": {exception}"
code_location = f"{pathlib.Path(filename).name}:{line_num}"
logging.debug(f"'{func_name.strip('_')}' {exception_type}@{code_location}{reason}")
| 15,989 | 32 | 688 |
4b2723499c63e7ea94e15a4f2b6a0192b66dde7b | 787 | py | Python | eventos/models.py | aromero45/proyecto0 | e5f5744bd073a4a82b584d1953e8204516ae8193 | ["bzip2-1.0.6"] | null | null | null | eventos/models.py | aromero45/proyecto0 | e5f5744bd073a4a82b584d1953e8204516ae8193 | ["bzip2-1.0.6"] | 2 | 2021-03-19T23:01:39.000Z | 2021-06-10T22:32:11.000Z | eventos/models.py | aromero45/proyecto0 | e5f5744bd073a4a82b584d1953e8204516ae8193 | ["bzip2-1.0.6"] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here. | 35.772727 | 83 | 0.777637 | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
class Category(models.Model):
    name = models.CharField(max_length=300)
class Type(models.Model):
    name = models.CharField(max_length=200)
class Event(models.Model):
    person = models.ForeignKey(User, on_delete=models.CASCADE, related_name="User")
    name = models.CharField(max_length=200)
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    place = models.CharField(max_length=200)
    address = models.CharField(max_length=200)
    creation_date = models.DateTimeField(default=timezone.now)
    start_date = models.DateTimeField()
    finish_date = models.DateTimeField()
    type = models.ForeignKey(Type, on_delete=models.CASCADE) | 0 | 584 | 68 |
0d7673ac0332f7616662b2423a0f2d136b010877 | 1,028 | py | Python | experiments/expression/st/plot_gsea_results.py | andrewcharlesjones/spatial-alignment | 70aecf800c5efea6a92990ccf87a1950752a268b | ["MIT"] | 14 | 2022-01-11T14:51:17.000Z | 2022-02-26T20:46:58.000Z | experiments/expression/st/plot_gsea_results.py | andrewcharlesjones/spatial-alignment | 70aecf800c5efea6a92990ccf87a1950752a268b | ["MIT"] | 3 | 2022-01-26T17:16:24.000Z | 2022-02-24T13:22:39.000Z | experiments/expression/st/plot_gsea_results.py | andrewcharlesjones/spatial-alignment | 70aecf800c5efea6a92990ccf87a1950752a268b | ["MIT"] | 1 | 2022-02-23T09:54:37.000Z | 2022-02-23T09:54:37.000Z | import torch
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import sys
import matplotlib
font = {"size": 30}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
n_gene_sets_to_name = 2
results_df = pd.read_csv("./out/st_avg_gene_variance_gsea_results.csv", index_col=0)
results_df["logpval"] = -np.log10(results_df.padj.values)
plt.figure(figsize=(7, 7))
sns.scatterplot(data=results_df, x="NES", y="logpval", color="black", edgecolor=None)
plt.xlabel("Enrichment score")
plt.ylabel(r"$-\log_{10}$(p-val)")
sorted_idx = np.argsort(-results_df.NES.values)
for ii in range(n_gene_sets_to_name):
gs_name = " ".join(results_df.pathway.values[sorted_idx[ii]].split("_")[1:])
plt.text(
s=gs_name,
x=results_df.NES.values[sorted_idx[ii]],
y=results_df.logpval.values[sorted_idx[ii]],
ha="right",
)
plt.tight_layout()
plt.savefig("./out/st_avg_gene_variance_gsea_results.png")
plt.show()
| 25.073171 | 85 | 0.717899 |
| 0 | 0 | 0 |
da78212870006cf43395ab04a85fb39d62441a05 | 541 | py | Python | src/translation/viff_templates/MainTemplate.py | n1v0lg/Musketeer | 27baa102b63dd4e49c06a519c2021b984a2cd4c5 | ["Apache-2.0"] | 3 | 2017-07-21T16:29:18.000Z | 2020-03-14T21:51:00.000Z | src/translation/viff_templates/MainTemplate.py | n1v0lg/Musketeer | 27baa102b63dd4e49c06a519c2021b984a2cd4c5 | ["Apache-2.0"] | null | null | null | src/translation/viff_templates/MainTemplate.py | n1v0lg/Musketeer | 27baa102b63dd4e49c06a519c2021b984a2cd4c5 | ["Apache-2.0"] | 2 | 2021-02-02T17:13:11.000Z | 2022-02-14T03:49:29.000Z |
if __name__ == "__main__":
parser = OptionParser()
Runtime.add_options(parser)
options, args = parser.parse_args()
pid, players = load_config("{{VIFF_CONFIG_LOC}}")
Zp = GF(find_prime(2**65, blum=True))
runtime_class = make_runtime_class(
mixins=[ProbabilisticEqualityMixin, ComparisonToft07Mixin]
)
pre_runtime = create_runtime(pid, players, 1, options,
runtime_class=runtime_class)
pre_runtime.addCallback(protocol, Zp)
pre_runtime.addErrback(report_error)
reactor.run()
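# Note (an assumption based on this file's role as a template): `{{VIFF_CONFIG_LOC}}` is a
# placeholder substituted at code-generation time, and `protocol` / `report_error` are
# expected to be defined by the surrounding generated code.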
| 30.055556 | 66 | 0.695009 |
if __name__ == "__main__":
parser = OptionParser()
Runtime.add_options(parser)
options, args = parser.parse_args()
pid, players = load_config("{{VIFF_CONFIG_LOC}}")
Zp = GF(find_prime(2**65, blum=True))
runtime_class = make_runtime_class(
mixins=[ProbabilisticEqualityMixin, ComparisonToft07Mixin]
)
pre_runtime = create_runtime(pid, players, 1, options,
runtime_class=runtime_class)
pre_runtime.addCallback(protocol, Zp)
pre_runtime.addErrback(report_error)
reactor.run()
| 0 | 0 | 0 |
76c95753c1e15bf6f8028d7e3a72421f60d31e6b | 291 | py | Python | server/project/urls.py | istommao/fakedataset | 365ef0c68d1ecac30ab2c9908e6a5efa1da5d81e | ["MIT"] | null | null | null | server/project/urls.py | istommao/fakedataset | 365ef0c68d1ecac30ab2c9908e6a5efa1da5d81e | ["MIT"] | null | null | null | server/project/urls.py | istommao/fakedataset | 365ef0c68d1ecac30ab2c9908e6a5efa1da5d81e | ["MIT"] | null | null | null | """project urls."""
from django.urls import path, re_path
from project.views import ProjectListView, ProjectDetailView
app_name = 'project'
urlpatterns = [
path('', ProjectListView.as_view(), name='index'),
re_path(r'(?P<uid>\w+)/', ProjectDetailView.as_view(), name='detail'),
]
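# Illustrative reversal of the routes above (assumed usage, not part of the original file):
#   reverse('project:index')                             # -> '/'
#   reverse('project:detail', kwargs={'uid': 'abc123'})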
| 24.25 | 74 | 0.701031 |
| 0 | 0 | 0 |
0b818088c56f02351c73371f2b80d35f2f738a6e | 1,444 | py | Python | main.py | anhtpn/app_flask | ba1509a9bffdec8c4e6c5c98d211d75e3d87541f | ["MIT"] | null | null | null | main.py | anhtpn/app_flask | ba1509a9bffdec8c4e6c5c98d211d75e3d87541f | ["MIT"] | null | null | null | main.py | anhtpn/app_flask | ba1509a9bffdec8c4e6c5c98d211d75e3d87541f | ["MIT"] | null | null | null | from flask import Flask, render_template, redirect, url_for, request, session
from flask_restful import Api
from connect import config
from routes.routes import initialize_routes
from my_code.Process import RecommendMovie
from read_file import get_user_id
from read_file import get_rating
app = Flask(__name__)
api = Api(app)
config()
initialize_routes(api)
@app.route("/")
@app.route("/recommend")
@app.route("/", methods=['POST'])
@app.route("/movie")
if __name__ == "__main__":
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
app.run(debug=True)
# app.run()
| 23.672131 | 79 | 0.664127 | from flask import Flask, render_template, redirect, url_for, request, session
from flask_restful import Api
from connect import config
from routes.routes import initialize_routes
from my_code.Process import RecommendMovie
from read_file import get_user_id
from read_file import get_rating
import matplotlib.pyplot as plt  # used by movie() below
app = Flask(__name__)
api = Api(app)
config()
initialize_routes(api)
@app.route("/")
def home():
return render_template("login.html")
@app.route("/recommend")
def recommend():
movies = []
if 'user' in session:
user = session['user']
print(user)
movies = RecommendMovie(user)
return render_template("recommend.html", movies=movies, length=len(movies))
@app.route("/", methods=['POST'])
def login_post():
username = request.form.get('username')
password = request.form.get('password')
# print(email)
session["user"] = username
return redirect(url_for('movie'))
@app.route("/movie")
def movie():
    movies = []
    if 'user' in session:
        user = session['user']
        movies = get_user_id(user)
        plt.figure(figsize=[5, 10])  # Set dimensions for figure
        plt.plot(get_rating(user))
        plt.title("")
        plt.savefig('../static/1.svg', format='svg', dpi=1200)
        plt.show()
    return render_template("user.html", movies=movies, length=len(movies))
if __name__ == "__main__":
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
app.run(debug=True)
# app.run()
| 776 | 0 | 88 |
c8c57f593b9593d2eb3eb638ed4eeec1fc1f396d | 382 | py | Python | utils/draw_utils.py | DdeGeus/single-network-panoptic-segmentation | 891f13b8bca0f41e298900fe1c73bc3035caef5d | ["Apache-2.0"] | 32 | 2019-06-17T06:28:15.000Z | 2021-12-20T15:40:56.000Z | utils/draw_utils.py | DdeGeus/single-network-panoptic-segmentation | 891f13b8bca0f41e298900fe1c73bc3035caef5d | ["Apache-2.0"] | 14 | 2019-07-11T10:04:37.000Z | 2022-03-11T23:49:52.000Z | utils/draw_utils.py | DdeGeus/single-network-panoptic-segmentation | 891f13b8bca0f41e298900fe1c73bc3035caef5d | ["Apache-2.0"] | 4 | 2019-06-17T06:28:17.000Z | 2022-03-18T19:06:25.000Z | from PIL import Image, ImageDraw, ImageFont
FONT = ImageFont.load_default() | 27.285714 | 61 | 0.549738 | from PIL import Image, ImageDraw, ImageFont
FONT = ImageFont.load_default()
def draw_labels_and_probs(draw_obj, box, label, prob, color):
x, y = box[0], box[1]
draw_obj.rectangle([x, y, x+100, y+10],
fill=color)
txt = label + ": " + str(prob)
draw_obj.text(xy=(x, y),
text=txt,
fill='black',
font=FONT) | 283 | 0 | 23 |
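# Illustrative usage (a sketch, not part of the original file):
#   img = Image.new('RGB', (640, 480))
#   draw = ImageDraw.Draw(img)
#   draw_labels_and_probs(draw, box=(10, 20, 110, 60), label='person', prob=0.93, color='red')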