Dataset schema (29 columns; one row per source file):

| Column | Type | Range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 – 972 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 (nullable) | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3 – 972 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 (nullable) | 1 – 116k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3 – 972 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 (nullable) | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |
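The rows below follow this schema, one file per row. As a hedged illustration (not part of the dump itself), columns such as max_stars_count and alphanum_fraction could be filtered with pandas; the toy DataFrame and thresholds below are arbitrary stand-ins for real rows:

```python
import pandas as pd

# Toy rows mimicking a few of the schema columns above; values are illustrative only.
rows = pd.DataFrame({
    "max_stars_repo_name": ["mayu95/pytorch-gve-lrcn", "zduey/shap"],
    "max_stars_count": [1, 16097],
    "alphanum_fraction": [0.646494, 0.628285],
    "size": [2082, 2892],
})

# Keep files from well-starred repos with a reasonably high alphanumeric fraction.
popular = rows[(rows["max_stars_count"] >= 100) & (rows["alphanum_fraction"] > 0.6)]
print(popular[["max_stars_repo_name", "size"]])
```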
hexsha: 15c14148399646733c5e58c83719b3d8f14d8f51 | size: 2,082 | ext: py | lang: Python
path: models/model_loader.py | repo: mayu95/pytorch-gve-lrcn @ 25f0b2835db669d05663b6c67a4e9775acc97ef4 | licenses: ["MIT"]
stars: 1 (2020-10-17T13:20:27.000Z – 2020-10-17T13:20:27.000Z) | issues: null | forks: null
import torch
from .lrcn import LRCN
from .gve import GVE
from .sentence_classifier import SentenceClassifier
class ModelLoader:
def __init__(self, args, dataset):
self.args = args
self.dataset = dataset
def lrcn(self):
# LRCN arguments
pretrained_model = self.args.pretrained_model
embedding_size = self.args.embedding_size
hidden_size = self.args.hidden_size
vocab_size = len(self.dataset.vocab)
layers_to_truncate = self.args.layers_to_truncate
lrcn = LRCN(pretrained_model, embedding_size, hidden_size, vocab_size,
layers_to_truncate)
return lrcn
def gve(self):
# Make sure dataset returns labels
self.dataset.set_label_usage(True)
# GVE arguments
embedding_size = self.args.embedding_size
hidden_size = self.args.hidden_size
vocab_size = len(self.dataset.vocab)
input_size = self.dataset.input_size
num_classes = self.dataset.num_classes
sc = self.sc()
sc.load_state_dict(torch.load(self.args.sc_ckpt))
for param in sc.parameters():
param.requires_grad = False
sc.eval()
# cub
# gve = GVE(input_size, embedding_size, hidden_size, vocab_size, sc,
# num_classes)
# iu
pretrained_model = self.args.pretrained_model
gve = GVE(pretrained_model, embedding_size, hidden_size, vocab_size, sc,
num_classes)
if self.args.weights_ckpt:
gve.load_state_dict(torch.load(self.args.weights_ckpt))
return gve
def sc(self):
# Make sure dataset returns labels
self.dataset.set_label_usage(True)
# Sentence classifier arguments
embedding_size = self.args.embedding_size
hidden_size = self.args.hidden_size
vocab_size = len(self.dataset.vocab)
num_classes = self.dataset.num_classes
sc = SentenceClassifier(embedding_size, hidden_size, vocab_size,
num_classes)
return sc
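The gve() loader above freezes the pretrained sentence classifier before attaching it to the GVE model. A minimal sketch of that freezing pattern, using a stand-in nn.Linear instead of the real SentenceClassifier (the checkpoint path is hypothetical):

```python
import torch
import torch.nn as nn

classifier = nn.Linear(16, 4)  # stand-in for the pretrained SentenceClassifier
# classifier.load_state_dict(torch.load("sc_ckpt.pth"))  # hypothetical checkpoint path
for param in classifier.parameters():
    param.requires_grad = False  # exclude the classifier from gradient updates
classifier.eval()  # keep dropout/batch-norm fixed while the main model trains
```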
avg_line_length: 29.742857 | max_line_length: 80 | alphanum_fraction: 0.646494
hexsha: a7a96fd7346f2eb9ad0ebcc6fab76ba1fbd585c4 | size: 6,065 | ext: py | lang: Python
path: app/views/auth.py | repo: Akeru/ACI-EnhancedEndpointTracker @ 498bc4c07604b27a57f82cde12967eaa9117b5bc | licenses: ["MIT"]
stars: null | issues: null | forks: null
from flask import current_app, Blueprint, render_template
auth_prefix = "/auth"
auth = Blueprint("auth", __name__, url_prefix=auth_prefix)
from flask import Flask, jsonify, flash, redirect, url_for
from flask import request, make_response, g, url_for, abort, session
from flask_login import (LoginManager, login_required, login_user,
current_user, logout_user)
from ..models.users import Users
from ..models.roles import Roles
from ..models.utils import MSG_403
# setup login manager
login_manager = LoginManager()
# since this is a blueprint, use record_once instead of login_manager.init_app
@auth.record_once
def on_load(state):
# setup login manager login_view
if state.app.config.get("SSO_ENABLED", False):
# url_for not available at this point
login_manager.login_view = "%s/sso/" % auth_prefix
else:
# url_for not available at this point
login_manager.login_view = "%s/login/" % auth_prefix
login_manager.login_message = ""
login_manager.init_app(state.app)
@auth.before_app_request
def before_request():
# force everything over HTTPS if enabled
if current_app.config.get("force_https", False):
fwd_proto = request.headers.get("x-forwarded-proto",None)
if fwd_proto is not None:
if fwd_proto.lower() == "http":
return redirect(request.url.replace("http:","https:", 1))
else:
if re.search("^http:", request.url) is not None:
return redirect(request.url.replace("http:","https:", 1))
# set global object various configs
g.app_name = current_app.config.get("app_name", "AppName1")
# set global object 'g.user' based off current user session
g.ROLE_FULL_ADMIN = Roles.FULL_ADMIN
g.user = current_user
if g.user is not None:
if hasattr(g.user, 'role') and g.user.role == Roles.BLACKLIST:
Users.logout()
abort(403, MSG_403)
elif not current_app.config.get("LOGIN_ENABLED", True) and \
not g.user.is_authenticated:
# auto-login user as local if login is disabled
g.user = Users.load_user("local")
if g.user is None:
# setup local user
from ..models.users import setup_local
setup_local(current_app)
g.user = Users({"username":"local", "role": Roles.FULL_ADMIN})
Users.start_session("local")
@login_manager.user_loader
def load_user(username):
return Users.load_user(username)
@auth.route("/login", methods=["GET", "POST"])
@auth.route("/login/", methods=["GET", "POST"])
def login():
if request.method == "POST":
if Users.login(
username = request.form['username'],
password = request.form['password'],
remember = True):
return redirect(request.args.get("next") or "/")
else:
flash("The email or password you entered is incorrect.")
return render_template("auth/login.html")
return render_template("auth/login.html")
@auth.route("/logout")
@auth.route("/logout/")
def logout():
Users.logout()
return render_template("auth/logout.html")
@auth.route("/pwreset/<string:key>", methods=["GET"])
def pwreset(key):
return render_template("auth/pwreset.html", key=key)
##############################################################################
# Auth API, imported by api module
##############################################################################
def api_login():
""" login to application and create session cookie. Note, if sso
authentication is enabled, then this function is not required. Simply
provided the sso cookie and the application will authenticate it.
Args:
username(str): application username
password(str): application password
Returns:
success(bool): successfully login
"""
data = request.json
if not data: abort(400, "Invalid JSON provided")
if "username" not in data:
abort(400, "Required parameter \"username\" not provided")
if "password" not in data:
abort(400, "Required parameter \"password\" not provided")
if Users.login(
username = data["username"],
password = data["password"],
remember = True):
return jsonify({"success": True})
else:
abort(401, "Authentication Failed")
def api_logout():
""" logout of application and delete session """
Users.logout()
return jsonify({"success": True})
##############################################################################
#
# Cisco SSO handlers
#
##############################################################################
import re, requests
@auth.route("/sso", methods=["GET"])
@auth.route("/sso/", methods=["GET"])
def sso_login():
"""
use sso_handler to check if user is already authenticated. If not,
redirect user to sso_url with referer set to /rsso (return sso) and next
argument set to original provided parameter. If user is authenticated,
login user locally to site and redirect to page at next argument
"""
if not current_app.config.get("SSO_ENABLED", False):
abort(400, "SSO authentication not enabled")
if len(current_app.config.get("sso_url",""))<=0:
abort(500, "sso_url not defined in app.config")
if len(current_app.config.get("fqdn",""))<=0:
abort(500, "fqdn not defined in app.config")
# not implemented, force fail
return render_template("auth/sso_failed.html")
@auth.route("/rsso", methods=["GET"])
@auth.route("/rsso/", methods=["GET"])
def sso_login_return():
"""
this page is redirected to after user has completed authentication on
SSO site. If username is valid, redirect to 'next' parameter, else display
sso_failed page
"""
if not current_app.config.get("SSO_ENABLED", False):
abort(400, "SSO authentication not enabled")
# not implemented, force fail
return render_template("auth/sso_failed.html")
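api_login above expects a JSON body with username and password and answers {"success": true} on success. A hedged client-side sketch; the host and the exact route the api module mounts this function under are assumptions:

```python
import requests

BASE_URL = "https://myservername"      # assumption: wherever the app is served
session = requests.Session()           # keeps the session cookie set by Flask-Login

resp = session.post(
    BASE_URL + "/api/login",           # assumption: route chosen by the api module
    json={"username": "admin", "password": "secret"},
    verify=False,                      # matches the self-signed-certificate setup above
)
print(resp.status_code, resp.json())   # expect 200 and {"success": true}
```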
avg_line_length: 35.261628 | max_line_length: 78 | alphanum_fraction: 0.621105
hexsha: f1ff57ba4556946e2be66615376e7a9ebdfb904e | size: 920 | ext: py | lang: Python
path: neuralNetwork.py | repo: higorcamporez/RoboMus_Server2 @ f7cc5fe832085f66f26bc5fad146e8896bcc20f9 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
import keras
import sys
import numpy as np
from keras.models import load_model
import keras.losses
from keras import backend as K
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def getMaxValues(name):
file = open(name+"_max_values.txt", "r")
max_x = file.readline()
max_y = file.readline()
return float(max_x),float(max_y)
def root_mean_squared_error(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
if(len(sys.argv)<6):
print("-1")
exit()
name = sys.argv[1]
max_x,max_y = getMaxValues(name)
# load model
model = load_model(name+'_keras_model.h5',
custom_objects={'root_mean_squared_error': root_mean_squared_error})
#model.summary()
inp = np.array([[float(sys.argv[2]), float(sys.argv[3]),
float(sys.argv[4]), float(sys.argv[5])]])
inp = inp/max_x  # normalization
print(model.predict(inp)[0][0]*max_y)
avg_line_length: 24.864865 | max_line_length: 87 | alphanum_fraction: 0.670652
hexsha: 870d17a299ee198348940856d3a4a964f80857b6 | size: 2,892 | ext: py | lang: Python
path: tests/actions/_optimizer.py | repo: zduey/shap @ 1bb8203f2d43f7552396a5f26167a258cbdc505c | licenses: ["MIT"]
stars: 16,097 (2016-12-01T20:01:26.000Z – 2022-03-31T20:27:40.000Z) | issues: 2,217 (2017-09-18T20:06:45.000Z – 2022-03-31T21:00:25.000Z) | forks: 2,634 (2017-06-29T21:30:46.000Z – 2022-03-30T07:30:36.000Z)
""" Unit tests for the Exact explainer.
"""
# pylint: disable=missing-function-docstring
import numpy as np
import pandas as pd
import pytest
import shap
def create_basic_scenario():
X = pd.DataFrame({"feature1": np.ones(5), "feature2": np.ones(5), "feature3": np.ones(5)})
class IncreaseFeature1(shap.actions.Action):
""" Sample action.
"""
def __init__(self, amount):
self.amount = amount
self.cost = 5 * amount
def __call__(self, X):
X["feature1"] += self.amount
def __str__(self):
return f"Improve feature1 by {self.amount}."
class IncreaseFeature2(shap.actions.Action):
""" Sample action.
"""
def __init__(self, amount):
self.amount = amount
self.cost = 3 * amount
def __call__(self, X):
X["feature2"] += self.amount
def __str__(self):
return f"Improve feature2 by {self.amount}."
class IncreaseFeature3(shap.actions.Action):
""" Sample action.
"""
def __init__(self, amount):
self.amount = amount
self.cost = 4 * amount
def __call__(self, X):
X["feature3"] += self.amount
def __str__(self):
return f"Improve feature3 by {self.amount}."
def passed(x):
return np.sum(x) > 10
return X, IncreaseFeature1, IncreaseFeature2, IncreaseFeature3, passed
def test_basic_run():
X, IncreaseFeature1, IncreaseFeature2, IncreaseFeature3, passed = create_basic_scenario()
possible_actions = [
[IncreaseFeature1(i) for i in range(1,10)],
IncreaseFeature2(5),
[IncreaseFeature3(i) for i in range(1,20)]
]
optimizer = shap.ActionOptimizer(passed, possible_actions)
actions = optimizer(X.iloc[0])
assert len(actions) == 2
assert sum(a.cost for a in actions) == 27 # ensure we got the optimal answer
def test_too_few_evals():
X, IncreaseFeature1, IncreaseFeature2, IncreaseFeature3, passed = create_basic_scenario()
possible_actions = [
[IncreaseFeature1(i) for i in range(1,10)],
IncreaseFeature2(5),
[IncreaseFeature3(i) for i in range(1,20)]
]
optimizer = shap.ActionOptimizer(passed, possible_actions)
with pytest.raises(Exception):
optimizer(X.iloc[0], max_evals=3)
def test_run_out_of_group():
X, IncreaseFeature1, IncreaseFeature2, IncreaseFeature3, passed = create_basic_scenario()
possible_actions = [
[IncreaseFeature1(i) for i in range(1,10)],
IncreaseFeature2(5),
[IncreaseFeature3(1)]
]
optimizer = shap.ActionOptimizer(passed, possible_actions)
actions = optimizer(X.iloc[0])
print(actions)
assert len(actions) == 3
def test_bad_action():
with pytest.raises(Exception):
shap.ActionOptimizer(None, [None])
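test_basic_run asserts that the optimizer's answer costs 27. A small brute-force sketch, independent of shap, over the same three action groups confirms that 27 is the cheapest way to push the row sum above 10:

```python
from itertools import product

# Per-group options as (amount, cost); None means the group is skipped.
feature1 = [None] + [(i, 5 * i) for i in range(1, 10)]
feature2 = [None, (5, 15)]
feature3 = [None] + [(i, 4 * i) for i in range(1, 20)]

best = None
for choice in product(feature1, feature2, feature3):
    picked = [c for c in choice if c is not None]
    total = 3 + sum(amount for amount, _ in picked)  # the row starts at [1, 1, 1]
    total_cost = sum(c for _, c in picked)
    if total > 10 and (best is None or total_cost < best[0]):
        best = (total_cost, picked)

print(best)  # (27, [(5, 15), (3, 12)]) -> IncreaseFeature2(5) + IncreaseFeature3(3)
```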
avg_line_length: 28.352941 | max_line_length: 94 | alphanum_fraction: 0.628285
hexsha: 7b693528725146e4969215222e3a497287b2ac6f | size: 23,859 | ext: py | lang: Python
path: Community Samples/Python2/SendOMFDataToPISystem_fromPhidgetsIRTempSensor.py | repo: mschellenger96/OMF-Samples @ 7eb3c5be77e66f4360227dc14def28f688037873 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
#Copyright 2018 OSIsoft, LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#<http://www.apache.org/licenses/LICENSE-2.0>
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# NOTE: this script was designed using the v1.0
# version of the OMF specification, as outlined here:
# http://omf-docs.readthedocs.io/en/v1.0/index.html
# NOTE: this example was designed to use the following USB IR temperature sensor:
# https://www.phidgets.com/?tier=3&catid=14&pcid=12&prodid=1041
# NOTE: to install Linux Phidget support, see https://www.phidgets.com/docs/OS_-_Linux#Debian_Install
# For general installation instructions, see https://www.phidgets.com/docs/Software_Overview#Operating_System_Support
# To specifically enable Python support for the Phidget, see
# https://www.phidgets.com/docs/Language_-_Python#Getting_Started_with_Python
# ************************************************************************
# Import necessary packages
# ************************************************************************
# Import packages
import json
import time
import platform
import socket
import datetime
import random # Used to generate sample data; comment out this line if real data is used
import requests
# Import any special packages
# for example, for a Raspberry PI,
# import RPi.GPIO as GPIO
import sys # Used to parse error messages
import os # Used to sync the internal clock
from Phidget22.Devices.TemperatureSensor import *
from Phidget22.PhidgetException import *
from Phidget22.Phidget import *
from Phidget22.Net import *
# ************************************************************************
# Specify constant values (names, target URLs, et cetera) needed by the script
# ************************************************************************
# Specify the name of this device, or simply use the hostname; this is the name
# of the PI AF Element that will be created, and it'll be included in the names
# of PI Points that get created as well
DEVICE_NAME = "Phidgets IR Temperature Sensor"
#DEVICE_NAME = "MyCustomDeviceName"
# Specify a device location (optional); this will be added as a static
# string attribute to the AF Element that is created
DEVICE_LOCATION = "IoT Test Lab"
# Specify the name of the Assets type message; this will also end up becoming
# part of the name of the PI AF Element template that is created; for example, this could be
# "AssetsType_RaspberryPI" or "AssetsType_Dragonboard"
# You will want to make this different for each general class of IoT module that you use
ASSETS_MESSAGE_TYPE_NAME = DEVICE_NAME + "_assets_type"
#ASSETS_MESSAGE_TYPE_NAME = "assets_type" + "IoT Device Model 74656" # An example
# Note: you can replace DEVICE_NAME with DEVICE_TYPE if you'd like to use a common type for multiple assets
# Similarly, specify the name for the data values type; this should likewise be unique
# for each general class of IoT device--for example, if you were running this
# script on two different devices, each with different numbers and kinds of sensors,
# you'd specify a different data values message type name
# when running the script on each device. If both devices were the same,
# you could use the same DATA_VALUES_MESSAGE_TYPE_NAME
DATA_VALUES_MESSAGE_TYPE_NAME = DEVICE_NAME + "_data_values_type"
#DATA_VALUES_MESSAGE_TYPE_NAME = "data_values_type" + "IoT Device Model 74656" # An example
# Note: you can replace DEVICE_NAME with DEVICE_TYPE if you'd like to use a common type for multiple assets
# Store the id of the container that will be used to receive live data values
DATA_VALUES_CONTAINER_ID = DEVICE_NAME + "_data_values_container"
# Specify the number of seconds to sleep in between value messages
NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES = 2
# Specify whether you're sending data to OSIsoft cloud services or not
SEND_DATA_TO_OSISOFT_CLOUD_SERVICES = False
# Specify the address of the destination endpoint; it should be of the form
# http://<host/ip>:<port>/ingress/messages
# For example, "https://myservername:8118/ingress/messages"
TARGET_URL = "https://lopezpiserver:777/ingress/messages"
#TARGET_URL = "https://localhost:5000/edge/omf/tenants/default/namespaces/data"
# !!! Note: if sending data to OSIsoft cloud services,
# uncomment the below line in order to set the target URL to the OCS OMF endpoint:
#TARGET_URL = "https://dat-a.osisoft.com/api/omf"
# Specify the producer token, a unique token used to identify and authorize a given OMF producer. Consult the OSIsoft Cloud Services or PI Connector Relay documentation for further information.
PRODUCER_TOKEN = "OMFv1"
#PRODUCER_TOKEN = "778408" # An example
# !!! Note: if sending data to OSIsoft cloud services, the producer token should be the
# security token obtained for a particular Tenant and Publisher; see
# http://qi-docs.readthedocs.io/en/latest/OMF_Ingress_Specification.html#headers
#PRODUCER_TOKEN = ""
# ************************************************************************
# Specify options for sending web requests to the target
# ************************************************************************
# If self-signed certificates are used (true by default),
# do not verify HTTPS SSL certificates; normally, leave this as is
VERIFY_SSL = False
# Specify the timeout, in seconds, for sending web requests
# (if it takes longer than this to send a message, an error will be thrown)
WEB_REQUEST_TIMEOUT_SECONDS = 30
# ************************************************************************
# Helper function: run any code needed to initialize local sensors, if necessary for this hardware
# ************************************************************************
# Below is where you can initialize any global variables that are needed by your application;
# certain sensors, for example, will require global interface or sensor variables
# myExampleInterfaceKitGlobalVar = None
# Define a variable for the Phidget
ch = 0
# Define helper functions to handle when the Phidget is connected or disconnected
def PhidgetAttached(self):
try:
attached = self
print("\nAttach Event Detected (Information Below)")
print("===========================================")
print("Library Version: %s" % attached.getLibraryVersion())
print("Serial Number: %d" % attached.getDeviceSerialNumber())
print("Channel: %d" % attached.getChannel())
print("Channel Class: %s" % attached.getChannelClass())
print("Channel Name: %s" % attached.getChannelName())
print("Device ID: %d" % attached.getDeviceID())
print("Device Version: %d" % attached.getDeviceVersion())
print("Device Name: %s" % attached.getDeviceName())
print("Device Class: %d" % attached.getDeviceClass())
print("\n")
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Press Enter to Exit...\n")
readin = sys.stdin.read(1)
exit(1)
def PhidgetDetached(self):
detached = self
try:
print("\nDetach event on Port %d Channel %d" % (detached.getHubPort(), detached.getChannel()))
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Press Enter to Exit...\n")
readin = sys.stdin.read(1)
exit(1)
def ErrorEvent(self, eCode, description):
print("Error %i : %s" % (eCode, description))
# The following function is where you can insert specific initialization code to set up
# sensors for a particular IoT module or platform
def initialize_sensors():
print("\n--- Sensors initializing...")
try:
#For a raspberry pi, for example, to set up pins 4 and 5, you would add
#GPIO.setmode(GPIO.BCM)
#GPIO.setup(4, GPIO.IN)
#GPIO.setup(5, GPIO.IN)
print("--- Waiting 10 seconds for sensors to warm up...")
time.sleep(10)
# Activate the Phidget
global ch
ch = TemperatureSensor()
# Assign event handlers
ch.setOnAttachHandler(PhidgetAttached)
ch.setOnDetachHandler(PhidgetDetached)
ch.setOnErrorHandler(ErrorEvent)
# Wait for the sensor to be attached
print("--- Waiting for the Phidget Object to be attached...")
ch.openWaitForAttachment(5000)
print("--- Sensors initialized!")
# Sync the time on this device to an internet time server
try:
print('\n--- Syncing time...')
os.system('sudo service ntpd stop')
time.sleep(1)
os.system('sudo ntpd -gq')
time.sleep(1)
os.system('sudo service ntpd start')
print('--- Success! Time is ' + str(datetime.datetime.now()))
except:
print('Error syncing time!')
except Exception as ex:
# Log any error, if it occurs
print(str(datetime.datetime.now()) + " Error when initializing sensors: " + str(ex))
# ************************************************************************
# Helper function: REQUIRED: create a JSON message that contains sensor data values
# ************************************************************************
# The following function you can customize to allow this script to send along any
# number of different data values, so long as the values that you send here match
# up with the values defined in the "DataValuesType" OMF message type (see the next section)
# In this example, this function simply generates two random values for the sensor values,
# but here is where you could change this function to reference a library that actually
# reads from sensors attached to the device that's running the script
def create_data_values_message():
# Get the current timestamp in ISO format
timestamp = datetime.datetime.utcnow().isoformat() + 'Z'
# Read the Phidget
temperature = ch.ambientSensor.Temperature * 9/5 + 32
# Assemble a JSON object containing the streamId and any data values
return [
{
"containerid": DATA_VALUES_CONTAINER_ID,
"values": [
{
"Time": timestamp,
# Again, in this example,
# we're just sending along random values for these two "sensors"
#"Raw Sensor Reading 1": 100*random.random(),
#"Raw Sensor Reading 2": 100*random.random()
"Temperature": temperature
# If you wanted to read, for example, the digital GPIO pins
# 4 and 5 on a Raspberry PI,
# you would add to the earlier package import section:
# import RPi.GPIO as GPIO
# then add the below 3 lines to the above initialize_sensors
# function to set up the GPIO pins:
# GPIO.setmode(GPIO.BCM)
# GPIO.setup(4, GPIO.IN)
# GPIO.setup(5, GPIO.IN)
# and then lastly, you would change the two Raw Sensor reading lines above to
# "Raw Sensor Reading 1": GPIO.input(4),
# "Raw Sensor Reading 2": GPIO.input(5)
}
]
}
]
# ************************************************************************
# Helper function: REQUIRED: wrapper function for sending an HTTPS message
# ************************************************************************
# Define a helper function to allow easily sending web request messages;
# this function can later be customized to allow you to port this script to other languages.
# All it does is take in a data object and a message type, and it sends an HTTPS
# request to the target OMF endpoint
def send_omf_message_to_endpoint(action, message_type, message_json):
try:
# Assemble headers that contain the producer token and message type
# Note: in this example, the only action that is used is "create",
# which will work totally fine;
# to expand this application, you could modify it to use the "update"
# action to, for example, modify existing AF element template types
web_request_header = {
'producertoken': PRODUCER_TOKEN,
'messagetype': message_type,
'action': action,
'messageformat': 'JSON',
'omfversion': '1.0'
}
        # !!! Note: if desired, comment out the below line to stop printing the outgoing message
        print('\nOutgoing message: ' + json.dumps(message_json))
# Send the request, and collect the response; json.dumps is used to
# properly format the message JSON so that it can be sent as a web request
response = requests.post(
TARGET_URL,
headers=web_request_header,
data=json.dumps(message_json),
verify=VERIFY_SSL,
timeout=WEB_REQUEST_TIMEOUT_SECONDS
)
# Print a debug message, if desired; note: you should receive a
# response code 200 or 202 if the request was successful!
print(
'Response from sending a message of type ' +
'"{0}" with action "{1}": {2} {3}'.format(
message_type,
action,
response.status_code,
response.text
)
)
except Exception as ex:
# Log any error, if it occurs
print(str(datetime.datetime.now()) + " Error during web request: " + str(ex))
# ************************************************************************
# Turn off HTTPS warnings, if desired
# (if the default certificate configuration was used by the PI Connector)
# ************************************************************************
# Suppress insecure HTTPS warnings, if an untrusted certificate is used by the target endpoint
# Remove if targeting trusted targets
try:
if not VERIFY_SSL:
requests.packages.urllib3.disable_warnings()
except Exception as ex:
# Log any error, if it occurs
print(str(datetime.datetime.now()) + " Possible non-fatal error when disabling SSL validation: " + str(ex))
print(
'\n--- Setup: targeting endpoint "' + TARGET_URL + '"...' +
'\n--- Now sending types, defining containers, and creating assets and links...' +
'\n--- (Note: a successful message will return a 20X response code.)\n'
)
# ************************************************************************
# Create a JSON packet to define the types of streams that will be sent
# ************************************************************************
DYNAMIC_TYPES_MESSAGE_JSON = [
# ************************************************************************
# There are several different message types that will be used by this script, but
# you can customize this script for your own needs by modifying the types:
# First, you can modify the "AssetsType", which will allow you to customize which static
# attributes are added to the new PI AF Element that will be created, and second,
# you can modify the "DataValuesType", which will allow you to customize this script to send
# additional sensor values, in addition to (or instead of) the two shown here
# This values type is going to be used to send real-time values; feel free to rename the
# values from "Raw Sensor Reading 1" to, say, "Temperature", or "Pressure"
# Note:
# all keywords ("id", "type", "classification", etc. are case sensitive!)
# For a list of the specific keywords used in these messages,
# see http://omf-docs.readthedocs.io/
{
"id": DATA_VALUES_MESSAGE_TYPE_NAME,
"type": "object",
"classification": "dynamic",
"properties": {
"Time": {
"format": "date-time",
"type": "string",
"isindex": True
},
"Temperature": {
"type": "number"
}
# For example, to allow you to send a string-type live data value,
# such as "Status", you would add
#"Status": {
# "type": "string"
#}
}
}
]
# ************************************************************************
# Send the DYNAMIC types message, so that these types can be referenced in all later messages
# ************************************************************************
send_omf_message_to_endpoint("create", "Type", DYNAMIC_TYPES_MESSAGE_JSON)
# !!! Note: if sending data to OCS, static types are not included!
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
STATIC_TYPES_MESSAGE_JSON = [
# This asset type is used to define a PI AF Element that will be created;
# this type also defines two static string attributes that will be created
# as well; feel free to rename these or add additional
# static attributes for each Element (PI Point attributes will be added later)
# The name of this type will also end up being part of the name of the PI AF Element template
# that is automatically created
{
"id": ASSETS_MESSAGE_TYPE_NAME,
"type": "object",
"classification": "static",
"properties": {
"Name": {
"type": "string",
"isindex": True
},
"Device Type": {
"type": "string"
},
"Location": {
"type": "string"
},
"Data Ingress Method": {
"type": "string"
}
# For example, to add a number-type static
# attribute for the device model, you would add
# "Model": {
# "type": "number"
#}
}
}
]
# ************************************************************************
# Send the STATIC types message, so that these types can be referenced in all later messages
# ************************************************************************
send_omf_message_to_endpoint("create", "Type", STATIC_TYPES_MESSAGE_JSON)
# ************************************************************************
# Create a JSON packet to define containerids and the type
# (using the types listed above) for each new data events container
# ************************************************************************
# The device name that you specified earlier will be used as the AF Element name!
NEW_AF_ELEMENT_NAME = DEVICE_NAME
CONTAINERS_MESSAGE_JSON = [
{
"id": DATA_VALUES_CONTAINER_ID,
"typeid": DATA_VALUES_MESSAGE_TYPE_NAME
}
]
# ************************************************************************
# Send the container message, to instantiate this particular container;
# we can now directly start sending data to it using its Id
# ************************************************************************
send_omf_message_to_endpoint("create", "Container", CONTAINERS_MESSAGE_JSON)
# !!! Note: if sending data to OCS, static types are not included!
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
# ************************************************************************
    # Create a JSON packet containing the asset and
# linking data for the PI AF asset that will be made
# ************************************************************************
# Here is where you can specify values for the static PI AF attributes;
# in this case, we're auto-populating the Device Type,
# but you can manually hard-code in values if you wish
# we also add the LINKS to be made, which will both position the new PI AF
# Element, so it will show up in AF, and will associate the PI Points
# that will be created with that Element
ASSETS_AND_LINKS_MESSAGE_JSON = [
{
# This will end up creating a new PI AF Element with
# this specific name and static attribute values
"typeid": ASSETS_MESSAGE_TYPE_NAME,
"values": [
{
"Name": NEW_AF_ELEMENT_NAME,
"Device Type": (
platform.machine() + " - " + platform.platform() + " - " + platform.processor()
),
"Location": DEVICE_LOCATION,
"Data Ingress Method": "OMF"
}
]
},
{
"typeid": "__Link",
"values": [
# This first link will locate such a newly created AF Element under
# the root PI element targeted by the PI Connector in your target AF database
            # This was specified in the Connector Relay Admin page; note that a new
# parent element, with the same name as the PRODUCER_TOKEN, will also be made
{
"Source": {
"typeid": ASSETS_MESSAGE_TYPE_NAME,
"index": "_ROOT"
},
"Target": {
"typeid": ASSETS_MESSAGE_TYPE_NAME,
"index": NEW_AF_ELEMENT_NAME
}
},
# This second link will map new PI Points (created by messages
            # sent to the data values container) to a newly created element
{
"Source": {
"typeid": ASSETS_MESSAGE_TYPE_NAME,
"index": NEW_AF_ELEMENT_NAME
},
"Target": {
"containerid": DATA_VALUES_CONTAINER_ID
}
}
]
}
]
# ************************************************************************
# Send the message to create the PI AF asset; it won't appear in PI AF,
# though, because it hasn't yet been positioned...
# ************************************************************************
send_omf_message_to_endpoint("create", "Data", ASSETS_AND_LINKS_MESSAGE_JSON)
# ************************************************************************
# Initialize sensors prior to sending data (if needed), using the function defined earlier
# ************************************************************************
initialize_sensors()
# ************************************************************************
# Finally, loop indefinitely, sending random events
# conforming to the value type that we defined earlier
# ************************************************************************
print(
'\n--- Now sending live data every ' + str(NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES) +
' second(s) for device "' + NEW_AF_ELEMENT_NAME + '"... (press CTRL+C to quit at any time)\n'
)
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
print(
'--- (Look for a new AF Element named "' + NEW_AF_ELEMENT_NAME + '".)\n'
)
while True:
# Call the custom function that builds a JSON object that
# contains new data values; see the beginning of this script
VALUES_MESSAGE_JSON = create_data_values_message()
# Send the JSON message to the target URL
send_omf_message_to_endpoint("create", "Data", VALUES_MESSAGE_JSON)
# Send the next message after the required interval
time.sleep(NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES)
avg_line_length: 44.679775 | max_line_length: 193 | alphanum_fraction: 0.583092
hexsha: 308c073b21961d6c57fcb4452799d4593d61ab6c | size: 510 | ext: py | lang: Python
path: zookeeper/kazoo-os.py | repo: li-ma/homework @ d75b1752a02bd028af0806683abe079c7b0a9b29 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
import kazoo
from kazoo.client import KazooClient
from kazoo.handlers.eventlet import SequentialEventletHandler
from kazoo.retry import KazooRetry
_handler = SequentialEventletHandler()
_retry = KazooRetry(max_tries=3, delay=0.5, backoff=2,
sleep_func=_handler.sleep_func)
client = KazooClient(hosts='192.168.163.129:2181',
handler=_handler,
timeout=30,
connection_retry=_retry)
#import pdb
#pdb.set_trace()
client.start()
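Once client.start() returns, the handle behaves like any KazooClient. A minimal usage sketch; the znode path and payload below are made up:

```python
# Round-trip a value through ZooKeeper using the client created above.
client.ensure_path("/homework/demo")          # create the path if it does not exist
client.set("/homework/demo", b"hello zookeeper")
data, stat = client.get("/homework/demo")
print(data.decode(), stat.version)
client.stop()
```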
avg_line_length: 26.842105 | max_line_length: 61 | alphanum_fraction: 0.680392
hexsha: 91f6c5d788d0ab945c8df3a505e88889f301e574 | size: 16,108 | ext: py | lang: Python
path: pandas/_version.py | repo: saurav2608/pandas @ b90f9db6d23f64b0ce9ff469445e1c31013a07c0 | licenses: ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"]
stars: 2 (2021-04-07T13:56:06.000Z – 2021-04-12T13:45:23.000Z) | issues: null | forks: null
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig(object):
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "pandas-"
cfg.versionfile_source = "pandas/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
HANDLERS = {} # type: Dict[str, Dict[str, Callable]]
def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator
def decorate(f: Callable) -> Callable:
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run {dispcmd}".format(dispcmd=dispcmd))
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
if verbose:
print("unable to run {dispcmd} (error)".format(dispcmd=dispcmd))
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '{root}', but '{dirname}' "
"doesn't start with prefix '{parentdir_prefix}'".format(
root=root, dirname=dirname,
parentdir_prefix=parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '{}', no digits".format(",".join(refs - tags)))
if verbose:
print("likely tags: {}".format(",".join(sorted(tags))))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking {r}".format(r=r))
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in {root}".format(root=root))
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: "
"'{describe_out}'".format(
describe_out=describe_out))
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
fmt = ("tag '{full_tag}' doesn't start with prefix "
"'{tag_prefix}'")
msg = fmt.format(full_tag=full_tag, tag_prefix=tag_prefix)
if verbose:
print(msg)
pieces["error"] = msg
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "{:d}.g{}".format(pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.{:d}.g{}".format(pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post{:d}".format(pieces["distance"])
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g{}".format(pieces["short"])
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g{}".format(pieces["short"])
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always -long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '{style}'".format(style=style))
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
avg_line_length: 34.790497 | max_line_length: 79 | alphanum_fraction: 0.590576
hexsha: ef205f4b979c06b57829ef1caf6e9b483a15f0a8 | size: 9,821 | ext: py | lang: Python | path: smrt/match_ccs.py | licenses: ["MIT"]
stars: 5 (2019-03-12T03:50:39.000Z – 2021-12-17T07:00:57.000Z) via CellLineage/SLOTH @ b9c81b10af522b648b2d230c027cb0712b57c123
issues: null via shadowdeng1994/SLOTH @ a996163d8cde7e31766264e1f1e0d32f47e93666
forks: 2 (2021-03-24T17:05:59.000Z – 2021-12-14T13:28:11.000Z) via shadowdeng1994/SLOTH @ a996163d8cde7e31766264e1f1e0d32f47e93666
#!/usr/bin/env python3
"""match sequence with sample barcode and UMI index.
- update in 2018-08-06: support reverse complement
- method
one step match
- pattern
# 454UPR-(UMI)-3kF-(sequence)-3kR-(sampleBarcode)
"""
import logging
import multiprocessing as mp
import os
import sys
import pysam
import regex
LOGGER: logging.Logger = logging.getLogger()
if not LOGGER.handlers:
HANDLER: logging.StreamHandler = logging.StreamHandler()
FORMATTER: logging.Formatter = logging.Formatter(
"%(asctime)s %(name)-12s %(levelname)-8s %(message)s"
)
HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.DEBUG)
# LOGGER.setLevel(logging.INFO)
_SEQ_COMPLEMENTS = str.maketrans("ACTG", "TGAC")
def reverse_complement(seq):
return seq.translate(_SEQ_COMPLEMENTS)[::-1]
def find_dna(p, seq):
matches = [(m, 1) for m in regex.finditer(p, seq, overlapped=False)] + [
(m, -1)
for m in regex.finditer(p, reverse_complement(seq), overlapped=False)
]
if len(matches) == 1:
return matches[0]
return (None, None)
def match_read(read):
seq = read.query_sequence
# filter 1: check length
if len(seq) < 2848 or len(seq) > 3448:
LOGGER.info(f"{read.query_name}:\tlength is not in range")
return None
# filter 2: check barcode
p_barcode = regex.compile(
r"(?:GGAACTAGTTCCTAAGCTAGTAGGTTAGTA){e<=5}(?:TCTACACAAGGAACAAACACTG){e<=4}((?:GATG){e<=1}(?:[ATGC]{14,18})(?:ACCC){e<=1})"
)
barcode_match, barcode_orientation = find_dna(p_barcode, seq)
if not barcode_match:
LOGGER.info(f"{read.query_name}:\tbarcode not match")
return None
# filter 3: check umi
p_umi = regex.compile(
r"(?:TCGCCTCCCTCGCGCCA){e<=3}((?:TCAG){e<=1}(?:[ATGC]{12,16})(?:AGAA){e<=1})(?:GCTTACTAACCAGCCAACTAGC){e<=4}(?:TGGCTAGCAGGTAAACCTGCCAGCCTGCCG){e<=5}"
)
umi_match, umi_orientation = find_dna(p_umi, seq)
if not umi_match:
LOGGER.info(f"{read.query_name}:\tumi not match")
return None
# filter 4: check target
p_target = regex.compile(
r"(?:GCTTACTAACCAGCCAACTAGC){e<=4}((?:TGGCTAGCAGGTAAACCTGCCAGCCTGCCG){e<=5}[ATGC]{2780,3180}(?:GGAACTAGTTCCTAAGCTAGTAGGTTAGTA){e<=5})(?:TCTACACAAGGAACAAACACTG){e<=4}((?:GATG){e<=1}(?:[ATGC]{14,18})(?:ACCC){e<=1})"
)
target_match, target_orientation = find_dna(p_target, seq)
if not target_match:
LOGGER.info(f"{read.query_name}:\ttarget not match")
return None
if not (
umi_orientation == target_orientation == barcode_orientation
and umi_match.end(1) < target_match.start(1)
and barcode_match.start(1) > target_match.end(1)
):
LOGGER.debug(f"{read.query_name}:\tmatch in not in correct order")
return None
LOGGER.debug(f"{read.query_name}:\tpass match filter")
orientation_str = "+" if target_orientation == 1 else "-"
quality = (
read.query_qualities
if orientation_str == "+"
else read.query_qualities[::-1]
)
umi_span = f"{umi_match.start(1)}-{umi_match.end(1)}"
umi_seq = umi_match.group(1)
umi_qual = quality[umi_match.start(1) : umi_match.end(1)]
target_span = f"{target_match.start(1)}-{target_match.end(1)}"
target_seq = target_match.group(1)
target_qual = quality[target_match.start(1) : target_match.end(1)]
barcode_span = f"{barcode_match.start(1)}-{barcode_match.end(1)}"
barcode_seq = barcode_match.group(1)
barcode_qual = quality[barcode_match.start(1) : barcode_match.end(1)]
# output match only when fit this step
read_matched = pysam.AlignedSegment()
read_matched.query_name = read.query_name + "/matched"
read_matched.query_sequence = target_seq
read_matched.query_qualities = target_qual
read_matched.flag = read.flag
read_matched.reference_id = read.reference_id
read_matched.reference_start = read.reference_start
read_matched.mapping_quality = read.mapping_quality
read_matched.cigar = read.cigar
read_matched.next_reference_id = read.next_reference_id
read_matched.next_reference_start = read.next_reference_start
read_matched.template_length = read.template_length
read_matched.set_tags(
read.get_tags()
+ [
("RN", read.query_name),
("RN", orientation_str),
("TR", target_span),
("UR", umi_span),
("US", umi_seq),
("UQ", umi_qual),
("BR", barcode_span),
("BS", barcode_seq),
("BQ", barcode_qual),
]
)
return read_matched
def match_read_dict(read_dict):
"""
{'name': 'm54079_180817_091252/32440632/ccs/rev', '
flag': '4', '
ref_name': '*', '
ref_pos': '0', '
map_quality': '255', '
cigar': '*', '
next_ref_name': '*', '
next_ref_pos': '0', '
length': '0', '
seq': 'GCCTCCCTCGCGCCATCCGTTAGGATT...
qual': ")+5=/G.8=B5=3AA0,)81%A;D?....
tags': ['np:i:2', '
rq:f:0.936408', '
rs:B:i,6,0,0,0,0,0', '
sn:B:f,6.47793,12.5605,6.01262,9.98906', '
za:f:2.75598', '
zm:i:32440632', '
zs:B:f,4.01855,4.07533,-1.52781', '
RG:Z:a751be35']}
"""
seq = read_dict["seq"]
name = read_dict["name"]
# filter 1: check length
if len(seq) < 2848 or len(seq) > 3448:
LOGGER.info(f"{name}:\tlength is not in range")
return None
# filter 2: check barcode
p_barcode = regex.compile(
r"(?:GGAACTAGTTCCTAAGCTAGTAGGTTAGTA){e<=5}(?:TCTACACAAGGAACAAACACTG){e<=4}((?:GATG){e<=1}(?:[ATGC]{14,18})(?:ACCC){e<=1})"
)
barcode_match, barcode_orientation = find_dna(p_barcode, seq)
if not barcode_match:
LOGGER.info(f"{name}:\tbarcode not match")
return None
# filter 3: check umi
p_umi = regex.compile(
r"(?:TCGCCTCCCTCGCGCCA){e<=3}((?:TCAG){e<=1}(?:[ATGC]{12,16})(?:AGAA){e<=1})(?:GCTTACTAACCAGCCAACTAGC){e<=4}(?:TGGCTAGCAGGTAAACCTGCCAGCCTGCCG){e<=5}"
)
umi_match, umi_orientation = find_dna(p_umi, seq)
if not umi_match:
LOGGER.info(f"{name}:\tumi not match")
return None
# filter 4: check target
p_target = regex.compile(
r"(?:GCTTACTAACCAGCCAACTAGC){e<=4}((?:TGGCTAGCAGGTAAACCTGCCAGCCTGCCG){e<=5}[ATGC]{2780,3180}(?:GGAACTAGTTCCTAAGCTAGTAGGTTAGTA){e<=5})(?:TCTACACAAGGAACAAACACTG){e<=4}((?:GATG){e<=1}(?:[ATGC]{14,18})(?:ACCC){e<=1})"
)
target_match, target_orientation = find_dna(p_target, seq)
if not target_match:
LOGGER.info(f"{name}:\ttarget not match")
return None
if not (
umi_orientation == target_orientation == barcode_orientation
and umi_match.end(1) < target_match.start(1)
and barcode_match.start(1) > target_match.end(1)
):
LOGGER.debug(f"{name}:\tmatch in not in correct order")
return None
LOGGER.debug(f"{name}:\tpass match filter")
orientation_str = "+" if target_orientation == 1 else "-"
quality = (
read_dict["qual"]
if orientation_str == "+"
else read_dict["qual"][::-1]
)
umi_span = f"{umi_match.start(1)}-{umi_match.end(1)}"
umi_seq = umi_match.group(1)
umi_qual = quality[umi_match.start(1) : umi_match.end(1)]
target_span = f"{target_match.start(1)}-{target_match.end(1)}"
target_seq = target_match.group(1)
target_qual = quality[target_match.start(1) : target_match.end(1)]
barcode_span = f"{barcode_match.start(1)}-{barcode_match.end(1)}"
barcode_seq = barcode_match.group(1)
barcode_qual = quality[barcode_match.start(1) : barcode_match.end(1)]
    # build the output only after every filter above has passed
read_dict["name"] = name + "/matched"
read_dict["seq"] = target_seq
read_dict["qual"] = target_qual
read_dict["tags"] += [
f"RN:Z:{name}",
f"RN:Z:{orientation_str}",
f"TR:Z:{target_span}",
f"UR:Z:{umi_span}",
f"US:Z:{umi_seq}",
f"UQ:Z:{umi_qual}",
f"BR:Z:{barcode_span}",
f"BS:Z:{barcode_seq}",
f"BQ:Z:{barcode_qual}",
]
return read_dict
def run_iter(input_bam_file, output_bam_file):
"""test run."""
infile = pysam.AlignmentFile(input_bam_file, "rb", check_sq=False)
with pysam.AlignmentFile(
output_bam_file, "wb", template=infile
) as outfile:
for read in infile.fetch(until_eof=True):
read_matched = match_read(read)
if read_matched:
outfile.write(read_matched)
def run_pool(input_bam_file, output_bam_file):
infile = pysam.AlignmentFile(input_bam_file, "rb", check_sq=False)
_HEADER = infile.header
read_list = [read.to_dict() for read in infile.fetch(until_eof=True)]
LOGGER.info("finish reading file into list")
with mp.Pool(processes=mp.cpu_count() - 10) as pool:
matches_list = pool.map(match_read_dict, read_list)
with pysam.AlignmentFile(
output_bam_file, "wb", template=infile
) as outfile:
for read_dict in matches_list:
if read_dict:
read_matched = pysam.AlignedSegment.from_dict(
read_dict, _HEADER
)
read_matched.from_dict(read_dict, _HEADER)
outfile.write(read_matched)
if __name__ == "__main__":
# sample_name = "flyS4_runA_ssccs"
SAMPLE_NAME = sys.argv[1]
INPUT_BAM_FILE = f"../pacbio_data/ccs/{SAMPLE_NAME}.bam"
OUTPUT_BAM_FILE = f"./sequence_matched/{SAMPLE_NAME}_match.bam"
if not os.path.exists(INPUT_BAM_FILE):
raise ValueError("BAM FILE of input sample does not exist!")
# test run
# run_iter(INPUT_BAM_FILE, OUTPUT_BAM_FILE)
# parallel run
run_pool(INPUT_BAM_FILE, OUTPUT_BAM_FILE)
| 33.291525
| 221
| 0.631504
|
0684c91b3a5746c48b6c6ffd9423ab5a2f9e5e1e
| 1,171
|
py
|
Python
|
ALREC_Method/rene/dataset_defines.py
|
proy3/Abnormal_Trajectory_Classifier
|
a6b27c6847262e9703a0f3404c85c135415c1d4c
|
[
"MIT"
] | 6
|
2019-10-29T03:05:14.000Z
|
2022-03-18T05:14:25.000Z
|
ALREC_Method/rene/dataset_defines.py
|
proy3/Abnormal_Trajectory_Classifier
|
a6b27c6847262e9703a0f3404c85c135415c1d4c
|
[
"MIT"
] | 1
|
2022-03-11T03:49:34.000Z
|
2022-03-11T03:49:34.000Z
|
ALREC_Method/rene/dataset_defines.py
|
proy3/Abnormal_Trajectory_Classifier
|
a6b27c6847262e9703a0f3404c85c135415c1d4c
|
[
"MIT"
] | 1
|
2021-12-15T09:21:26.000Z
|
2021-12-15T09:21:26.000Z
|
"""
This script contains some global variables that are used throughout this specific dataset.
"""
# Dataset file names
raw_input_file_all = '/home/travail/datasets/urban_tracker/rene/rene_annotations/rene_gt.sqlite'
input_raw_image_frame_path = '/home/travail/datasets/urban_tracker/rene/rene_frames/'
raw_input_file_peds = ''
raw_input_file_cars = raw_input_file_all[:raw_input_file_all.rfind('.')] + '_cars.sqlite'
raw_input_file_bike = raw_input_file_all[:raw_input_file_all.rfind('.')] + '_bike.sqlite'
raw_input_file_names = [raw_input_file_peds, raw_input_file_cars, raw_input_file_bike]
raw_input_file_all_2 = 'C:/Users/panka/PyCharmProjects/Dataset/rene_annotations/rene_annotations/rene_gt.sqlite'
input_raw_image_frame_path_2 = 'C:/Users/panka/PyCharmProjects/Dataset/rene_frames/rene_frames/'
raw_input_file_cars_2 = raw_input_file_all_2[:raw_input_file_all_2.rfind('.')] + '_cars.sqlite'
raw_input_file_bike_2 = raw_input_file_all_2[:raw_input_file_all_2.rfind('.')] + '_bike.sqlite'
raw_input_file_names_2 = [raw_input_file_peds, raw_input_file_cars_2, raw_input_file_bike_2]
# Used for generating velocities
video_data_fps = 30
best_deep_ae_model = 28
| 50.913043
| 112
| 0.826644
|
5305e0ba3b901fe7ac4144e292bafc7091455923
| 29,369
|
py
|
Python
|
src/models/sequence/ss/kernel.py
|
tarepan/state-spaces
|
3427a1f12a29ad8ccebaa8ee105f62f7f8a3d891
|
[
"Apache-2.0"
] | null | null | null |
src/models/sequence/ss/kernel.py
|
tarepan/state-spaces
|
3427a1f12a29ad8ccebaa8ee105f62f7f8a3d891
|
[
"Apache-2.0"
] | null | null | null |
src/models/sequence/ss/kernel.py
|
tarepan/state-spaces
|
3427a1f12a29ad8ccebaa8ee105f62f7f8a3d891
|
[
"Apache-2.0"
] | 1
|
2022-01-27T17:13:26.000Z
|
2022-01-27T17:13:26.000Z
|
""" Core S3 convolution kernel implementing the 'normal plus low-rank' algorithm.
The main module is SSKernelNPLR, which stores parameters A, B, C, dt, and calling it creates the SSM convolution kernel bar{K}.
A much simpler version SSKernelSlow is included for illustration purposes: it has the same output, but uses the naive algorithm which is much slower. This module is meant for testing and exposition, to understand what the State Space Kernel actually does.
HiPPOSSKernel specializes the SSKernels to specific instantiations of HiPPO matrices.
"""
if __name__ == "__main__":
import sys
import pathlib
p = pathlib.Path().absolute()
print("Adding path: ", p)
sys.path.append(str(p))
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import scipy.fft
from einops import rearrange, repeat
from opt_einsum import contract
from omegaconf import DictConfig
import src.models.hippo.hippo as hippo
from src.models.functional.krylov import krylov, power
import src.utils.train
log = src.utils.train.get_logger(__name__)
try:
from extensions.cauchy.cauchy import cauchy_mult
has_cauchy_extension = True
except:
log.warn(
"CUDA extension for cauchy multiplication not found. Install by going to extensions/cauchy/ and running `python setup.py install`. This should speed up end-to-end training by 10-50%"
)
has_cauchy_extension = False
try:
import src.models.functional.cauchy as cauchy
except ImportError:
if not has_cauchy_extension:
log.error(
"Install at least one of pykeops or cauchy_mult."
)
_isnan = lambda x: torch.isnan(x).any()
_isinf = lambda x: torch.isinf(x).any()
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)
class OptimModule(nn.Module):
""" Interface for Module that allows registering buffers/parameters with configurable optimizer hyperparameters """
def register(self, name, tensor, trainable=0, lr=None, wd=None, repeat=1):
"""Utility method: register a tensor as a buffer or trainable parameter"""
if trainable == 0:
self.register_buffer(name, tensor)
elif trainable == 1:
self.register_parameter(name, nn.Parameter(tensor))
elif trainable == 2:
tensor = tensor.repeat(repeat, *(1,) * len(tensor.shape))
self.register_parameter(name, nn.Parameter(tensor))
else:
raise NotImplementedError
optim = {}
if trainable and lr is not None:
optim["lr"] = lr
# setattr(getattr(self, name), '_lr', lr)
if trainable and wd is not None:
optim["weight_decay"] = wd
# setattr(getattr(self, name), '_wd', wd)
if len(optim) > 0:
setattr(getattr(self, name), "_optim", optim)
class SSKernelNPLR(OptimModule):
"""Stores a representation of and computes the SSKernel function K_L(A^dt, B^dt, C) corresponding to a discretized state space, where A is Normal + Low Rank (NPLR)
The class name stands for 'State-Space SSKernel for Normal Plus Low-Rank'.
The parameters of this function are as follows.
A: (... N N) the state matrix
B: (... N) input matrix
C: (... N) output matrix
dt: (...) timescales / discretization step size
p, q: (... P N) low-rank correction to A, such that Ap=A+pq^T is a normal matrix
The forward pass of this Module returns:
    (... L) that represents the FFT SSKernel_L(A^dt, B^dt, C)
"""
@torch.no_grad()
def _process_C(self, L, double_length=False):
C = torch.view_as_complex(self.C)
self._setup(setup_C=False)
dA = self.dA
dA_L = power(L, dA)
# I = torch.eye(dA.size(-1)).to(dA)
N = C.size(-1)
# Multiply C by I - dA_L
C_ = C[..., 0, :]
C_ = torch.cat([C_, C_.conj()], dim=-1)
prod = contract("... m n, ... n -> ... m", dA_L.conj().transpose(-1, -2), C_)
if double_length: # Multiply by I + dA_L instead
C_ = C_ + prod
else:
C_ = C_ - prod
C_ = C_[..., :N]
self.C[..., 0, :, :].copy_(torch.view_as_real(C_))
def _nodes(self, L, dtype, device):
        # Cache FFT nodes and their "unprocessed" version obtained with the bilinear transform
# nodes = torch.tensor(np.exp(-2j * np.pi / (L)), dtype=torch.cfloat, device=Ap.device) # \omega_{2L}
nodes = torch.tensor(
np.exp(-2j * np.pi / (L)), dtype=dtype, device=device
) # \omega_{2L}
nodes = nodes ** torch.arange(0, L // 2 + 1, device=device)
z = 2 * (1 - nodes) / (1 + nodes)
return nodes, z
def __init__(
self,
L,
w,
p,
q,
B,
C,
log_dt,
trainable=None,
lr=None,
setup_C=False,
keops=False,
):
"""Optim arguments into a representation. This occurs after init so that these operations can occur after moving model to device
L: Maximum length; this module computes SSKernel function of length L
A: (..., N, N) represented by diag(w) - pq^*
B: (..., N)
C: (..., N)
dt: (...)
p: (..., N) low-rank correction to A
q: (..., N)
"""
super().__init__()
self.keops = keops
# Rank of low-rank correction
assert p.shape[-2] == q.shape[-2]
self.rank = p.shape[-2]
self.L = L
# Augment B and C with low rank correction
B = B.unsqueeze(-2) # (..., 1, N)
C = C.unsqueeze(-2) # (..., 1, N)
if len(B.shape) > len(p.shape):
p = p.repeat(B.shape[:-2] + (1, 1))
B = torch.cat([B, p], dim=-2)
if len(C.shape) > len(q.shape):
q = q.repeat(C.shape[:-2] + (1, 1))
C = torch.cat([C, q], dim=-2)
if L is not None:
nodes, z = self._nodes(L, dtype=w.dtype, device=w.device)
self.register_buffer("nodes", torch.view_as_real(nodes))
self.register_buffer("z", torch.view_as_real(z))
# Register parameters
if trainable is None:
trainable = DictConfig({"A": 0, "B": 0, "C": 0, "dt": 0})
if lr is None:
lr = DictConfig({"A": None, "B": None, "C": None, "dt": None})
repeat = C.size(0)
self.register("log_dt", log_dt, trainable.dt, lr.dt, 0.0)
self.register("w", torch.view_as_real(w), trainable.A, lr.A, 0.0, repeat=repeat)
self.register("B", torch.view_as_real(B), trainable.B, lr.B, 0.0, repeat=repeat)
self.register("C", torch.view_as_real(C), trainable.C, lr.C)
if setup_C:
self._process_C(L)
def forward(self, state=None, rate=1.0, L=None):
"""
state: (..., s, N) extra tensor that augments B
rate: sampling rate factor
"""
# if L is not None: raise NotImplementedError
# TODO: handle potential length doubling logic so that max_len doesn't need to be passed in
        # Resolve L first so that a None value never reaches the length comparison below
        if L is None:
            L = self.L
        while rate == 1.0 and L > self.L:
            log.info(f"S3: Doubling length from L = {self.L} to {2*self.L}")
            self.double_length()
        if rate == 1.0:
            L = self.L
        else:
            rate = self.L / L
dt = torch.exp(self.log_dt) * rate
B = torch.view_as_complex(self.B)
C = torch.view_as_complex(self.C)
w = torch.view_as_complex(self.w) # (..., N)
# z = torch.view_as_complex(self.z) # (..., L)
# TODO adjust based on rate times normal max length
if L == self.L:
nodes = torch.view_as_complex(self.nodes)
z = torch.view_as_complex(self.z) # (..., L)
else:
nodes, z = self._nodes(L, dtype=w.dtype, device=w.device)
# Augment B
if state is not None: # TODO have not updated
# Have to "unbilinear" the state to put it into the same "type" as B
# Compute (I + dt/2 A) @ state
s = state.transpose(0, 1) # (H B N)
p = B[..., 1:, :] # (... r N)
q = C[..., 1:, :] # (... r N)
# Calculate contract('... s n, ... r n, ... r m -> ... s m', sV, qV.conj(), pV), but take care of conjugate symmetry
sA = (
s * w.unsqueeze(-2)
- (2 + 0j) * (s @ q.conj().transpose(-1, -2)).real @ p
)
s = s / dt.unsqueeze(-1).unsqueeze(-1) + sA / 2
B = torch.cat([s, B], dim=-2) # (..., 2+s, N)
# Incorporate dt into A
w = w * dt.unsqueeze(-1) # (... N)
# Incorporate B and C batch dimensions
v = B.unsqueeze(-3) * C.unsqueeze(-2).conj() # (..., 2, 2, N)
w = w[..., None, None, :] # (..., 1, 1, N)
z = z[..., None, None, :] # (..., 1, 1, L)
# Calculate resolvent at nodes
if not self.keops and has_cauchy_extension:
r = cauchy_mult(v, z, w, symmetric=True)
else:
r = cauchy.cauchy_conj(v, z, w)
r = r * dt[..., None, None, None] # (..., 1+r, 1+r, L)
# Low-rank Woodbury correction
if self.rank == 1:
k_f = r[..., :-1, :-1, :] - r[..., :-1, -1:, :] * r[..., -1:, :-1, :] / (
1 + r[..., -1:, -1:, :]
)
elif self.rank == 2:
r00 = r[..., : -self.rank, : -self.rank, :]
r01 = r[..., : -self.rank, -self.rank :, :]
r10 = r[..., -self.rank :, : -self.rank, :]
r11 = r[..., -self.rank :, -self.rank :, :]
det = (1 + r11[..., :1, :1, :]) * (1 + r11[..., 1:, 1:, :]) - r11[
..., :1, 1:, :
] * r11[..., 1:, :1, :]
s = (
r01[..., :, :1, :] * (1 + r11[..., 1:, 1:, :]) * r10[..., :1, :, :]
+ r01[..., :, 1:, :] * (1 + r11[..., :1, :1, :]) * r10[..., 1:, :, :]
- r01[..., :, :1, :] * (r11[..., :1, 1:, :]) * r10[..., 1:, :, :]
- r01[..., :, 1:, :] * (r11[..., 1:, :1, :]) * r10[..., :1, :, :]
)
s = s / det
k_f = r00 - s
else:
r00 = r[..., : -self.rank, : -self.rank, :]
r01 = r[..., : -self.rank, -self.rank :, :]
r10 = r[..., -self.rank :, : -self.rank, :]
r11 = r[..., -self.rank :, -self.rank :, :]
r11 = rearrange(r11, "... a b n -> ... n a b")
r11 = torch.linalg.inv(torch.eye(self.rank, device=r.device) + r11)
r11 = rearrange(r11, "... n a b -> ... a b n")
k_f = r00 - torch.einsum(
"... i j n, ... j k n, ... k l n -> ... i l n", r01, r11, r10
)
# Final correction for the bilinear transform
k_f = k_f * 2 / (1 + nodes)
k = torch.fft.irfft(k_f) # (..., 1, 1+s, L)
if state is not None:
k_state = k[..., 0, :-1, :] # (..., s, L)
k_state = k_state.transpose(0, 1)
k_B = k[..., 0, -1, :] # (..., L)
return k_B.to(torch.float), k_state.to(torch.float)
else:
return k.squeeze(-2).squeeze(-2).to(torch.float)
@torch.no_grad()
def double_length(self):
self._process_C(self.L, double_length=True)
self.L *= 2
dtype = torch.view_as_complex(self.w).dtype
nodes, z = self._nodes(self.L, dtype=dtype, device=self.w.device)
self.register_buffer("nodes", torch.view_as_real(nodes))
self.register_buffer("z", torch.view_as_real(z))
@torch.no_grad()
def _check(self):
"""Check if A, B, C parameters and vanilla SSKernel construction can be recovered"""
self._setup(setup_C=True)
K = krylov(self.L, self.dA, self.dB, self.dC.conj())
diff = K - self.forward()
print("checking SSKernel construction", torch.sum(diff ** 2))
def _setup(self, setup_C=True):
w = _conj(torch.view_as_complex(self.w))
B = _conj(torch.view_as_complex(self.B))
C = _conj(torch.view_as_complex(self.C))
C = C.conj()
p = B[..., -1, :]
q = C[..., -1, :]
B = B[..., 0, :]
C = C[..., 0, :]
dt = torch.exp(self.log_dt)
d = (2.0 / dt.unsqueeze(-1) - w).reciprocal() # (H, N)
r = (1 + contract("... n, ... n, ... n -> ...", q, d, p)).reciprocal()
# A_f = torch.diag_embed(2./dt[:, None] + w) - contract('... n, ... m -> ... n m', p, q)
# A_b = torch.diag_embed(d) - contract('... p, ... p, ..., ... q, ... q -> ... p q', d, p, r, q, d)
# dA = A_b @ A_f
self.step_params = {
"d": d,
"r": r.unsqueeze(-1) * d * q,
# 'r': r,
"p": p,
"q": q,
"B": B,
"d1": 2.0 / dt.unsqueeze(-1) + w,
}
N = d.size(-1)
H = dt.size(-1)
state = torch.eye(N, dtype=w.dtype, device=w.device).unsqueeze(-2)
u = w.new_zeros(H)
dA = self.step_state_linear(u, state)
dA = rearrange(dA, "n h m -> h m n")
self.dA = dA
u = w.new_ones(H)
state = w.new_zeros(N // 2)
dB = self.step_state_linear(u, state)
dB = _conj(dB)
self.dB = dB
if setup_C:
dA_L = power(self.L, dA)
I = torch.eye(dA.size(-1)).to(dA)
dC = torch.linalg.solve(
I - dA_L.transpose(-1, -2).conj(), C.conj().unsqueeze(-1)
).squeeze(-1)
self.dC = dC
def step_state_linear(self, u=None, state=None):
"""Version of the step function that has time O(N) instead of O(N^2) per step. Unfortunately, as currently implemented it's about 2x slower because it calls several sequential operations. Perhaps a fused CUDA kernel implementation would be much faster"""
N = self.step_params["d"].size(-1)
H = self.log_dt.size(-1)
if u is None:
u = torch.zeros(H, dtype=torch.float, device=self.log_dt.device)
if state is None:
state = torch.zeros(H, N, dtype=torch.cfloat, device=self.log_dt.device)
conj = state.size(-1) != N
step_params = self.step_params.copy()
if conj:
assert state.size(-1) == N // 2
step_params = {k: v[..., : N // 2] for k, v in step_params.items()}
d1 = step_params["d1"] # (H N)
p = step_params["p"] # (H N)
q = step_params["q"] # (H N)
B = step_params["B"] # (H N)
r = step_params["r"]
d = step_params["d"] # (H N)
# dC = self.step_params['dC'] # (H N)
state = state.to(d1)
if conj:
new_state = (
2 * p * torch.sum(q * state, dim=-1, keepdim=True).real
) # conjugated version
else:
new_state = contract("... n, ... m, ... m -> ... n", p, q, state) # (B H N)
new_state = d1 * state - new_state
new_state = new_state + 2.0 * B * u.unsqueeze(-1) # (B H N)
if conj:
A_ = (
2 * p * torch.sum(r * new_state, dim=-1, keepdim=True).real
) # conj version
else:
A_ = contract("... p, ... q, ... q -> ... p", p, r, new_state) # (B H N)
new_state = d * (new_state - A_)
return new_state
def step_state(self, u, state):
state = state.to(self.dA)
conj = state.size(-1) != self.dA.size(-1)
if conj:
state = _conj(state)
next_state = contract("h m n, b h n -> b h m", self.dA, state) + contract(
"h n, b h -> b h n", self.dB, u
)
if conj:
next_state = next_state[..., : state.size(-1) // 2]
return next_state
def step(self, u, state, linear=False):
N = self.step_params["d"].size(-1)
conj = state.size(-1) != N
if linear:
new_state = self.step_state_linear(u, state)
else:
new_state = self.step_state(u, state)
if conj:
assert state.size(-1) == N // 2
# dC = self.dC[..., 0::2].conj()
dC = self.dC[..., : N // 2].conj()
out = 2 * torch.sum(dC * new_state, dim=-1).real # conj version
else:
out = contract("... n, ... n -> ...", self.dC.conj(), new_state)
return out.to(torch.float), new_state
class SSKernelSlow(OptimModule):
"""Slow version of SSKernel function for illustration and benchmarking.
- Caches discretized matrices A^(dt), B^(dt)
- Computes K_L(A^dt, B^dt, C)
Usage:
```
krylov = SSKernelSlow(L, A, B, C, log_dt)()
```
Result is expected to be equal to SSKernelNPLR(L, A, B, C, log_dt, p, q)() for p, q such that A+pq^T is normal
"""
def __init__(self, L, A, B, C, log_dt, trainable=None, lr=None):
super().__init__()
self.N = A.shape[-1]
self.L = L
dA, dB = SSKernelSlow.bilinear(torch.exp(log_dt), A, B)
# Register parameters
if trainable is None:
trainable = DictConfig({"A": 0, "B": 0, "C": 0, "dt": 0})
if lr is None:
lr = DictConfig({"A": None, "B": None, "C": None, "dt": None})
if trainable is not None and lr is not None:
repeat = C.size(0)
self.register("log_dt", log_dt, trainable.dt, lr.dt)
self.register("dA", dA, trainable.A, lr.A, repeat=repeat)
self.register("dB", dB, 1, lr.B)
self.register("C", C, trainable.C, lr.C)
def forward(self, rate=1.0, L=None, state=None):
if L is None:
L = self.L
if rate is None:
rate = self.L / L # TODO this class doesn't actually support rates
k = krylov(L, self.dA, self.dB, self.C.conj()) # (H L)
if state is not None:
if state.size(-1) != self.dA.size(-1):
state = _conj(state)
state = state.to(self.dA)
state = contract("... n m, ... m -> ... n", self.dA, state)
k_state = krylov(L, self.dA, state, self.C.conj())
return k.to(torch.float), k_state.to(torch.float)
return k.to(torch.float)
@classmethod
def bilinear(cls, dt, A, B=None, separate=False):
"""
dt: (...) timescales
A: (... N N)
B: (... N)
"""
N = A.shape[-1]
I = torch.eye(N).to(A)
A_backwards = I - dt[:, None, None] / 2 * A
A_forwards = I + dt[:, None, None] / 2 * A
if B is None:
dB = None
else:
dB = dt[..., None] * torch.linalg.solve(
A_backwards, B.unsqueeze(-1)
).squeeze(
-1
) # (... N)
if separate:
A_b = torch.linalg.solve(A_backwards, I) # (... N N)
return A_forwards, A_b, dB
else:
dA = torch.linalg.solve(A_backwards, A_forwards) # (... N N)
return dA, dB
def _setup(self, setup_C=True):
if setup_C:
self.dC = self.C
def step(self, u, state):
state = state.to(self.dA)
if state.size(-1) != self.dA.size(-1):
state = _conj(state)
next_state = contract("h m n, b h n -> b h m", self.dA, state) + contract(
"h n, b h -> b h n", self.dB, u
)
y = contract("... n, ... n -> ...", self.dC.conj(), next_state)
return y.to(torch.float), next_state
class HippoSSKernel(nn.Module):
"""Wrapper around SSKernelNPLR that generates A, B, C, dt according to HiPPO arguments."""
def __init__(
self,
N,
H,
L=None,
measure="legs",
rank=1,
dt_min=0.001,
dt_max=0.1,
trainable=None,
lr=None,
mode="nplr", # 'slow' for complex naive version, 'real' for real naive version
length_correction=False,
precision=1,
cache=False,
resample=False, # if given inputs of different lengths, adjust the sampling rate
keops=False,
):
super().__init__()
self.N = N
self.H = H
L = L or 1
self.precision = precision
dtype = torch.double if self.precision == 2 else torch.float
self.rate = None if resample else 1.0
# Set default trainable and lr parameters
self.trainable = DictConfig(
{
"A": 1,
"B": 2,
"C": 1,
"dt": 1,
}
)
if trainable is not None:
self.trainable.update(trainable)
self.lr = DictConfig(
{
"A": 1e-3,
"B": 1e-3,
"C": None,
"dt": 1e-3,
}
)
if lr is not None:
self.lr.update(lr)
# Generate dt
self.log_dt = torch.rand(self.H, dtype=dtype) * (
math.log(dt_max) - math.log(dt_min)
) + math.log(dt_min)
# Compute the preprocessed representation
if mode == "real": # Testing purposes only
# Generate A, B
A, B = hippo.transition(measure, N)
A = torch.as_tensor(A, dtype=dtype)
B = torch.as_tensor(B, dtype=dtype)[:, 0]
# Generate C
C = torch.randn(self.H, self.N, dtype=dtype)
self.krylov = SSKernelSlow(
L, A, B, C, self.log_dt, trainable=self.trainable, lr=self.lr
)
else:
# Generate low rank correction p for the measure
w, p, q, B, _ = hippo.nplr(measure, N, rank, dtype=dtype)
cdtype = torch.cfloat if dtype == torch.float else torch.cdouble
C = torch.randn(self.H, self.N // 2, dtype=cdtype)
if mode == "nplr":
self.krylov = SSKernelNPLR(
L,
w,
p,
q,
B,
C,
self.log_dt,
trainable=self.trainable,
lr=self.lr,
setup_C=length_correction,
keops=keops,
)
elif mode == "slow": # Testing only
A = torch.diag_embed(_conj(w)) - contract(
"... r p, ... r q -> ... p q", _conj(p), _conj(q).conj()
)
self.krylov = SSKernelSlow(
L,
A,
_conj(B),
_conj(C),
self.log_dt,
trainable=self.trainable,
lr=self.lr,
)
# Cached tensors
self.K = None
self.cache = cache
def forward(self, state=None, L=None):
"""
state: (B, H, N)
"""
if state is not None:
k, k_state = self.krylov(
state=state, rate=self.rate, L=L
) # (B, H, L) (B, H, N)
return k, k_state
else:
# Calculate K if needed
if not self.training and self.K is not None and self.K.size(-1) == L:
k = self.K
else:
k = self.krylov(rate=self.rate, L=L).to(torch.float)
# Store K if needed
if self.cache and not self.training:
self.K = k
else: # If training, parameter will change after backprop so make sure to recompute on next pass
self.K = None
return k
@torch.no_grad()
def next_state(self, state, u):
"""
state: (..., N)
u: (..., L)
Returns: (..., N)
"""
self.krylov._setup()
dA, dB = self.krylov.dA, self.krylov.dB
conj = state.size(-1) != dA.size(-1)
if conj:
state = _conj(state)
v = dB.unsqueeze(-1) * u.flip(-1).unsqueeze(-2) # (..., N, L)
AL, v = power(u.size(-1), dA, v)
next_state = contract("... m n, ... n -> ... m", AL, state)
next_state = next_state + v
if conj:
next_state = next_state[..., : next_state.size(-1) // 2]
return next_state
def step(self, u, state):
return self.krylov.step(u, state)
def double_length(self):
self.krylov.double_length()
""" Tests below """
def generate_krylov(H, N, L, measure="legs", rank=1):
trainable = DictConfig(
{
"A": 1,
"B": 2,
"C": 1,
"dt": 1,
}
)
lr = DictConfig(
{
"A": 1e-3,
"B": 1e-3,
"C": None,
"dt": 1e-3,
}
)
A, B = hippo.transition(measure, N)
A = torch.as_tensor(A, dtype=torch.float)
B = torch.as_tensor(B, dtype=torch.float)[:, 0]
C = torch.ones(H, N)
log_dt = torch.log((1 + 10 * torch.arange(H) / H) * 1 / L)
krylov_real = SSKernelSlow(L, A, B, torch.ones(N), log_dt)
w, p, q, B, V = hippo.nplr(measure, N)
C = contract(
"ij, ... j -> ... i", V.conj().transpose(-1, -2), V.new_ones(H, N)
) # V^* B
A = torch.diag_embed(_conj(w)) - contract(
"... r p, ... r q -> ... p q", _conj(p), _conj(q).conj()
)
krylov_slow = SSKernelSlow(
L, A, _conj(B), _conj(C), log_dt, trainable=trainable, lr=lr
)
print("krylov real vs krylov complex", krylov_real() - krylov_slow())
krylov = SSKernelNPLR(L, w, p, q, B, C, log_dt, setup_C=True)
krylov._setup()
krylov._check()
print("krylov slow vs krylov fast", krylov_slow() - krylov())
krylov = SSKernelNPLR(
L, w, p, q, B, C, log_dt, trainable=trainable, lr=lr, setup_C=True
)
krylov_slow = SSKernelSlow(
L, A, _conj(B), _conj(C), log_dt, trainable=trainable, lr=lr
)
return krylov_real.to(device), krylov_slow.to(device), krylov.to(device)
def benchmark_krylov():
N = 64
L = 4096
H = 256
krylov_real, krylov_slow, krylov = generate_krylov(H, N, L)
utils.compare_outputs(krylov_slow(), krylov(), full=False, relative=True)
utils.benchmark_forward(100, krylov_slow, desc="krylov fft manual")
utils.benchmark_forward(100, krylov, desc="krylov fft rank")
utils.benchmark_backward(100, krylov_slow, desc="krylov fft manual")
utils.benchmark_backward(100, krylov, desc="krylov fft rank")
utils.benchmark_memory(krylov_slow, desc="krylov fft manual")
utils.benchmark_memory(krylov, desc="krylov fft rank")
def test_step():
B = 2
L = 4
N = 4
H = 3
krylov_real, krylov_slow, krylov = generate_krylov(H, N, L)
print("TESTING SLOW STEP")
krylov_slow._setup()
state = torch.zeros(B, H, N).to(device)
u = torch.ones(B, H, L).to(device)
for u_ in torch.unbind(u, dim=-1):
y_, state = krylov_slow.step(u_, state=state)
print("y", y_, y_.shape)
print("state", state, state.shape)
print("TESTING STEP")
krylov._setup()
state = torch.zeros(B, H, N).to(device)
u = torch.ones(B, H, L).to(device)
for u_ in torch.unbind(u, dim=-1):
y_, state = krylov.step(u_, state=state, linear=False)
# y_, state = krylov.step(u_, state=state)
print("y", y_, y_.shape)
print("state", state, state.shape)
print("TESTING LINEAR STEP")
krylov._setup()
state = torch.zeros(B, H, N // 2).to(device).to(torch.cfloat)
u = torch.ones(B, H, L).to(device)
for u_ in torch.unbind(u, dim=-1):
y_, state = krylov.step(u_, state=state, linear=True)
print("y", y_, y_.shape)
print("state", state, state.shape)
@torch.inference_mode()
def benchmark_step():
B = 1024
L = 16
N = 64
H = 1024
_, _, krylov = generate_krylov(H, N, L)
krylov._setup()
print("Benchmarking Step")
state = torch.zeros(B, H, N).to(device)
u = torch.ones(B, H).to(device)
utils.benchmark_forward(16, krylov.step, u, state, linear=False, desc="dense step")
print("Benchmarking Linear Step")
state = torch.zeros(B, H, N).to(device) # .to(torch.cfloat)
u = torch.ones(B, H).to(device)
utils.benchmark_forward(16, krylov.step, u, state, linear=True, desc="linear step")
state = torch.zeros(B, H, N // 2).to(device) # .to(torch.cfloat)
u = torch.ones(B, H).to(device)
utils.benchmark_forward(
16, krylov.step, u, state, linear=True, desc="linear step conj"
)
def test_double():
torch.set_printoptions(sci_mode=False, linewidth=160)
L = 8
N = 4
H = 3
_, krylov_slow, krylov = generate_krylov(H, N, L, "legs", 1)
k = krylov.forward()
print(k, k.shape)
krylov._check()
krylov.double_length()
k = krylov.forward()
print(k, k.shape)
def test_state():
B = 1
N = 4
L = 4
H = 3
krylov_real, krylov_slow, krylov = generate_krylov(H, N, L)
state = torch.ones(B, H, N // 2, device=device, dtype=torch.cfloat)
k, k_state = krylov_slow.forward(state=state)
print("k slow", k)
print("k_state slow", k_state)
k, k_state = krylov.forward(state=state)
print("k", k)
print("k_state", k_state)
if __name__ == "__main__":
from benchmark import utils
device = "cuda" # 'cpu'
device = torch.device(device)
# benchmark_krylov()
# test_double()
# test_step()
# benchmark_step()
test_state()
| 33.335982
| 262
| 0.512922
|
ee7f48e33deaa659bb7261b622350178975e637f
| 933
|
py
|
Python
|
ngb36utils.py
|
calmetree/NgToolset
|
b7db73ba0a2309085b87b0859f9b5448e56ea931
|
[
"BSD-3-Clause"
] | null | null | null |
ngb36utils.py
|
calmetree/NgToolset
|
b7db73ba0a2309085b87b0859f9b5448e56ea931
|
[
"BSD-3-Clause"
] | null | null | null |
ngb36utils.py
|
calmetree/NgToolset
|
b7db73ba0a2309085b87b0859f9b5448e56ea931
|
[
"BSD-3-Clause"
] | 1
|
2020-01-09T06:57:49.000Z
|
2020-01-09T06:57:49.000Z
|
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
'''
File:
ngb36utils.py
Description:
Implementation of time/frequency encoding in base36.
Change History:
2018-1-30 v0.1 created. github/zhenggao2
'''
from numpy import base_repr
def time2str36(hsfn, sfn, slot, symb):
#HSFN, range 0~1023, two base36 chars
strHsfn = base_repr(hsfn, base=36)
#SFN, range 0~1023, two base36 chars
strSfn = base_repr(sfn, base=36)
#slot, range 0~20, one base36 char
strSlot = base_repr(slot, base=36)
#symbol, range 0~7, one base36 char
strSymb = base_repr(symb, base=36)
return '[%s%s%s%s]' % (strHsfn.zfill(2), strSfn.zfill(2), strSlot, strSymb)
def freq2str36(prb, sc):
#prb, range 0~99, two base36 chars
strPrb = base_repr(prb, base=36)
#subcarrier, range 0~47, two base36 chars
strSc = base_repr(sc, base=36)
return '[%s%s]' % (strPrb.zfill(2), strSc.zfill(2))
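# Minimal self-test sketch (an addition for illustration, not part of the original toolset): encode a
# sample time/frequency position and print the base36 strings.
if __name__ == '__main__':
    print(time2str36(hsfn=1023, sfn=512, slot=19, symb=6))  # '[SFE8J6]'
    print(freq2str36(prb=99, sc=47))                        # '[2R1B]'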
| 26.657143
| 79
| 0.646302
|
c94cde12db775192a62ea8be2d82947df442562f
| 1,440
|
py
|
Python
|
marqeta/response_models/digital_wallet_apple_pay_provision_response.py
|
marqeta/marqeta-python
|
66fa690eb910825c510a391720b0fe717fac0234
|
[
"MIT"
] | 21
|
2019-04-12T09:02:17.000Z
|
2022-02-18T11:39:06.000Z
|
marqeta/response_models/digital_wallet_apple_pay_provision_response.py
|
marqeta/marqeta-python
|
66fa690eb910825c510a391720b0fe717fac0234
|
[
"MIT"
] | 1
|
2020-07-22T21:27:40.000Z
|
2020-07-23T17:38:43.000Z
|
marqeta/response_models/digital_wallet_apple_pay_provision_response.py
|
marqeta/marqeta-python
|
66fa690eb910825c510a391720b0fe717fac0234
|
[
"MIT"
] | 10
|
2019-05-08T14:20:37.000Z
|
2021-09-20T18:09:26.000Z
|
from datetime import datetime, date
from marqeta.response_models import datetime_object
import json
import re
class DigitalWalletApplePayProvisionResponse(object):
def __init__(self, json_response):
self.json_response = json_response
def __str__(self):
return json.dumps(self.json_response, default=self.json_serial)
@staticmethod
def json_serial(o):
if isinstance(o, datetime) or isinstance(o, date):
return o.__str__()
@property
def created_time(self):
if 'created_time' in self.json_response:
return datetime_object('created_time', self.json_response)
@property
def last_modified_time(self):
if 'last_modified_time' in self.json_response:
return datetime_object('last_modified_time', self.json_response)
@property
def card_token(self):
return self.json_response.get('card_token', None)
@property
def encrypted_pass_data(self):
return self.json_response.get('encrypted_pass_data', None)
@property
def activation_data(self):
return self.json_response.get('activation_data', None)
@property
def ephemeral_public_key(self):
return self.json_response.get('ephemeral_public_key', None)
def __repr__(self):
return '<Marqeta.response_models.digital_wallet_apple_pay_provision_response.DigitalWalletApplePayProvisionResponse>' + self.__str__()
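# Hedged usage sketch (added for illustration; the values below are made-up placeholders, not real
# Marqeta data). The wrapper exposes raw JSON keys as read-only properties, and *_time keys are
# converted through marqeta.response_models.datetime_object.
if __name__ == '__main__':
    sample = {'card_token': '<card token>', 'activation_data': '<activation data>'}
    response = DigitalWalletApplePayProvisionResponse(sample)
    print(response.card_token)            # '<card token>'
    print(response.ephemeral_public_key)  # None, since the key is absent from the sample payload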
| 27.169811
| 143
| 0.715278
|
5b9a5cb45b5f9ede616b6a798a571276821c1884
| 2,178
|
py
|
Python
|
tfx/orchestration/kubeflow/kubeflow_metadata_adapter.py
|
stjordanis/tfx
|
4749388de03230361f2b7b733a657b3bc18b4152
|
[
"Apache-2.0"
] | 1,813
|
2019-02-04T17:17:30.000Z
|
2022-03-29T13:39:30.000Z
|
tfx/orchestration/kubeflow/kubeflow_metadata_adapter.py
|
stjordanis/tfx
|
4749388de03230361f2b7b733a657b3bc18b4152
|
[
"Apache-2.0"
] | 2,710
|
2019-02-14T00:41:00.000Z
|
2022-03-31T07:23:00.000Z
|
tfx/orchestration/kubeflow/kubeflow_metadata_adapter.py
|
stjordanis/tfx
|
4749388de03230361f2b7b733a657b3bc18b4152
|
[
"Apache-2.0"
] | 731
|
2019-02-04T17:59:18.000Z
|
2022-03-31T06:45:51.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Metadata adapter class used to add Kubeflow-specific context."""
import os
from typing import Any, Dict
import absl
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from ml_metadata.proto import metadata_store_pb2
_KFP_POD_NAME_ENV_KEY = 'KFP_POD_NAME'
_KFP_POD_NAME_PROPERTY_KEY = 'kfp_pod_name'
class KubeflowMetadataAdapter(metadata.Metadata):
"""A Metadata adapter class for pipelines run using KFP.
This is used to add properties to artifacts and executions, such as the Argo
pod IDs.
"""
def _is_eligible_previous_execution(
self, current_execution: metadata_store_pb2.Execution,
target_execution: metadata_store_pb2.Execution) -> bool:
current_execution.properties[_KFP_POD_NAME_PROPERTY_KEY].string_value = ''
target_execution.properties[_KFP_POD_NAME_PROPERTY_KEY].string_value = ''
return super()._is_eligible_previous_execution(current_execution,
target_execution)
def _prepare_execution(
self,
state: str,
exec_properties: Dict[str, Any],
pipeline_info: data_types.PipelineInfo,
component_info: data_types.ComponentInfo,
) -> metadata_store_pb2.Execution:
    kfp_pod_name = os.getenv(_KFP_POD_NAME_ENV_KEY)
    if kfp_pod_name:
absl.logging.info('Adding KFP pod name %s to execution' % kfp_pod_name)
exec_properties[_KFP_POD_NAME_PROPERTY_KEY] = kfp_pod_name
return super()._prepare_execution(state, exec_properties, pipeline_info,
component_info)
| 38.210526
| 78
| 0.74472
|
2abcb1cf28c8d52aa7b907566693b12ad1677671
| 2,566
|
py
|
Python
|
netfields/forms.py
|
jaychoo/django-postgresql-netfields
|
5d29fbe480f0c039d54472d406f14d08422cb91a
|
[
"BSD-3-Clause"
] | null | null | null |
netfields/forms.py
|
jaychoo/django-postgresql-netfields
|
5d29fbe480f0c039d54472d406f14d08422cb91a
|
[
"BSD-3-Clause"
] | null | null | null |
netfields/forms.py
|
jaychoo/django-postgresql-netfields
|
5d29fbe480f0c039d54472d406f14d08422cb91a
|
[
"BSD-3-Clause"
] | null | null | null |
from ipaddress import ip_interface, ip_network, _IPAddressBase, _BaseNetwork
from netaddr import EUI, AddrFormatError
from django import forms
from django.utils.six import text_type
from django.core.exceptions import ValidationError
from netfields.mac import mac_unix_common
class InetAddressFormField(forms.Field):
widget = forms.TextInput
default_error_messages = {
'invalid': u'Enter a valid IP address.',
}
def __init__(self, *args, **kwargs):
super(InetAddressFormField, self).__init__(*args, **kwargs)
def to_python(self, value):
if not value:
return None
if isinstance(value, _IPAddressBase):
return value
if isinstance(value, text_type):
value = value.strip()
try:
return ip_interface(value)
except ValueError as e:
raise ValidationError(self.error_messages['invalid'])
class CidrAddressFormField(forms.Field):
widget = forms.TextInput
default_error_messages = {
'invalid': u'Enter a valid CIDR address.',
'network': u'Must be a network address.',
}
def __init__(self, *args, **kwargs):
super(CidrAddressFormField, self).__init__(*args, **kwargs)
def to_python(self, value):
if not value:
return None
if isinstance(value, _BaseNetwork):
network = value
if isinstance(value, text_type):
value = value.strip()
try:
network = ip_network(value)
except ValueError as e:
if 'has host bits' in e.args[0]:
raise ValidationError(self.error_messages['network'])
raise ValidationError(self.error_messages['invalid'])
return network
class MACAddressFormField(forms.Field):
default_error_messages = {
'invalid': u'Enter a valid MAC address.',
}
def __init__(self, *args, **kwargs):
super(MACAddressFormField, self).__init__(*args, **kwargs)
def to_python(self, value):
if not value:
return None
if isinstance(value, EUI):
return value
if isinstance(value, text_type):
value = value.strip()
try:
return EUI(value, dialect=mac_unix_common)
except (AddrFormatError, TypeError):
raise ValidationError(self.error_messages['invalid'])
def widget_attrs(self, widget):
attrs = super(MACAddressFormField, self).widget_attrs(widget)
attrs.update({'maxlength': '17'})
return attrs
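# Hedged usage sketch (added for illustration; Django settings must be configured before these fields
# are instantiated, and DeviceForm is a made-up example form):
#     class DeviceForm(forms.Form):
#         address = InetAddressFormField()
#         network = CidrAddressFormField()
#         mac = MACAddressFormField()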
| 27.591398
| 76
| 0.631333
|
a7e598556478fb732ecaf99db038592d7e980c79
| 130
|
py
|
Python
|
utils/__init__.py
|
Jokoe66/Ultra-Fast-Lane-Detection
|
b857e41d3026f1cdb70354c4267f42053a145f90
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
Jokoe66/Ultra-Fast-Lane-Detection
|
b857e41d3026f1cdb70354c4267f42053a145f90
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
Jokoe66/Ultra-Fast-Lane-Detection
|
b857e41d3026f1cdb70354c4267f42053a145f90
|
[
"MIT"
] | null | null | null |
from . import common
from . import config
from . import dist_utils
from . import factory
from . import loss
from . import metrics
| 18.571429
| 24
| 0.769231
|
494a26206f062563bcf3cd616d59ef386e1882c5
| 235
|
py
|
Python
|
promsnaps/api/config.py
|
ant31/prombench
|
8f676e2654a7477e2bf0b39930fd599f75828612
|
[
"Apache-2.0"
] | null | null | null |
promsnaps/api/config.py
|
ant31/prombench
|
8f676e2654a7477e2bf0b39930fd599f75828612
|
[
"Apache-2.0"
] | null | null | null |
promsnaps/api/config.py
|
ant31/prombench
|
8f676e2654a7477e2bf0b39930fd599f75828612
|
[
"Apache-2.0"
] | null | null | null |
class Config(object):
""" Default configuration """
DEBUG = False
class ProductionConfig(Config):
""" Production configuration """
class DevelopmentConfig(Config):
""" Development configuration """
DEBUG = True
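# Usage sketch (an assumption for illustration; the consuming application is not shown in this file):
# configuration objects like these are typically loaded Flask-style, e.g.
#     app.config.from_object('promsnaps.api.config.DevelopmentConfig')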
| 18.076923
| 37
| 0.67234
|
cb47a8c645e0c13e626de4bd4113516926740ead
| 446
|
py
|
Python
|
data/scripts/templates/object/mobile/shared_dressed_quest_liar_01.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/mobile/shared_dressed_quest_liar_01.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/mobile/shared_dressed_quest_liar_01.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_quest_liar_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 26.235294
| 67
| 0.730942
|
be489dae87860227d6c5e0cc2d9a934dfede4db0
| 504
|
py
|
Python
|
03Thread/day01/basic03.py
|
HaoZhang95/PythonAndMachineLearning
|
b897224b8a0e6a5734f408df8c24846a98c553bf
|
[
"MIT"
] | 937
|
2019-05-08T08:46:25.000Z
|
2022-03-31T12:56:07.000Z
|
03Thread/day01/basic03.py
|
Sakura-gh/Python24
|
b97e18867264a0647d5645c7d757a0040e755577
|
[
"MIT"
] | 47
|
2019-09-17T10:06:02.000Z
|
2022-03-11T23:46:52.000Z
|
03Thread/day01/basic03.py
|
Sakura-gh/Python24
|
b97e18867264a0647d5645c7d757a0040e755577
|
[
"MIT"
] | 354
|
2019-05-10T02:15:26.000Z
|
2022-03-30T05:52:57.000Z
|
"""
The second way to create a thread: subclass threading.Thread.
Multiple child threads run in an arbitrary order; it is not fixed.
1- When a task is created: ready state
2- When the CPU grants it a time slice: running state
3- When the running code calls recv, join, etc.: running state -> blocked state
4- Once recv, join, etc. finish and the condition is satisfied, the task returns to the ready state -- a cycle among the three states
"""
import threading
import time
class MyThread(threading.Thread):
def run(self):
for i in range(3):
print("这是在子线程中执行的...")
time.sleep(1)
if __name__ == '__main__':
thd = MyThread()
thd.start()
# while True:
# time.sleep(1)
# print("这是在主线程中执行的")
| 17.37931
| 44
| 0.597222
|
638fc6a72c8354ec3227a7b23120bfc848910848
| 11,282
|
py
|
Python
|
sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_training_client.py
|
zfoster/azure-sdk-for-python
|
0160912a4dd0f027a533d33f91f1a73afea41034
|
[
"MIT"
] | null | null | null |
sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_training_client.py
|
zfoster/azure-sdk-for-python
|
0160912a4dd0f027a533d33f91f1a73afea41034
|
[
"MIT"
] | null | null | null |
sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_training_client.py
|
zfoster/azure-sdk-for-python
|
0160912a4dd0f027a533d33f91f1a73afea41034
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
# pylint: disable=protected-access
from typing import (
Optional,
Any,
Iterable,
Union,
TYPE_CHECKING,
)
from azure.core.tracing.decorator import distributed_trace
from azure.core.polling import LROPoller
from azure.core.polling.base_polling import LROBasePolling
from ._generated.models import Model
from ._generated._form_recognizer_client import FormRecognizerClient as FormRecognizer
from ._generated.models import TrainRequest, TrainSourceFilter
from ._helpers import error_map, get_authentication_policy, POLLING_INTERVAL
from ._models import (
CustomFormModelInfo,
AccountProperties,
CustomFormModel
)
from ._polling import TrainingPolling
from ._user_agent import USER_AGENT
from ._form_recognizer_client import FormRecognizerClient
if TYPE_CHECKING:
from azure.core.credentials import AzureKeyCredential, TokenCredential
from azure.core.pipeline.transport import HttpResponse
PipelineResponseType = HttpResponse
class FormTrainingClient(object):
"""FormTrainingClient is the Form Recognizer interface to use for creating,
and managing custom models. It provides methods for training models on forms
you provide and methods for viewing and deleting models, as well as
accessing account properties.
:param str endpoint: Supported Cognitive Services endpoints (protocol and hostname,
for example: https://westus2.api.cognitive.microsoft.com).
:param credential: Credentials needed for the client to connect to Azure.
This is an instance of AzureKeyCredential if using an API key or a token
credential from :mod:`azure.identity`.
:type credential: :class:`~azure.core.credentials.AzureKeyCredential` or
:class:`~azure.core.credentials.TokenCredential`
.. admonition:: Example:
.. literalinclude:: ../samples/sample_authentication.py
:start-after: [START create_ft_client_with_key]
:end-before: [END create_ft_client_with_key]
:language: python
:dedent: 8
:caption: Creating the FormTrainingClient with an endpoint and API key.
.. literalinclude:: ../samples/sample_authentication.py
:start-after: [START create_ft_client_with_aad]
:end-before: [END create_ft_client_with_aad]
:language: python
:dedent: 8
:caption: Creating the FormTrainingClient with a token credential.
"""
def __init__(self, endpoint, credential, **kwargs):
# type: (str, Union[AzureKeyCredential, TokenCredential], Any) -> None
self._endpoint = endpoint
self._credential = credential
authentication_policy = get_authentication_policy(credential)
self._client = FormRecognizer(
endpoint=self._endpoint,
credential=self._credential,
sdk_moniker=USER_AGENT,
authentication_policy=authentication_policy,
**kwargs
)
@distributed_trace
def begin_train_model(self, training_files_url, use_training_labels=False, **kwargs):
# type: (str, Optional[bool], Any) -> LROPoller
"""Create and train a custom model. The request must include a `training_files_url` parameter that is an
externally accessible Azure storage blob container Uri (preferably a Shared Access Signature Uri).
Models are trained using documents that are of the following content type - 'application/pdf',
'image/jpeg', 'image/png', 'image/tiff'. Other type of content in the container is ignored.
:param str training_files_url: An Azure Storage blob container's SAS URI.
:param bool use_training_labels: Whether to train with labels or not. Corresponding labeled files must
exist in the blob container.
:keyword str prefix: A case-sensitive prefix string to filter documents for training.
Use `prefix` to filter documents themselves, or to restrict sub folders for training
when `include_sub_folders` is set to True. Not supported if training with labels.
:keyword bool include_sub_folders: A flag to indicate if sub folders
will also need to be included when searching for content to be preprocessed.
Use with `prefix` to filter for only certain sub folders. Not supported if training with labels.
:keyword int polling_interval: Waiting time between two polls for LRO operations
if no Retry-After header is present. Defaults to 5 seconds.
:return: An instance of an LROPoller. Call `result()` on the poller
object to return a :class:`~azure.ai.formrecognizer.CustomFormModel`.
:rtype: ~azure.core.polling.LROPoller[~azure.ai.formrecognizer.CustomFormModel]
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_train_model_without_labels.py
:start-after: [START training]
:end-before: [END training]
:language: python
:dedent: 8
:caption: Training a model with your custom forms.
"""
cls = kwargs.pop("cls", None)
polling_interval = kwargs.pop("polling_interval", POLLING_INTERVAL)
response = self._client.train_custom_model_async( # type: ignore
train_request=TrainRequest(
source=training_files_url,
use_label_file=use_training_labels,
source_filter=TrainSourceFilter(
prefix=kwargs.pop("prefix", ""),
include_sub_folders=kwargs.pop("include_sub_folders", False),
)
),
cls=lambda pipeline_response, _, response_headers: pipeline_response,
error_map=error_map,
**kwargs
) # type: PipelineResponseType
def callback(raw_response):
model = self._client._deserialize(Model, raw_response)
return CustomFormModel._from_generated(model)
deserialization_callback = cls if cls else callback
return LROPoller(
self._client._client,
response,
deserialization_callback,
LROBasePolling(timeout=polling_interval, lro_algorithms=[TrainingPolling()], **kwargs)
)
@distributed_trace
def delete_model(self, model_id, **kwargs):
# type: (str, Any) -> None
"""Mark model for deletion. Model artifacts will be permanently
removed within a predetermined period.
:param model_id: Model identifier.
:type model_id: str
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError or ~azure.core.exceptions.ResourceNotFoundError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_manage_custom_models.py
:start-after: [START delete_model]
:end-before: [END delete_model]
:language: python
:dedent: 8
:caption: Delete a custom model.
"""
self._client.delete_custom_model(
model_id=model_id,
error_map=error_map,
**kwargs
)
@distributed_trace
def list_custom_models(self, **kwargs):
# type: (Any) -> Iterable[CustomFormModelInfo]
"""List information for each model, including model id,
model status, and when it was created and last modified.
:return: ItemPaged[:class:`~azure.ai.formrecognizer.CustomFormModelInfo`]
:rtype: ~azure.core.paging.ItemPaged
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_manage_custom_models.py
:start-after: [START list_custom_models]
:end-before: [END list_custom_models]
:language: python
:dedent: 8
:caption: List model information for each model on the account.
"""
return self._client.list_custom_models(
cls=kwargs.pop("cls", lambda objs: [CustomFormModelInfo._from_generated(x) for x in objs]),
error_map=error_map,
**kwargs
)
@distributed_trace
def get_account_properties(self, **kwargs):
# type: (Any) -> AccountProperties
"""Get information about the models on the form recognizer account.
:return: Summary of models on account - custom model count,
custom model limit.
:rtype: ~azure.ai.formrecognizer.AccountProperties
:raises ~azure.core.exceptions.HttpResponseError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_manage_custom_models.py
:start-after: [START get_account_properties]
:end-before: [END get_account_properties]
:language: python
:dedent: 8
:caption: Get properties for the form recognizer account.
"""
response = self._client.get_custom_models(error_map=error_map, **kwargs)
return AccountProperties._from_generated(response.summary)
@distributed_trace
def get_custom_model(self, model_id, **kwargs):
# type: (str, Any) -> CustomFormModel
"""Get a description of a custom model, including the types of forms
it can recognize, and the fields it will extract for each form type.
:param str model_id: Model identifier.
:return: CustomFormModel
:rtype: ~azure.ai.formrecognizer.CustomFormModel
:raises ~azure.core.exceptions.HttpResponseError or ~azure.core.exceptions.ResourceNotFoundError:
.. admonition:: Example:
.. literalinclude:: ../samples/sample_manage_custom_models.py
:start-after: [START get_custom_model]
:end-before: [END get_custom_model]
:language: python
:dedent: 8
:caption: Get a custom model with a model ID.
"""
response = self._client.get_custom_model(model_id=model_id, include_keys=True, error_map=error_map, **kwargs)
return CustomFormModel._from_generated(response)
def get_form_recognizer_client(self, **kwargs):
# type: (Any) -> FormRecognizerClient
"""Get an instance of a FormRecognizerClient from FormTrainingClient.
:rtype: ~azure.ai.formrecognizer.FormRecognizerClient
:return: A FormRecognizerClient
"""
return FormRecognizerClient(
endpoint=self._endpoint,
credential=self._credential,
**kwargs
)
def close(self):
# type: () -> None
"""Close the :class:`~azure.ai.formrecognizer.FormTrainingClient` session.
"""
return self._client.close()
def __enter__(self):
# type: () -> FormTrainingClient
self._client.__enter__() # pylint:disable=no-member
return self
def __exit__(self, *args):
# type: (*Any) -> None
self._client.__exit__(*args) # pylint:disable=no-member
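# Hedged end-to-end sketch (added for illustration; endpoint, key and SAS URL are placeholders, not
# real values). It chains the calls documented above: create the client, train a custom model from a
# blob container, then read the resulting CustomFormModel.
if __name__ == "__main__":
    from azure.core.credentials import AzureKeyCredential
    client = FormTrainingClient("<endpoint>", AzureKeyCredential("<api key>"))
    poller = client.begin_train_model("<SAS URL to training files>", use_training_labels=False)
    custom_model = poller.result()  # a CustomFormModel instance
    print(custom_model.model_id)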
| 42.254682
| 117
| 0.655292
|
cc859c0339f81f72674d2f1184af6ba72caaa4cc
| 15,875
|
py
|
Python
|
platform/windows/detect.py
|
JayFontenot/godot
|
20edf69f96160fcf7c0ea2449f4daf50f572ce99
|
[
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-03-29T09:43:03.000Z
|
2020-03-29T09:43:03.000Z
|
platform/windows/detect.py
|
JayFontenot/godot
|
20edf69f96160fcf7c0ea2449f4daf50f572ce99
|
[
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 3
|
2020-04-12T03:48:43.000Z
|
2020-04-12T05:26:17.000Z
|
platform/windows/detect.py
|
JayFontenot/godot
|
20edf69f96160fcf7c0ea2449f4daf50f572ce99
|
[
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 1
|
2021-12-09T11:39:26.000Z
|
2021-12-09T11:39:26.000Z
|
import methods
import os
# To match other platforms
STACK_SIZE = 8388608
def is_active():
return True
def get_name():
return "Windows"
def can_build():
if (os.name == "nt"):
# Building natively on Windows
# If VCINSTALLDIR is set in the OS environ, use traditional Godot logic to set up MSVC
if (os.getenv("VCINSTALLDIR")): # MSVC, manual setup
return True
# Otherwise, let SCons find MSVC if installed, or else Mingw.
# Since we're just returning True here, if there's no compiler
# installed, we'll get errors when it tries to build with the
# null compiler.
return True
if (os.name == "posix"):
# Cross-compiling with MinGW-w64 (old MinGW32 is not supported)
mingw32 = "i686-w64-mingw32-"
mingw64 = "x86_64-w64-mingw32-"
if (os.getenv("MINGW32_PREFIX")):
mingw32 = os.getenv("MINGW32_PREFIX")
if (os.getenv("MINGW64_PREFIX")):
mingw64 = os.getenv("MINGW64_PREFIX")
test = "gcc --version > /dev/null 2>&1"
if (os.system(mingw64 + test) == 0 or os.system(mingw32 + test) == 0):
return True
return False
def get_opts():
from SCons.Variables import BoolVariable, EnumVariable
mingw32 = ""
mingw64 = ""
if (os.name == "posix"):
mingw32 = "i686-w64-mingw32-"
mingw64 = "x86_64-w64-mingw32-"
if (os.getenv("MINGW32_PREFIX")):
mingw32 = os.getenv("MINGW32_PREFIX")
if (os.getenv("MINGW64_PREFIX")):
mingw64 = os.getenv("MINGW64_PREFIX")
return [
('mingw_prefix_32', 'MinGW prefix (Win32)', mingw32),
('mingw_prefix_64', 'MinGW prefix (Win64)', mingw64),
# Targeted Windows version: 7 (and later), minimum supported version
# XP support dropped after EOL due to missing API for IPv6 and other issues
# Vista support dropped after EOL due to GH-10243
('target_win_version', 'Targeted Windows version, >= 0x0601 (Windows 7)', '0x0601'),
EnumVariable('debug_symbols', 'Add debugging symbols to release builds', 'yes', ('yes', 'no', 'full')),
BoolVariable('separate_debug_symbols', 'Create a separate file containing debugging symbols', False),
('msvc_version', 'MSVC version to use. Ignored if VCINSTALLDIR is set in shell env.', None),
BoolVariable('use_mingw', 'Use the Mingw compiler, even if MSVC is installed. Only used on Windows.', False),
BoolVariable('use_llvm', 'Use the LLVM compiler', False),
BoolVariable('use_thinlto', 'Use ThinLTO', False),
]
def get_flags():
return [
]
def build_res_file(target, source, env):
if (env["bits"] == "32"):
cmdbase = env['mingw_prefix_32']
else:
cmdbase = env['mingw_prefix_64']
cmdbase = cmdbase + 'windres --include-dir . '
import subprocess
for x in range(len(source)):
cmd = cmdbase + '-i ' + str(source[x]) + ' -o ' + str(target[x])
try:
out = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE).communicate()
if len(out[1]):
return 1
except:
return 1
return 0
def setup_msvc_manual(env):
"""Set up env to use MSVC manually, using VCINSTALLDIR"""
if (env["bits"] != "default"):
print("""
Bits argument is not supported for MSVC compilation. Architecture depends on the Native/Cross Compile Tools Prompt/Developer Console
(or Visual Studio settings) that is being used to run SCons. As a consequence, bits argument is disabled. Run scons again without bits
argument (example: scons p=windows) and SCons will attempt to detect what MSVC compiler will be executed and inform you.
""")
        import SCons.Errors  # imported here; this module does not import SCons at the top level
        raise SCons.Errors.UserError("Bits argument should not be used when using VCINSTALLDIR")
# Force bits arg
# (Actually msys2 mingw can support 64-bit, we could detect that)
env["bits"] = "32"
env["x86_libtheora_opt_vc"] = True
# find compiler manually
compiler_version_str = methods.detect_visual_c_compiler_version(env['ENV'])
print("Found MSVC compiler: " + compiler_version_str)
# If building for 64bit architecture, disable assembly optimisations for 32 bit builds (theora as of writing)... vc compiler for 64bit can not compile _asm
if(compiler_version_str == "amd64" or compiler_version_str == "x86_amd64"):
env["bits"] = "64"
env["x86_libtheora_opt_vc"] = False
print("Compiled program architecture will be a 64 bit executable (forcing bits=64).")
elif (compiler_version_str == "x86" or compiler_version_str == "amd64_x86"):
print("Compiled program architecture will be a 32 bit executable. (forcing bits=32).")
else:
print("Failed to manually detect MSVC compiler architecture version... Defaulting to 32bit executable settings (forcing bits=32). Compilation attempt will continue, but SCons can not detect for what architecture this build is compiled for. You should check your settings/compilation setup, or avoid setting VCINSTALLDIR.")
def setup_msvc_auto(env):
"""Set up MSVC using SCons's auto-detection logic"""
# If MSVC_VERSION is set by SCons, we know MSVC is installed.
# But we may want a different version or target arch.
# The env may have already been set up with default MSVC tools, so
# reset a few things so we can set it up with the tools we want.
# (Ideally we'd decide on the tool config before configuring any
# environment, and just set the env up once, but this function runs
# on an existing env so this is the simplest way.)
env['MSVC_SETUP_RUN'] = False # Need to set this to re-run the tool
env['MSVS_VERSION'] = None
env['MSVC_VERSION'] = None
env['TARGET_ARCH'] = None
if env['bits'] != 'default':
env['TARGET_ARCH'] = {'32': 'x86', '64': 'x86_64'}[env['bits']]
if env.has_key('msvc_version'):
env['MSVC_VERSION'] = env['msvc_version']
env.Tool('msvc')
env.Tool('mssdk') # we want the MS SDK
# Note: actual compiler version can be found in env['MSVC_VERSION'], e.g. "14.1" for VS2015
# Get actual target arch into bits (it may be "default" at this point):
if env['TARGET_ARCH'] in ('amd64', 'x86_64'):
env['bits'] = '64'
else:
env['bits'] = '32'
print("Found MSVC version %s, arch %s, bits=%s" % (env['MSVC_VERSION'], env['TARGET_ARCH'], env['bits']))
if env['TARGET_ARCH'] in ('amd64', 'x86_64'):
env["x86_libtheora_opt_vc"] = False
def setup_mingw(env):
"""Set up env for use with mingw"""
# Nothing to do here
print("Using MinGW")
pass
def configure_msvc(env, manual_msvc_config):
"""Configure env to work with MSVC"""
# Build type
if (env["target"] == "release"):
if (env["optimize"] == "speed"): #optimize for speed (default)
env.Append(CCFLAGS=['/O2'])
else: # optimize for size
env.Append(CCFLAGS=['/O1'])
env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
env.Append(LINKFLAGS=['/ENTRY:mainCRTStartup'])
env.Append(LINKFLAGS=['/OPT:REF'])
elif (env["target"] == "release_debug"):
if (env["optimize"] == "speed"): #optimize for speed (default)
env.Append(CCFLAGS=['/O2'])
else: # optimize for size
env.Append(CCFLAGS=['/O1'])
env.AppendUnique(CPPDEFINES = ['DEBUG_ENABLED'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
env.Append(LINKFLAGS=['/OPT:REF'])
elif (env["target"] == "debug"):
env.AppendUnique(CCFLAGS=['/Z7', '/Od', '/EHsc'])
env.AppendUnique(CPPDEFINES = ['DEBUG_ENABLED', 'DEBUG_MEMORY_ENABLED',
'D3D_DEBUG_INFO'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
env.Append(LINKFLAGS=['/DEBUG'])
if (env["debug_symbols"] == "full" or env["debug_symbols"] == "yes"):
env.AppendUnique(CCFLAGS=['/Z7'])
env.AppendUnique(LINKFLAGS=['/DEBUG'])
## Compile/link flags
env.AppendUnique(CCFLAGS=['/MT', '/Gd', '/GR', '/nologo'])
if int(env['MSVC_VERSION'].split('.')[0]) >= 14: #vs2015 and later
env.AppendUnique(CCFLAGS=['/utf-8'])
env.AppendUnique(CXXFLAGS=['/TP']) # assume all sources are C++
if manual_msvc_config: # should be automatic if SCons found it
if os.getenv("WindowsSdkDir") is not None:
env.Prepend(CPPPATH=[os.getenv("WindowsSdkDir") + "/Include"])
else:
print("Missing environment variable: WindowsSdkDir")
env.AppendUnique(CPPDEFINES = ['WINDOWS_ENABLED',
'WASAPI_ENABLED', 'WINMIDI_ENABLED',
'TYPED_METHOD_BIND',
'WIN32', 'MSVC',
'WINVER=%s' % env["target_win_version"],
'_WIN32_WINNT=%s' % env["target_win_version"]])
env.AppendUnique(CPPDEFINES=['NOMINMAX']) # disable bogus min/max WinDef.h macros
if env["bits"] == "64":
env.AppendUnique(CPPDEFINES=['_WIN64'])
## Libs
LIBS = ['winmm', 'dsound', 'kernel32', 'ole32', 'oleaut32',
'user32', 'gdi32', 'IPHLPAPI', 'Shlwapi', 'wsock32', 'Ws2_32',
'shell32', 'advapi32', 'dinput8', 'dxguid', 'imm32', 'bcrypt', 'Avrt',
'dwmapi']
env.AppendUnique(CPPDEFINES=['VULKAN_ENABLED'])
if not env['builtin_vulkan']:
LIBS += ['vulkan']
else:
LIBS += ['cfgmgr32']
#env.AppendUnique(CPPDEFINES = ['OPENGL_ENABLED'])
LIBS += ['opengl32']
env.Append(LINKFLAGS=[p + env["LIBSUFFIX"] for p in LIBS])
if manual_msvc_config:
if os.getenv("WindowsSdkDir") is not None:
env.Append(LIBPATH=[os.getenv("WindowsSdkDir") + "/Lib"])
else:
print("Missing environment variable: WindowsSdkDir")
## LTO
if (env["use_lto"]):
env.AppendUnique(CCFLAGS=['/GL'])
env.AppendUnique(ARFLAGS=['/LTCG'])
if env["progress"]:
env.AppendUnique(LINKFLAGS=['/LTCG:STATUS'])
else:
env.AppendUnique(LINKFLAGS=['/LTCG'])
if manual_msvc_config:
env.Prepend(CPPPATH=[p for p in os.getenv("INCLUDE").split(";")])
env.Append(LIBPATH=[p for p in os.getenv("LIB").split(";")])
# Incremental linking fix
env['BUILDERS']['ProgramOriginal'] = env['BUILDERS']['Program']
env['BUILDERS']['Program'] = methods.precious_program
env.AppendUnique(LINKFLAGS=['/STACK:' + str(STACK_SIZE)])
def configure_mingw(env):
# Workaround for MinGW. See:
# http://www.scons.org/wiki/LongCmdLinesOnWin32
env.use_windows_spawn_fix()
## Build type
if (env["target"] == "release"):
env.Append(CCFLAGS=['-msse2'])
if (env["optimize"] == "speed"): #optimize for speed (default)
if (env["bits"] == "64"):
env.Append(CCFLAGS=['-O3'])
else:
env.Append(CCFLAGS=['-O2'])
else: #optimize for size
env.Prepend(CCFLAGS=['-Os'])
env.Append(LINKFLAGS=['-Wl,--subsystem,windows'])
if (env["debug_symbols"] == "yes"):
env.Prepend(CCFLAGS=['-g1'])
if (env["debug_symbols"] == "full"):
env.Prepend(CCFLAGS=['-g2'])
elif (env["target"] == "release_debug"):
env.Append(CCFLAGS=['-O2'])
env.Append(CPPDEFINES=['DEBUG_ENABLED'])
if (env["debug_symbols"] == "yes"):
env.Prepend(CCFLAGS=['-g1'])
if (env["debug_symbols"] == "full"):
env.Prepend(CCFLAGS=['-g2'])
if (env["optimize"] == "speed"): #optimize for speed (default)
env.Append(CCFLAGS=['-O2'])
else: #optimize for size
env.Prepend(CCFLAGS=['-Os'])
elif (env["target"] == "debug"):
env.Append(CCFLAGS=['-g3'])
env.Append(CPPDEFINES=['DEBUG_ENABLED', 'DEBUG_MEMORY_ENABLED'])
## Compiler configuration
if os.name != "nt":
env["PROGSUFFIX"] = env["PROGSUFFIX"] + ".exe" # for linux cross-compilation
if (env["bits"] == "default"):
if (os.name == "nt"):
env["bits"] = "64" if "PROGRAMFILES(X86)" in os.environ else "32"
else: # default to 64-bit on Linux
env["bits"] = "64"
mingw_prefix = ""
if (env["bits"] == "32"):
env.Append(LINKFLAGS=['-static'])
env.Append(LINKFLAGS=['-static-libgcc'])
env.Append(LINKFLAGS=['-static-libstdc++'])
mingw_prefix = env["mingw_prefix_32"]
else:
env.Append(LINKFLAGS=['-static'])
mingw_prefix = env["mingw_prefix_64"]
if env['use_llvm']:
env["CC"] = mingw_prefix + "clang"
env['AS'] = mingw_prefix + "as"
env["CXX"] = mingw_prefix + "clang++"
env['AR'] = mingw_prefix + "ar"
env['RANLIB'] = mingw_prefix + "ranlib"
env["LINK"] = mingw_prefix + "clang++"
else:
env["CC"] = mingw_prefix + "gcc"
env['AS'] = mingw_prefix + "as"
env['CXX'] = mingw_prefix + "g++"
env['AR'] = mingw_prefix + "gcc-ar"
env['RANLIB'] = mingw_prefix + "gcc-ranlib"
env['LINK'] = mingw_prefix + "g++"
env["x86_libtheora_opt_gcc"] = True
if env['use_lto']:
if not env['use_llvm'] and env.GetOption("num_jobs") > 1:
env.Append(CCFLAGS=['-flto'])
env.Append(LINKFLAGS=['-flto=' + str(env.GetOption("num_jobs"))])
else:
if env['use_thinlto']:
env.Append(CCFLAGS=['-flto=thin'])
env.Append(LINKFLAGS=['-flto=thin'])
else:
env.Append(CCFLAGS=['-flto'])
env.Append(LINKFLAGS=['-flto'])
env.Append(LINKFLAGS=['-Wl,--stack,' + str(STACK_SIZE)])
## Compile flags
env.Append(CCFLAGS=['-mwindows'])
env.Append(CPPDEFINES=['WINDOWS_ENABLED', 'WASAPI_ENABLED', 'WINMIDI_ENABLED'])
env.Append(CPPDEFINES=[('WINVER', env['target_win_version']), ('_WIN32_WINNT', env['target_win_version'])])
env.Append(LIBS=['mingw32', 'dsound', 'ole32', 'd3d9', 'winmm', 'gdi32', 'iphlpapi', 'shlwapi', 'wsock32', 'ws2_32', 'kernel32', 'oleaut32', 'dinput8', 'dxguid', 'ksuser', 'imm32', 'bcrypt', 'avrt', 'uuid', 'dwmapi'])
env.Append(CPPDEFINES=['VULKAN_ENABLED'])
if not env['builtin_vulkan']:
env.Append(LIBS=['vulkan'])
else:
env.Append(LIBS=['cfgmgr32'])
## TODO !!! Reenable when OpenGLES Rendering Device is implemented !!!
#env.Append(CPPDEFINES=['OPENGL_ENABLED'])
env.Append(LIBS=['opengl32'])
env.Append(CPPDEFINES=['MINGW_ENABLED', ('MINGW_HAS_SECURE_API', 1)])
    # Resource (.rc) file builder
env.Append(BUILDERS={'RES': env.Builder(action=build_res_file, suffix='.o', src_suffix='.rc')})
def configure(env):
# At this point the env has been set up with basic tools/compilers.
env.Prepend(CPPPATH=['#platform/windows'])
print("Configuring for Windows: target=%s, bits=%s" % (env['target'], env['bits']))
if (os.name == "nt"):
env['ENV'] = os.environ # this makes build less repeatable, but simplifies some things
env['ENV']['TMP'] = os.environ['TMP']
# First figure out which compiler, version, and target arch we're using
if os.getenv("VCINSTALLDIR") and not env["use_mingw"]:
# Manual setup of MSVC
setup_msvc_manual(env)
env.msvc = True
manual_msvc_config = True
elif env.get('MSVC_VERSION', '') and not env["use_mingw"]:
setup_msvc_auto(env)
env.msvc = True
manual_msvc_config = False
else:
setup_mingw(env)
env.msvc = False
# Now set compiler/linker flags
if env.msvc:
configure_msvc(env, manual_msvc_config)
else: # MinGW
configure_mingw(env)
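def _example_compiler_selection(env):
    # Illustrative sketch (not part of the original file): mirrors the compiler
    # selection order used by configure() above, returning a label instead of
    # mutating the environment. Only names already used in this file are
    # assumed; `os` is imported at the top of the original module.
    if os.getenv("VCINSTALLDIR") and not env["use_mingw"]:
        return "msvc (manual setup via VCINSTALLDIR)"
    elif env.get('MSVC_VERSION', '') and not env["use_mingw"]:
        return "msvc (auto-detected by SCons)"
    else:
        return "mingw"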
| 38.345411
| 330
| 0.601071
|
7660715d7788b4e8bbdb76d5aab5714cadab6954
| 143
|
py
|
Python
|
cv/__init__.py
|
mikebader/django-cv
|
9c0150e9299042f4e908b720cf3f3cd8f133050b
|
[
"BSD-3-Clause"
] | 3
|
2018-09-22T05:32:26.000Z
|
2019-10-18T01:34:06.000Z
|
cv/__init__.py
|
mikebader/django-cv
|
9c0150e9299042f4e908b720cf3f3cd8f133050b
|
[
"BSD-3-Clause"
] | 3
|
2020-08-02T19:34:31.000Z
|
2022-01-25T03:00:53.000Z
|
cv/__init__.py
|
mikebader/django-cv
|
9c0150e9299042f4e908b720cf3f3cd8f133050b
|
[
"BSD-3-Clause"
] | null | null | null |
"""
django-cv: a CV generator that can be used with the Django web framework.
"""
name = 'django-cv'
default_app_config = 'cv.apps.CvConfig'
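# A minimal usage sketch (not part of this package): in a hypothetical Django
# project's settings.py the app is enabled by adding it to INSTALLED_APPS;
# with default_app_config set above, Django then loads cv.apps.CvConfig.
#
#     INSTALLED_APPS = [
#         # ... other Django apps ...
#         'cv',
#     ]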
| 17.875
| 73
| 0.713287
|
2914044fae6322b68ee8de7bc61549fe07255a74
| 5,944
|
py
|
Python
|
test_algebraic.py
|
Cientifique/abstract_algebra
|
31b5b952a2a2c521c90ce08d7ab56cc0f3879f3a
|
[
"Apache-2.0"
] | null | null | null |
test_algebraic.py
|
Cientifique/abstract_algebra
|
31b5b952a2a2c521c90ce08d7ab56cc0f3879f3a
|
[
"Apache-2.0"
] | null | null | null |
test_algebraic.py
|
Cientifique/abstract_algebra
|
31b5b952a2a2c521c90ce08d7ab56cc0f3879f3a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
# =============================================================================
from concrete_algebraic import Integer, PolyOverZ, PolyOverQ, Rational, \
makeZpX, makeZp
from polynomials_over_Zp import make_poly_ring
#generates random polynomials for testing
import random
import time
# =============================================================================
PRINT_POLY_OVER=False
if PRINT_POLY_OVER:
f = PolyOverZ.from_int_coefs( [1, 2, 1] )
g = PolyOverZ.from_int_coefs( [1, -2, 3] )
print('f = ', f)
print('g = ', g)
h = f + g
p = f*g
print('f + g = h =', h)
print('f * g = p = ', p)
# =============================================================================
print('\nOver Q\n')
phi = PolyOverQ( [Rational(1,2), Rational(2,3)] )
print( phi )
phi2 = phi**2
# =============================================================================
#GENERATE RANDOM POLYS
PRIME_P = 11
MAX_DEG = 30
NPOLY = 2
NTESTS = 100
random_coefs = []
for k in range( NPOLY ):
deg = random.randint(0, MAX_DEG)
#non-zero poly
leading_coef = [ random.randint( 1, PRIME_P-1 ) ]
coefs = [ random.randint( 0, PRIME_P-1 ) for x in range(deg) ]
random_coefs.append( coefs + leading_coef )
# =============================================================================
def make_random_poly_list( random_coefs, PolyMaker, from_int_coefs = False ):
random_poly_lst = []
for coefs_ in random_coefs:
if from_int_coefs:
random_poly_lst.append( PolyMaker.from_int_coefs( coefs_ ) )
else:
random_poly_lst.append( PolyMaker( coefs_) )
return random_poly_lst
# =============================================================================
#using galois tools
ZpX_Gal = make_poly_ring(PRIME_P)
#not using galois tools
ZpX = makeZpX( PRIME_P )
random_pols_Gal = make_random_poly_list(random_coefs, ZpX_Gal )
random_pols = make_random_poly_list(random_coefs, ZpX, True)
# =============================================================================
#to debug
random_poly_lst = random_pols
def poly_tester( random_poly_lst ):
times_dict = {'euclidean_div' : [], 'com_product' : [],
'com_addition' : []}
#test with pairs of these
for k in range(NTESTS):
g = random.choice( random_poly_lst )
h = random.choice( random_poly_lst )
error_msg = (
'\n' + '-' * 50 + 'g =' + str( g ) + 'h =' + str( h) + '-' * 50
)
tic = time.process_time()
if not (h == (h//g)*g + (h%g) ):
error_ = '\n' + 'failed h == (h//g)*g + (h%g)' + error_msg
raise ValueError( error_ )
times_dict['euclidean_div'].append( time.process_time()-tic )
tic = time.process_time()
if not ( h+g == g+h ):
error_ = '\n' + 'failed h+g == g+h' + error_msg
raise ValueError( error_ )
times_dict['com_addition'].append( time.process_time()-tic )
tic = time.process_time()
if not (h*g == g*h):
error_ = 'failed h*g == g*h' + error_msg
raise ValueError( error_ )
times_dict['com_product'].append( time.process_time()-tic )
return times_dict
# print('-' * 100)
# msg_ = 'Test irreducible'
# print(msg_ + ( 96 -len(msg_) ) * ' ',
# g.is_irreducible(), h.is_irreducible()
# )
# print('\n')
print('\n\nGALOIS\n\n')
times_Gal = poly_tester(random_pols_Gal)
print('\n\nPURE\n\n')
times_pure = poly_tester(random_pols)
for key in times_pure.keys():
gal_ = times_Gal[key]
pure_ = times_pure[key]
print( 'Times for ', key, '\n\n' )
for name, lst in zip( ('gal', 'pure'), [gal_, pure_] ):
print( name)
sum_ = sum(lst) * 1000
n_ = len(lst)
print(' '*4, 'n -> %d tests' % n_ )
print(' '*4, 'sum -> %.2f ms' % sum_ )
print(' '*4, 'mean -> %.2f ms' % (sum_/n_ ) )
print('\n')
# =============================================================================
#Compare
METHODS = ['__add__', '__mul__', '__mod__', '__floordiv__' ]
def comparer( g, h, g_Gal, h_Gal):
for method in METHODS:
result = getattr(g, method)( h )
result_gal = getattr(g_Gal, method)( h_Gal )
error_msg = '\n'.join( [ str(g), str(h), str(g_Gal), str(h_Gal), method])
assert( str(result_gal) == str(result) ), error_msg
for x in range( NTESTS ):
idx_g = random.randint(0, len(random_pols) - 1)
idx_h = random.randint(0, len(random_pols) - 1)
g = random_pols[ idx_g ]
h = random_pols[ idx_h ]
g_Gal = random_pols_Gal[ idx_g ]
h_Gal = random_pols_Gal[ idx_h ]
comparer( g, h, g_Gal, h_Gal )
# =============================================================================
#Ran without errors!
print('There is the Galois Tools version, which is the CESSO, but the CESSO is now a different one')
print('It is slower, but now it is a matter of optimizing...',
      'the polynomial product can be optimized')
# =============================================================================
#
#print('\n\n', '-' * 100, '\n\n')
#h = Zp_X.from_dict({'31' : 2, '4' : 2, '11' : 1, '0' : 1})
#g = Zp_X([1, 0, 2, 0])
#
#print('h =', h)
#print('g =', g)
#
#quot, rem = h.div_mod(g)
#
#h_coefs = h.coefs
#h_coefs.reverse()
#
#
#g_coefs = g.coefs.copy()
#g_coefs.reverse()
#
#
#teste = gt.gf_add(h_coefs, g_coefs, 3, ZZ)
#teste2 = gt.gf_add(g_coefs, h_coefs, 3, ZZ)
#
#dct = {'100' : 1, '0' : 1}
#f = Zp_X.from_dict(dct)
#
#f = Zp_X([0,1,2])
#g = Zp_X([1,2,0])
#
#
#
#methods = ['is_zero', 'to_list', 'to_dict', 'degree', '__repr__']
#
#for method in methods:
# result = getattr(h, method)()
# print(method + ':', result)
| 27.391705
| 83
| 0.487887
|
0ed752d6270a03a51f944e262cc2252c8b6270be
| 6,128
|
py
|
Python
|
nets/mobilenetv2.py
|
LJjia/deeplabv3-plus-pytorch
|
158a2643e3faa41bdcc9d148948fdebfc5013ae4
|
[
"MIT"
] | null | null | null |
nets/mobilenetv2.py
|
LJjia/deeplabv3-plus-pytorch
|
158a2643e3faa41bdcc9d148948fdebfc5013ae4
|
[
"MIT"
] | null | null | null |
nets/mobilenetv2.py
|
LJjia/deeplabv3-plus-pytorch
|
158a2643e3faa41bdcc9d148948fdebfc5013ae4
|
[
"MIT"
] | null | null | null |
import math
import os
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
BatchNorm2d = nn.BatchNorm2d
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
#--------------------------------------------#
                # 3x3 depthwise convolution: feature extraction across spatial positions
#--------------------------------------------#
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
#-----------------------------------#
                # 1x1 convolution to adjust the number of channels
#-----------------------------------#
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
#-----------------------------------#
                # 1x1 convolution to increase the number of channels
#-----------------------------------#
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
#--------------------------------------------#
                # 3x3 depthwise convolution: feature extraction across spatial positions
#--------------------------------------------#
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
#-----------------------------------#
                # 1x1 convolution to reduce the number of channels
#-----------------------------------#
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
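def _example_inverted_residual():
    # Illustrative sketch (not part of the original file): shows the tensor
    # shapes produced by one InvertedResidual block. The input size (64x64)
    # and channel counts are arbitrary example values.
    x = torch.randn(1, 32, 64, 64)
    block = InvertedResidual(inp=32, oup=64, stride=2, expand_ratio=6)
    y = block(x)
    # stride=2 halves the spatial size and oup=64 sets the output channels,
    # so y.shape == torch.Size([1, 64, 32, 32]); no residual connection is
    # used because stride != 1 and inp != oup.
    return y.shape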
class MobileNetV2(nn.Module):
def __init__(self, n_class=1000, input_size=224, width_mult=1.):
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
interverted_residual_setting = [
# t, c, n, s
            # t: hidden-layer expansion ratio, c: output channels, n: repeat count, s: stride
[1, 16, 1, 1], # 256, 256, 32 -> 256, 256, 16
            # second downsampling
[6, 24, 2, 2], # 256, 256, 16 -> 128, 128, 24 2
            # third downsampling
[6, 32, 3, 2], # 128, 128, 24 -> 64, 64, 32 4
            # the fourth downsampling is not used in deeplab; dilated convolutions are used instead to enlarge the receptive field
[6, 64, 4, 2], # 64, 64, 32 -> 32, 32, 64 7
[6, 96, 3, 1], # 32, 32, 64 -> 32, 32, 96
[6, 160, 3, 2], # 32, 32, 96 -> 16, 16, 160 14
[6, 320, 1, 1], # 16, 16, 160 -> 16, 16, 320
]
assert input_size % 32 == 0
input_channel = int(input_channel * width_mult)
self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
# 512, 512, 3 -> 256, 256, 32
        # first downsampling
self.features = [conv_bn(3, input_channel, 2)]
for t, c, n, s in interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, s, expand_ratio=t))
else:
self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))
input_channel = output_channel
self.features.append(conv_1x1_bn(input_channel, self.last_channel))
self.features = nn.Sequential(*self.features)
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(self.last_channel, n_class),
)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.mean(3).mean(2)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def load_url(url, model_dir='./model_data', map_location=None):
if not os.path.exists(model_dir):
os.makedirs(model_dir)
filename = url.split('/')[-1]
cached_file = os.path.join(model_dir, filename)
if os.path.exists(cached_file):
return torch.load(cached_file, map_location=map_location)
else:
return model_zoo.load_url(url,model_dir=model_dir)
def mobilenetv2(pretrained=False, **kwargs):
model = MobileNetV2(n_class=1000, **kwargs)
if pretrained:
model.load_state_dict(load_url('https://github.com/bubbliiiing/deeplabv3-plus-pytorch/releases/download/v1.0/mobilenet_v2.pth.tar'), strict=False)
return model
if __name__ == "__main__":
model = mobilenetv2()
for i, layer in enumerate(model.features):
print(i, layer)
| 37.595092
| 155
| 0.489883
|
526c859b36d21957ba054f26d64018474d197668
| 638
|
py
|
Python
|
visualization/img_to_video.py
|
wangxiyang2022/DeepFusionMOT
|
89059c37208bc2c135131d9f600566c3d76b35f2
|
[
"MIT"
] | 41
|
2022-02-18T01:12:09.000Z
|
2022-03-31T16:23:53.000Z
|
visualization/img_to_video.py
|
wangxiyang2022/DeepFusionMOT
|
89059c37208bc2c135131d9f600566c3d76b35f2
|
[
"MIT"
] | 3
|
2022-02-28T09:11:18.000Z
|
2022-03-22T02:39:38.000Z
|
visualization/img_to_video.py
|
wangxiyang2022/DeepFusionMOT
|
89059c37208bc2c135131d9f600566c3d76b35f2
|
[
"MIT"
] | 6
|
2022-02-28T09:50:53.000Z
|
2022-03-09T01:34:34.000Z
|
# -*- coding: utf-8 -*-
# author: wangxy
import cv2
'''
This script converts a sequence of images into a video.
'''
# Write your folder path here, for example: /home/youname/data/img/
# Note that the last folder should have a /
img_root = '../results/pointrcnn_Car_val/image/'
video_save_path = 'tracking_video.avi'
fps = 24
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
videoWriter = cv2.VideoWriter(video_save_path,fourcc,fps,(1242,375))
for i in range(297): # Here 297 is the number of frames in the dataset. You need to make the appropriate changes
number = '%06d'%i
frame = cv2.imread(img_root+number+'.png')
videoWriter.write(frame)
videoWriter.release()
| 30.380952
| 113
| 0.738245
|
7ef34ffa11bde64c40ef7efc5d74821791cf7ccb
| 7,035
|
py
|
Python
|
objectModel/Python/tests/cdm/projection/test_projection_array.py
|
jocubeit/CDM
|
040ba0eaaadde216bab0bf165e330f40c3b3d089
|
[
"CC-BY-4.0",
"MIT"
] | 265
|
2018-03-04T04:47:50.000Z
|
2019-05-06T13:31:18.000Z
|
objectModel/Python/tests/cdm/projection/test_projection_array.py
|
jocubeit/CDM
|
040ba0eaaadde216bab0bf165e330f40c3b3d089
|
[
"CC-BY-4.0",
"MIT"
] | 39
|
2018-03-21T16:57:12.000Z
|
2019-05-06T17:30:23.000Z
|
objectModel/Python/tests/cdm/projection/test_projection_array.py
|
jocubeit/CDM
|
040ba0eaaadde216bab0bf165e330f40c3b3d089
|
[
"CC-BY-4.0",
"MIT"
] | 75
|
2018-03-09T20:33:13.000Z
|
2019-05-05T06:55:43.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import os
import unittest
from cdm.objectmodel import CdmCorpusDefinition, CdmEntityDefinition
from tests.common import async_test
from tests.utilities.projection_test_utils import ProjectionTestUtils
class ProjectionArrayTest(unittest.TestCase):
"""A test class for testing the array type with a set of foundational operations in a projection"""
# All possible combinations of the different resolution directives
res_opts_combinations = [
[],
['referenceOnly'],
['normalized'],
['structured'],
['referenceOnly', 'normalized'],
['referenceOnly', 'structured'],
['normalized', 'structured'],
['referenceOnly', 'normalized', 'structured']
]
# The path between TestDataPath and test_name.
tests_subpath = os.path.join('Cdm', 'Projection', 'ProjectionArrayTest')
@async_test
async def test_entity_attribute(self):
"""Test Array type on an entity attribute"""
test_name = 'test_entity_attribute'
entity_name = 'ThreeMusketeers'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name) # type: CdmCorpusDefinition
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name)) # type: CdmEntityDefinition
non_structured_resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [ ])
# Original set of attributes: ["name", "age", "address"]
# in non-structured form
# Expand 1...3;
# renameFormat = {m}{o};
# alterTraits = { has.expansionInfo.list(expansionName: "{a}", ordinal: "{o}", memberAttribute: "{mo}") , "argumentsContainWildcards" : true }
# addArtifactAttribute : "personCount"
# alterTraits = { indicates.expansionInfo.count(expansionName: "{a}") , apply to "personCount" , "argumentsContainWildcards" : true }
self.assertEqual(10, len(non_structured_resolved_entity.attributes))
ProjectionTestUtils.validate_expansion_info_trait(self, non_structured_resolved_entity.attributes[0], 'name1', 1, 'ThreePeople', 'name')
ProjectionTestUtils.validate_expansion_info_trait(self, non_structured_resolved_entity.attributes[1], 'age1', 1, 'ThreePeople', 'age')
ProjectionTestUtils.validate_expansion_info_trait(self, non_structured_resolved_entity.attributes[2], 'address1', 1, 'ThreePeople', 'address')
ProjectionTestUtils.validate_expansion_info_trait(self, non_structured_resolved_entity.attributes[3], 'name2', 2, 'ThreePeople', 'name')
ProjectionTestUtils.validate_expansion_info_trait(self, non_structured_resolved_entity.attributes[4], 'age2', 2, 'ThreePeople', 'age')
ProjectionTestUtils.validate_expansion_info_trait(self, non_structured_resolved_entity.attributes[5], 'address2', 2, 'ThreePeople', 'address')
ProjectionTestUtils.validate_expansion_info_trait(self, non_structured_resolved_entity.attributes[6], 'name3', 3, 'ThreePeople', 'name')
ProjectionTestUtils.validate_expansion_info_trait(self, non_structured_resolved_entity.attributes[7], 'age3', 3, 'ThreePeople', 'age')
ProjectionTestUtils.validate_expansion_info_trait(self, non_structured_resolved_entity.attributes[8], 'address3', 3, 'ThreePeople', 'address')
self.assertEqual('personCount', (non_structured_resolved_entity.attributes[9]).name)
self.assertIsNotNone(non_structured_resolved_entity.attributes[9].applied_traits.item('indicates.expansionInfo.count'))
self.assertEqual('ThreePeople', non_structured_resolved_entity.attributes[9].applied_traits.item('indicates.expansionInfo.count').arguments[0].value)
# Original set of attributes: ["name", "age", "address"]
# in structured form
# alterTraits = { is.dataFormat.list }
# addAttributeGroup: favoriteMusketeers
structured_resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [ 'structured' ])
self.assertEqual(1, len(structured_resolved_entity.attributes))
att_group_definition = ProjectionTestUtils.validate_attribute_group(self, structured_resolved_entity.attributes, 'favoriteMusketeers')
self.assertIsNotNone(att_group_definition.exhibits_traits.item('is.dataFormat.list'))
@async_test
async def test_type_attribute(self):
"""Test Array type on an type attribute"""
test_name = 'test_type_attribute'
entity_name = 'Person'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name) # type: CdmCorpusDefinition
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, self.tests_subpath, entity_name, res_opt)
entity = await corpus.fetch_object_async('local:/{0}.cdm.json/{0}'.format(entity_name)) # type: CdmEntityDefinition
non_structured_resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [ ])
# Original set of attributes: ["Favorite Terms"]
# in non-structured form
# Expand 1...2;
# renameFormat = Term {o};
# alterTraits = { has.expansionInfo.list(expansionName: "{a}", ordinal: "{o}") , "argumentsContainWildcards" : true }
# addArtifactAttribute : "number of favorite terms"
# alterTraits = { indicates.expansionInfo.count(expansionName: "{a}") , apply to "number of favorite terms" , "argumentsContainWildcards" : true }
self.assertEqual(3, len(non_structured_resolved_entity.attributes))
self.assertEqual('Term 1', (non_structured_resolved_entity.attributes[0]).name)
self.assertEqual('Term 2', (non_structured_resolved_entity.attributes[1]).name)
self.assertEqual('number of favorite terms', (non_structured_resolved_entity.attributes[2]).name)
self.assertIsNotNone(non_structured_resolved_entity.attributes[2].applied_traits.item('indicates.expansionInfo.count'))
self.assertEqual('Favorite Terms', non_structured_resolved_entity.attributes[2].applied_traits.item('indicates.expansionInfo.count').arguments[0].value)
# Original set of attributes: ["Favorite Terms"]
# in structured form
# alterTraits = { is.dataFormat.list }
structured_resolved_entity = await ProjectionTestUtils.get_resolved_entity(corpus, entity, [ 'structured' ])
self.assertEqual(1, len(structured_resolved_entity.attributes))
self.assertEqual('Favorite Terms', (structured_resolved_entity.attributes[0]).name)
self.assertIsNotNone(structured_resolved_entity.attributes[0].applied_traits.item('is.dataFormat.list'))
| 65.138889
| 160
| 0.735608
|
c2eb27325a85c0d4ddb63e8e8cf1f18646e25d95
| 7,748
|
py
|
Python
|
API v1/python/path.py
|
FarrantAlex/CloudRF-API-clients
|
8e42e8a0325887f6f1aa91395a3f4b9d4cb2c9c2
|
[
"MIT"
] | null | null | null |
API v1/python/path.py
|
FarrantAlex/CloudRF-API-clients
|
8e42e8a0325887f6f1aa91395a3f4b9d4cb2c9c2
|
[
"MIT"
] | null | null | null |
API v1/python/path.py
|
FarrantAlex/CloudRF-API-clients
|
8e42e8a0325887f6f1aa91395a3f4b9d4cb2c9c2
|
[
"MIT"
] | null | null | null |
"""Demonstrates Path CloudRF API."""
import argparse
import configparser
import csv
import os
import textwrap
from pathlib import Path
from cloudrf import CloudRFAPI
# TODO: Deal with html / url
class CloudRFPath(CloudRFAPI):
"""Path API class"""
endpoint = '/API/path/'
api_id = 'path'
file_types = ['chart']
type_map = {'chart': 'Chart image', 'kml': 'Network KML'}
def download(self, select=None):
select = self.file_types if select is None else select
for dtype in select:
if self.type_map[dtype] in self.response:
self.download_direct(self.response[self.type_map[dtype]])
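def _example_direct_usage(uid, key, base_url, strict_ssl, row):
    # Illustrative sketch (not part of the original file): drives CloudRFPath
    # directly instead of through the App class below. `row` is assumed to be a
    # dict of API parameters using the same column names as the CSV input files
    # (see App.run_path below); the credential values are caller-supplied
    # placeholders.
    api = CloudRFPath(uid, key, base_url, strict_ssl, save_response=False)
    api.set_download_dir('./data')
    api.request(row)
    api.download(select=['chart'])
    return api.response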
class App:
"""Application class
This class's base class is configured using a cloudrf.ini configuration file.
At first run, a default configuration file will be created. Change all values as required.
Alternatively, the CLOUDRF_UID and CLOUDRF_KEY environment variables may be used to override the file configuration.
This behaviour may be changed by removing the AppAddOn base class
"""
config_filename = 'cloudrf.ini'
def __init__(self, args=None):
print('CloudRF API demo')
self.parse_args(args)
print(f'Reading data from {self.args.data_files}')
print(f'Will download {self.args.dl_types}')
self.configure()
if self.args.output_dir is not None:
self.data_dir = self.args.output_dir
if self.data_dir is None:
self.data_dir = Path.cwd() / 'data'
print(f'All files generated to {self.data_dir}')
def parse_args(self, args):
parser = argparse.ArgumentParser(description=textwrap.dedent(f'''
CloudRF Path application
Path Profile studies the link from one site to another in a direct line.
It factors in system parameters, antenna patterns, environmental characteristics
and terrain data to produce a JSON report containing enough values to incorporate
into your analysis or create a chart from.
This demonstration program utilizes the CloudRF Path API to generate any
of the possible file outputs offered by the API from {CloudRFPath.file_types}.
The API arguments are sourced from csv file(s).
please use -s all to generate ALL outputs available.
Please refer to API Reference https://api.cloudrf.com/'''),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-i', dest='data_files', metavar='data_file', nargs='+', help='data input filename(csv)')
parser.add_argument('-s', dest='dl_types', nargs='+',
choices=CloudRFPath.file_types + ['all'], default=['chart'],
help='type of output file to be downloaded')
parser.add_argument('-o', dest='output_dir', metavar='output_dir',
help='output directory where files are downloaded')
parser.add_argument('-r', dest='save_response', action="store_true", help='save response content (json/html)')
parser.add_argument('-v', dest='verbose', action="store_true", help='Output more information on screen')
# for unit testing it is useful to be able to pass arguments.
if args is None:
self.args = parser.parse_args()
else:
if isinstance(args, str):
args = args.split(' ')
self.args = parser.parse_args(args)
# if all selected then we reset the list to all types.
if 'all' in self.args.dl_types:
self.args.dl_types = CloudRFPath.file_types
self.save_response = self.args.save_response
def configure(self):
"""Application configuration
Adds functionality to load uid and key from configuration or environment
        you may find it simpler to use the following assignments instead
        self.uid = "CHANGEME"
        self.key = "CHANGEME"
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise Exception(f'Boolean value expected in {v}.')
self.uid = os.environ['CLOUDRF_UID'] if 'CLOUDRF_UID' in os.environ else None
self.key = os.environ['CLOUDRF_KEY'] if 'CLOUDRF_KEY' in os.environ else None
self.strict_ssl = os.environ['CLOUDRF_STRICT_SSL'] if 'CLOUDRF_STRICT_SSL' in os.environ else None
self.base_url = os.environ['CLOUDRF_BASE_URL'] if 'CLOUDRF_BASE_URL' in os.environ else None
self.data_dir = os.environ['CLOUDRF_DATA_DIR'] if 'CLOUDRF_DATA_DIR' in os.environ else None
        # if none of the values were provided via the environment, read the config file
if not any([bool(self.uid), bool(self.key), bool(self.strict_ssl), bool(self.base_url)]):
config = configparser.ConfigParser()
# we read the configuration file if it exists
if Path(self.config_filename).is_file():
config.read(self.config_filename)
# if user section does not exist we create it with default value
if 'user' not in config.sections():
config.add_section('user')
config['user']['uid'] = 'CHANGEME'
config['user']['key'] = 'CHANGEME'
config.add_section('api')
config['api']['strict_ssl'] = 'CHANGEME'
config['api']['base_url'] = 'CHANGEME'
config.add_section('data')
config['data']['dir'] = ''
with open('cloudrf.ini', 'w') as fp:
config.write(fp)
if config['user']['uid'] == 'CHANGEME':
raise Exception(f'Please change configuration in {self.config_filename}')
if self.uid is None:
self.uid = config['user']['uid']
if self.key is None:
self.key = config['user']['key']
if self.strict_ssl is None:
self.strict_ssl = config['api']['strict_ssl']
if self.base_url is None:
self.base_url = config['api']['base_url']
if self.data_dir is None and config['data']['dir'].strip() != '':
self.data_dir = config['data']['dir']
self.strict_ssl = str2bool(self.strict_ssl)
def run_path(self):
"""Path coverage analysis"""
responses = []
self.api = CloudRFPath(self.uid, self.key, self.base_url, self.strict_ssl, self.save_response)
self.api.set_download_dir(self.data_dir)
print('Path Profile API demo')
self.row_count = 0
for f in self.args.data_files:
with open(f, 'r') as fp:
self.csv_data = csv.DictReader(fp)
self.csv_rows = [row for row in self.csv_data]
self.row_count += len(self.csv_rows)
for row in self.csv_rows:
if self.args.verbose:
print(f'data: {row}')
self.api.request(row)
if self.args.verbose:
print(f'response: {self.api.response}')
# api.download(select=['kmz']) # remove select to download all types available
self.api.download(select=self.args.dl_types) # remove select to download all types available
responses.append(self.api.response)
print('Done!', flush=True)
return responses
if __name__ == '__main__':
app = App()
app.run_path()
| 40.145078
| 120
| 0.596541
|
c2c27f07d341e0e9f8af029bdf03996b5c31e60c
| 13,123
|
py
|
Python
|
firmware_flash/models.py
|
zacharybussey/fermentrack
|
0c23413e7f91b91ce01aef7355cae39b130cb462
|
[
"MIT"
] | null | null | null |
firmware_flash/models.py
|
zacharybussey/fermentrack
|
0c23413e7f91b91ce01aef7355cae39b130cb462
|
[
"MIT"
] | null | null | null |
firmware_flash/models.py
|
zacharybussey/fermentrack
|
0c23413e7f91b91ce01aef7355cae39b130cb462
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.utils import timezone
import os.path
import requests
import logging
from . import fhash
from constance import config
try:
from fermentrack_django import settings
except:
from fermentrack_com import \
        settings  # This file is a direct copy of what I'm using for fermentrack.com, which simplifies keeping things in sync.
logger = logging.getLogger(__name__)
FERMENTRACK_COM_URL = "http://www.fermentrack.com"
MODEL_VERSION = 1
def check_model_version():
try:
url = FERMENTRACK_COM_URL + "/api/model_version/"
response = requests.get(url)
data = response.json()
except:
return False
return data
def get_model_version():
return MODEL_VERSION
class DeviceFamily(models.Model):
class Meta:
verbose_name = "Device Family"
verbose_name_plural = "Device Families"
FLASH_ARDUINO = "avrdude"
FLASH_ESP8266 = "esptool"
FLASH_CHOICES = (
(FLASH_ARDUINO, "Avrdude (Arduino)"),
(FLASH_ESP8266, "Esptool (ESP8266)")
)
DETECT_ARDUINO = "arduino"
DETECT_ESP8266 = "esp8266"
DETECT_PARTICLE = "particle"
DETECT_CHOICES = (
(DETECT_ARDUINO, "Arduino"),
(DETECT_ESP8266, "ESP8266"),
(DETECT_PARTICLE, "Particle (Spark/Core)"),
)
name = models.CharField(max_length=30, blank=False, null=False, help_text="The name of the device family")
flash_method = models.CharField(max_length=30, choices=FLASH_CHOICES, default=FLASH_ARDUINO)
detection_family = models.CharField(max_length=30, choices=DETECT_CHOICES, default=DETECT_ARDUINO)
def __str__(self):
return self.name
def __unicode__(self):
return self.name
@staticmethod
def load_from_website():
try:
url = FERMENTRACK_COM_URL + "/api/firmware_family_list/"
response = requests.get(url)
data = response.json()
except:
return False
if len(data) > 0:
# If we got data, clear out the cache of DeviceFamilies
DeviceFamily.objects.all().delete()
# Then loop through the data we received and recreate it again
for row in data:
newDevice = DeviceFamily(name=row['name'], flash_method=row['flash_method'], id=row['id'],
detection_family=row['detection_family'])
newDevice.save()
return True # DeviceFamily table is updated
return False # We didn't get data back from Fermentrack.com, or there was an error
def file_suffix(self):
# file_suffix is used to determine the local filename for the firmware file
if self.flash_method == self.FLASH_ARDUINO:
return ".hex"
elif self.flash_method == self.FLASH_ESP8266:
return ".bin"
else:
return None
class Firmware(models.Model):
class Meta:
verbose_name = "Firmware"
verbose_name_plural = "Firmware" # I don't care if this is ambiguous, it bothers me.
WEIGHT_CHOICES = (
(1, "1 (Highest)"),
(2, "2"),
(3, "3"),
(4, "4"),
(5, "5"),
(6, "6"),
(7, "7"),
(8, "8"),
(9, "9 (Lowest)"),
)
name = models.CharField(max_length=128, blank=False, null=False, help_text="The name of the firmware")
family = models.ForeignKey('DeviceFamily')
version = models.CharField(max_length=20, default="0.0", help_text="The major version number")
revision = models.CharField(max_length=20, default="0.0", help_text="The minor revision number")
variant = models.CharField(max_length=80, default="", blank=True,
help_text="The firmware 'variant' (if applicable)")
is_fermentrack_supported = models.BooleanField(default=False,
help_text="Is this firmware officially supported by Fermentrack?")
in_error = models.BooleanField(default=False, help_text="Is there an error with this firmware that should "
"prevent it from being downloaded?")
description = models.TextField(default="", blank=True, null=False, help_text="The description of the firmware")
variant_description = models.TextField(default="", blank=True, null=False,
help_text="The description of the variant")
post_install_instructions = models.TextField(default="", blank=True, null=False,
help_text="Instructions to be displayed to the user after installation")
download_url = models.CharField(max_length=255, default="", blank=True, null=False,
help_text="The URL at which the firmware can be downloaded")
project_url = models.CharField(max_length=255, default="", blank=True, null=False,
help_text="The URL for the project associated with the firmware")
documentation_url = models.CharField(max_length=255, default="", blank=True, null=False,
help_text="The URL for documentation/help on the firmware (if any)")
weight = models.IntegerField(default=5, help_text="Weight for sorting (Lower weights rise to the top)",
choices=WEIGHT_CHOICES)
checksum = models.CharField(max_length=64, help_text="SHA256 checksum of the file (for checking validity)",
default="", blank=True)
def __str__(self):
return self.name + " - " + self.version + " - " + self.revision + " - " + self.variant
def __unicode__(self):
return self.__str__()
@staticmethod
def load_from_website():
try:
url = FERMENTRACK_COM_URL + "/api/firmware_list/all/"
response = requests.get(url)
data = response.json()
except:
return False
if len(data) > 0:
# If we got data, clear out the cache of Firmware
Firmware.objects.all().delete()
# Then loop through the data we received and recreate it again
for row in data:
newFirmware = Firmware(
name=row['name'], version=row['version'], revision=row['revision'], family_id=row['family_id'],
variant=row['variant'], is_fermentrack_supported=row['is_fermentrack_supported'],
in_error=row['in_error'], description=row['description'],
variant_description=row['variant_description'], download_url=row['download_url'],
project_url=row['project_url'], documentation_url=row['documentation_url'], weight=row['weight'],
checksum=row['checksum'],
)
newFirmware.save()
return True # Firmware table is updated
return False # We didn't get data back from Fermentrack.com, or there was an error
def local_filename(self):
def stripslashes(string):
return string.replace('\\', '').replace('/', '')
fname_base = stripslashes(self.family.name) + " - " + stripslashes(self.name) + " - "
fname_base += "v" + stripslashes(self.version) + "r" + stripslashes(self.revision)
if len(self.variant) > 0:
fname_base += " -- " + stripslashes(self.variant)
fname_base += self.family.file_suffix()
return fname_base
def local_filepath(self):
return os.path.join(settings.BASE_DIR, "firmware_flash", "firmware")
def download_to_file(self, check_checksum=True, force_download=False):
full_path = os.path.join(self.local_filepath(), self.local_filename())
if os.path.isfile(full_path):
if force_download: # If we're just going to force the download anyways, just kill the file
os.remove(full_path)
elif self.checksum == fhash.hash_of_file(full_path): # If the file already exists check the checksum
# The file is valid - return the path
return full_path
else:
# The checksum check failed - Kill the file
os.remove(full_path)
# So either we don't have a downloaded copy (or it's invalid). Let's download a new one.
r = requests.get(self.download_url, stream=True)
with open(full_path, str("wb")) as f:
for chunk in r.iter_content():
f.write(chunk)
# Now, let's check that the file is valid (but only if check_checksum is true)
if check_checksum:
if os.path.isfile(full_path):
# If the file already exists check the checksum (and delete if it fails)
if self.checksum != fhash.hash_of_file(full_path):
os.remove(full_path)
return None
else:
return None
# The file is valid (or we aren't checking checksums). Return the path.
return full_path
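def _example_download_firmware(firmware):
    # Illustrative sketch (not part of the original file): downloads a Firmware
    # object's image, letting download_to_file() verify the SHA256 checksum,
    # and logs a message if the download or the checksum check failed.
    path = firmware.download_to_file(check_checksum=True)
    if path is None:
        logger.error("Download or checksum verification failed for %s", firmware)
    return path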
class Board(models.Model):
class Meta:
verbose_name = "Board"
verbose_name_plural = "Boards"
WEIGHT_CHOICES = (
(1, "1 (Highest)"),
(2, "2"),
(3, "3"),
(4, "4"),
(5, "5"),
(6, "6"),
(7, "7"),
(8, "8"),
(9, "9 (Lowest)"),
)
name = models.CharField(max_length=128, blank=False, null=False, help_text="The name of the board")
family = models.ForeignKey('DeviceFamily')
description = models.TextField(default="", blank=True, null=False, help_text="The description of the board")
weight = models.IntegerField(default=5, help_text="Weight for sorting (Lower weights rise to the top)",
choices=WEIGHT_CHOICES)
flash_options_json = models.TextField(default="", blank=True, null=False,
help_text="A JSON list containing options to pass to subprocess")
def __str__(self):
return self.name + " - " + str(self.family)
def __unicode__(self):
return self.name + " - " + unicode(self.family)
@staticmethod
def load_from_website():
try:
url = FERMENTRACK_COM_URL + "/api/board_list/all/"
response = requests.get(url)
data = response.json()
except:
return False
if len(data) > 0:
# If we got data, clear out the cache of Firmware
Board.objects.all().delete()
# Then loop through the data we received and recreate it again
for row in data:
newBoard = Board(
name=row['name'], family_id=row['family_id'], description=row['description'], weight=row['weight'],
flash_options_json=row['flash_options_json'], id=row['id'],
)
newBoard.save()
return True # Board table is updated
return False # We didn't get data back from Fermentrack.com, or there was an error
class FlashRequest(models.Model):
STATUS_QUEUED = 'queued'
STATUS_RUNNING = 'running'
STATUS_FINISHED = 'finished'
STATUS_FAILED = 'failed'
STATUS_CHOICES = (
(STATUS_QUEUED, 'Queued'),
(STATUS_RUNNING, 'Running'),
(STATUS_FINISHED, 'Finished'),
(STATUS_FAILED, 'Failed'),
)
# huey_task_id = models.CharField(max_length=64, help_text="Task ID used within Huey for tracking status")
status = models.CharField(max_length=32, default=STATUS_QUEUED)
firmware_to_flash = models.ForeignKey('Firmware', on_delete=models.CASCADE, help_text="Firmware to flash")
board_type = models.ForeignKey('Board', on_delete=models.CASCADE, help_text="Board type being flashed")
serial_port = models.CharField(max_length=255, help_text="Path to the serial device used with the flash tool")
result_text = models.CharField(max_length=255, default=None, blank=True, null=True,
help_text="String explaining the result status")
flash_output = models.TextField(null=True, blank=True, default=None, help_text="Output from the flash tool")
created = models.DateTimeField(help_text="The date this flash request was created", auto_now_add=True)
def fail(self, result_text, flash_output=""):
""" FlashRequest.fail is just a fast way to set the status & result text and save the object """
self.result_text = result_text
self.flash_output = flash_output
self.status = self.STATUS_FAILED
self.save()
return True
def succeed(self, result_text, flash_output=""):
""" FlashRequest.succeed is just a fast way to set the status & result text and save the object """
self.result_text = result_text
self.flash_output = flash_output
self.status = self.STATUS_FINISHED
self.save()
return True
| 39.173134
| 121
| 0.612512
|
e9b37e854a2891e5b148d5890a50960c2402743f
| 10,137
|
py
|
Python
|
server.py
|
soykilian/ExamScanner
|
b0df506130913f67d8070457fee36edba2edd95a
|
[
"MIT"
] | null | null | null |
server.py
|
soykilian/ExamScanner
|
b0df506130913f67d8070457fee36edba2edd95a
|
[
"MIT"
] | null | null | null |
server.py
|
soykilian/ExamScanner
|
b0df506130913f67d8070457fee36edba2edd95a
|
[
"MIT"
] | null | null | null |
from flask import Flask, request
from flask_cors import CORS
from flask_autoindex import AutoIndex
from omr.omr import get_answers
import json
import os
import threading
import requests
import sys
import shutil
import pandas as pd
import numpy as np
import email, smtplib, ssl
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# -- EXAM DATA
EXAMEN = "INTRODUCCIÓN A LA INGENIERÍA DE TELECOMUNICACIÓN - EXAMEN FINAL"
EMAIL_COORDINADOR = "pruebasSDG2+coordinador@gmail.com"
N_ALTERNATIVAS = 5
RESPUESTAS_CORRECTAS = [
["A", "B", "C", "A", "B", "A", "C", "E", "E", "E", "C", "D"],
["C", "A", "B", "C", "A", "B", "A", "C", "E", "A", "B", "D"],
["A", "B", "C", "A", "B", "A", "C", "E", "A", "B", "C", "D"],
["A", "B", "A", "B", "C", "A", "C", "E", "A", "B", "C", "D"]
]
SENDER_EMAIL = sys.argv[1]
PASSWORD_EMAIL = sys.argv[2]
SMTP_DOMAIN = sys.argv[3]
SMTP_PORT = sys.argv[4]
# -- PATHS REQUIRED FOR THE PROGRAM TO RUN
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
DATA_NAME = 'data.csv'
DATA_PATH = os.path.join(CURRENT_PATH, DATA_NAME)
IMAGE_ORIG_NAME = 'imagen_original.jpg'
IMAGE_ORIG_PATH = os.path.join(CURRENT_PATH, IMAGE_ORIG_NAME)
IMAGE_READ_NAME = 'imagen_leida.jpg'
IMAGE_READ_PATH = os.path.join(CURRENT_PATH, IMAGE_READ_NAME)
IMAGE_PREVIEW_NAME = 'imagen_preview.jpg'
IMAGE_PREVIEW_PATH = os.path.join(CURRENT_PATH, IMAGE_PREVIEW_NAME)
STATIC_NAME = 'static'
STATIC_PATH = os.path.join(CURRENT_PATH, STATIC_NAME)
app = Flask(__name__,
static_url_path='',
static_folder='static')
CORS(app)
def get_response():
user_DNI, user_version, user_answers = analyze_picture()
user_answers = user_answers[:len(RESPUESTAS_CORRECTAS[0])]
grade, num_correctas = calculate_grade(user_answers, user_version)
beauty_user_DNI = ''.join(user_DNI)
beauty_user_answers = ''.join(user_answers)
surname, name, email = updateCSV(beauty_user_DNI, user_version, beauty_user_answers, grade)
processThread = threading.Thread(target=sendMail, args=[user_version, user_answers, grade, surname, name, email])
processThread.start()
processThread = threading.Thread(target=save_to_folder, args=[beauty_user_DNI])
processThread.start()
# print(beauty_user_DNI)
ranking = get_ranking(beauty_user_DNI)
media = get_media()
response = {
"user_DNI": user_DNI,
"user_version": user_version,
"user_answers": user_answers,
"grade": grade,
"num_correctas": num_correctas,
"ranking": ranking,
"media": media
}
return response
def analyze_picture():
DNI, version, answers, _ = get_answers(IMAGE_ORIG_PATH, IMAGE_READ_PATH)
user_answers = []
for i, answer in enumerate(answers):
user_answers.append(answer)
user_DNI = []
for i, DNI in enumerate(DNI):
user_DNI.append(DNI)
return user_DNI, version, user_answers
def calculate_grade(user_answers, user_version):
if user_version == 0:
return "NOTA DESCONOCIDA - PÓNGASE EN CONTACTO CON EL COORDINADOR DE LA ASIGNATURA", 0
calificacion = 0
num_correctas = 0
for index, value in enumerate(RESPUESTAS_CORRECTAS[user_version-1]):
if value == user_answers[index]:
calificacion += 1*N_ALTERNATIVAS
num_correctas += 1
elif (user_answers[index] != 'N/A'):
calificacion -= 1
calificacion_maxima = len(RESPUESTAS_CORRECTAS[0])
grade = round((calificacion/(N_ALTERNATIVAS * calificacion_maxima*0.1)), 2)
return grade, num_correctas
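def _example_grade():
    # Illustrative sketch (not part of the original server): worked example of
    # the grading formula above. With 12 questions, 5 alternatives, 10 correct
    # answers, 1 wrong answer and 1 blank, the raw score is 10*5 - 1 = 49 and
    # the final grade is round(49 / (5 * 12 * 0.1), 2) = 8.17.
    answers = list(RESPUESTAS_CORRECTAS[0])  # answer key for version 1
    answers[10] = 'E'                        # one wrong answer (key is 'C')
    answers[11] = 'N/A'                      # one blank answer
    return calculate_grade(answers, 1)       # -> (8.17, 10)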
def sendMail(user_version, user_answers, grade, surname, name, email):
#print(SENDER_EMAIL, PASSWORD_EMAIL, SMTP_DOMAIN, SMTP_PORT)
beauty_user_answers = '\n\n'
for index, value in enumerate(user_answers):
beauty_user_answers += str(index+1) + ".\t\t" + "Su respuesta\t|\tRespuesta correcta:\t" + value \
+ "\t|\t" + str(RESPUESTAS_CORRECTAS[user_version-1][index]) + "\n"
subject = "Respuestas de " + EXAMEN
body = "Estimado " + name + ' ' + surname + ", para la versión " + str(user_version) \
+ " se han leído las respuestas " + beauty_user_answers + "\nLa nota final (sobre 10) es " \
+ str(grade) + "\nSe adjunta la imagen correspondiente"
receiver_email = email
# Create a multipart message and set headers
message = MIMEMultipart()
message["From"] = SENDER_EMAIL
message["To"] = receiver_email
message["Subject"] = subject
#message["Bcc"] = receiver_email # Recommended for mass emails
# Add body to email
message.attach(MIMEText(body, "plain"))
#---------------------------------------------------------------
# Attach imagen_original
filename = IMAGE_ORIG_PATH # In same directory as script
# Open image file in binary mode
with open(filename, "rb") as attachment:
# Add file as application/octet-stream
# Email client can usually download this automatically as attachment
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
# Encode file in ASCII characters to send by email
encoders.encode_base64(part)
filename = "examen_original.jpg"
# Add header as key/value pair to attachment part
part.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
# Add attachment to message and convert message to string
message.attach(part)
#---------------------------------------------------------------
#---------------------------------------------------------------
# Attach imagen_modificada
filename = IMAGE_READ_PATH # In same directory as script
# Open image file in binary mode
with open(filename, "rb") as attachment:
# Add file as application/octet-stream
# Email client can usually download this automatically as attachment
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
# Encode file in ASCII characters to send by email
encoders.encode_base64(part)
filename = "examen_leido.jpg"
# Add header as key/value pair to attachment part
part.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
# Add attachment to message and convert message to string
message.attach(part)
#---------------------------------------------------------------
text = message.as_string()
# Log in to server using secure context and send email
context = ssl.create_default_context()
with smtplib.SMTP_SSL(SMTP_DOMAIN, SMTP_PORT, context=context) as server:
server.login(SENDER_EMAIL, PASSWORD_EMAIL)
server.sendmail(SENDER_EMAIL, receiver_email, text)
#print(receiver_email)
#with smtplib.SMTP("smtp.mailtrap.io", 2525) as server:
# server.login("582897ddd2268b", "7a5fa911b7dfbc")
# server.sendmail("prueba@prueba.com", receiver_email, text)
#print("OK")
def save_to_folder(DNI):
shutil.copy(IMAGE_ORIG_NAME, 'imagenes')
os.rename(os.path.join('imagenes', IMAGE_ORIG_NAME), os.path.join('imagenes', str(DNI) + "_imagen_orig.jpg"))
shutil.copy(IMAGE_READ_PATH, 'imagenes')
os.rename(os.path.join('imagenes', IMAGE_READ_NAME), os.path.join('imagenes', str(DNI) + "_imagen_leida.jpg"))
def updateCSV(user_DNI, user_version, user_answers, user_grade):
df = pd.read_csv(DATA_PATH, sep=',')
#df.loc[df['DNI'] == int(user_DNI), 'Version'] = user_version
#df.loc[df['DNI'] == int(user_DNI), 'Answers'] = user_answers
df.loc[df['DNI'] == int(user_DNI), 'Nota'] = user_grade
if df.loc[df['DNI'] == int(user_DNI)].size != 0:
surname = df.loc[df['DNI'] == int(user_DNI), 'Surname'].values[0]
name = df.loc[df['DNI'] == int(user_DNI), 'Name'].values[0]
email = df.loc[df['DNI'] == int(user_DNI), 'Email'].values[0]
df.to_csv(DATA_PATH, header=True, sep=',', index=False)
return surname, name, email
else:
return "DESCONOCIDO", "DESCONOCIDO", EMAIL_COORDINADOR
def get_media():
df = pd.read_csv(DATA_PATH, sep=',')
media = df['Nota'].mean()
return round(media, 2)
def get_ranking(DNI):
df = pd.read_csv(DATA_PATH, sep=',')
df = df.sort_values('Nota', ascending=False)
df = df.set_index('DNI')
return (int)(np.where(df.index==int(DNI))[0][0] + 1)
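def _example_ranking():
    # Illustrative sketch (not part of the original server): the same ranking
    # logic as get_ranking(), but on an in-memory DataFrame instead of data.csv.
    # The student with the highest grade gets rank 1.
    df = pd.DataFrame({'DNI': [111, 222, 333], 'Nota': [5.0, 9.0, 7.5]})
    df = df.sort_values('Nota', ascending=False).set_index('DNI')
    return int(np.where(df.index == 222)[0][0] + 1)  # -> 1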
files_index = AutoIndex(app, os.path.curdir + '/imagenes', add_url_rules=False)
# Custom indexing
@app.route('/imagenes')
@app.route('/imagenes/<path:path>')
def autoindex(path='.'):
return files_index.render_autoindex(path)
@app.route('/get_answers', methods=['GET'])
def get_cam_picture():
response = requests.get("http://127.0.0.1:8080?action=snapshot")
file = open(IMAGE_ORIG_NAME, "wb")
file.write(response.content)
file.close()
response = get_response()
response_json = app.response_class(
response=json.dumps(response),
status=200,
mimetype='application/json'
)
return response_json
@app.route('/get_answers', methods=['POST'])
def get_post_picture():
image = request.files["image"]
image.save(IMAGE_ORIG_PATH)
response = get_response()
response_json = app.response_class(
response=json.dumps(response),
status=200,
mimetype='application/json'
)
return response_json
@app.route('/get_media', methods=['GET'])
def send_media():
response = get_media()
response_json = app.response_class(
response=json.dumps(response),
status=200,
mimetype='application/json'
)
return response_json
@app.route('/get_ranking', methods=['GET'])
def send_ranking():
DNI = request.args.get('DNI')
response = get_ranking(DNI)
response_json = app.response_class(
response=json.dumps(response),
status=200,
mimetype='application/json'
)
return response_json
if __name__ == '__main__':
app.run(host = '0.0.0.0', debug=True)
| 30.080119
| 117
| 0.652363
|
da0b09148bec1ebb815eb9339fc76370319c4a10
| 928
|
py
|
Python
|
Linguagens/Python/Exercicios/cursos_em_video/aulas-22_23/utilidadesCeV/moeda/__init__.py
|
rafaelvizu/Estudos
|
eef5e3e3706ff99959226c51b9907b6af4377bfe
|
[
"MIT"
] | null | null | null |
Linguagens/Python/Exercicios/cursos_em_video/aulas-22_23/utilidadesCeV/moeda/__init__.py
|
rafaelvizu/Estudos
|
eef5e3e3706ff99959226c51b9907b6af4377bfe
|
[
"MIT"
] | null | null | null |
Linguagens/Python/Exercicios/cursos_em_video/aulas-22_23/utilidadesCeV/moeda/__init__.py
|
rafaelvizu/Estudos
|
eef5e3e3706ff99959226c51b9907b6af4377bfe
|
[
"MIT"
] | null | null | null |
def resumo(v=0, pA=0, pR=0, dinheiro=False):
valorF = v
if dinheiro == True:
valorF = moeda(v)
print('')
print('--' * 15)
print(f' RESUMO DO VALOR')
print('--' * 15)
print(f'Valor analisado: {valorF:>11}')
print(f'Dobro do valor: {dobro(v, dinheiro):>11}')
print(f'Metade do valor: {metade(v, dinheiro):>11}')
print(f'{pA:<3}% de aumento: {aumentar(v, pA, dinheiro):>10}')
print(f'{pR:<3}% de redução: {diminuir(v, pR, dinheiro):>10}')
def dobro(v, dinheiro):
v *= 2
return v if dinheiro is False else moeda(v)
def metade(v, dinheiro):
v /= 2
return v if dinheiro is False else moeda(v)
def aumentar(v, p, dinheiro):
v += (v * p) / 100
return v if dinheiro is False else moeda(v)
def diminuir(v, p, dinheiro):
v -= (v * p) / 100
return v if dinheiro is False else moeda(v)
def moeda(v):
return f'R${v:.2f}'.replace('.', ',')
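def _example_usage():
    # Illustrative sketch (not part of the original module): formats a value as
    # Brazilian currency and applies a 10% increase with formatted output.
    # moeda(1234.5) -> 'R$1234,50'; aumentar(100, 10, True) -> 'R$110,00'
    return moeda(1234.5), aumentar(100, 10, True)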
| 28.121212
| 67
| 0.574353
|
2113ab96d9cdc195dc8ebc006e7085448527e10c
| 1,292
|
py
|
Python
|
tools.py
|
jordimas/fullstop-deep-punctuation-prediction
|
b776dc91fee52fe35d4ee4b098678144eea3078c
|
[
"MIT"
] | null | null | null |
tools.py
|
jordimas/fullstop-deep-punctuation-prediction
|
b776dc91fee52fe35d4ee4b098678144eea3078c
|
[
"MIT"
] | null | null | null |
tools.py
|
jordimas/fullstop-deep-punctuation-prediction
|
b776dc91fee52fe35d4ee4b098678144eea3078c
|
[
"MIT"
] | null | null | null |
from sklearn.metrics import confusion_matrix
#taken from https://gist.github.com/zachguo/10296432
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):
"""pretty print for confusion matrixes"""
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
fst_empty_cell = (columnwidth-3)//2 * " " + "t/p" + (columnwidth-3)//2 * " "
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = " " * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell
# Print header
print(" " + fst_empty_cell, end=" ")
for label in labels:
print("%{0}s".format(columnwidth) % label, end=" ")
print()
# Print rows
for i, label1 in enumerate(labels):
print(" %{0}s".format(columnwidth) % label1, end=" ")
for j in range(len(labels)):
cell = "%{0}.1f".format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
print(cell, end=" ")
print()
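if __name__ == "__main__":
    # Minimal usage sketch (not part of the original file): build a small
    # confusion matrix with scikit-learn (already imported above) and
    # pretty-print it with the helper defined here.
    y_true = ["0", "0", "1", "1"]
    y_pred = ["0", "1", "1", "1"]
    labels = ["0", "1"]
    print_cm(confusion_matrix(y_true, y_pred, labels=labels), labels)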
| 40.375
| 87
| 0.578947
|
841a98be7bdaf44f5eb7b85c8ad5defc647f050c
| 4,401
|
py
|
Python
|
onstar/onstar.py
|
nyxnyx/onstar
|
77336180d7ebb1b9d5bd1d3bc526a9a772e3ae9c
|
[
"MIT"
] | 5
|
2018-05-29T13:39:50.000Z
|
2022-03-09T10:24:32.000Z
|
onstar/onstar.py
|
jesjimher/onstar
|
9681d551ead09ebfff6cc57077568fb02b7264a2
|
[
"MIT"
] | 5
|
2018-02-16T22:17:30.000Z
|
2019-10-17T21:24:19.000Z
|
onstar/onstar.py
|
jesjimher/onstar
|
9681d551ead09ebfff6cc57077568fb02b7264a2
|
[
"MIT"
] | 4
|
2018-04-10T22:52:51.000Z
|
2019-11-15T12:19:50.000Z
|
"""
Class for connecting to OnStar service for getting status of your car
"""
import aiohttp
import asyncio
import json
from collections import namedtuple
class OnStar:
"""Base class for connection to OnStar service"""
    def __init__(self, username, password, pin, loop, dump_json=False):
"""Initiate connection and fetch login data"""
self._username = username
self._password = password
self._pin = pin
self._loop = loop
        self._dump_json = dump_json  # if True, formatted JSON responses will be printed
self._session = None
self._token = None
self._vehicle_id = None
self._LOGIN_URL = 'https://gsp.eur.onstar.com/gspserver/services/admin/login.json'
self._LOGINIFO_URL = 'https://gsp.eur.onstar.com/gspserver/services/admin/getLoginInfo.json'
self._DIAGNOSTICS_URL = 'https://gsp.eur.onstar.com/gspserver/services/vehicle/getDiagnosticsReport.json'
self._POSITION_URL = 'https://gsp.eur.onstar.com/gspserver/services/vehicle/performLocationHistoryQuery.json'
async def refresh(self):
await self._login()
await self._login_info()
await self._diagnostics()
await self._location()
await self._session.close()
def dump_json(self, raw_string):
if self._dump_json:
print(json.dumps(json.loads(raw_string), sort_keys=True, indent=4, separators=(',', ': ')))
async def _login(self):
payload = {'username': self._username, 'password': self._password, 'roleCode': 'driver', 'place': ''}
self._session = aiohttp.ClientSession(loop=self._loop)
response = await self._session.post(self._LOGIN_URL, data=payload)
response_data = await response.text()
self.dump_json(response_data)
data = json.loads(response_data, object_hook=lambda d: namedtuple('X',list(map(lambda x:x.replace('$','_'),d.keys())))(*d.values()))
self._login_object = data
self._token = data.results[0].token
self._header = {'X-GM-token': self._token}
async def _login_info(self):
response = await self._session.get(self._LOGINIFO_URL, headers=self._header)
login_info_data = await response.text()
self.dump_json(login_info_data)
self._login_info_object = json.loads(login_info_data, object_hook=lambda d: namedtuple('X', list(map(lambda x:x.replace('$','_'),d.keys())))(*d.values()))
self._vehicle_id = self._login_info_object.results[0].vehicles[0].vehicle.vehicleId
return self._login_info_object
async def _diagnostics(self):
payload = {'vehicleId': self._vehicle_id}
response = await self._session.get(self._DIAGNOSTICS_URL, params = payload, headers = self._header)
diagnostics = await response.text()
self.dump_json(diagnostics)
diagnostics = diagnostics.replace('def','def_') #there is def field which must be renamed
self._diagnostics_object = json.loads(diagnostics, object_hook=lambda d: namedtuple('X', list(map(lambda x:x.replace('$','_'),d.keys())))(*d.values()))
return self._diagnostics_object
async def _location(self):
payload = {'vehicleId': self._vehicle_id}
header = {'X-GM-token': self._token, 'X-GM-pincode': self._pin}
response = await self._session.post(self._POSITION_URL, params = payload, headers = header)
location = await response.text()
self.dump_json(location)
self._location_object = json.loads(location, object_hook=lambda d: namedtuple('X', list(map(lambda x:x.replace('$','_'),d.keys())))(*d.values()))
return self._location_object
def get_login_info(self):
return self._login_info_object
def get_diagnostics(self):
return self._diagnostics_object
def get_location(self):
return self._location_object
if __name__ == "__main__":
import getpass
print("This demo will connect to: https://gsp.eur.onstar.com/\n")
print("Before trying - ensure you have access by login in above site\n")
print("\nProvide credentials\n")
username = input("Username/email: ")
password = getpass.getpass("Password: ")
gm_pin = getpass.getpass("PIN for localization: ")
loop = asyncio.get_event_loop()
o = OnStar(username, password, gm_pin, loop, True)
loop.run_until_complete(o.refresh())
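# --- Illustrative programmatic sketch (editor addition). ---
# Same flow as the interactive demo above, but callable from other code; the
# credentials are placeholders and a valid OnStar account is assumed.
def example_refresh(username, password, pin):
    loop = asyncio.get_event_loop()
    car = OnStar(username, password, pin, loop)
    loop.run_until_complete(car.refresh())
    return car.get_diagnostics(), car.get_location()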
| 37.939655
| 162
| 0.670075
|
7529ab6961f5da477e96af64cf82b0d8427c5a20
| 4,505
|
py
|
Python
|
getFolderFromRepo.py
|
DivyPatel9881/GetFolderFromRepo
|
d218a67ec128c5286330963a5012aeed4e4f8fee
|
[
"MIT"
] | null | null | null |
getFolderFromRepo.py
|
DivyPatel9881/GetFolderFromRepo
|
d218a67ec128c5286330963a5012aeed4e4f8fee
|
[
"MIT"
] | null | null | null |
getFolderFromRepo.py
|
DivyPatel9881/GetFolderFromRepo
|
d218a67ec128c5286330963a5012aeed4e4f8fee
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import requests
import warnings
import os
USERNAME: str = ''
TOKEN: str = ''
SHOW_INFO_LOGS: bool = True
SHOW_DEBUG_LOGS: bool = False
try:
from config import *
except ImportError:
pass
if len(USERNAME) == 0:
env_username = os.environ.get('GITHUB_USERNAME', '')
if env_username != '':
USERNAME = env_username
else:
warnings.warn('GitHub Username not set')
if len(TOKEN) == 0:
env_token = os.environ.get('GITHUB_TOKEN', '')
if env_token != '':
TOKEN = env_token
else:
warnings.warn('GitHub Token not set')
def log(*args, is_debug: bool = False, **kwargs) -> None:
if is_debug:
if SHOW_DEBUG_LOGS:
print('Debug:', *args, **kwargs)
else:
if SHOW_INFO_LOGS or SHOW_DEBUG_LOGS:
print(*args, **kwargs)
def proper_filepath(filepath: str) -> str:
# should start with /
# should not end with /
# so it should be / or /a or /a.../b
return '/' + filepath.strip('/')
def get_last_element(url: str) -> str:
    # Strip any trailing slash first so the index is computed on the same
    # string that gets sliced (the original computed rindex on the unstripped URL).
    stripped = url.strip('/')
    return stripped[stripped.rindex('/') + 1:]
def get_new_path(path: str, last_element: str) -> str:
if last_element in path:
return path[path.index(last_element):]
else:
return last_element + os.path.sep + path
def get_folder_from_repo(owner: str, repo: str, folder_path: str, last_element: str, branch: str = '') -> None:
proper_folder_path = proper_filepath(folder_path)
if proper_folder_path == '/':
# TODO clone and exit (Take care of branch also)
pass
url = f'https://api.github.com/repos/{owner}/{repo}/contents{proper_folder_path}'
if branch != '':
url += f'?ref={branch}'
log('Folder URL: ', url, is_debug=True)
r = requests.get(url, auth=(USERNAME, TOKEN))
data = r.json()
    if isinstance(data, dict):
assert data['type'] == 'file'
download_file(data['download_url'], data['path'], last_element)
    elif isinstance(data, list):
for d in data:
if d['type'] == 'file':
download_file(d['download_url'], d['path'], last_element)
elif d['type'] == 'dir':
get_folder_from_repo(
owner, repo, d['path'], last_element, branch)
else:
log(f'Incorrect type: {d["type"]}', is_debug=True)
print('Sorry, an error occurred')
exit(-1)
def download_file(url: str, path: str, last_element: str) -> None:
new_path = get_new_path(path, last_element)
log(f'Downloading file: {new_path}')
log(f'Downloading file: {url}', is_debug=True)
r = requests.get(url, auth=(USERNAME, TOKEN))
log(f'Saving file: {new_path}', is_debug=True)
if os.path.dirname(new_path):
os.makedirs(os.path.dirname(new_path), exist_ok=True)
with open(new_path, 'wb+') as f:
f.write(r.content)
def show_help():
print(f"""USAGE:
{__file__} <GitHub URL of the folder/file>
""")
if __name__ == '__main__':
import sys
if len(sys.argv) == 2:
if sys.argv[1] == "-h" or sys.argv[1] == "--help":
show_help()
exit()
try:
URL = sys.argv[1]
url = sys.argv[1].strip('/') + '/'
url = url[url.index('github.com') + len('github.com') + 1:]
owner = url[:url.index('/')]
url = url[url.index('/') + 1:]
repo = url[:url.index('/')]
url = url[url.index('/'):]
if url.startswith('/tree'): # its a folder
url = url[len('/tree/'):]
branch = url[:url.index('/')]
folder_path = url[url.index('/') + 1:]
elif url.startswith('/blob'): # its a file
url = url[len('/blob/'):]
branch = url[:url.index('/')]
file_path = url[url.index('/') + 1:]
folder_path = file_path
else:
branch = ''
folder_path = '/'
log(f'Owner: {owner}', is_debug=True)
log(f'Repo: {repo}', is_debug=True)
log(f'Branch: {branch}', is_debug=True)
log(f'FolderPath: {folder_path}', is_debug=True)
except ValueError:
print('Please enter a valid GitHub Folder/File URL')
show_help()
exit(-1)
get_folder_from_repo(owner, repo, folder_path,
get_last_element(URL), branch)
else:
show_help()
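# --- Illustrative programmatic sketch (editor addition). ---
# The downloader can also be driven from other code; the repository URL below is a
# placeholder and GITHUB_USERNAME / GITHUB_TOKEN are assumed to be configured.
def example_download():
    repo_url = 'https://github.com/octocat/Hello-World'
    get_folder_from_repo('octocat', 'Hello-World', '/',
                         get_last_element(repo_url), branch='master')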
| 31.503497
| 111
| 0.547614
|
e4f07ac324d3a91367e51e2d23fc13751e0ffe97
| 5,016
|
py
|
Python
|
conanfile.py
|
1b00/corrade
|
bb626d650c7d76dd2846945ef22e9eb0003b6ea9
|
[
"MIT",
"Unlicense"
] | 385
|
2015-01-14T02:53:42.000Z
|
2022-03-24T08:33:19.000Z
|
conanfile.py
|
1b00/corrade
|
bb626d650c7d76dd2846945ef22e9eb0003b6ea9
|
[
"MIT",
"Unlicense"
] | 110
|
2015-12-10T10:55:08.000Z
|
2022-03-06T18:08:08.000Z
|
conanfile.py
|
1b00/corrade
|
bb626d650c7d76dd2846945ef22e9eb0003b6ea9
|
[
"MIT",
"Unlicense"
] | 132
|
2015-02-10T07:31:38.000Z
|
2022-02-22T23:19:14.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
from conans.errors import ConanException
import os
import shutil
def sort_libs(correct_order, libs, lib_suffix='', reverse_result=False):
# Add suffix for correct string matching
    correct_order[:] = [s + lib_suffix for s in correct_order]
result = []
for expectedLib in correct_order:
for lib in libs:
if expectedLib == lib:
result.append(lib)
if reverse_result:
# Linking happens in reversed order
result.reverse()
return result
class CorradeConan(ConanFile):
name = "corrade"
version = "2020.06"
description = "Corrade is a multiplatform utility library written \
in C++11/C++14. It's used as a base for the Magnum \
graphics engine, among other things."
# topics can get used for searches, GitHub topics, Bintray tags etc. Add here keywords about the library
topics = ("conan", "corrad", "magnum", "filesystem", "console", "environment", "os")
url = "https://github.com/mosra/corrade"
homepage = "https://magnum.graphics/corrade"
author = "helmesjo <helmesjo@gmail.com>"
license = "MIT" # Indicates license type of the packaged library; please use SPDX Identifiers https://spdx.org/licenses/
exports = ["COPYING"]
exports_sources = ["CMakeLists.txt", "src/*", "package/conan/*", "modules/*"]
generators = "cmake"
short_paths = True # Some folders go out of the 260 chars path length scope (windows)
# Options may need to change depending on the packaged library.
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"build_deprecated": [True, False],
"with_interconnect": [True, False],
"with_pluginmanager": [True, False],
"with_rc": [True, False],
"with_testsuite": [True, False],
"with_utility": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"build_deprecated": True,
"with_interconnect": True,
"with_pluginmanager": True,
"with_rc": True,
"with_testsuite": True,
"with_utility": True,
}
_build_subfolder = "build_subfolder"
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
def configure(self):
if self.settings.compiler == 'Visual Studio' and int(self.settings.compiler.version.value) < 14:
raise ConanException("{} requires Visual Studio version 14 or greater".format(self.name))
def source(self):
# Wrap the original CMake file to call conan_basic_setup
shutil.move("CMakeLists.txt", "CMakeListsOriginal.txt")
shutil.move(os.path.join("package", "conan", "CMakeLists.txt"), "CMakeLists.txt")
def _configure_cmake(self):
cmake = CMake(self)
def add_cmake_option(option, value):
var_name = "{}".format(option).upper()
value_str = "{}".format(value)
var_value = "ON" if value_str == 'True' else "OFF" if value_str == 'False' else value_str
cmake.definitions[var_name] = var_value
for option, value in self.options.items():
add_cmake_option(option, value)
# Corrade uses suffix on the resulting 'lib'-folder when running cmake.install()
# Set it explicitly to empty, else Corrade might set it implicitly (eg. to "64")
add_cmake_option("LIB_SUFFIX", "")
add_cmake_option("BUILD_STATIC", not self.options.shared)
if self.settings.compiler == 'Visual Studio':
add_cmake_option("MSVC2015_COMPATIBILITY", int(self.settings.compiler.version.value) == 14)
add_cmake_option("MSVC2017_COMPATIBILITY", int(self.settings.compiler.version.value) == 15)
add_cmake_option("MSVC2019_COMPATIBILITY", int(self.settings.compiler.version.value) == 16)
cmake.configure(build_folder=self._build_subfolder)
return cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("COPYING", dst="licenses", src=".")
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
# See dependency order here: https://doc.magnum.graphics/magnum/custom-buildsystems.html
allLibs = [
#1
"CorradeUtility",
"CorradeContainers",
#2
"CorradeInterconnect",
"CorradePluginManager",
"CorradeTestSuite",
]
# Sort all built libs according to above, and reverse result for correct link order
suffix = '-d' if self.settings.build_type == "Debug" else ''
builtLibs = tools.collect_libs(self)
self.cpp_info.libs = sort_libs(correct_order=allLibs, libs=builtLibs, lib_suffix=suffix, reverse_result=True)
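# --- Illustrative sketch (editor addition). ---
# Shows what sort_libs() above does: keep libraries in the declared dependency order,
# match the debug suffix, and reverse for correct link order. Running this standalone
# still requires the 'conans' package to be importable because of the module-level import.
if __name__ == "__main__":
    order = ["CorradeUtility", "CorradeContainers", "CorradeTestSuite"]
    built = ["CorradeTestSuite-d", "CorradeUtility-d"]
    print(sort_libs(correct_order=order, libs=built, lib_suffix="-d", reverse_result=True))
    # -> ['CorradeTestSuite-d', 'CorradeUtility-d']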
| 37.432836
| 125
| 0.633174
|
e51a73eb96698ed4337ca4612db7b0c3a6bfa0c5
| 27,707
|
py
|
Python
|
elftools/elf/elffile.py
|
mephi42/pyelftools
|
fecd229201f21294f00ebfd06b33bfec232ddc75
|
[
"Unlicense"
] | null | null | null |
elftools/elf/elffile.py
|
mephi42/pyelftools
|
fecd229201f21294f00ebfd06b33bfec232ddc75
|
[
"Unlicense"
] | null | null | null |
elftools/elf/elffile.py
|
mephi42/pyelftools
|
fecd229201f21294f00ebfd06b33bfec232ddc75
|
[
"Unlicense"
] | null | null | null |
#-------------------------------------------------------------------------------
# elftools: elf/elffile.py
#
# ELFFile - main class for accessing ELF files
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
import io
import struct
import zlib
try:
import resource
PAGESIZE = resource.getpagesize()
except ImportError:
# Windows system
import mmap
PAGESIZE = mmap.PAGESIZE
from ..common.py3compat import BytesIO
from ..common.exceptions import ELFError
from ..common.utils import struct_parse, elf_assert
from .structs import ELFStructs
from .sections import (
Section, StringTableSection, SymbolTableSection,
SUNWSyminfoTableSection, NullSection, NoteSection,
StabSection, ARMAttributesSection)
from .dynamic import DynamicSection, DynamicSegment
from .relocation import RelocationSection, RelocationHandler
from .gnuversions import (
GNUVerNeedSection, GNUVerDefSection,
GNUVerSymSection)
from .segments import Segment, InterpSegment, NoteSegment
from ..dwarf.dwarfinfo import DWARFInfo, DebugSectionDescriptor, DwarfConfig
class ELFFile(object):
""" Creation: the constructor accepts a stream (file-like object) with the
contents of an ELF file.
Accessible attributes:
stream:
The stream holding the data of the file - must be a binary
stream (bytes, not string).
elfclass:
32 or 64 - specifies the word size of the target machine
little_endian:
boolean - specifies the target machine's endianness
elftype:
string or int, either known value of E_TYPE enum defining ELF
type (e.g. executable, dynamic library or core dump) or integral
unparsed value
header:
the complete ELF file header
e_ident_raw:
the raw e_ident field of the header
"""
def __init__(self, stream):
self.stream = stream
self._identify_file()
self.structs = ELFStructs(
little_endian=self.little_endian,
elfclass=self.elfclass)
self.structs.create_basic_structs()
self.header = self._parse_elf_header()
self.structs.create_advanced_structs(
self['e_type'],
self['e_machine'],
self['e_ident']['EI_OSABI'])
self.stream.seek(0)
self.e_ident_raw = self.stream.read(16)
self._file_stringtable_section = self._get_file_stringtable()
self._section_name_map = None
def num_sections(self):
""" Number of sections in the file
"""
return self['e_shnum']
def get_section(self, n):
""" Get the section at index #n from the file (Section object or a
subclass)
"""
section_header = self._get_section_header(n)
return self._make_section(section_header)
def get_section_by_name(self, name):
""" Get a section from the file, by name. Return None if no such
section exists.
"""
# The first time this method is called, construct a name to number
# mapping
#
if self._section_name_map is None:
self._section_name_map = {}
for i, sec in enumerate(self.iter_sections()):
self._section_name_map[sec.name] = i
secnum = self._section_name_map.get(name, None)
return None if secnum is None else self.get_section(secnum)
def iter_sections(self):
""" Yield all the sections in the file
"""
for i in range(self.num_sections()):
yield self.get_section(i)
def num_segments(self):
""" Number of segments in the file
"""
return self['e_phnum']
def get_segment(self, n):
""" Get the segment at index #n from the file (Segment object)
"""
segment_header = self._get_segment_header(n)
return self._make_segment(segment_header)
def iter_segments(self):
""" Yield all the segments in the file
"""
for i in range(self.num_segments()):
yield self.get_segment(i)
def address_offsets(self, start, size=1):
""" Yield a file offset for each ELF segment containing a memory region.
A memory region is defined by the range [start...start+size). The
offset of the region is yielded.
"""
end = start + size
for seg in self.iter_segments():
# consider LOAD only to prevent same address being yielded twice
if seg['p_type'] != 'PT_LOAD':
continue
if (start >= seg['p_vaddr'] and
end <= seg['p_vaddr'] + seg['p_filesz']):
yield start - seg['p_vaddr'] + seg['p_offset']
def has_dwarf_info(self):
""" Check whether this file appears to have debugging information.
We assume that if it has the .debug_info or .zdebug_info section, it
has all the other required sections as well.
"""
return (self.get_section_by_name('.debug_info') or
self.get_section_by_name('.zdebug_info') or
self.get_section_by_name('.eh_frame'))
def get_dwarf_info(self, relocate_dwarf_sections=None):
""" Return a DWARFInfo object representing the debugging information in
this file.
If relocate_dwarf_sections is True, relocations for DWARF sections
are looked up and applied.
"""
# Expect that has_dwarf_info was called, so at least .debug_info is
# present.
# Sections that aren't found will be passed as None to DWARFInfo.
section_names = ('.debug_info', '.debug_aranges', '.debug_abbrev',
'.debug_str', '.debug_line', '.debug_frame',
'.debug_loc', '.debug_ranges', '.debug_pubtypes',
'.debug_pubnames')
compressed = bool(self.get_section_by_name('.zdebug_info'))
if compressed:
section_names = tuple(map(lambda x: '.z' + x[1:], section_names))
# As it is loaded in the process image, .eh_frame cannot be compressed
section_names += ('.eh_frame', )
(debug_info_sec_name, debug_aranges_sec_name, debug_abbrev_sec_name,
debug_str_sec_name, debug_line_sec_name, debug_frame_sec_name,
debug_loc_sec_name, debug_ranges_sec_name, debug_pubtypes_name,
debug_pubnames_name, eh_frame_sec_name) = section_names
if relocate_dwarf_sections is None:
relocate_dwarf_sections = self['e_type'] == 'ET_REL'
debug_sections = {}
for secname in section_names:
section = self.get_section_by_name(secname)
if section is None:
debug_sections[secname] = None
else:
dwarf_section = self._read_dwarf_section(
section,
relocate_dwarf_sections)
if compressed and secname.startswith('.z'):
dwarf_section = self._decompress_dwarf_section(dwarf_section)
debug_sections[secname] = dwarf_section
return DWARFInfo(
config=DwarfConfig(
little_endian=self.little_endian,
default_address_size=self.elfclass // 8,
machine_arch=self.get_machine_arch()),
debug_info_sec=debug_sections[debug_info_sec_name],
debug_aranges_sec=debug_sections[debug_aranges_sec_name],
debug_abbrev_sec=debug_sections[debug_abbrev_sec_name],
debug_frame_sec=debug_sections[debug_frame_sec_name],
eh_frame_sec=debug_sections[eh_frame_sec_name],
debug_str_sec=debug_sections[debug_str_sec_name],
debug_loc_sec=debug_sections[debug_loc_sec_name],
debug_ranges_sec=debug_sections[debug_ranges_sec_name],
debug_line_sec=debug_sections[debug_line_sec_name],
debug_pubtypes_sec = debug_sections[debug_pubtypes_name],
debug_pubnames_sec = debug_sections[debug_pubnames_name]
)
def get_machine_arch(self):
""" Return the machine architecture, as detected from the ELF header.
"""
architectures = {
'EM_M32' : 'AT&T WE 32100',
'EM_SPARC' : 'SPARC',
'EM_386' : 'x86',
'EM_68K' : 'Motorola 68000',
'EM_88K' : 'Motorola 88000',
'EM_IAMCU' : 'Intel MCU',
'EM_860' : 'Intel 80860',
'EM_MIPS' : 'MIPS',
'EM_S370' : 'IBM System/370',
'EM_MIPS_RS3_LE' : 'MIPS RS3000 Little-endian',
'EM_PARISC' : 'Hewlett-Packard PA-RISC',
'EM_VPP500' : 'Fujitsu VPP500',
'EM_SPARC32PLUS' : 'Enhanced SPARC',
'EM_960' : 'Intel 80960',
'EM_PPC' : 'PowerPC',
'EM_PPC64' : '64-bit PowerPC',
'EM_S390' : 'IBM System/390',
'EM_SPU' : 'IBM SPU/SPC',
'EM_V800' : 'NEC V800',
'EM_FR20' : 'Fujitsu FR20',
'EM_RH32' : 'TRW RH-32',
'EM_RCE' : 'Motorola RCE',
'EM_ARM' : 'ARM',
'EM_ALPHA' : 'Digital Alpha',
'EM_SH' : 'Hitachi SH',
'EM_SPARCV9' : 'SPARC Version 9',
'EM_TRICORE' : 'Siemens TriCore embedded processor',
'EM_ARC' : 'Argonaut RISC Core, Argonaut Technologies Inc.',
'EM_H8_300' : 'Hitachi H8/300',
'EM_H8_300H' : 'Hitachi H8/300H',
'EM_H8S' : 'Hitachi H8S',
'EM_H8_500' : 'Hitachi H8/500',
'EM_IA_64' : 'Intel IA-64',
'EM_MIPS_X' : 'MIPS-X',
'EM_COLDFIRE' : 'Motorola ColdFire',
'EM_68HC12' : 'Motorola M68HC12',
'EM_MMA' : 'Fujitsu MMA',
'EM_PCP' : 'Siemens PCP',
'EM_NCPU' : 'Sony nCPU',
'EM_NDR1' : 'Denso NDR1',
'EM_STARCORE' : 'Motorola Star*Core',
'EM_ME16' : 'Toyota ME16',
'EM_ST100' : 'STMicroelectronics ST100',
'EM_TINYJ' : 'Advanced Logic TinyJ',
'EM_X86_64' : 'x64',
'EM_PDSP' : 'Sony DSP',
'EM_PDP10' : 'Digital Equipment PDP-10',
'EM_PDP11' : 'Digital Equipment PDP-11',
'EM_FX66' : 'Siemens FX66',
'EM_ST9PLUS' : 'STMicroelectronics ST9+ 8/16 bit',
'EM_ST7' : 'STMicroelectronics ST7 8-bit',
'EM_68HC16' : 'Motorola MC68HC16',
'EM_68HC11' : 'Motorola MC68HC11',
'EM_68HC08' : 'Motorola MC68HC08',
'EM_68HC05' : 'Motorola MC68HC05',
'EM_SVX' : 'Silicon Graphics SVx',
'EM_ST19' : 'STMicroelectronics ST19 8-bit',
'EM_VAX' : 'Digital VAX',
'EM_CRIS' : 'Axis Communications 32-bit',
'EM_JAVELIN' : 'Infineon Technologies 32-bit',
'EM_FIREPATH' : 'Element 14 64-bit DSP',
'EM_ZSP' : 'LSI Logic 16-bit DSP',
'EM_MMIX' : 'Donald Knuth\'s educational 64-bit',
'EM_HUANY' : 'Harvard University machine-independent object files',
'EM_PRISM' : 'SiTera Prism',
'EM_AVR' : 'Atmel AVR 8-bit',
'EM_FR30' : 'Fujitsu FR30',
'EM_D10V' : 'Mitsubishi D10V',
'EM_D30V' : 'Mitsubishi D30V',
'EM_V850' : 'NEC v850',
'EM_M32R' : 'Mitsubishi M32R',
'EM_MN10300' : 'Matsushita MN10300',
'EM_MN10200' : 'Matsushita MN10200',
'EM_PJ' : 'picoJava',
'EM_OPENRISC' : 'OpenRISC 32-bit',
'EM_ARC_COMPACT' : 'ARC International ARCompact',
'EM_XTENSA' : 'Tensilica Xtensa',
'EM_VIDEOCORE' : 'Alphamosaic VideoCore',
'EM_TMM_GPP' : 'Thompson Multimedia',
'EM_NS32K' : 'National Semiconductor 32000 series',
'EM_TPC' : 'Tenor Network TPC',
'EM_SNP1K' : 'Trebia SNP 1000',
'EM_ST200' : 'STMicroelectronics ST200',
'EM_IP2K' : 'Ubicom IP2xxx',
'EM_MAX' : 'MAX',
'EM_CR' : 'National Semiconductor CompactRISC',
'EM_F2MC16' : 'Fujitsu F2MC16',
'EM_MSP430' : 'Texas Instruments msp430',
'EM_BLACKFIN' : 'Analog Devices Blackfin',
'EM_SE_C33' : 'Seiko Epson S1C33',
'EM_SEP' : 'Sharp',
'EM_ARCA' : 'Arca RISC',
'EM_UNICORE' : 'PKU-Unity MPRC',
'EM_EXCESS' : 'eXcess',
'EM_DXP' : 'Icera Semiconductor Deep Execution Processor',
'EM_ALTERA_NIOS2' : 'Altera Nios II',
'EM_CRX' : 'National Semiconductor CompactRISC CRX',
'EM_XGATE' : 'Motorola XGATE',
'EM_C166' : 'Infineon C16x/XC16x',
'EM_M16C' : 'Renesas M16C',
'EM_DSPIC30F' : 'Microchip Technology dsPIC30F',
'EM_CE' : 'Freescale Communication Engine RISC core',
'EM_M32C' : 'Renesas M32C',
'EM_TSK3000' : 'Altium TSK3000',
'EM_RS08' : 'Freescale RS08',
'EM_SHARC' : 'Analog Devices SHARC',
'EM_ECOG2' : 'Cyan Technology eCOG2',
'EM_SCORE7' : 'Sunplus S+core7 RISC',
'EM_DSP24' : 'New Japan Radio (NJR) 24-bit DSP',
'EM_VIDEOCORE3' : 'Broadcom VideoCore III',
'EM_LATTICEMICO32' : 'Lattice FPGA RISC',
'EM_SE_C17' : 'Seiko Epson C17',
'EM_TI_C6000' : 'TI TMS320C6000',
'EM_TI_C2000' : 'TI TMS320C2000',
'EM_TI_C5500' : 'TI TMS320C55x',
'EM_TI_ARP32' : 'TI Application Specific RISC, 32bit',
'EM_TI_PRU' : 'TI Programmable Realtime Unit',
'EM_MMDSP_PLUS' : 'STMicroelectronics 64bit VLIW',
'EM_CYPRESS_M8C' : 'Cypress M8C',
'EM_R32C' : 'Renesas R32C',
'EM_TRIMEDIA' : 'NXP Semiconductors TriMedia',
'EM_QDSP6' : 'QUALCOMM DSP6',
'EM_8051' : 'Intel 8051',
'EM_STXP7X' : 'STMicroelectronics STxP7x',
'EM_NDS32' : 'Andes Technology RISC',
'EM_ECOG1' : 'Cyan Technology eCOG1X',
'EM_ECOG1X' : 'Cyan Technology eCOG1X',
'EM_MAXQ30' : 'Dallas Semiconductor MAXQ30',
'EM_XIMO16' : 'New Japan Radio (NJR) 16-bit',
'EM_MANIK' : 'M2000 Reconfigurable RISC',
'EM_CRAYNV2' : 'Cray Inc. NV2',
'EM_RX' : 'Renesas RX',
'EM_METAG' : 'Imagination Technologies META',
'EM_MCST_ELBRUS' : 'MCST Elbrus',
'EM_ECOG16' : 'Cyan Technology eCOG16',
'EM_CR16' : 'National Semiconductor CompactRISC CR16 16-bit',
'EM_ETPU' : 'Freescale',
'EM_SLE9X' : 'Infineon Technologies SLE9X',
'EM_L10M' : 'Intel L10M',
'EM_K10M' : 'Intel K10M',
'EM_AARCH64' : 'AArch64',
'EM_AVR32' : 'Atmel 32-bit',
            'EM_STM8'          : 'STMicroelectronics STM8 8-bit',
'EM_TILE64' : 'Tilera TILE64',
'EM_TILEPRO' : 'Tilera TILEPro',
'EM_MICROBLAZE' : 'Xilinx MicroBlaze 32-bit RISC',
'EM_CUDA' : 'NVIDIA CUDA',
'EM_TILEGX' : 'Tilera TILE-Gx',
'EM_CLOUDSHIELD' : 'CloudShield',
'EM_COREA_1ST' : 'KIPO-KAIST Core-A 1st generation',
'EM_COREA_2ND' : 'KIPO-KAIST Core-A 2nd generation',
'EM_ARC_COMPACT2' : 'Synopsys ARCompact V2',
'EM_OPEN8' : 'Open8 8-bit RISC',
'EM_RL78' : 'Renesas RL78',
'EM_VIDEOCORE5' : 'Broadcom VideoCore V',
'EM_78KOR' : 'Renesas 78KOR',
'EM_56800EX' : 'Freescale 56800EX',
'EM_BA1' : 'Beyond BA1',
'EM_BA2' : 'Beyond BA2',
'EM_XCORE' : 'XMOS xCORE',
'EM_MCHP_PIC' : 'Microchip 8-bit PIC',
'EM_INTEL205' : 'Reserved by Intel',
'EM_INTEL206' : 'Reserved by Intel',
'EM_INTEL207' : 'Reserved by Intel',
'EM_INTEL208' : 'Reserved by Intel',
'EM_INTEL209' : 'Reserved by Intel',
'EM_KM32' : 'KM211 KM32 32-bit',
'EM_KMX32' : 'KM211 KMX32 32-bit',
'EM_KMX16' : 'KM211 KMX16 16-bit',
'EM_KMX8' : 'KM211 KMX8 8-bit',
'EM_KVARC' : 'KM211 KVARC',
'EM_CDP' : 'Paneve CDP',
'EM_COGE' : 'Cognitive',
'EM_COOL' : 'Bluechip Systems CoolEngine',
'EM_NORC' : 'Nanoradio Optimized RISC',
'EM_CSR_KALIMBA' : 'CSR Kalimba',
'EM_Z80' : 'Zilog Z80',
'EM_VISIUM' : 'VISIUMcore',
'EM_FT32' : 'FTDI Chip FT32 32-bit RISC',
'EM_MOXIE' : 'Moxie',
'EM_AMDGPU' : 'AMD GPU',
'EM_RISCV' : 'RISC-V'
}
return architectures.get(self['e_machine'], '<unknown>')
#-------------------------------- PRIVATE --------------------------------#
def __getitem__(self, name):
""" Implement dict-like access to header entries
"""
return self.header[name]
def _identify_file(self):
""" Verify the ELF file and identify its class and endianness.
"""
# Note: this code reads the stream directly, without using ELFStructs,
# since we don't yet know its exact format. ELF was designed to be
# read like this - its e_ident field is word-size and endian agnostic.
self.stream.seek(0)
magic = self.stream.read(4)
elf_assert(magic == b'\x7fELF', 'Magic number does not match')
ei_class = self.stream.read(1)
if ei_class == b'\x01':
self.elfclass = 32
elif ei_class == b'\x02':
self.elfclass = 64
else:
raise ELFError('Invalid EI_CLASS %s' % repr(ei_class))
ei_data = self.stream.read(1)
if ei_data == b'\x01':
self.little_endian = True
elif ei_data == b'\x02':
self.little_endian = False
else:
raise ELFError('Invalid EI_DATA %s' % repr(ei_data))
def _section_offset(self, n):
""" Compute the offset of section #n in the file
"""
return self['e_shoff'] + n * self['e_shentsize']
def _segment_offset(self, n):
""" Compute the offset of segment #n in the file
"""
return self['e_phoff'] + n * self['e_phentsize']
def _make_segment(self, segment_header):
""" Create a Segment object of the appropriate type
"""
segtype = segment_header['p_type']
if segtype == 'PT_INTERP':
return InterpSegment(segment_header, self.stream)
elif segtype == 'PT_DYNAMIC':
return DynamicSegment(segment_header, self.stream, self)
elif segtype == 'PT_NOTE':
return NoteSegment(segment_header, self.stream, self)
else:
return Segment(segment_header, self.stream)
def _get_section_header(self, n):
""" Find the header of section #n, parse it and return the struct
"""
return struct_parse(
self.structs.Elf_Shdr,
self.stream,
stream_pos=self._section_offset(n))
def _get_section_name(self, section_header):
""" Given a section header, find this section's name in the file's
string table
"""
name_offset = section_header['sh_name']
return self._file_stringtable_section.get_string(name_offset)
def _make_section(self, section_header):
""" Create a section object of the appropriate type
"""
name = self._get_section_name(section_header)
sectype = section_header['sh_type']
if sectype == 'SHT_STRTAB':
return StringTableSection(section_header, name, self)
elif sectype == 'SHT_NULL':
return NullSection(section_header, name, self)
elif sectype in ('SHT_SYMTAB', 'SHT_DYNSYM', 'SHT_SUNW_LDYNSYM'):
return self._make_symbol_table_section(section_header, name)
elif sectype == 'SHT_SUNW_syminfo':
return self._make_sunwsyminfo_table_section(section_header, name)
elif sectype == 'SHT_GNU_verneed':
return self._make_gnu_verneed_section(section_header, name)
elif sectype == 'SHT_GNU_verdef':
return self._make_gnu_verdef_section(section_header, name)
elif sectype == 'SHT_GNU_versym':
return self._make_gnu_versym_section(section_header, name)
elif sectype in ('SHT_REL', 'SHT_RELA'):
return RelocationSection(section_header, name, self)
elif sectype == 'SHT_DYNAMIC':
return DynamicSection(section_header, name, self)
elif sectype == 'SHT_NOTE':
return NoteSection(section_header, name, self)
elif sectype == 'SHT_PROGBITS' and name == '.stab':
return StabSection(section_header, name, self)
elif sectype == 'SHT_ARM_ATTRIBUTES':
return ARMAttributesSection(section_header, name, self)
else:
return Section(section_header, name, self)
def _make_symbol_table_section(self, section_header, name):
""" Create a SymbolTableSection
"""
linked_strtab_index = section_header['sh_link']
strtab_section = self.get_section(linked_strtab_index)
return SymbolTableSection(
section_header, name,
elffile=self,
stringtable=strtab_section)
def _make_sunwsyminfo_table_section(self, section_header, name):
""" Create a SUNWSyminfoTableSection
"""
linked_strtab_index = section_header['sh_link']
strtab_section = self.get_section(linked_strtab_index)
return SUNWSyminfoTableSection(
section_header, name,
elffile=self,
symboltable=strtab_section)
def _make_gnu_verneed_section(self, section_header, name):
""" Create a GNUVerNeedSection
"""
linked_strtab_index = section_header['sh_link']
strtab_section = self.get_section(linked_strtab_index)
return GNUVerNeedSection(
section_header, name,
elffile=self,
stringtable=strtab_section)
def _make_gnu_verdef_section(self, section_header, name):
""" Create a GNUVerDefSection
"""
linked_strtab_index = section_header['sh_link']
strtab_section = self.get_section(linked_strtab_index)
return GNUVerDefSection(
section_header, name,
elffile=self,
stringtable=strtab_section)
def _make_gnu_versym_section(self, section_header, name):
""" Create a GNUVerSymSection
"""
linked_strtab_index = section_header['sh_link']
strtab_section = self.get_section(linked_strtab_index)
return GNUVerSymSection(
section_header, name,
elffile=self,
symboltable=strtab_section)
def _get_segment_header(self, n):
""" Find the header of segment #n, parse it and return the struct
"""
return struct_parse(
self.structs.Elf_Phdr,
self.stream,
stream_pos=self._segment_offset(n))
def _get_file_stringtable(self):
""" Find the file's string table section
"""
stringtable_section_num = self['e_shstrndx']
return StringTableSection(
header=self._get_section_header(stringtable_section_num),
name='',
elffile=self)
def _parse_elf_header(self):
""" Parses the ELF file header and assigns the result to attributes
of this object.
"""
return struct_parse(self.structs.Elf_Ehdr, self.stream, stream_pos=0)
def _read_dwarf_section(self, section, relocate_dwarf_sections):
""" Read the contents of a DWARF section from the stream and return a
DebugSectionDescriptor. Apply relocations if asked to.
"""
# The section data is read into a new stream, for processing
section_stream = BytesIO()
section_stream.write(section.data())
if relocate_dwarf_sections:
reloc_handler = RelocationHandler(self)
reloc_section = reloc_handler.find_relocations_for_section(section)
if reloc_section is not None:
reloc_handler.apply_section_relocations(
section_stream, reloc_section)
return DebugSectionDescriptor(
stream=section_stream,
name=section.name,
global_offset=section['sh_offset'],
size=section['sh_size'],
address=section['sh_addr'])
@staticmethod
def _decompress_dwarf_section(section):
""" Returns the uncompressed contents of the provided DWARF section.
"""
# TODO: support other compression formats from readelf.c
assert section.size > 12, 'Unsupported compression format.'
section.stream.seek(0)
# According to readelf.c the content should contain "ZLIB"
# followed by the uncompressed section size - 8 bytes in
# big-endian order
compression_type = section.stream.read(4)
assert compression_type == b'ZLIB', \
'Invalid compression type: %r' % (compression_type)
uncompressed_size = struct.unpack('>Q', section.stream.read(8))[0]
decompressor = zlib.decompressobj()
uncompressed_stream = BytesIO()
while True:
chunk = section.stream.read(PAGESIZE)
if not chunk:
break
uncompressed_stream.write(decompressor.decompress(chunk))
uncompressed_stream.write(decompressor.flush())
uncompressed_stream.seek(0, io.SEEK_END)
size = uncompressed_stream.tell()
assert uncompressed_size == size, \
'Wrong uncompressed size: expected %r, but got %r' % (
uncompressed_size, size,
)
return section._replace(stream=uncompressed_stream, size=size)
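# --- Illustrative usage sketch (editor addition). ---
# Typical read-only use of the ELFFile class defined above; '/bin/ls' is only a
# placeholder for any ELF binary available on the local machine.
if __name__ == '__main__':
    with open('/bin/ls', 'rb') as f:
        elf = ELFFile(f)
        print('%d-bit ELF, machine: %s' % (elf.elfclass, elf.get_machine_arch()))
        for section in elf.iter_sections():
            print(' ', section.name)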
| 43.157321
| 87
| 0.554481
|
6e0ab5250f1489b26551cf81efd7c0a17efc2f79
| 8,684
|
py
|
Python
|
src/command_modules/azure-cli-keyvault/azure/cli/command_modules/keyvault/commands.py
|
saurabsa/azure-cli-old
|
f77477a98c9aa9cb55daf5b0d2f410d1455a9225
|
[
"MIT"
] | null | null | null |
src/command_modules/azure-cli-keyvault/azure/cli/command_modules/keyvault/commands.py
|
saurabsa/azure-cli-old
|
f77477a98c9aa9cb55daf5b0d2f410d1455a9225
|
[
"MIT"
] | null | null | null |
src/command_modules/azure-cli-keyvault/azure/cli/command_modules/keyvault/commands.py
|
saurabsa/azure-cli-old
|
f77477a98c9aa9cb55daf5b0d2f410d1455a9225
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands import cli_command
from azure.cli.core.commands.arm import cli_generic_update_command
from azure.cli.core._util import empty_on_404
from ._client_factory import keyvault_client_factory
from ._command_type import cli_keyvault_data_plane_command
convenience_path = 'azure.keyvault.key_vault_client#{}'
base_client_path = 'azure.keyvault.generated.key_vault_client#{}'
custom_path = 'azure.cli.command_modules.keyvault.custom#{}'
mgmt_path = 'azure.mgmt.keyvault.operations.vaults_operations#{}'
factory = lambda args: keyvault_client_factory(**args).vaults
cli_command(__name__, 'keyvault create', custom_path.format('create_keyvault'), factory)
cli_command(__name__, 'keyvault list', custom_path.format('list_keyvault'), factory)
cli_command(__name__, 'keyvault show', mgmt_path.format('VaultsOperations.get'), factory, exception_handler=empty_on_404)
cli_command(__name__, 'keyvault delete', mgmt_path.format('VaultsOperations.delete'), factory)
cli_command(__name__, 'keyvault set-policy', custom_path.format('set_policy'), factory)
cli_command(__name__, 'keyvault delete-policy', custom_path.format('delete_policy'), factory)
cli_generic_update_command(__name__,
'keyvault update',
mgmt_path.format('VaultsOperations.get'),
custom_path.format('update_keyvault_setter'),
lambda: keyvault_client_factory().vaults,
custom_function_op=custom_path.format('update_keyvault'))
# Data Plane Commands
cli_keyvault_data_plane_command('keyvault key list', convenience_path.format('KeyVaultClient.get_keys'))
cli_keyvault_data_plane_command('keyvault key list-versions', convenience_path.format('KeyVaultClient.get_key_versions'))
cli_keyvault_data_plane_command('keyvault key create', custom_path.format('create_key'))
cli_keyvault_data_plane_command('keyvault key set-attributes', base_client_path.format('KeyVaultClient.update_key'))
cli_keyvault_data_plane_command('keyvault key show', base_client_path.format('KeyVaultClient.get_key'))
cli_keyvault_data_plane_command('keyvault key delete', convenience_path.format('KeyVaultClient.delete_key'))
cli_keyvault_data_plane_command('keyvault key backup', custom_path.format('backup_key'))
cli_keyvault_data_plane_command('keyvault key restore', custom_path.format('restore_key'))
cli_keyvault_data_plane_command('keyvault key import', custom_path.format('import_key'))
cli_keyvault_data_plane_command('keyvault secret list', convenience_path.format('KeyVaultClient.get_secrets'))
cli_keyvault_data_plane_command('keyvault secret list-versions', convenience_path.format('KeyVaultClient.get_secret_versions'))
cli_keyvault_data_plane_command('keyvault secret set', convenience_path.format('KeyVaultClient.set_secret'))
cli_keyvault_data_plane_command('keyvault secret set-attributes', base_client_path.format('KeyVaultClient.update_secret'))
cli_keyvault_data_plane_command('keyvault secret show', base_client_path.format('KeyVaultClient.get_secret'))
cli_keyvault_data_plane_command('keyvault secret delete', convenience_path.format('KeyVaultClient.delete_secret'))
cli_keyvault_data_plane_command('keyvault secret download', custom_path.format('download_secret'))
cli_keyvault_data_plane_command('keyvault certificate create', custom_path.format('create_certificate'))
cli_keyvault_data_plane_command('keyvault certificate list', convenience_path.format('KeyVaultClient.get_certificates'))
cli_keyvault_data_plane_command('keyvault certificate list-versions', convenience_path.format('KeyVaultClient.get_certificate_versions'))
cli_keyvault_data_plane_command('keyvault certificate show', base_client_path.format('KeyVaultClient.get_certificate'))
cli_keyvault_data_plane_command('keyvault certificate delete', convenience_path.format('KeyVaultClient.delete_certificate'))
cli_keyvault_data_plane_command('keyvault certificate set-attributes', base_client_path.format('KeyVaultClient.update_certificate'))
cli_keyvault_data_plane_command('keyvault certificate import', convenience_path.format('KeyVaultClient.import_certificate'))
cli_keyvault_data_plane_command('keyvault certificate download', custom_path.format('download_certificate'))
cli_keyvault_data_plane_command('keyvault certificate pending merge', convenience_path.format('KeyVaultClient.merge_certificate'))
cli_keyvault_data_plane_command('keyvault certificate pending show', convenience_path.format('KeyVaultClient.get_certificate_operation'))
cli_keyvault_data_plane_command('keyvault certificate pending delete', convenience_path.format('KeyVaultClient.delete_certificate_operation'))
cli_keyvault_data_plane_command('keyvault certificate contact list', convenience_path.format('KeyVaultClient.get_certificate_contacts'))
cli_keyvault_data_plane_command('keyvault certificate contact add', custom_path.format('add_certificate_contact'))
cli_keyvault_data_plane_command('keyvault certificate contact delete', custom_path.format('delete_certificate_contact'))
cli_keyvault_data_plane_command('keyvault certificate issuer update', custom_path.format('update_certificate_issuer'))
cli_keyvault_data_plane_command('keyvault certificate issuer list', convenience_path.format('KeyVaultClient.get_certificate_issuers'))
cli_keyvault_data_plane_command('keyvault certificate issuer create', custom_path.format('create_certificate_issuer'))
cli_keyvault_data_plane_command('keyvault certificate issuer show', convenience_path.format('KeyVaultClient.get_certificate_issuer'))
cli_keyvault_data_plane_command('keyvault certificate issuer delete', convenience_path.format('KeyVaultClient.delete_certificate_issuer'))
cli_keyvault_data_plane_command('keyvault certificate issuer admin list', custom_path.format('list_certificate_issuer_admins'))
cli_keyvault_data_plane_command('keyvault certificate issuer admin add', custom_path.format('add_certificate_issuer_admin'))
cli_keyvault_data_plane_command('keyvault certificate issuer admin delete', custom_path.format('delete_certificate_issuer_admin'))
# default policy document
cli_keyvault_data_plane_command('keyvault certificate get-default-policy', custom_path.format('get_default_policy'))
| 80.407407
| 142
| 0.826693
|
6036494215225011be7a6a30a27381b9abed886b
| 2,406
|
py
|
Python
|
utils/colors.py
|
KenmogneThimotee/ObjectDetection
|
b6261640cd3c083c7c8197a18c3684d88f921377
|
[
"MIT"
] | null | null | null |
utils/colors.py
|
KenmogneThimotee/ObjectDetection
|
b6261640cd3c083c7c8197a18c3684d88f921377
|
[
"MIT"
] | null | null | null |
utils/colors.py
|
KenmogneThimotee/ObjectDetection
|
b6261640cd3c083c7c8197a18c3684d88f921377
|
[
"MIT"
] | null | null | null |
colors = [
[31 , 0 , 255] ,
[0 , 159 , 255] ,
[255 , 95 , 0] ,
[255 , 19 , 0] ,
[255 , 0 , 0] ,
[255 , 38 , 0] ,
[0 , 255 , 25] ,
[255 , 0 , 133] ,
[255 , 172 , 0] ,
[108 , 0 , 255] ,
[0 , 82 , 255] ,
[0 , 255 , 6] ,
[255 , 0 , 152] ,
[223 , 0 , 255] ,
[12 , 0 , 255] ,
[0 , 255 , 178] ,
[108 , 255 , 0] ,
[184 , 0 , 255] ,
[255 , 0 , 76] ,
[146 , 255 , 0] ,
[51 , 0 , 255] ,
[0 , 197 , 255] ,
[255 , 248 , 0] ,
[255 , 0 , 19] ,
[255 , 0 , 38] ,
[89 , 255 , 0] ,
[127 , 255 , 0] ,
[255 , 153 , 0] ,
[0 , 255 , 255] ,
[0 , 255 , 216] ,
[0 , 255 , 121] ,
[255 , 0 , 248] ,
[70 , 0 , 255] ,
[0 , 255 , 159] ,
[0 , 216 , 255] ,
[0 , 6 , 255] ,
[0 , 63 , 255] ,
[31 , 255 , 0] ,
[255 , 57 , 0] ,
[255 , 0 , 210] ,
[0 , 255 , 102] ,
[242 , 255 , 0] ,
[255 , 191 , 0] ,
[0 , 255 , 63] ,
[255 , 0 , 95] ,
[146 , 0 , 255] ,
[184 , 255 , 0] ,
[255 , 114 , 0] ,
[0 , 255 , 235] ,
[255 , 229 , 0] ,
[0 , 178 , 255] ,
[255 , 0 , 114] ,
[255 , 0 , 57] ,
[0 , 140 , 255] ,
[0 , 121 , 255] ,
[12 , 255 , 0] ,
[255 , 210 , 0] ,
[0 , 255 , 44] ,
[165 , 255 , 0] ,
[0 , 25 , 255] ,
[0 , 255 , 140] ,
[0 , 101 , 255] ,
[0 , 255 , 82] ,
[223 , 255 , 0] ,
[242 , 0 , 255] ,
[89 , 0 , 255] ,
[165 , 0 , 255] ,
[70 , 255 , 0] ,
[255 , 0 , 172] ,
[255 , 76 , 0] ,
[203 , 255 , 0] ,
[204 , 0 , 255] ,
[255 , 0 , 229] ,
[255 , 133 , 0] ,
[127 , 0 , 255] ,
[0 , 235 , 255] ,
[0 , 255 , 197] ,
[255 , 0 , 191] ,
[0 , 44 , 255] ,
[50 , 255 , 0]
]
def get_color(label):
""" Return a color from a set of predefined colors. Contains 80 colors in total.
code originally from https://github.com/fizyr/keras-retinanet/
Args
label: The label to get the color for.
Returns
        A list of three values representing an RGB color.
"""
if label < len(colors):
return colors[label]
else:
print('Label {} has no color, returning default.'.format(label))
return (0, 255, 0)
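# --- Illustrative usage sketch (editor addition). ---
# Maps class indices to display colors; indices beyond the 80-entry palette fall
# back to the default green.
if __name__ == '__main__':
    for label in (0, 1, 79, 80):
        print(label, get_color(label))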
| 24.30303
| 84
| 0.347049
|
905ce664c3319edecc2f2e46ae55a9be39a84bfe
| 73
|
py
|
Python
|
src/onegov/core/custom/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/core/custom/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/core/custom/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.core.custom import custom_json as json
__all__ = ('json', )
| 18.25
| 50
| 0.739726
|
6bd3fc5c0b1df635bc3dac3b85b1174ea0f5e089
| 26,732
|
py
|
Python
|
flopy/modflow/mffhb.py
|
jtwhite79/flopy
|
41302901e4db38455c28a68153b49a8466da3027
|
[
"CC0-1.0",
"BSD-3-Clause"
] | 1
|
2021-02-23T22:55:04.000Z
|
2021-02-23T22:55:04.000Z
|
flopy/modflow/mffhb.py
|
jtwhite79/flopy
|
41302901e4db38455c28a68153b49a8466da3027
|
[
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null |
flopy/modflow/mffhb.py
|
jtwhite79/flopy
|
41302901e4db38455c28a68153b49a8466da3027
|
[
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null |
"""
mffhb module. Contains the ModflowFhb class. Note that the user can access
the ModflowFhb class as `flopy.modflow.ModflowFhb`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?fhb.htm>`_.
"""
import sys
import numpy as np
from ..pakbase import Package
from ..utils.recarray_utils import create_empty_recarray
class ModflowFhb(Package):
"""
MODFLOW Flow and Head Boundary Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.ModflowFhb`) to
which this package will be added.
nbdtim : int
The number of times at which flow and head will be specified for all
selected cells. (default is 1)
nflw : int
Number of cells at which flows will be specified. (default is 0)
nhed: int
Number of cells at which heads will be specified. (default is 0)
ifhbss : int
FHB steady-state option flag. If the simulation includes any
transient-state stress periods, the flag is read but not used; in
this case, specified-flow, specified-head, and auxiliary-variable
values will be interpolated for steady-state stress periods in the
same way that values are interpolated for transient stress periods.
If the simulation includes only steady-state stress periods, the flag
controls how flow, head, and auxiliary-variable values will be
computed for each steady-state solution. (default is 0)
ipakcb : int
A flag that is used to determine if cell-by-cell budget data should be
saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
(default is None).
nfhbx1 : int
Number of auxiliary variables whose values will be computed for each
time step for each specified-flow cell. Auxiliary variables are
currently not supported. (default is 0)
nfhbx2 : int
Number of auxiliary variables whose values will be computed for each
time step for each specified-head cell. Auxiliary variables are
currently not supported. (default is 0)
ifhbpt : int
Flag for printing values of data list. Applies to datasets 4b, 5b, 6b,
7b, and 8b. If ifhbpt > 0, datasets read at the beginning of the
simulation will be printed. Otherwise, the datasets will not be
printed. (default is 0).
bdtimecnstm : float
A constant multiplier for data list bdtime. (default is 1.0)
bdtime : float or list of floats
Simulation time at which values of specified flow and (or) values of
specified head will be read. nbdtim values are required.
(default is 0.0)
cnstm5 : float
A constant multiplier for data list flwrat. (default is 1.0)
ds5 : list or numpy array or recarray
Each FHB flwrat cell (dataset 5) is defined through definition of
layer(int), row(int), column(int), iaux(int), flwrat[nbdtime](float).
There should be nflw entries. (default is None)
The simplest form is a list of lists with the FHB flow boundaries.
This gives the form of::
ds5 =
[
            [lay, row, col, iaux, flwrat1, flwrat2, ..., flwrat(nbdtime)],
            [lay, row, col, iaux, flwrat1, flwrat2, ..., flwrat(nbdtime)],
            [lay, row, col, iaux, flwrat1, flwrat2, ..., flwrat(nbdtime)],
            [lay, row, col, iaux, flwrat1, flwrat2, ..., flwrat(nbdtime)]
]
cnstm7 : float
A constant multiplier for data list sbhedt. (default is 1.0)
ds7 : list or numpy array or recarray
Each FHB sbhed cell (dataset 7) is defined through definition of
layer(int), row(int), column(int), iaux(int), sbhed[nbdtime](float).
There should be nhed entries. (default is None)
The simplest form is a list of lists with the FHB flow boundaries.
This gives the form of::
ds7 =
[
[lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)],
[lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)],
[lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)],
[lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)]
]
extension : string
Filename extension (default is 'fhb')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the cbc output name will be created using
the model name and .cbc extension (for example, modflowtest.cbc),
        if ipakcb is a number greater than zero. If a single string is passed
        the package will be set to the string and cbc output names will be
        created using the model name and .cbc extension, if ipakcb is a
number greater than zero. To define the names for all package files
(input and output) the length of the list of strings should be 2.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Parameters are not supported in FloPy.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> fhb = flopy.modflow.ModflowFhb(m)
"""
def __init__(
self,
model,
nbdtim=1,
nflw=0,
nhed=0,
ifhbss=0,
ipakcb=None,
nfhbx1=0,
nfhbx2=0,
ifhbpt=0,
bdtimecnstm=1.0,
bdtime=[0.0],
cnstm5=1.0,
ds5=None,
cnstm7=1.0,
ds7=None,
extension="fhb",
unitnumber=None,
filenames=None,
):
        # set the default unit number if one is not specified
if unitnumber is None:
unitnumber = ModflowFhb._defaultunit()
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
filenames.append(None)
# update external file information with cbc output, if necessary
if ipakcb is not None:
fname = filenames[1]
model.add_output_file(
ipakcb, fname=fname, package=ModflowFhb._ftype()
)
else:
ipakcb = 0
# Fill namefile items
name = [ModflowFhb._ftype()]
units = [unitnumber]
extra = [""]
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(
self,
model,
extension=extension,
name=name,
unit_number=units,
extra=extra,
filenames=fname,
)
self.heading = (
"# {} package for ".format(self.name[0])
+ " {}, ".format(model.version_types[model.version])
+ "generated by Flopy."
)
self.url = "flow_and_head_boundary_packag2.htm"
self.nbdtim = nbdtim
self.nflw = nflw
self.nhed = nhed
self.ifhbss = ifhbss
self.ipakcb = ipakcb
if nfhbx1 != 0:
nfhbx1 = 0
self.nfhbx1 = nfhbx1
if nfhbx2 != 0:
nfhbx2 = 0
self.nfhbx2 = nfhbx2
self.ifhbpt = ifhbpt
self.bdtimecnstm = bdtimecnstm
if isinstance(bdtime, float):
bdtime = [bdtime]
self.bdtime = bdtime
self.cnstm5 = cnstm5
self.cnstm7 = cnstm7
# check the type of dataset 5
if ds5 is not None:
dtype = ModflowFhb.get_default_dtype(
nbdtim=nbdtim, head=False, structured=model.structured
)
if isinstance(ds5, (float, int, str)):
msg = "dataset 5 must be a list of lists or a numpy array"
raise TypeError(msg)
elif isinstance(ds5, list):
ds5 = np.array(ds5)
# convert numpy array to a recarray
if ds5.dtype != dtype:
ds5 = np.core.records.fromarrays(ds5.transpose(), dtype=dtype)
# assign dataset 5
self.ds5 = ds5
# check the type of dataset 7
if ds7 is not None:
dtype = ModflowFhb.get_default_dtype(
nbdtim=nbdtim, head=True, structured=model.structured
)
if isinstance(ds7, (float, int, str)):
msg = "dataset 7 must be a list of lists or a numpy array"
raise TypeError(msg)
elif isinstance(ds7, list):
ds7 = np.array(ds7)
# convert numpy array to a recarray
if ds7.dtype != dtype:
ds7 = np.core.records.fromarrays(ds7.transpose(), dtype=dtype)
# assign dataset 7
self.ds7 = ds7
# perform some simple verification
if len(self.bdtime) != self.nbdtim:
raise ValueError(
"bdtime has {} entries but requires "
"{} entries.".format(len(self.bdtime), self.nbdtim)
)
if self.nflw > 0:
if self.ds5 is None:
raise TypeError(
"dataset 5 is not specified but "
"nflw > 0 ({})".format(self.nflw)
)
if self.ds5.shape[0] != self.nflw:
raise ValueError(
"dataset 5 has {} rows but requires "
"{} rows.".format(self.ds5.shape[0], self.nflw)
)
nc = self.nbdtim
if model.structured:
nc += 4
else:
nc += 2
if len(self.ds5.dtype.names) != nc:
raise ValueError(
"dataset 5 has {} columns but requires "
"{} columns.".format(len(self.ds5.dtype.names), nc)
)
if self.nhed > 0:
if self.ds7 is None:
raise TypeError(
"dataset 7 is not specified but "
"nhed > 0 ({})".format(self.nhed)
)
if self.ds7.shape[0] != self.nhed:
raise ValueError(
"dataset 7 has {} rows but requires "
"{} rows.".format(self.ds7.shape[0], self.nhed)
)
nc = self.nbdtim
if model.structured:
nc += 4
else:
nc += 2
if len(self.ds7.dtype.names) != nc:
raise ValueError(
"dataset 7 has {} columns but requires "
"{} columns.".format(len(self.ds7.dtype.names), nc)
)
self.parent.add_package(self)
@staticmethod
def get_empty(ncells=0, nbdtim=1, structured=True, head=False):
# get an empty recarray that corresponds to dtype
dtype = ModflowFhb.get_default_dtype(
nbdtim=nbdtim, structured=structured, head=head
)
return create_empty_recarray(ncells, dtype, default_value=-1.0e10)
@staticmethod
def get_default_dtype(nbdtim=1, structured=True, head=False):
if structured:
dtype = [("k", int), ("i", int), ("j", int)]
else:
dtype = [("node", int)]
dtype.append(("iaux", int))
for n in range(nbdtim):
if head:
name = "sbhed{}".format(n + 1)
else:
name = "flwrat{}".format(n + 1)
dtype.append((name, np.float32))
return np.dtype(dtype)
def _ncells(self):
"""Maximum number of cells that have fhb (developed for MT3DMS
SSM package).
Returns
-------
ncells: int
maximum number of fhb cells
"""
return self.nflw + self.nhed
def write_file(self):
"""
Write the package file.
Returns
-------
None
"""
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
f = open(self.fn_path, "w")
# f.write('{0:s}\n'.format(self.heading))
# Data set 1
f.write("{} ".format(self.nbdtim))
f.write("{} ".format(self.nflw))
f.write("{} ".format(self.nhed))
f.write("{} ".format(self.ifhbss))
f.write("{} ".format(self.ipakcb))
f.write("{} ".format(self.nfhbx1))
f.write("{}\n".format(self.nfhbx2))
# Dataset 2 - flow auxiliary names
# Dataset 3 - head auxiliary names
# Dataset 4a IFHBUN CNSTM IFHBPT
f.write("{} ".format(self.unit_number[0]))
f.write("{} ".format(self.bdtimecnstm))
f.write("{}\n".format(self.ifhbpt))
# Dataset 4b
for n in range(self.nbdtim):
f.write("{} ".format(self.bdtime[n]))
f.write("\n")
# Dataset 5 and 6
if self.nflw > 0:
# Dataset 5a IFHBUN CNSTM IFHBPT
f.write("{} ".format(self.unit_number[0]))
f.write("{} ".format(self.cnstm5))
f.write("{}\n".format(self.ifhbpt))
# Dataset 5b
for n in range(self.nflw):
for name in self.ds5.dtype.names:
v = self.ds5[n][name]
if name in ["k", "i", "j", "node"]:
v += 1
f.write("{} ".format(v))
f.write("\n")
# Dataset 6a and 6b - flow auxiliary data
if self.nfhbx1 > 0:
i = 0
# Dataset 7
if self.nhed > 0:
# Dataset 7a IFHBUN CNSTM IFHBPT
f.write("{} ".format(self.unit_number[0]))
f.write("{} ".format(self.cnstm7))
f.write("{}\n".format(self.ifhbpt))
# Dataset 7b IFHBUN CNSTM IFHBPT
for n in range(self.nhed):
for name in self.ds7.dtype.names:
v = self.ds7[n][name]
if name in ["k", "i", "j", "node"]:
v += 1
f.write("{} ".format(v))
f.write("\n")
# Dataset 8a and 8b - head auxiliary data
if self.nfhbx2 > 0:
                # writing of head auxiliary data (datasets 8a and 8b) is not implemented
                i = 1
f.close()
@classmethod
def load(cls, f, model, nper=None, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
nper : int
The number of stress periods. If nper is None, then nper will be
obtained from the model object. (default is None).
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
fhb : ModflowFhb object
ModflowFhb object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> fhb = flopy.modflow.ModflowFhb.load('test.fhb', m)
"""
if model.verbose:
sys.stdout.write("loading fhb package file...\n")
openfile = not hasattr(f, "read")
if openfile:
filename = f
f = open(filename, "r")
# determine package unit number
iufhb = None
if ext_unit_dict is not None:
iufhb, fname = model.get_ext_dict_attr(
ext_unit_dict, filetype=ModflowFhb._ftype()
)
# Dataset 0 -- header
while True:
line = f.readline()
if line[0] != "#":
break
# dataset 1
if model.verbose:
sys.stdout.write("loading fhb dataset 1\n")
raw = line.strip().split()
nbdtim = int(raw[0])
nflw = int(raw[1])
nhed = int(raw[2])
ifhbss = int(raw[3])
ipakcb = int(raw[4])
nfhbx1 = int(raw[5])
nfhbx2 = int(raw[6])
ifhbpt = 0
# Dataset 2
flow_aux = []
if nfhbx1 > 0:
if model.verbose:
sys.stdout.write("loading fhb dataset 2\n")
sys.stdout.write(
"dataset 2 will not be preserved in the created hfb object.\n"
)
for idx in range(nfhbx1):
line = f.readline()
raw = line.strip().split()
varnam = raw[0]
if len(varnam) > 16:
varnam = varnam[0:16]
weight = float(raw[1])
flow_aux.append([varnam, weight])
# Dataset 3
head_aux = []
if nfhbx2 > 0:
if model.verbose:
sys.stdout.write("loading fhb dataset 3\n")
sys.stdout.write(
"dataset 3 will not be preserved in the created hfb object.\n"
)
for idx in range(nfhbx2):
line = f.readline()
raw = line.strip().split()
varnam = raw[0]
if len(varnam) > 16:
varnam = varnam[0:16]
weight = float(raw[1])
head_aux.append([varnam, weight])
# Dataset 4a IFHBUN CNSTM IFHBPT
if model.verbose:
sys.stdout.write("loading fhb dataset 4a\n")
line = f.readline()
raw = line.strip().split()
ifhbun = int(raw[0])
if ifhbun != iufhb:
raise ValueError(
"fhb dataset 4a must be in the fhb file (unit={}) "
"fhb data is specified in unit={}".format(iufhb, ifhbun)
)
bdtimecnstm = float(raw[1])
ifhbpt = max(ifhbpt, int(raw[2]))
# Dataset 4b
if model.verbose:
sys.stdout.write("loading fhb dataset 4b\n")
line = f.readline()
raw = line.strip().split()
bdtime = []
for n in range(nbdtim):
bdtime.append(float(raw[n]))
# Dataset 5 and 6
cnstm5 = None
ds5 = None
cnstm6 = None
ds6 = None
if nflw > 0:
if model.verbose:
sys.stdout.write("loading fhb dataset 5a\n")
# Dataset 5a IFHBUN CNSTM IFHBPT
line = f.readline()
raw = line.strip().split()
ifhbun = int(raw[0])
if ifhbun != iufhb:
raise ValueError(
"fhb dataset 5a must be in the fhb file (unit={}) "
"fhb data is specified in unit={}".format(iufhb, ifhbun)
)
cnstm5 = float(raw[1])
ifhbpt = max(ifhbpt, int(raw[2]))
if model.verbose:
sys.stdout.write("loading fhb dataset 5b\n")
dtype = ModflowFhb.get_default_dtype(
nbdtim=nbdtim, head=False, structured=model.structured
)
ds5 = ModflowFhb.get_empty(
ncells=nflw,
nbdtim=nbdtim,
head=False,
structured=model.structured,
)
for n in range(nflw):
line = f.readline()
raw = line.strip().split()
ds5[n] = tuple(raw[: len(dtype.names)])
if model.structured:
ds5["k"] -= 1
ds5["i"] -= 1
ds5["j"] -= 1
else:
ds5["node"] -= 1
# Dataset 6
if nfhbx1 > 0:
cnstm6 = []
ds6 = []
dtype = []
for name, weight in flow_aux:
dtype.append((name, np.float32))
for naux in range(nfhbx1):
if model.verbose:
sys.stdout.write(
"loading fhb dataset 6a - aux "
"{}\n".format(naux + 1)
)
sys.stdout.write(
"dataset 6a will not be preserved in "
"the created hfb object.\n"
)
# Dataset 6a IFHBUN CNSTM IFHBPT
line = f.readline()
raw = line.strip().split()
ifhbun = int(raw[0])
if ifhbun != iufhb:
raise ValueError(
"fhb dataset 6a must be in the fhb file (unit={}) "
"fhb data is specified in "
"unit={}".format(iufhb, ifhbun)
)
cnstm6.append(float(raw[1]))
ifhbpt = max(ifhbpt, int(raw[2]))
if model.verbose:
sys.stdout.write(
"loading fhb dataset 6b - aux "
"{}\n".format(naux + 1)
)
sys.stdout.write(
"dataset 6b will not be preserved in "
"the created hfb object.\n"
)
current = np.recarray(nflw, dtype=dtype)
for n in range(nflw):
line = f.readline()
raw = line.strip().split()
current[n] = tuple(raw[: len(dtype.names)])
ds6.append(current.copy())
# Dataset 7
cnstm7 = None
ds7 = None
cnstm8 = None
ds8 = None
if nhed > 0:
if model.verbose:
sys.stdout.write("loading fhb dataset 7a\n")
# Dataset 7a IFHBUN CNSTM IFHBPT
line = f.readline()
raw = line.strip().split()
ifhbun = int(raw[0])
if ifhbun != iufhb:
raise ValueError(
"fhb dataset 7a must be in the fhb file (unit={}) "
"fhb data is specified in unit={}".format(iufhb, ifhbun)
)
cnstm7 = float(raw[1])
ifhbpt = max(ifhbpt, int(raw[2]))
if model.verbose:
sys.stdout.write("loading fhb dataset 7b\n")
dtype = ModflowFhb.get_default_dtype(
nbdtim=nbdtim, head=True, structured=model.structured
)
ds7 = ModflowFhb.get_empty(
ncells=nhed,
nbdtim=nbdtim,
head=True,
structured=model.structured,
)
for n in range(nhed):
line = f.readline()
raw = line.strip().split()
ds7[n] = tuple(raw[: len(dtype.names)])
if model.structured:
ds7["k"] -= 1
ds7["i"] -= 1
ds7["j"] -= 1
else:
ds7["node"] -= 1
# Dataset 8
if nfhbx2 > 0:
cnstm8 = []
ds8 = []
dtype = []
for name, weight in head_aux:
dtype.append((name, np.float32))
                for naux in range(nfhbx2):
if model.verbose:
sys.stdout.write(
"loading fhb dataset 8a - aux "
"{}\n".format(naux + 1)
)
sys.stdout.write(
"dataset 8a will not be preserved in "
"the created hfb object.\n"
)
                    # Dataset 8a IFHBUN CNSTM IFHBPT
line = f.readline()
raw = line.strip().split()
ifhbun = int(raw[0])
if ifhbun != iufhb:
raise ValueError(
"fhb dataset 8a must be in the fhb file (unit={}) "
"fhb data is specified in "
"unit={}".format(iufhb, ifhbun)
)
cnstm8.append(float(raw[1]))
ifhbpt6 = int(raw[2])
ifhbpt = max(ifhbpt, ifhbpt6)
if model.verbose:
sys.stdout.write(
"loading fhb dataset 8b - aux "
"{}\n".format(naux + 1)
)
sys.stdout.write(
"dataset 8b will not be preserved in "
"the created hfb object."
)
                    current = np.recarray(nhed, dtype=dtype)
for n in range(nhed):
line = f.readline()
raw = line.strip().split()
current[n] = tuple(raw[: len(dtype.names)])
ds8.append(current.copy())
if openfile:
f.close()
# determine specified unit number
unitnumber = None
filenames = [None, None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = model.get_ext_dict_attr(
ext_unit_dict, filetype=ModflowFhb._ftype()
)
if ipakcb > 0:
iu, filenames[1] = model.get_ext_dict_attr(
ext_unit_dict, unit=ipakcb
)
model.add_pop_key_list(ipakcb)
# auxiliary data are not passed to load instantiation
nfhbx1 = 0
nfhbx2 = 0
fhb = cls(
model,
nbdtim=nbdtim,
nflw=nflw,
nhed=nhed,
ifhbss=ifhbss,
ipakcb=ipakcb,
nfhbx1=nfhbx1,
nfhbx2=nfhbx2,
ifhbpt=ifhbpt,
bdtimecnstm=bdtimecnstm,
bdtime=bdtime,
cnstm5=cnstm5,
ds5=ds5,
cnstm7=cnstm7,
ds7=ds7,
unitnumber=unitnumber,
filenames=filenames,
)
# return fhb object
return fhb
@staticmethod
def _ftype():
return "FHB"
@staticmethod
def _defaultunit():
return 40
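

# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It shows one way the
# class above is typically driven: build a small structured model, fill
# dataset 5 with a single specified-flow cell for two boundary times, and
# write the package. Model dimensions and values are illustrative assumptions;
# the keyword names mirror those passed to the constructor by load() above.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    import flopy

    m = flopy.modflow.Modflow(modelname="fhb_example")
    dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=3, ncol=3, nper=1)

    # one flow cell (layer 1, row 2, column 2) with a rate for each of the
    # two boundary times
    ds5 = flopy.modflow.ModflowFhb.get_empty(
        ncells=1, nbdtim=2, structured=True, head=False
    )
    ds5[0] = (0, 1, 1, 0, 100.0, 50.0)  # k, i, j, iaux, flwrat1, flwrat2

    fhb = flopy.modflow.ModflowFhb(
        m, nbdtim=2, nflw=1, bdtime=[0.0, 10.0], cnstm5=1.0, ds5=ds5
    )
    fhb.write_file()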
| 34.096939 | 82 | 0.493603 |

f3defd2cff36622eb542e808f794d181b835ea84 | 1,868 | py | Python | scanner/util/script/poc_apache_proxy_connect.py | Shinpachi8/webscanner | 52aa377f0ee903a04ed19ea07433d0718697833d | ["Apache-2.0"] | 5 | 2018-04-27T12:34:08.000Z | 2020-11-09T10:47:02.000Z | scanner/util/script/poc_apache_proxy_connect.py | Shinpachi8/webscanner | 52aa377f0ee903a04ed19ea07433d0718697833d | ["Apache-2.0"] | null | null | null | scanner/util/script/poc_apache_proxy_connect.py | Shinpachi8/webscanner | 52aa377f0ee903a04ed19ea07433d0718697833d | ["Apache-2.0"] | 3 | 2019-04-08T19:58:20.000Z | 2020-04-07T15:35:07.000Z |
#!/usr/bin/env python
# coding=utf-8
import socket
import urlparse
class ApacheProxyConnect(object):
'''
    this class aims to detect whether the Apache proxy CONNECT feature is enabled
    logic adapted from the AWVS decoded script: apache_proxy_connect_enable.script
'''
def __init__(self, url):
self.url = self.normal_url(url)
self.parsed_url = urlparse.urlparse(self.url)
self.msg = {'vuln_name': 'apache proxy connect',
'url': self.url,
'proof': 'www.acunetix.wvs:443',
'severity': 'medium'}
def normal_url(self, url):
if not url.startswith('http:') and not url.startswith('https:'):
url = 'http://' + url
return url
def verify(self):
payload = "GET " + '/' + "@" + 'www.acunetix.wvs' + ":" + '443' + "/" + '/' + " HTTP/1.1\r\n"
# get port
if ':' in self.parsed_url.netloc:
netloc, port = self.parsed_url.netloc.split(':')
else:
port = '80'
netloc = self.parsed_url.netloc
payload += "Host: " + netloc + "\r\n\r\n"
# socket connect
remoteserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remoteserver.settimeout(10)
try:
remoteserver.connect((netloc, int(port)))
            remoteserver.send(payload)
response = remoteserver.recv(4096)
except:
response = ''
finally:
remoteserver.close()
if response.find('The proxy server could not handle the request <em><a href="www.acunetix.wvs:443">') > -1:
return self.msg
else:
return None
def verify(ip, port=80, name='', timeout=10, types='ip'):
if types == 'ip':
url = "{}:{}".format(ip, port)
else:
url = ip
result = ApacheProxyConnect(url).verify()
return result
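

# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): how the module-level
# verify() helper would typically be called. The target address below is a
# placeholder from the RFC 5737 documentation range, not a real host.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    result = verify('192.0.2.10', port=8080, types='ip')
    if result:
        print(result)
    else:
        print('target does not appear to forward arbitrary proxy requests')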
| 27.470588 | 115 | 0.551392 |

455f2ca33fd2be62f20f15e5ea0fdf75ca8f6d9d | 6,491 | py | Python | dvc/fs/http.py | daavoo/dvc | 04349a50eb638f3684e486cdf72bcb1c1e25613d | ["Apache-2.0"] | null | null | null | dvc/fs/http.py | daavoo/dvc | 04349a50eb638f3684e486cdf72bcb1c1e25613d | ["Apache-2.0"] | 41 | 2021-06-02T11:13:03.000Z | 2022-03-31T07:15:36.000Z | dvc/fs/http.py | daavoo/dvc | 04349a50eb638f3684e486cdf72bcb1c1e25613d | ["Apache-2.0"] | null | null | null |
import logging
import os.path
import threading
from typing import Optional
from funcy import cached_property, memoize, wrap_prop, wrap_with
from dvc import prompt
from dvc.exceptions import DvcException, HTTPError
from dvc.path_info import HTTPURLInfo
from dvc.progress import Tqdm
from dvc.scheme import Schemes
from .base import BaseFileSystem
logger = logging.getLogger(__name__)
@wrap_with(threading.Lock())
@memoize
def ask_password(host, user):
return prompt.password(
"Enter a password for "
"host '{host}' user '{user}'".format(host=host, user=user)
)
class HTTPFileSystem(BaseFileSystem): # pylint:disable=abstract-method
scheme = Schemes.HTTP
PATH_CLS = HTTPURLInfo
PARAM_CHECKSUM = "etag"
CAN_TRAVERSE = False
SESSION_RETRIES = 5
SESSION_BACKOFF_FACTOR = 0.1
REQUEST_TIMEOUT = 60
CHUNK_SIZE = 2 ** 16
def __init__(self, **config):
super().__init__(**config)
url = config.get("url")
if url:
self.path_info = self.PATH_CLS(url)
self.user = config.get("user", None)
self.host = self.path_info.host
else:
self.path_info = None
self.auth = config.get("auth", None)
self.custom_auth_header = config.get("custom_auth_header", None)
self.password = config.get("password", None)
self.ask_password = config.get("ask_password", False)
self.headers = {}
self.ssl_verify = config.get("ssl_verify", True)
self.method = config.get("method", "POST")
def _auth_method(self):
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
if self.auth:
if self.ask_password and self.password is None:
self.password = ask_password(self.host, self.user)
if self.auth == "basic":
return HTTPBasicAuth(self.user, self.password)
if self.auth == "digest":
return HTTPDigestAuth(self.user, self.password)
if self.auth == "custom" and self.custom_auth_header:
self.headers.update({self.custom_auth_header: self.password})
return None
def _generate_download_url(self, path_info):
return path_info.url
@wrap_prop(threading.Lock())
@cached_property
def _session(self):
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
session = requests.Session()
session.verify = self.ssl_verify
retries = Retry(
total=self.SESSION_RETRIES,
backoff_factor=self.SESSION_BACKOFF_FACTOR,
)
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))
return session
def request(self, method, url, **kwargs):
import requests
kwargs.setdefault("allow_redirects", True)
kwargs.setdefault("timeout", self.REQUEST_TIMEOUT)
try:
res = self._session.request(
method,
url,
auth=self._auth_method(),
headers=self.headers,
**kwargs,
)
redirect_no_location = (
kwargs["allow_redirects"]
and res.status_code in (301, 302)
and "location" not in res.headers
)
if redirect_no_location:
# AWS s3 doesn't like to add a location header to its redirects
# from https://s3.amazonaws.com/<bucket name>/* type URLs.
# This should be treated as an error
raise requests.exceptions.RequestException
return res
except requests.exceptions.RequestException:
raise DvcException(f"could not perform a {method} request")
def _head(self, url):
response = self.request("HEAD", url)
if response.ok:
return response
# Sometimes servers are configured to forbid HEAD requests
# Context: https://github.com/iterative/dvc/issues/4131
with self.request("GET", url, stream=True) as r:
if r.ok:
return r
return response
def exists(self, path_info) -> bool:
res = self._head(path_info.url)
if res.status_code == 404:
return False
if bool(res):
return True
raise HTTPError(res.status_code, res.reason)
def info(self, path_info):
resp = self._head(path_info.url)
etag = resp.headers.get("ETag") or resp.headers.get("Content-MD5")
size = self._content_length(resp)
return {"etag": etag, "size": size}
def _upload_fobj(self, fobj, to_info):
def chunks(fobj):
while True:
chunk = fobj.read(self.CHUNK_SIZE)
if not chunk:
break
yield chunk
response = self.request(self.method, to_info.url, data=chunks(fobj))
if response.status_code not in (200, 201):
raise HTTPError(response.status_code, response.reason)
def _download(self, from_info, to_file, name=None, no_progress_bar=False):
response = self.request("GET", from_info.url, stream=True)
if response.status_code != 200:
raise HTTPError(response.status_code, response.reason)
with open(to_file, "wb") as fd:
with Tqdm.wrapattr(
fd,
"write",
total=None
if no_progress_bar
else self._content_length(response),
leave=False,
desc=from_info.url if name is None else name,
disable=no_progress_bar,
) as fd_wrapped:
for chunk in response.iter_content(chunk_size=self.CHUNK_SIZE):
fd_wrapped.write(chunk)
def _upload(
self, from_file, to_info, name=None, no_progress_bar=False, **_kwargs
):
with open(from_file, "rb") as fobj:
self.upload_fobj(
fobj,
to_info,
no_progress_bar=no_progress_bar,
desc=name or to_info.url,
total=None if no_progress_bar else os.path.getsize(from_file),
)
@staticmethod
def _content_length(response) -> Optional[int]:
res = response.headers.get("Content-Length")
return int(res) if res else None
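

# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It exercises the
# pieces defined above -- an existence check, metadata lookup, and a download.
# The URL is a placeholder, and the keyword names mirror the config keys read
# in __init__.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    fs = HTTPFileSystem(url="https://example.com/files", ssl_verify=True)
    remote = fs.PATH_CLS("https://example.com/files/data.csv")

    if fs.exists(remote):
        print(fs.info(remote))  # e.g. {"etag": "...", "size": 1234}
        fs._download(remote, "data.csv", name="data.csv")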
| 32.293532 | 79 | 0.598213 |

a84e0120ffd209d29964e3d65aacdb2b76e27f75 | 129,093 | py | Python | src/elarian/utils/generated/messaging_model_pb2.py | ElarianLtd/python-sdk | f603688dffba4b46c5a9f208a75b3dc3d75ed565 | ["MIT"] | 4 | 2021-05-27T23:15:21.000Z | 2021-12-29T11:40:02.000Z | src/elarian/utils/generated/messaging_model_pb2.py | ElarianLtd/python-sdk | f603688dffba4b46c5a9f208a75b3dc3d75ed565 | ["MIT"] | 2 | 2021-05-20T05:51:02.000Z | 2021-07-13T11:25:54.000Z | src/elarian/utils/generated/messaging_model_pb2.py | ElarianLtd/python-sdk | f603688dffba4b46c5a9f208a75b3dc3d75ed565 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: messaging_model.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
import elarian.utils.generated.common_model_pb2 as common__model__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='messaging_model.proto',
package='com.elarian.hera.proto',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x15messaging_model.proto\x12\x16\x63om.elarian.hera.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x12\x63ommon_model.proto\"c\n\x16MessagingChannelNumber\x12\x39\n\x07\x63hannel\x18\x01 \x01(\x0e\x32(.com.elarian.hera.proto.MessagingChannel\x12\x0e\n\x06number\x18\x02 \x01(\t\"Q\n\x10MediaMessageBody\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x30\n\x05media\x18\x02 \x01(\x0e\x32!.com.elarian.hera.proto.MediaType\"\x96\x01\n\x13LocationMessageBody\x12\x10\n\x08latitude\x18\x01 \x01(\x01\x12\x11\n\tlongitude\x18\x02 \x01(\x01\x12+\n\x05label\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07\x61\x64\x64ress\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"m\n\x10\x45mailMessageBody\x12\x0f\n\x07subject\x18\x01 \x01(\t\x12\x12\n\nbody_plain\x18\x02 \x01(\t\x12\x11\n\tbody_html\x18\x03 \x01(\t\x12\x0f\n\x07\x63\x63_list\x18\x04 \x03(\t\x12\x10\n\x08\x62\x63\x63_list\x18\x05 \x03(\t\"\x99\x01\n\x13TemplateMessageBody\x12\n\n\x02id\x18\x01 \x01(\t\x12G\n\x06params\x18\x02 \x03(\x0b\x32\x37.com.elarian.hera.proto.TemplateMessageBody.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"j\n\rSayCallAction\x12\x0c\n\x04text\x18\x01 \x01(\t\x12\x38\n\x05voice\x18\x02 \x01(\x0e\x32).com.elarian.hera.proto.TextToSpeechVoice\x12\x11\n\tplay_beep\x18\x03 \x01(\x08\"\x1d\n\x0ePlayCallAction\x12\x0b\n\x03url\x18\x01 \x01(\t\"\x9f\x02\n\x13GetDigitsCallAction\x12\x34\n\x03say\x18\x01 \x01(\x0b\x32%.com.elarian.hera.proto.SayCallActionH\x00\x12\x36\n\x04play\x18\x02 \x01(\x0b\x32&.com.elarian.hera.proto.PlayCallActionH\x00\x12*\n\x07timeout\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x33\n\rfinish_on_key\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\nnum_digits\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int32ValueB\x08\n\x06prompt\"\xc9\x02\n\x16GetRecordingCallAction\x12\x34\n\x03say\x18\x01 \x01(\x0b\x32%.com.elarian.hera.proto.SayCallActionH\x00\x12\x36\n\x04play\x18\x02 \x01(\x0b\x32&.com.elarian.hera.proto.PlayCallActionH\x00\x12*\n\x07timeout\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12-\n\nmax_length\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x33\n\rfinish_on_key\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x11\n\tplay_beep\x18\x06 \x01(\x08\x12\x14\n\x0ctrim_silence\x18\x07 \x01(\x08\x42\x08\n\x06prompt\"\x19\n\x17RecordSessionCallAction\"\x8f\x02\n\x0e\x44ialCallAction\x12@\n\x10\x63ustomer_numbers\x18\x01 \x03(\x0b\x32&.com.elarian.hera.proto.CustomerNumber\x12\x0e\n\x06record\x18\x02 \x01(\x08\x12\x12\n\nsequential\x18\x03 \x01(\x08\x12\x33\n\rringback_tone\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\tcaller_id\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0cmax_duration\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\"w\n\x11\x45nqueueCallAction\x12\x30\n\nhold_music\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nqueue_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x9d\x01\n\x11\x44\x65queueCallAction\x12\x46\n\x0e\x63hannel_number\x18\x01 \x01(\x0b\x32..com.elarian.hera.proto.MessagingChannelNumber\x12\x0e\n\x06record\x18\x02 \x01(\x08\x12\x30\n\nqueue_name\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\x12\n\x10RejectCallAction\"!\n\x12RedirectCallAction\x12\x0b\n\x03url\x18\x01 \x01(\t\"\x8f\x05\n\x0fVoiceCallAction\x12\x34\n\x03say\x18\x01 
\x01(\x0b\x32%.com.elarian.hera.proto.SayCallActionH\x00\x12\x36\n\x04play\x18\x02 \x01(\x0b\x32&.com.elarian.hera.proto.PlayCallActionH\x00\x12\x41\n\nget_digits\x18\x03 \x01(\x0b\x32+.com.elarian.hera.proto.GetDigitsCallActionH\x00\x12\x36\n\x04\x64ial\x18\x04 \x01(\x0b\x32&.com.elarian.hera.proto.DialCallActionH\x00\x12I\n\x0erecord_session\x18\x05 \x01(\x0b\x32/.com.elarian.hera.proto.RecordSessionCallActionH\x00\x12G\n\rget_recording\x18\x06 \x01(\x0b\x32..com.elarian.hera.proto.GetRecordingCallActionH\x00\x12<\n\x07\x65nqueue\x18\x07 \x01(\x0b\x32).com.elarian.hera.proto.EnqueueCallActionH\x00\x12<\n\x07\x64\x65queue\x18\x08 \x01(\x0b\x32).com.elarian.hera.proto.DequeueCallActionH\x00\x12:\n\x06reject\x18\t \x01(\x0b\x32(.com.elarian.hera.proto.RejectCallActionH\x00\x12>\n\x08redirect\x18\n \x01(\x0b\x32*.com.elarian.hera.proto.RedirectCallActionH\x00\x42\x07\n\x05\x65ntry\"X\n\x1cVoiceCallDialplanMessageBody\x12\x38\n\x07\x61\x63tions\x18\x01 \x03(\x0b\x32\'.com.elarian.hera.proto.VoiceCallAction\"\x8d\x01\n\x12VoiceCallDialInput\x12\x1a\n\x12\x64\x65stination_number\x18\x01 \x01(\t\x12.\n\nstarted_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x08\x64uration\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xa1\x02\n\x13VoiceCallQueueInput\x12/\n\x0b\x65nqueued_at\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65queued_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x38\n\x12\x64\x65queued_to_number\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x15\x64\x65queued_to_sessionId\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0equeue_duration\x18\x05 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xf3\x03\n\x19VoiceCallInputMessageBody\x12\x41\n\tdirection\x18\x01 \x01(\x0e\x32..com.elarian.hera.proto.CustomerEventDirection\x12\x37\n\x06status\x18\x02 \x01(\x0e\x32\'.com.elarian.hera.proto.VoiceCallStatus\x12.\n\nstarted_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x0changup_cause\x18\x04 \x01(\x0e\x32,.com.elarian.hera.proto.VoiceCallHangupCause\x12\x31\n\x0b\x64tmf_digits\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rrecording_url\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12=\n\tdial_data\x18\x07 \x01(\x0b\x32*.com.elarian.hera.proto.VoiceCallDialInput\x12?\n\nqueue_data\x18\x08 \x01(\x0b\x32+.com.elarian.hera.proto.VoiceCallQueueInput\"}\n\x14UssdInputMessageBody\x12\x39\n\x06status\x18\x01 \x01(\x0e\x32).com.elarian.hera.proto.UssdSessionStatus\x12*\n\x04text\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"8\n\x13UssdMenuMessageBody\x12\x0c\n\x04text\x18\x01 \x01(\t\x12\x13\n\x0bis_terminal\x18\x02 \x01(\x08\"\xb9\x03\n\x13OutboundMessageBody\x12\x0e\n\x04text\x18\x01 \x01(\tH\x00\x12\x39\n\x05media\x18\x02 \x01(\x0b\x32(.com.elarian.hera.proto.MediaMessageBodyH\x00\x12?\n\x08location\x18\x03 \x01(\x0b\x32+.com.elarian.hera.proto.LocationMessageBodyH\x00\x12\x39\n\x05\x65mail\x18\x04 \x01(\x0b\x32(.com.elarian.hera.proto.EmailMessageBodyH\x00\x12?\n\x08template\x18\x05 \x01(\x0b\x32+.com.elarian.hera.proto.TemplateMessageBodyH\x00\x12\r\n\x03url\x18\x06 \x01(\tH\x00\x12\x45\n\x05voice\x18\x07 \x01(\x0b\x32\x34.com.elarian.hera.proto.VoiceCallDialplanMessageBodyH\x00\x12;\n\x04ussd\x18\x08 \x01(\x0b\x32+.com.elarian.hera.proto.UssdMenuMessageBodyH\x00\x42\x07\n\x05\x65ntry\"o\n\x19PromptMessageMenuItemBody\x12\x0e\n\x04text\x18\x02 \x01(\tH\x00\x12\x39\n\x05media\x18\x03 
\x01(\x0b\x32(.com.elarian.hera.proto.MediaMessageBodyH\x00\x42\x07\n\x05\x65ntry\"\xe6\x02\n\x12InboundMessageBody\x12\x0e\n\x04text\x18\x01 \x01(\tH\x00\x12\x39\n\x05media\x18\x02 \x01(\x0b\x32(.com.elarian.hera.proto.MediaMessageBodyH\x00\x12?\n\x08location\x18\x03 \x01(\x0b\x32+.com.elarian.hera.proto.LocationMessageBodyH\x00\x12\x39\n\x05\x65mail\x18\x04 \x01(\x0b\x32(.com.elarian.hera.proto.EmailMessageBodyH\x00\x12\x42\n\x05voice\x18\x05 \x01(\x0b\x32\x31.com.elarian.hera.proto.VoiceCallInputMessageBodyH\x00\x12<\n\x04ussd\x18\x06 \x01(\x0b\x32,.com.elarian.hera.proto.UssdInputMessageBodyH\x00\x42\x07\n\x05\x65ntry\"\x9f\x01\n\x1aOutboundMessageReplyPrompt\x12@\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32\x30.com.elarian.hera.proto.PromptMessageReplyAction\x12?\n\x04menu\x18\x02 \x03(\x0b\x32\x31.com.elarian.hera.proto.PromptMessageMenuItemBody\"\x8d\x02\n\x0fOutboundMessage\x12\x39\n\x04\x62ody\x18\x01 \x01(\x0b\x32+.com.elarian.hera.proto.OutboundMessageBody\x12\x0e\n\x06labels\x18\x02 \x03(\t\x12\x32\n\x0cprovider_tag\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0breply_token\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12H\n\x0creply_prompt\x18\x05 \x01(\x0b\x32\x32.com.elarian.hera.proto.OutboundMessageReplyPrompt*\x8a\x02\n\x10MessagingChannel\x12!\n\x1dMESSAGING_CHANNEL_UNSPECIFIED\x10\x00\x12\x19\n\x15MESSAGING_CHANNEL_SMS\x10\x01\x12\x1b\n\x17MESSAGING_CHANNEL_VOICE\x10\x02\x12\x1a\n\x16MESSAGING_CHANNEL_USSD\x10\x03\x12\"\n\x1eMESSAGING_CHANNEL_FB_MESSENGER\x10\x04\x12\x1e\n\x1aMESSAGING_CHANNEL_TELEGRAM\x10\x05\x12\x1e\n\x1aMESSAGING_CHANNEL_WHATSAPP\x10\x06\x12\x1b\n\x17MESSAGING_CHANNEL_EMAIL\x10\x07*\x8a\x01\n\x16MessagingConsentUpdate\x12(\n$MESSAGING_CONSENT_UPDATE_UNSPECIFIED\x10\x00\x12\"\n\x1eMESSAGING_CONSENT_UPDATE_ALLOW\x10\x01\x12\"\n\x1eMESSAGING_CONSENT_UPDATE_BLOCK\x10\x02*\xe1\x02\n\x1cMessagingConsentUpdateStatus\x12/\n+MESSAGING_CONSENT_UPDATE_STATUS_UNSPECIFIED\x10\x00\x12*\n&MESSAGING_CONSENT_UPDATE_STATUS_QUEUED\x10\x64\x12.\n)MESSAGING_CONSENT_UPDATE_STATUS_COMPLETED\x10\xac\x02\x12;\n6MESSAGING_CONSENT_UPDATE_STATUS_INVALID_CHANNEL_NUMBER\x10\x91\x03\x12?\n:MESSAGING_CONSENT_UPDATE_STATUS_DECOMMISSIONED_CUSTOMER_ID\x10\x92\x03\x12\x36\n1MESSAGING_CONSENT_UPDATE_STATUS_APPLICATION_ERROR\x10\xf5\x03*\xd4\x01\n\x19MessagingSessionEndReason\x12,\n(MESSAGING_SESSION_END_REASON_UNSPECIFIED\x10\x00\x12\x30\n,MESSAGING_SESSION_END_REASON_NORMAL_CLEARING\x10\x64\x12,\n\'MESSAGING_SESSION_END_REASON_INACTIVITY\x10\xc8\x01\x12)\n$MESSAGING_SESSION_END_REASON_FAILURE\x10\xac\x02*\x97\x01\n\x0fMessageReaction\x12 \n\x1cMESSAGE_REACTION_UNSPECIFIED\x10\x00\x12\x1c\n\x18MESSAGE_REACTION_CLICKED\x10\x64\x12\"\n\x1dMESSAGE_REACTION_UNSUBSCRIBED\x10\xc8\x01\x12 \n\x1bMESSAGE_REACTION_COMPLAINED\x10\xc9\x01*\x91\x02\n\x18PromptMessageReplyAction\x12+\n\'PROMPT_MESSAGE_REPLY_ACTION_UNSPECIFIED\x10\x00\x12$\n PROMPT_MESSAGE_REPLY_ACTION_TEXT\x10\x01\x12,\n(PROMPT_MESSAGE_REPLY_ACTION_PHONE_NUMBER\x10\x02\x12%\n!PROMPT_MESSAGE_REPLY_ACTION_EMAIL\x10\x03\x12(\n$PROMPT_MESSAGE_REPLY_ACTION_LOCATION\x10\x04\x12#\n\x1fPROMPT_MESSAGE_REPLY_ACTION_URL\x10\x05*\xb8\t\n\x15MessageDeliveryStatus\x12\'\n#MESSAGE_DELIVERY_STATUS_UNSPECIFIED\x10\x00\x12\"\n\x1eMESSAGE_DELIVERY_STATUS_QUEUED\x10\x64\x12 \n\x1cMESSAGE_DELIVERY_STATUS_SENT\x10\x65\x12&\n!MESSAGE_DELIVERY_STATUS_DELIVERED\x10\xac\x02\x12!\n\x1cMESSAGE_DELIVERY_STATUS_READ\x10\xad\x02\x12%\n 
MESSAGE_DELIVERY_STATUS_RECEIVED\x10\xae\x02\x12.\n)MESSAGE_DELIVERY_STATUS_SESSION_INITIATED\x10\xaf\x02\x12#\n\x1eMESSAGE_DELIVERY_STATUS_FAILED\x10\x90\x03\x12\'\n\"MESSAGE_DELIVERY_STATUS_NO_CONSENT\x10\x91\x03\x12*\n%MESSAGE_DELIVERY_STATUS_NO_CAPABILITY\x10\x92\x03\x12$\n\x1fMESSAGE_DELIVERY_STATUS_EXPIRED\x10\x93\x03\x12\x33\n.MESSAGE_DELIVERY_STATUS_NO_SESSION_IN_PROGRESS\x10\x94\x03\x12\x36\n1MESSAGE_DELIVERY_STATUS_OTHER_SESSION_IN_PROGRESS\x10\x95\x03\x12\x30\n+MESSAGE_DELIVERY_STATUS_INVALID_REPLY_TOKEN\x10\x96\x03\x12\x33\n.MESSAGE_DELIVERY_STATUS_INVALID_CHANNEL_NUMBER\x10\x97\x03\x12*\n%MESSAGE_DELIVERY_STATUS_NOT_SUPPORTED\x10\x98\x03\x12\x38\n3MESSAGE_DELIVERY_STATUS_INVALID_REPLY_TO_MESSAGE_ID\x10\x99\x03\x12\x30\n+MESSAGE_DELIVERY_STATUS_INVALID_CUSTOMER_ID\x10\x9a\x03\x12.\n)MESSAGE_DELIVERY_STATUS_DUPLICATE_REQUEST\x10\x9b\x03\x12*\n%MESSAGE_DELIVERY_STATUS_TAG_NOT_FOUND\x10\x9c\x03\x12\x36\n1MESSAGE_DELIVERY_STATUS_CUSTOMER_NUMBER_NOT_FOUND\x10\x9d\x03\x12\x36\n1MESSAGE_DELIVERY_STATUS_DECOMMISSIONED_CUSTOMERID\x10\x9e\x03\x12%\n MESSAGE_DELIVERY_STATUS_REJECTED\x10\x9f\x03\x12,\n\'MESSAGE_DELIVERY_STATUS_INVALID_REQUEST\x10\xa0\x03\x12\x31\n,MESSAGE_DELIVERY_STATUS_INSUFFICIENT_CREDITS\x10\xa1\x03\x12.\n)MESSAGE_DELIVERY_STATUS_APPLICATION_ERROR\x10\xf5\x03*\xa5\x06\n\x0fVoiceCallStatus\x12!\n\x1dVOICE_CALL_STATUS_UNSPECIFIED\x10\x00\x12\x1c\n\x18VOICE_CALL_STATUS_QUEUED\x10\x64\x12\x1e\n\x1aVOICE_CALL_STATUS_ANSWERED\x10\x65\x12\x1d\n\x19VOICE_CALL_STATUS_RINGING\x10\x66\x12\x1d\n\x18VOICE_CALL_STATUS_ACTIVE\x10\xc8\x01\x12\x1e\n\x19VOICE_CALL_STATUS_DIALING\x10\xc9\x01\x12%\n VOICE_CALL_STATUS_DIAL_COMPLETED\x10\xca\x01\x12\x1e\n\x19VOICE_CALL_STATUS_BRIDGED\x10\xcb\x01\x12\x1f\n\x1aVOICE_CALL_STATUS_ENQUEUED\x10\xcc\x01\x12\x1f\n\x1aVOICE_CALL_STATUS_DEQUEUED\x10\xcd\x01\x12\"\n\x1dVOICE_CALL_STATUS_TRANSFERRED\x10\xce\x01\x12)\n$VOICE_CALL_STATUS_TRANSFER_COMPLETED\x10\xcf\x01\x12 
\n\x1bVOICE_CALL_STATUS_COMPLETED\x10\xac\x02\x12*\n%VOICE_CALL_STATUS_INSUFFICIENT_CREDIT\x10\x90\x03\x12#\n\x1eVOICE_CALL_STATUS_NOT_ANSWERED\x10\x91\x03\x12+\n&VOICE_CALL_STATUS_INVALID_PHONE_NUMBER\x10\x92\x03\x12\x30\n+VOICE_CALL_STATUS_DESTINATION_NOT_SUPPORTED\x10\x93\x03\x12\x30\n+VOICE_CALL_STATUS_DECOMMISSIONED_CUSTOMERID\x10\x94\x03\x12\x1e\n\x19VOICE_CALL_STATUS_EXPIRED\x10\x95\x03\x12-\n(VOICE_CALL_STATUS_INVALID_CHANNEL_NUMBER\x10\x96\x03\x12(\n#VOICE_CALL_STATUS_APPLICATION_ERROR\x10\xf5\x03*\xc9\x05\n\x14VoiceCallHangupCause\x12\'\n#VOICE_CALL_HANGUP_CAUSE_UNSPECIFIED\x10\x00\x12.\n*VOICE_CALL_HANGUP_CAUSE_UNALLOCATED_NUMBER\x10\x01\x12%\n!VOICE_CALL_HANGUP_CAUSE_USER_BUSY\x10\x11\x12+\n\'VOICE_CALL_HANGUP_CAUSE_NORMAL_CLEARING\x10\x10\x12,\n(VOICE_CALL_HANGUP_CAUSE_NO_USER_RESPONSE\x10\x12\x12%\n!VOICE_CALL_HANGUP_CAUSE_NO_ANSWER\x10\x13\x12-\n)VOICE_CALL_HANGUP_CAUSE_SUBSCRIBER_ABSENT\x10\x14\x12)\n%VOICE_CALL_HANGUP_CAUSE_CALL_REJECTED\x10\x15\x12.\n*VOICE_CALL_HANGUP_CAUSE_NORMAL_UNSPECIFIED\x10\x1f\x12\x34\n0VOICE_CALL_HANGUP_CAUSE_NORMAL_TEMPORARY_FAILURE\x10)\x12/\n+VOICE_CALL_HANGUP_CAUSE_SERVICE_UNAVAILABLE\x10?\x12\x34\n0VOICE_CALL_HANGUP_CAUSE_RECOVERY_ON_TIMER_EXPIRE\x10\x66\x12.\n)VOICE_CALL_HANGUP_CAUSE_ORIGINATOR_CANCEL\x10\xe7\x03\x12&\n!VOICE_CALL_HANGUP_CAUSE_LOSE_RACE\x10\xf6\x03\x12\x30\n+VOICE_CALL_HANGUP_CAUSE_USER_NOT_REGISTERED\x10\xde\x04*\xc5\x01\n\x11UssdSessionStatus\x12#\n\x1fUSSD_SESSION_STATUS_UNSPECIFIED\x10\x00\x12\x1e\n\x1aUSSD_SESSION_STATUS_ACTIVE\x10\x64\x12#\n\x1eUSSD_SESSION_STATUS_INCOMPLETE\x10\xc8\x01\x12\"\n\x1dUSSD_SESSION_STATUS_COMPLETED\x10\xc9\x01\x12\"\n\x1dUSSD_SESSION_STATUS_APP_ERROR\x10\xac\x02*y\n\x11TextToSpeechVoice\x12$\n TEXT_TO_SPEECH_VOICE_UNSPECIFIED\x10\x00\x12\x1d\n\x19TEXT_TO_SPEECH_VOICE_MALE\x10\x01\x12\x1f\n\x1bTEXT_TO_SPEECH_VOICE_FEMALE\x10\x02\x62\x06proto3'
,
dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,common__model__pb2.DESCRIPTOR,])
_MESSAGINGCHANNEL = _descriptor.EnumDescriptor(
name='MessagingChannel',
full_name='com.elarian.hera.proto.MessagingChannel',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='MESSAGING_CHANNEL_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CHANNEL_SMS', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CHANNEL_VOICE', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CHANNEL_USSD', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CHANNEL_FB_MESSENGER', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CHANNEL_TELEGRAM', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CHANNEL_WHATSAPP', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CHANNEL_EMAIL', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=5392,
serialized_end=5658,
)
_sym_db.RegisterEnumDescriptor(_MESSAGINGCHANNEL)
MessagingChannel = enum_type_wrapper.EnumTypeWrapper(_MESSAGINGCHANNEL)
_MESSAGINGCONSENTUPDATE = _descriptor.EnumDescriptor(
name='MessagingConsentUpdate',
full_name='com.elarian.hera.proto.MessagingConsentUpdate',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='MESSAGING_CONSENT_UPDATE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CONSENT_UPDATE_ALLOW', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CONSENT_UPDATE_BLOCK', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=5661,
serialized_end=5799,
)
_sym_db.RegisterEnumDescriptor(_MESSAGINGCONSENTUPDATE)
MessagingConsentUpdate = enum_type_wrapper.EnumTypeWrapper(_MESSAGINGCONSENTUPDATE)
_MESSAGINGCONSENTUPDATESTATUS = _descriptor.EnumDescriptor(
name='MessagingConsentUpdateStatus',
full_name='com.elarian.hera.proto.MessagingConsentUpdateStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='MESSAGING_CONSENT_UPDATE_STATUS_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CONSENT_UPDATE_STATUS_QUEUED', index=1, number=100,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CONSENT_UPDATE_STATUS_COMPLETED', index=2, number=300,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CONSENT_UPDATE_STATUS_INVALID_CHANNEL_NUMBER', index=3, number=401,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CONSENT_UPDATE_STATUS_DECOMMISSIONED_CUSTOMER_ID', index=4, number=402,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_CONSENT_UPDATE_STATUS_APPLICATION_ERROR', index=5, number=501,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=5802,
serialized_end=6155,
)
_sym_db.RegisterEnumDescriptor(_MESSAGINGCONSENTUPDATESTATUS)
MessagingConsentUpdateStatus = enum_type_wrapper.EnumTypeWrapper(_MESSAGINGCONSENTUPDATESTATUS)
_MESSAGINGSESSIONENDREASON = _descriptor.EnumDescriptor(
name='MessagingSessionEndReason',
full_name='com.elarian.hera.proto.MessagingSessionEndReason',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='MESSAGING_SESSION_END_REASON_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_SESSION_END_REASON_NORMAL_CLEARING', index=1, number=100,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_SESSION_END_REASON_INACTIVITY', index=2, number=200,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGING_SESSION_END_REASON_FAILURE', index=3, number=300,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=6158,
serialized_end=6370,
)
_sym_db.RegisterEnumDescriptor(_MESSAGINGSESSIONENDREASON)
MessagingSessionEndReason = enum_type_wrapper.EnumTypeWrapper(_MESSAGINGSESSIONENDREASON)
_MESSAGEREACTION = _descriptor.EnumDescriptor(
name='MessageReaction',
full_name='com.elarian.hera.proto.MessageReaction',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='MESSAGE_REACTION_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_REACTION_CLICKED', index=1, number=100,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_REACTION_UNSUBSCRIBED', index=2, number=200,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_REACTION_COMPLAINED', index=3, number=201,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=6373,
serialized_end=6524,
)
_sym_db.RegisterEnumDescriptor(_MESSAGEREACTION)
MessageReaction = enum_type_wrapper.EnumTypeWrapper(_MESSAGEREACTION)
_PROMPTMESSAGEREPLYACTION = _descriptor.EnumDescriptor(
name='PromptMessageReplyAction',
full_name='com.elarian.hera.proto.PromptMessageReplyAction',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='PROMPT_MESSAGE_REPLY_ACTION_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PROMPT_MESSAGE_REPLY_ACTION_TEXT', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PROMPT_MESSAGE_REPLY_ACTION_PHONE_NUMBER', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PROMPT_MESSAGE_REPLY_ACTION_EMAIL', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PROMPT_MESSAGE_REPLY_ACTION_LOCATION', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PROMPT_MESSAGE_REPLY_ACTION_URL', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=6527,
serialized_end=6800,
)
_sym_db.RegisterEnumDescriptor(_PROMPTMESSAGEREPLYACTION)
PromptMessageReplyAction = enum_type_wrapper.EnumTypeWrapper(_PROMPTMESSAGEREPLYACTION)
_MESSAGEDELIVERYSTATUS = _descriptor.EnumDescriptor(
name='MessageDeliveryStatus',
full_name='com.elarian.hera.proto.MessageDeliveryStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_QUEUED', index=1, number=100,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_SENT', index=2, number=101,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_DELIVERED', index=3, number=300,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_READ', index=4, number=301,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_RECEIVED', index=5, number=302,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_SESSION_INITIATED', index=6, number=303,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_FAILED', index=7, number=400,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_NO_CONSENT', index=8, number=401,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_NO_CAPABILITY', index=9, number=402,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_EXPIRED', index=10, number=403,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_NO_SESSION_IN_PROGRESS', index=11, number=404,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_OTHER_SESSION_IN_PROGRESS', index=12, number=405,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_INVALID_REPLY_TOKEN', index=13, number=406,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_INVALID_CHANNEL_NUMBER', index=14, number=407,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_NOT_SUPPORTED', index=15, number=408,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_INVALID_REPLY_TO_MESSAGE_ID', index=16, number=409,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_INVALID_CUSTOMER_ID', index=17, number=410,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_DUPLICATE_REQUEST', index=18, number=411,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_TAG_NOT_FOUND', index=19, number=412,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_CUSTOMER_NUMBER_NOT_FOUND', index=20, number=413,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_DECOMMISSIONED_CUSTOMERID', index=21, number=414,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_REJECTED', index=22, number=415,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_INVALID_REQUEST', index=23, number=416,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_INSUFFICIENT_CREDITS', index=24, number=417,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESSAGE_DELIVERY_STATUS_APPLICATION_ERROR', index=25, number=501,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=6803,
serialized_end=8011,
)
_sym_db.RegisterEnumDescriptor(_MESSAGEDELIVERYSTATUS)
MessageDeliveryStatus = enum_type_wrapper.EnumTypeWrapper(_MESSAGEDELIVERYSTATUS)
_VOICECALLSTATUS = _descriptor.EnumDescriptor(
name='VoiceCallStatus',
full_name='com.elarian.hera.proto.VoiceCallStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_QUEUED', index=1, number=100,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_ANSWERED', index=2, number=101,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_RINGING', index=3, number=102,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_ACTIVE', index=4, number=200,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_DIALING', index=5, number=201,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_DIAL_COMPLETED', index=6, number=202,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_BRIDGED', index=7, number=203,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_ENQUEUED', index=8, number=204,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_DEQUEUED', index=9, number=205,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_TRANSFERRED', index=10, number=206,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_TRANSFER_COMPLETED', index=11, number=207,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_COMPLETED', index=12, number=300,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_INSUFFICIENT_CREDIT', index=13, number=400,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_NOT_ANSWERED', index=14, number=401,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_INVALID_PHONE_NUMBER', index=15, number=402,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_DESTINATION_NOT_SUPPORTED', index=16, number=403,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_DECOMMISSIONED_CUSTOMERID', index=17, number=404,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_EXPIRED', index=18, number=405,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_INVALID_CHANNEL_NUMBER', index=19, number=406,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_STATUS_APPLICATION_ERROR', index=20, number=501,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=8014,
serialized_end=8819,
)
_sym_db.RegisterEnumDescriptor(_VOICECALLSTATUS)
VoiceCallStatus = enum_type_wrapper.EnumTypeWrapper(_VOICECALLSTATUS)
_VOICECALLHANGUPCAUSE = _descriptor.EnumDescriptor(
name='VoiceCallHangupCause',
full_name='com.elarian.hera.proto.VoiceCallHangupCause',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_UNALLOCATED_NUMBER', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_USER_BUSY', index=2, number=17,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_NORMAL_CLEARING', index=3, number=16,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_NO_USER_RESPONSE', index=4, number=18,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_NO_ANSWER', index=5, number=19,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_SUBSCRIBER_ABSENT', index=6, number=20,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_CALL_REJECTED', index=7, number=21,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_NORMAL_UNSPECIFIED', index=8, number=31,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_NORMAL_TEMPORARY_FAILURE', index=9, number=41,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_SERVICE_UNAVAILABLE', index=10, number=63,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_RECOVERY_ON_TIMER_EXPIRE', index=11, number=102,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_ORIGINATOR_CANCEL', index=12, number=487,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_LOSE_RACE', index=13, number=502,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VOICE_CALL_HANGUP_CAUSE_USER_NOT_REGISTERED', index=14, number=606,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=8822,
serialized_end=9535,
)
_sym_db.RegisterEnumDescriptor(_VOICECALLHANGUPCAUSE)
VoiceCallHangupCause = enum_type_wrapper.EnumTypeWrapper(_VOICECALLHANGUPCAUSE)
_USSDSESSIONSTATUS = _descriptor.EnumDescriptor(
name='UssdSessionStatus',
full_name='com.elarian.hera.proto.UssdSessionStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='USSD_SESSION_STATUS_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='USSD_SESSION_STATUS_ACTIVE', index=1, number=100,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='USSD_SESSION_STATUS_INCOMPLETE', index=2, number=200,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='USSD_SESSION_STATUS_COMPLETED', index=3, number=201,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='USSD_SESSION_STATUS_APP_ERROR', index=4, number=300,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=9538,
serialized_end=9735,
)
_sym_db.RegisterEnumDescriptor(_USSDSESSIONSTATUS)
UssdSessionStatus = enum_type_wrapper.EnumTypeWrapper(_USSDSESSIONSTATUS)
_TEXTTOSPEECHVOICE = _descriptor.EnumDescriptor(
name='TextToSpeechVoice',
full_name='com.elarian.hera.proto.TextToSpeechVoice',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='TEXT_TO_SPEECH_VOICE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TEXT_TO_SPEECH_VOICE_MALE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TEXT_TO_SPEECH_VOICE_FEMALE', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=9737,
serialized_end=9858,
)
_sym_db.RegisterEnumDescriptor(_TEXTTOSPEECHVOICE)
TextToSpeechVoice = enum_type_wrapper.EnumTypeWrapper(_TEXTTOSPEECHVOICE)
MESSAGING_CHANNEL_UNSPECIFIED = 0
MESSAGING_CHANNEL_SMS = 1
MESSAGING_CHANNEL_VOICE = 2
MESSAGING_CHANNEL_USSD = 3
MESSAGING_CHANNEL_FB_MESSENGER = 4
MESSAGING_CHANNEL_TELEGRAM = 5
MESSAGING_CHANNEL_WHATSAPP = 6
MESSAGING_CHANNEL_EMAIL = 7
MESSAGING_CONSENT_UPDATE_UNSPECIFIED = 0
MESSAGING_CONSENT_UPDATE_ALLOW = 1
MESSAGING_CONSENT_UPDATE_BLOCK = 2
MESSAGING_CONSENT_UPDATE_STATUS_UNSPECIFIED = 0
MESSAGING_CONSENT_UPDATE_STATUS_QUEUED = 100
MESSAGING_CONSENT_UPDATE_STATUS_COMPLETED = 300
MESSAGING_CONSENT_UPDATE_STATUS_INVALID_CHANNEL_NUMBER = 401
MESSAGING_CONSENT_UPDATE_STATUS_DECOMMISSIONED_CUSTOMER_ID = 402
MESSAGING_CONSENT_UPDATE_STATUS_APPLICATION_ERROR = 501
MESSAGING_SESSION_END_REASON_UNSPECIFIED = 0
MESSAGING_SESSION_END_REASON_NORMAL_CLEARING = 100
MESSAGING_SESSION_END_REASON_INACTIVITY = 200
MESSAGING_SESSION_END_REASON_FAILURE = 300
MESSAGE_REACTION_UNSPECIFIED = 0
MESSAGE_REACTION_CLICKED = 100
MESSAGE_REACTION_UNSUBSCRIBED = 200
MESSAGE_REACTION_COMPLAINED = 201
PROMPT_MESSAGE_REPLY_ACTION_UNSPECIFIED = 0
PROMPT_MESSAGE_REPLY_ACTION_TEXT = 1
PROMPT_MESSAGE_REPLY_ACTION_PHONE_NUMBER = 2
PROMPT_MESSAGE_REPLY_ACTION_EMAIL = 3
PROMPT_MESSAGE_REPLY_ACTION_LOCATION = 4
PROMPT_MESSAGE_REPLY_ACTION_URL = 5
MESSAGE_DELIVERY_STATUS_UNSPECIFIED = 0
MESSAGE_DELIVERY_STATUS_QUEUED = 100
MESSAGE_DELIVERY_STATUS_SENT = 101
MESSAGE_DELIVERY_STATUS_DELIVERED = 300
MESSAGE_DELIVERY_STATUS_READ = 301
MESSAGE_DELIVERY_STATUS_RECEIVED = 302
MESSAGE_DELIVERY_STATUS_SESSION_INITIATED = 303
MESSAGE_DELIVERY_STATUS_FAILED = 400
MESSAGE_DELIVERY_STATUS_NO_CONSENT = 401
MESSAGE_DELIVERY_STATUS_NO_CAPABILITY = 402
MESSAGE_DELIVERY_STATUS_EXPIRED = 403
MESSAGE_DELIVERY_STATUS_NO_SESSION_IN_PROGRESS = 404
MESSAGE_DELIVERY_STATUS_OTHER_SESSION_IN_PROGRESS = 405
MESSAGE_DELIVERY_STATUS_INVALID_REPLY_TOKEN = 406
MESSAGE_DELIVERY_STATUS_INVALID_CHANNEL_NUMBER = 407
MESSAGE_DELIVERY_STATUS_NOT_SUPPORTED = 408
MESSAGE_DELIVERY_STATUS_INVALID_REPLY_TO_MESSAGE_ID = 409
MESSAGE_DELIVERY_STATUS_INVALID_CUSTOMER_ID = 410
MESSAGE_DELIVERY_STATUS_DUPLICATE_REQUEST = 411
MESSAGE_DELIVERY_STATUS_TAG_NOT_FOUND = 412
MESSAGE_DELIVERY_STATUS_CUSTOMER_NUMBER_NOT_FOUND = 413
MESSAGE_DELIVERY_STATUS_DECOMMISSIONED_CUSTOMERID = 414
MESSAGE_DELIVERY_STATUS_REJECTED = 415
MESSAGE_DELIVERY_STATUS_INVALID_REQUEST = 416
MESSAGE_DELIVERY_STATUS_INSUFFICIENT_CREDITS = 417
MESSAGE_DELIVERY_STATUS_APPLICATION_ERROR = 501
VOICE_CALL_STATUS_UNSPECIFIED = 0
VOICE_CALL_STATUS_QUEUED = 100
VOICE_CALL_STATUS_ANSWERED = 101
VOICE_CALL_STATUS_RINGING = 102
VOICE_CALL_STATUS_ACTIVE = 200
VOICE_CALL_STATUS_DIALING = 201
VOICE_CALL_STATUS_DIAL_COMPLETED = 202
VOICE_CALL_STATUS_BRIDGED = 203
VOICE_CALL_STATUS_ENQUEUED = 204
VOICE_CALL_STATUS_DEQUEUED = 205
VOICE_CALL_STATUS_TRANSFERRED = 206
VOICE_CALL_STATUS_TRANSFER_COMPLETED = 207
VOICE_CALL_STATUS_COMPLETED = 300
VOICE_CALL_STATUS_INSUFFICIENT_CREDIT = 400
VOICE_CALL_STATUS_NOT_ANSWERED = 401
VOICE_CALL_STATUS_INVALID_PHONE_NUMBER = 402
VOICE_CALL_STATUS_DESTINATION_NOT_SUPPORTED = 403
VOICE_CALL_STATUS_DECOMMISSIONED_CUSTOMERID = 404
VOICE_CALL_STATUS_EXPIRED = 405
VOICE_CALL_STATUS_INVALID_CHANNEL_NUMBER = 406
VOICE_CALL_STATUS_APPLICATION_ERROR = 501
VOICE_CALL_HANGUP_CAUSE_UNSPECIFIED = 0
VOICE_CALL_HANGUP_CAUSE_UNALLOCATED_NUMBER = 1
VOICE_CALL_HANGUP_CAUSE_USER_BUSY = 17
VOICE_CALL_HANGUP_CAUSE_NORMAL_CLEARING = 16
VOICE_CALL_HANGUP_CAUSE_NO_USER_RESPONSE = 18
VOICE_CALL_HANGUP_CAUSE_NO_ANSWER = 19
VOICE_CALL_HANGUP_CAUSE_SUBSCRIBER_ABSENT = 20
VOICE_CALL_HANGUP_CAUSE_CALL_REJECTED = 21
VOICE_CALL_HANGUP_CAUSE_NORMAL_UNSPECIFIED = 31
VOICE_CALL_HANGUP_CAUSE_NORMAL_TEMPORARY_FAILURE = 41
VOICE_CALL_HANGUP_CAUSE_SERVICE_UNAVAILABLE = 63
VOICE_CALL_HANGUP_CAUSE_RECOVERY_ON_TIMER_EXPIRE = 102
VOICE_CALL_HANGUP_CAUSE_ORIGINATOR_CANCEL = 487
VOICE_CALL_HANGUP_CAUSE_LOSE_RACE = 502
VOICE_CALL_HANGUP_CAUSE_USER_NOT_REGISTERED = 606
USSD_SESSION_STATUS_UNSPECIFIED = 0
USSD_SESSION_STATUS_ACTIVE = 100
USSD_SESSION_STATUS_INCOMPLETE = 200
USSD_SESSION_STATUS_COMPLETED = 201
USSD_SESSION_STATUS_APP_ERROR = 300
TEXT_TO_SPEECH_VOICE_UNSPECIFIED = 0
TEXT_TO_SPEECH_VOICE_MALE = 1
TEXT_TO_SPEECH_VOICE_FEMALE = 2
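# Message Descriptors. Each Descriptor below mirrors one message from the source
# .proto file (field numbers, declared types and labels); serialized_start and
# serialized_end are byte offsets of that message definition within the serialized
# FileDescriptorProto held by DESCRIPTOR.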
_MESSAGINGCHANNELNUMBER = _descriptor.Descriptor(
name='MessagingChannelNumber',
full_name='com.elarian.hera.proto.MessagingChannelNumber',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='channel', full_name='com.elarian.hera.proto.MessagingChannelNumber.channel', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='number', full_name='com.elarian.hera.proto.MessagingChannelNumber.number', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=166,
serialized_end=265,
)
_MEDIAMESSAGEBODY = _descriptor.Descriptor(
name='MediaMessageBody',
full_name='com.elarian.hera.proto.MediaMessageBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='url', full_name='com.elarian.hera.proto.MediaMessageBody.url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='media', full_name='com.elarian.hera.proto.MediaMessageBody.media', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=267,
serialized_end=348,
)
_LOCATIONMESSAGEBODY = _descriptor.Descriptor(
name='LocationMessageBody',
full_name='com.elarian.hera.proto.LocationMessageBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='latitude', full_name='com.elarian.hera.proto.LocationMessageBody.latitude', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='longitude', full_name='com.elarian.hera.proto.LocationMessageBody.longitude', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='label', full_name='com.elarian.hera.proto.LocationMessageBody.label', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='address', full_name='com.elarian.hera.proto.LocationMessageBody.address', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=351,
serialized_end=501,
)
_EMAILMESSAGEBODY = _descriptor.Descriptor(
name='EmailMessageBody',
full_name='com.elarian.hera.proto.EmailMessageBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='subject', full_name='com.elarian.hera.proto.EmailMessageBody.subject', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='body_plain', full_name='com.elarian.hera.proto.EmailMessageBody.body_plain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='body_html', full_name='com.elarian.hera.proto.EmailMessageBody.body_html', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cc_list', full_name='com.elarian.hera.proto.EmailMessageBody.cc_list', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bcc_list', full_name='com.elarian.hera.proto.EmailMessageBody.bcc_list', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=503,
serialized_end=612,
)
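# Synthetic map-entry message backing the `params` map field of TemplateMessageBody;
# serialized_options=b'8\001' encodes MessageOptions.map_entry = true.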
_TEMPLATEMESSAGEBODY_PARAMSENTRY = _descriptor.Descriptor(
name='ParamsEntry',
full_name='com.elarian.hera.proto.TemplateMessageBody.ParamsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='com.elarian.hera.proto.TemplateMessageBody.ParamsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='com.elarian.hera.proto.TemplateMessageBody.ParamsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=723,
serialized_end=768,
)
_TEMPLATEMESSAGEBODY = _descriptor.Descriptor(
name='TemplateMessageBody',
full_name='com.elarian.hera.proto.TemplateMessageBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='com.elarian.hera.proto.TemplateMessageBody.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='params', full_name='com.elarian.hera.proto.TemplateMessageBody.params', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_TEMPLATEMESSAGEBODY_PARAMSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=615,
serialized_end=768,
)
_SAYCALLACTION = _descriptor.Descriptor(
name='SayCallAction',
full_name='com.elarian.hera.proto.SayCallAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='com.elarian.hera.proto.SayCallAction.text', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='voice', full_name='com.elarian.hera.proto.SayCallAction.voice', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='play_beep', full_name='com.elarian.hera.proto.SayCallAction.play_beep', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=770,
serialized_end=876,
)
_PLAYCALLACTION = _descriptor.Descriptor(
name='PlayCallAction',
full_name='com.elarian.hera.proto.PlayCallAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='url', full_name='com.elarian.hera.proto.PlayCallAction.url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=878,
serialized_end=907,
)
_GETDIGITSCALLACTION = _descriptor.Descriptor(
name='GetDigitsCallAction',
full_name='com.elarian.hera.proto.GetDigitsCallAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='say', full_name='com.elarian.hera.proto.GetDigitsCallAction.say', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='play', full_name='com.elarian.hera.proto.GetDigitsCallAction.play', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timeout', full_name='com.elarian.hera.proto.GetDigitsCallAction.timeout', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='finish_on_key', full_name='com.elarian.hera.proto.GetDigitsCallAction.finish_on_key', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='num_digits', full_name='com.elarian.hera.proto.GetDigitsCallAction.num_digits', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='prompt', full_name='com.elarian.hera.proto.GetDigitsCallAction.prompt',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=910,
serialized_end=1197,
)
_GETRECORDINGCALLACTION = _descriptor.Descriptor(
name='GetRecordingCallAction',
full_name='com.elarian.hera.proto.GetRecordingCallAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='say', full_name='com.elarian.hera.proto.GetRecordingCallAction.say', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='play', full_name='com.elarian.hera.proto.GetRecordingCallAction.play', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timeout', full_name='com.elarian.hera.proto.GetRecordingCallAction.timeout', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_length', full_name='com.elarian.hera.proto.GetRecordingCallAction.max_length', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='finish_on_key', full_name='com.elarian.hera.proto.GetRecordingCallAction.finish_on_key', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='play_beep', full_name='com.elarian.hera.proto.GetRecordingCallAction.play_beep', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trim_silence', full_name='com.elarian.hera.proto.GetRecordingCallAction.trim_silence', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='prompt', full_name='com.elarian.hera.proto.GetRecordingCallAction.prompt',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1200,
serialized_end=1529,
)
_RECORDSESSIONCALLACTION = _descriptor.Descriptor(
name='RecordSessionCallAction',
full_name='com.elarian.hera.proto.RecordSessionCallAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1531,
serialized_end=1556,
)
_DIALCALLACTION = _descriptor.Descriptor(
name='DialCallAction',
full_name='com.elarian.hera.proto.DialCallAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='customer_numbers', full_name='com.elarian.hera.proto.DialCallAction.customer_numbers', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='record', full_name='com.elarian.hera.proto.DialCallAction.record', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sequential', full_name='com.elarian.hera.proto.DialCallAction.sequential', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ringback_tone', full_name='com.elarian.hera.proto.DialCallAction.ringback_tone', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='caller_id', full_name='com.elarian.hera.proto.DialCallAction.caller_id', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_duration', full_name='com.elarian.hera.proto.DialCallAction.max_duration', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1559,
serialized_end=1830,
)
_ENQUEUECALLACTION = _descriptor.Descriptor(
name='EnqueueCallAction',
full_name='com.elarian.hera.proto.EnqueueCallAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='hold_music', full_name='com.elarian.hera.proto.EnqueueCallAction.hold_music', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='queue_name', full_name='com.elarian.hera.proto.EnqueueCallAction.queue_name', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1832,
serialized_end=1951,
)
_DEQUEUECALLACTION = _descriptor.Descriptor(
name='DequeueCallAction',
full_name='com.elarian.hera.proto.DequeueCallAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='channel_number', full_name='com.elarian.hera.proto.DequeueCallAction.channel_number', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='record', full_name='com.elarian.hera.proto.DequeueCallAction.record', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='queue_name', full_name='com.elarian.hera.proto.DequeueCallAction.queue_name', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1954,
serialized_end=2111,
)
_REJECTCALLACTION = _descriptor.Descriptor(
name='RejectCallAction',
full_name='com.elarian.hera.proto.RejectCallAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2113,
serialized_end=2131,
)
_REDIRECTCALLACTION = _descriptor.Descriptor(
name='RedirectCallAction',
full_name='com.elarian.hera.proto.RedirectCallAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='url', full_name='com.elarian.hera.proto.RedirectCallAction.url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2133,
serialized_end=2166,
)
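# VoiceCallAction exposes every call action through the 'entry' oneof, so a single
# message carries exactly one of say/play/get_digits/dial/record_session/
# get_recording/enqueue/dequeue/reject/redirect at a time.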
_VOICECALLACTION = _descriptor.Descriptor(
name='VoiceCallAction',
full_name='com.elarian.hera.proto.VoiceCallAction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='say', full_name='com.elarian.hera.proto.VoiceCallAction.say', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='play', full_name='com.elarian.hera.proto.VoiceCallAction.play', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='get_digits', full_name='com.elarian.hera.proto.VoiceCallAction.get_digits', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dial', full_name='com.elarian.hera.proto.VoiceCallAction.dial', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='record_session', full_name='com.elarian.hera.proto.VoiceCallAction.record_session', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='get_recording', full_name='com.elarian.hera.proto.VoiceCallAction.get_recording', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enqueue', full_name='com.elarian.hera.proto.VoiceCallAction.enqueue', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dequeue', full_name='com.elarian.hera.proto.VoiceCallAction.dequeue', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reject', full_name='com.elarian.hera.proto.VoiceCallAction.reject', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='redirect', full_name='com.elarian.hera.proto.VoiceCallAction.redirect', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='entry', full_name='com.elarian.hera.proto.VoiceCallAction.entry',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=2169,
serialized_end=2824,
)
_VOICECALLDIALPLANMESSAGEBODY = _descriptor.Descriptor(
name='VoiceCallDialplanMessageBody',
full_name='com.elarian.hera.proto.VoiceCallDialplanMessageBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='actions', full_name='com.elarian.hera.proto.VoiceCallDialplanMessageBody.actions', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2826,
serialized_end=2914,
)
_VOICECALLDIALINPUT = _descriptor.Descriptor(
name='VoiceCallDialInput',
full_name='com.elarian.hera.proto.VoiceCallDialInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='destination_number', full_name='com.elarian.hera.proto.VoiceCallDialInput.destination_number', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='started_at', full_name='com.elarian.hera.proto.VoiceCallDialInput.started_at', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='duration', full_name='com.elarian.hera.proto.VoiceCallDialInput.duration', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2917,
serialized_end=3058,
)
_VOICECALLQUEUEINPUT = _descriptor.Descriptor(
name='VoiceCallQueueInput',
full_name='com.elarian.hera.proto.VoiceCallQueueInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='enqueued_at', full_name='com.elarian.hera.proto.VoiceCallQueueInput.enqueued_at', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dequeued_at', full_name='com.elarian.hera.proto.VoiceCallQueueInput.dequeued_at', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dequeued_to_number', full_name='com.elarian.hera.proto.VoiceCallQueueInput.dequeued_to_number', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dequeued_to_sessionId', full_name='com.elarian.hera.proto.VoiceCallQueueInput.dequeued_to_sessionId', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='queue_duration', full_name='com.elarian.hera.proto.VoiceCallQueueInput.queue_duration', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3061,
serialized_end=3350,
)
_VOICECALLINPUTMESSAGEBODY = _descriptor.Descriptor(
name='VoiceCallInputMessageBody',
full_name='com.elarian.hera.proto.VoiceCallInputMessageBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='direction', full_name='com.elarian.hera.proto.VoiceCallInputMessageBody.direction', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='com.elarian.hera.proto.VoiceCallInputMessageBody.status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='started_at', full_name='com.elarian.hera.proto.VoiceCallInputMessageBody.started_at', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='hangup_cause', full_name='com.elarian.hera.proto.VoiceCallInputMessageBody.hangup_cause', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dtmf_digits', full_name='com.elarian.hera.proto.VoiceCallInputMessageBody.dtmf_digits', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='recording_url', full_name='com.elarian.hera.proto.VoiceCallInputMessageBody.recording_url', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dial_data', full_name='com.elarian.hera.proto.VoiceCallInputMessageBody.dial_data', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='queue_data', full_name='com.elarian.hera.proto.VoiceCallInputMessageBody.queue_data', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3353,
serialized_end=3852,
)
_USSDINPUTMESSAGEBODY = _descriptor.Descriptor(
name='UssdInputMessageBody',
full_name='com.elarian.hera.proto.UssdInputMessageBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='com.elarian.hera.proto.UssdInputMessageBody.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='text', full_name='com.elarian.hera.proto.UssdInputMessageBody.text', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3854,
serialized_end=3979,
)
_USSDMENUMESSAGEBODY = _descriptor.Descriptor(
name='UssdMenuMessageBody',
full_name='com.elarian.hera.proto.UssdMenuMessageBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='com.elarian.hera.proto.UssdMenuMessageBody.text', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_terminal', full_name='com.elarian.hera.proto.UssdMenuMessageBody.is_terminal', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3981,
serialized_end=4037,
)
_OUTBOUNDMESSAGEBODY = _descriptor.Descriptor(
name='OutboundMessageBody',
full_name='com.elarian.hera.proto.OutboundMessageBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='com.elarian.hera.proto.OutboundMessageBody.text', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='media', full_name='com.elarian.hera.proto.OutboundMessageBody.media', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='location', full_name='com.elarian.hera.proto.OutboundMessageBody.location', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='email', full_name='com.elarian.hera.proto.OutboundMessageBody.email', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='template', full_name='com.elarian.hera.proto.OutboundMessageBody.template', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='url', full_name='com.elarian.hera.proto.OutboundMessageBody.url', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='voice', full_name='com.elarian.hera.proto.OutboundMessageBody.voice', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ussd', full_name='com.elarian.hera.proto.OutboundMessageBody.ussd', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='entry', full_name='com.elarian.hera.proto.OutboundMessageBody.entry',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=4040,
serialized_end=4481,
)
_PROMPTMESSAGEMENUITEMBODY = _descriptor.Descriptor(
name='PromptMessageMenuItemBody',
full_name='com.elarian.hera.proto.PromptMessageMenuItemBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='com.elarian.hera.proto.PromptMessageMenuItemBody.text', index=0,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='media', full_name='com.elarian.hera.proto.PromptMessageMenuItemBody.media', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='entry', full_name='com.elarian.hera.proto.PromptMessageMenuItemBody.entry',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=4483,
serialized_end=4594,
)
_INBOUNDMESSAGEBODY = _descriptor.Descriptor(
name='InboundMessageBody',
full_name='com.elarian.hera.proto.InboundMessageBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='com.elarian.hera.proto.InboundMessageBody.text', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='media', full_name='com.elarian.hera.proto.InboundMessageBody.media', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='location', full_name='com.elarian.hera.proto.InboundMessageBody.location', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='email', full_name='com.elarian.hera.proto.InboundMessageBody.email', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='voice', full_name='com.elarian.hera.proto.InboundMessageBody.voice', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ussd', full_name='com.elarian.hera.proto.InboundMessageBody.ussd', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='entry', full_name='com.elarian.hera.proto.InboundMessageBody.entry',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=4597,
serialized_end=4955,
)
_OUTBOUNDMESSAGEREPLYPROMPT = _descriptor.Descriptor(
name='OutboundMessageReplyPrompt',
full_name='com.elarian.hera.proto.OutboundMessageReplyPrompt',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='action', full_name='com.elarian.hera.proto.OutboundMessageReplyPrompt.action', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='menu', full_name='com.elarian.hera.proto.OutboundMessageReplyPrompt.menu', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4958,
serialized_end=5117,
)
_OUTBOUNDMESSAGE = _descriptor.Descriptor(
name='OutboundMessage',
full_name='com.elarian.hera.proto.OutboundMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='body', full_name='com.elarian.hera.proto.OutboundMessage.body', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='com.elarian.hera.proto.OutboundMessage.labels', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='provider_tag', full_name='com.elarian.hera.proto.OutboundMessage.provider_tag', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reply_token', full_name='com.elarian.hera.proto.OutboundMessage.reply_token', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reply_prompt', full_name='com.elarian.hera.proto.OutboundMessage.reply_prompt', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5120,
serialized_end=5389,
)
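# Resolve cross-references that cannot be set while the Descriptors above are being
# constructed: the message/enum type of each field, and the membership of fields in
# their containing oneofs.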
_MESSAGINGCHANNELNUMBER.fields_by_name['channel'].enum_type = _MESSAGINGCHANNEL
_MEDIAMESSAGEBODY.fields_by_name['media'].enum_type = common__model__pb2._MEDIATYPE
_LOCATIONMESSAGEBODY.fields_by_name['label'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_LOCATIONMESSAGEBODY.fields_by_name['address'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_TEMPLATEMESSAGEBODY_PARAMSENTRY.containing_type = _TEMPLATEMESSAGEBODY
_TEMPLATEMESSAGEBODY.fields_by_name['params'].message_type = _TEMPLATEMESSAGEBODY_PARAMSENTRY
_SAYCALLACTION.fields_by_name['voice'].enum_type = _TEXTTOSPEECHVOICE
_GETDIGITSCALLACTION.fields_by_name['say'].message_type = _SAYCALLACTION
_GETDIGITSCALLACTION.fields_by_name['play'].message_type = _PLAYCALLACTION
_GETDIGITSCALLACTION.fields_by_name['timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_GETDIGITSCALLACTION.fields_by_name['finish_on_key'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_GETDIGITSCALLACTION.fields_by_name['num_digits'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_GETDIGITSCALLACTION.oneofs_by_name['prompt'].fields.append(
_GETDIGITSCALLACTION.fields_by_name['say'])
_GETDIGITSCALLACTION.fields_by_name['say'].containing_oneof = _GETDIGITSCALLACTION.oneofs_by_name['prompt']
_GETDIGITSCALLACTION.oneofs_by_name['prompt'].fields.append(
_GETDIGITSCALLACTION.fields_by_name['play'])
_GETDIGITSCALLACTION.fields_by_name['play'].containing_oneof = _GETDIGITSCALLACTION.oneofs_by_name['prompt']
_GETRECORDINGCALLACTION.fields_by_name['say'].message_type = _SAYCALLACTION
_GETRECORDINGCALLACTION.fields_by_name['play'].message_type = _PLAYCALLACTION
_GETRECORDINGCALLACTION.fields_by_name['timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_GETRECORDINGCALLACTION.fields_by_name['max_length'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_GETRECORDINGCALLACTION.fields_by_name['finish_on_key'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_GETRECORDINGCALLACTION.oneofs_by_name['prompt'].fields.append(
_GETRECORDINGCALLACTION.fields_by_name['say'])
_GETRECORDINGCALLACTION.fields_by_name['say'].containing_oneof = _GETRECORDINGCALLACTION.oneofs_by_name['prompt']
_GETRECORDINGCALLACTION.oneofs_by_name['prompt'].fields.append(
_GETRECORDINGCALLACTION.fields_by_name['play'])
_GETRECORDINGCALLACTION.fields_by_name['play'].containing_oneof = _GETRECORDINGCALLACTION.oneofs_by_name['prompt']
_DIALCALLACTION.fields_by_name['customer_numbers'].message_type = common__model__pb2._CUSTOMERNUMBER
_DIALCALLACTION.fields_by_name['ringback_tone'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DIALCALLACTION.fields_by_name['caller_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DIALCALLACTION.fields_by_name['max_duration'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_ENQUEUECALLACTION.fields_by_name['hold_music'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ENQUEUECALLACTION.fields_by_name['queue_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DEQUEUECALLACTION.fields_by_name['channel_number'].message_type = _MESSAGINGCHANNELNUMBER
_DEQUEUECALLACTION.fields_by_name['queue_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_VOICECALLACTION.fields_by_name['say'].message_type = _SAYCALLACTION
_VOICECALLACTION.fields_by_name['play'].message_type = _PLAYCALLACTION
_VOICECALLACTION.fields_by_name['get_digits'].message_type = _GETDIGITSCALLACTION
_VOICECALLACTION.fields_by_name['dial'].message_type = _DIALCALLACTION
_VOICECALLACTION.fields_by_name['record_session'].message_type = _RECORDSESSIONCALLACTION
_VOICECALLACTION.fields_by_name['get_recording'].message_type = _GETRECORDINGCALLACTION
_VOICECALLACTION.fields_by_name['enqueue'].message_type = _ENQUEUECALLACTION
_VOICECALLACTION.fields_by_name['dequeue'].message_type = _DEQUEUECALLACTION
_VOICECALLACTION.fields_by_name['reject'].message_type = _REJECTCALLACTION
_VOICECALLACTION.fields_by_name['redirect'].message_type = _REDIRECTCALLACTION
_VOICECALLACTION.oneofs_by_name['entry'].fields.append(
_VOICECALLACTION.fields_by_name['say'])
_VOICECALLACTION.fields_by_name['say'].containing_oneof = _VOICECALLACTION.oneofs_by_name['entry']
_VOICECALLACTION.oneofs_by_name['entry'].fields.append(
_VOICECALLACTION.fields_by_name['play'])
_VOICECALLACTION.fields_by_name['play'].containing_oneof = _VOICECALLACTION.oneofs_by_name['entry']
_VOICECALLACTION.oneofs_by_name['entry'].fields.append(
_VOICECALLACTION.fields_by_name['get_digits'])
_VOICECALLACTION.fields_by_name['get_digits'].containing_oneof = _VOICECALLACTION.oneofs_by_name['entry']
_VOICECALLACTION.oneofs_by_name['entry'].fields.append(
_VOICECALLACTION.fields_by_name['dial'])
_VOICECALLACTION.fields_by_name['dial'].containing_oneof = _VOICECALLACTION.oneofs_by_name['entry']
_VOICECALLACTION.oneofs_by_name['entry'].fields.append(
_VOICECALLACTION.fields_by_name['record_session'])
_VOICECALLACTION.fields_by_name['record_session'].containing_oneof = _VOICECALLACTION.oneofs_by_name['entry']
_VOICECALLACTION.oneofs_by_name['entry'].fields.append(
_VOICECALLACTION.fields_by_name['get_recording'])
_VOICECALLACTION.fields_by_name['get_recording'].containing_oneof = _VOICECALLACTION.oneofs_by_name['entry']
_VOICECALLACTION.oneofs_by_name['entry'].fields.append(
_VOICECALLACTION.fields_by_name['enqueue'])
_VOICECALLACTION.fields_by_name['enqueue'].containing_oneof = _VOICECALLACTION.oneofs_by_name['entry']
_VOICECALLACTION.oneofs_by_name['entry'].fields.append(
_VOICECALLACTION.fields_by_name['dequeue'])
_VOICECALLACTION.fields_by_name['dequeue'].containing_oneof = _VOICECALLACTION.oneofs_by_name['entry']
_VOICECALLACTION.oneofs_by_name['entry'].fields.append(
_VOICECALLACTION.fields_by_name['reject'])
_VOICECALLACTION.fields_by_name['reject'].containing_oneof = _VOICECALLACTION.oneofs_by_name['entry']
_VOICECALLACTION.oneofs_by_name['entry'].fields.append(
_VOICECALLACTION.fields_by_name['redirect'])
_VOICECALLACTION.fields_by_name['redirect'].containing_oneof = _VOICECALLACTION.oneofs_by_name['entry']
_VOICECALLDIALPLANMESSAGEBODY.fields_by_name['actions'].message_type = _VOICECALLACTION
_VOICECALLDIALINPUT.fields_by_name['started_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_VOICECALLDIALINPUT.fields_by_name['duration'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_VOICECALLQUEUEINPUT.fields_by_name['enqueued_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_VOICECALLQUEUEINPUT.fields_by_name['dequeued_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_VOICECALLQUEUEINPUT.fields_by_name['dequeued_to_number'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_VOICECALLQUEUEINPUT.fields_by_name['dequeued_to_sessionId'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_VOICECALLQUEUEINPUT.fields_by_name['queue_duration'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_VOICECALLINPUTMESSAGEBODY.fields_by_name['direction'].enum_type = common__model__pb2._CUSTOMEREVENTDIRECTION
_VOICECALLINPUTMESSAGEBODY.fields_by_name['status'].enum_type = _VOICECALLSTATUS
_VOICECALLINPUTMESSAGEBODY.fields_by_name['started_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_VOICECALLINPUTMESSAGEBODY.fields_by_name['hangup_cause'].enum_type = _VOICECALLHANGUPCAUSE
_VOICECALLINPUTMESSAGEBODY.fields_by_name['dtmf_digits'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_VOICECALLINPUTMESSAGEBODY.fields_by_name['recording_url'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_VOICECALLINPUTMESSAGEBODY.fields_by_name['dial_data'].message_type = _VOICECALLDIALINPUT
_VOICECALLINPUTMESSAGEBODY.fields_by_name['queue_data'].message_type = _VOICECALLQUEUEINPUT
_USSDINPUTMESSAGEBODY.fields_by_name['status'].enum_type = _USSDSESSIONSTATUS
_USSDINPUTMESSAGEBODY.fields_by_name['text'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_OUTBOUNDMESSAGEBODY.fields_by_name['media'].message_type = _MEDIAMESSAGEBODY
_OUTBOUNDMESSAGEBODY.fields_by_name['location'].message_type = _LOCATIONMESSAGEBODY
_OUTBOUNDMESSAGEBODY.fields_by_name['email'].message_type = _EMAILMESSAGEBODY
_OUTBOUNDMESSAGEBODY.fields_by_name['template'].message_type = _TEMPLATEMESSAGEBODY
_OUTBOUNDMESSAGEBODY.fields_by_name['voice'].message_type = _VOICECALLDIALPLANMESSAGEBODY
_OUTBOUNDMESSAGEBODY.fields_by_name['ussd'].message_type = _USSDMENUMESSAGEBODY
_OUTBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_OUTBOUNDMESSAGEBODY.fields_by_name['text'])
_OUTBOUNDMESSAGEBODY.fields_by_name['text'].containing_oneof = _OUTBOUNDMESSAGEBODY.oneofs_by_name['entry']
_OUTBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_OUTBOUNDMESSAGEBODY.fields_by_name['media'])
_OUTBOUNDMESSAGEBODY.fields_by_name['media'].containing_oneof = _OUTBOUNDMESSAGEBODY.oneofs_by_name['entry']
_OUTBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_OUTBOUNDMESSAGEBODY.fields_by_name['location'])
_OUTBOUNDMESSAGEBODY.fields_by_name['location'].containing_oneof = _OUTBOUNDMESSAGEBODY.oneofs_by_name['entry']
_OUTBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_OUTBOUNDMESSAGEBODY.fields_by_name['email'])
_OUTBOUNDMESSAGEBODY.fields_by_name['email'].containing_oneof = _OUTBOUNDMESSAGEBODY.oneofs_by_name['entry']
_OUTBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_OUTBOUNDMESSAGEBODY.fields_by_name['template'])
_OUTBOUNDMESSAGEBODY.fields_by_name['template'].containing_oneof = _OUTBOUNDMESSAGEBODY.oneofs_by_name['entry']
_OUTBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_OUTBOUNDMESSAGEBODY.fields_by_name['url'])
_OUTBOUNDMESSAGEBODY.fields_by_name['url'].containing_oneof = _OUTBOUNDMESSAGEBODY.oneofs_by_name['entry']
_OUTBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_OUTBOUNDMESSAGEBODY.fields_by_name['voice'])
_OUTBOUNDMESSAGEBODY.fields_by_name['voice'].containing_oneof = _OUTBOUNDMESSAGEBODY.oneofs_by_name['entry']
_OUTBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_OUTBOUNDMESSAGEBODY.fields_by_name['ussd'])
_OUTBOUNDMESSAGEBODY.fields_by_name['ussd'].containing_oneof = _OUTBOUNDMESSAGEBODY.oneofs_by_name['entry']
_PROMPTMESSAGEMENUITEMBODY.fields_by_name['media'].message_type = _MEDIAMESSAGEBODY
_PROMPTMESSAGEMENUITEMBODY.oneofs_by_name['entry'].fields.append(
_PROMPTMESSAGEMENUITEMBODY.fields_by_name['text'])
_PROMPTMESSAGEMENUITEMBODY.fields_by_name['text'].containing_oneof = _PROMPTMESSAGEMENUITEMBODY.oneofs_by_name['entry']
_PROMPTMESSAGEMENUITEMBODY.oneofs_by_name['entry'].fields.append(
_PROMPTMESSAGEMENUITEMBODY.fields_by_name['media'])
_PROMPTMESSAGEMENUITEMBODY.fields_by_name['media'].containing_oneof = _PROMPTMESSAGEMENUITEMBODY.oneofs_by_name['entry']
_INBOUNDMESSAGEBODY.fields_by_name['media'].message_type = _MEDIAMESSAGEBODY
_INBOUNDMESSAGEBODY.fields_by_name['location'].message_type = _LOCATIONMESSAGEBODY
_INBOUNDMESSAGEBODY.fields_by_name['email'].message_type = _EMAILMESSAGEBODY
_INBOUNDMESSAGEBODY.fields_by_name['voice'].message_type = _VOICECALLINPUTMESSAGEBODY
_INBOUNDMESSAGEBODY.fields_by_name['ussd'].message_type = _USSDINPUTMESSAGEBODY
_INBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_INBOUNDMESSAGEBODY.fields_by_name['text'])
_INBOUNDMESSAGEBODY.fields_by_name['text'].containing_oneof = _INBOUNDMESSAGEBODY.oneofs_by_name['entry']
_INBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_INBOUNDMESSAGEBODY.fields_by_name['media'])
_INBOUNDMESSAGEBODY.fields_by_name['media'].containing_oneof = _INBOUNDMESSAGEBODY.oneofs_by_name['entry']
_INBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_INBOUNDMESSAGEBODY.fields_by_name['location'])
_INBOUNDMESSAGEBODY.fields_by_name['location'].containing_oneof = _INBOUNDMESSAGEBODY.oneofs_by_name['entry']
_INBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_INBOUNDMESSAGEBODY.fields_by_name['email'])
_INBOUNDMESSAGEBODY.fields_by_name['email'].containing_oneof = _INBOUNDMESSAGEBODY.oneofs_by_name['entry']
_INBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_INBOUNDMESSAGEBODY.fields_by_name['voice'])
_INBOUNDMESSAGEBODY.fields_by_name['voice'].containing_oneof = _INBOUNDMESSAGEBODY.oneofs_by_name['entry']
_INBOUNDMESSAGEBODY.oneofs_by_name['entry'].fields.append(
_INBOUNDMESSAGEBODY.fields_by_name['ussd'])
_INBOUNDMESSAGEBODY.fields_by_name['ussd'].containing_oneof = _INBOUNDMESSAGEBODY.oneofs_by_name['entry']
_OUTBOUNDMESSAGEREPLYPROMPT.fields_by_name['action'].enum_type = _PROMPTMESSAGEREPLYACTION
_OUTBOUNDMESSAGEREPLYPROMPT.fields_by_name['menu'].message_type = _PROMPTMESSAGEMENUITEMBODY
_OUTBOUNDMESSAGE.fields_by_name['body'].message_type = _OUTBOUNDMESSAGEBODY
_OUTBOUNDMESSAGE.fields_by_name['provider_tag'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_OUTBOUNDMESSAGE.fields_by_name['reply_token'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_OUTBOUNDMESSAGE.fields_by_name['reply_prompt'].message_type = _OUTBOUNDMESSAGEREPLYPROMPT
DESCRIPTOR.message_types_by_name['MessagingChannelNumber'] = _MESSAGINGCHANNELNUMBER
DESCRIPTOR.message_types_by_name['MediaMessageBody'] = _MEDIAMESSAGEBODY
DESCRIPTOR.message_types_by_name['LocationMessageBody'] = _LOCATIONMESSAGEBODY
DESCRIPTOR.message_types_by_name['EmailMessageBody'] = _EMAILMESSAGEBODY
DESCRIPTOR.message_types_by_name['TemplateMessageBody'] = _TEMPLATEMESSAGEBODY
DESCRIPTOR.message_types_by_name['SayCallAction'] = _SAYCALLACTION
DESCRIPTOR.message_types_by_name['PlayCallAction'] = _PLAYCALLACTION
DESCRIPTOR.message_types_by_name['GetDigitsCallAction'] = _GETDIGITSCALLACTION
DESCRIPTOR.message_types_by_name['GetRecordingCallAction'] = _GETRECORDINGCALLACTION
DESCRIPTOR.message_types_by_name['RecordSessionCallAction'] = _RECORDSESSIONCALLACTION
DESCRIPTOR.message_types_by_name['DialCallAction'] = _DIALCALLACTION
DESCRIPTOR.message_types_by_name['EnqueueCallAction'] = _ENQUEUECALLACTION
DESCRIPTOR.message_types_by_name['DequeueCallAction'] = _DEQUEUECALLACTION
DESCRIPTOR.message_types_by_name['RejectCallAction'] = _REJECTCALLACTION
DESCRIPTOR.message_types_by_name['RedirectCallAction'] = _REDIRECTCALLACTION
DESCRIPTOR.message_types_by_name['VoiceCallAction'] = _VOICECALLACTION
DESCRIPTOR.message_types_by_name['VoiceCallDialplanMessageBody'] = _VOICECALLDIALPLANMESSAGEBODY
DESCRIPTOR.message_types_by_name['VoiceCallDialInput'] = _VOICECALLDIALINPUT
DESCRIPTOR.message_types_by_name['VoiceCallQueueInput'] = _VOICECALLQUEUEINPUT
DESCRIPTOR.message_types_by_name['VoiceCallInputMessageBody'] = _VOICECALLINPUTMESSAGEBODY
DESCRIPTOR.message_types_by_name['UssdInputMessageBody'] = _USSDINPUTMESSAGEBODY
DESCRIPTOR.message_types_by_name['UssdMenuMessageBody'] = _USSDMENUMESSAGEBODY
DESCRIPTOR.message_types_by_name['OutboundMessageBody'] = _OUTBOUNDMESSAGEBODY
DESCRIPTOR.message_types_by_name['PromptMessageMenuItemBody'] = _PROMPTMESSAGEMENUITEMBODY
DESCRIPTOR.message_types_by_name['InboundMessageBody'] = _INBOUNDMESSAGEBODY
DESCRIPTOR.message_types_by_name['OutboundMessageReplyPrompt'] = _OUTBOUNDMESSAGEREPLYPROMPT
DESCRIPTOR.message_types_by_name['OutboundMessage'] = _OUTBOUNDMESSAGE
DESCRIPTOR.enum_types_by_name['MessagingChannel'] = _MESSAGINGCHANNEL
DESCRIPTOR.enum_types_by_name['MessagingConsentUpdate'] = _MESSAGINGCONSENTUPDATE
DESCRIPTOR.enum_types_by_name['MessagingConsentUpdateStatus'] = _MESSAGINGCONSENTUPDATESTATUS
DESCRIPTOR.enum_types_by_name['MessagingSessionEndReason'] = _MESSAGINGSESSIONENDREASON
DESCRIPTOR.enum_types_by_name['MessageReaction'] = _MESSAGEREACTION
DESCRIPTOR.enum_types_by_name['PromptMessageReplyAction'] = _PROMPTMESSAGEREPLYACTION
DESCRIPTOR.enum_types_by_name['MessageDeliveryStatus'] = _MESSAGEDELIVERYSTATUS
DESCRIPTOR.enum_types_by_name['VoiceCallStatus'] = _VOICECALLSTATUS
DESCRIPTOR.enum_types_by_name['VoiceCallHangupCause'] = _VOICECALLHANGUPCAUSE
DESCRIPTOR.enum_types_by_name['UssdSessionStatus'] = _USSDSESSIONSTATUS
DESCRIPTOR.enum_types_by_name['TextToSpeechVoice'] = _TEXTTOSPEECHVOICE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MessagingChannelNumber = _reflection.GeneratedProtocolMessageType('MessagingChannelNumber', (_message.Message,), {
'DESCRIPTOR' : _MESSAGINGCHANNELNUMBER,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.MessagingChannelNumber)
})
_sym_db.RegisterMessage(MessagingChannelNumber)
MediaMessageBody = _reflection.GeneratedProtocolMessageType('MediaMessageBody', (_message.Message,), {
'DESCRIPTOR' : _MEDIAMESSAGEBODY,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.MediaMessageBody)
})
_sym_db.RegisterMessage(MediaMessageBody)
LocationMessageBody = _reflection.GeneratedProtocolMessageType('LocationMessageBody', (_message.Message,), {
'DESCRIPTOR' : _LOCATIONMESSAGEBODY,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.LocationMessageBody)
})
_sym_db.RegisterMessage(LocationMessageBody)
EmailMessageBody = _reflection.GeneratedProtocolMessageType('EmailMessageBody', (_message.Message,), {
'DESCRIPTOR' : _EMAILMESSAGEBODY,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.EmailMessageBody)
})
_sym_db.RegisterMessage(EmailMessageBody)
TemplateMessageBody = _reflection.GeneratedProtocolMessageType('TemplateMessageBody', (_message.Message,), {
'ParamsEntry' : _reflection.GeneratedProtocolMessageType('ParamsEntry', (_message.Message,), {
'DESCRIPTOR' : _TEMPLATEMESSAGEBODY_PARAMSENTRY,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.TemplateMessageBody.ParamsEntry)
})
,
'DESCRIPTOR' : _TEMPLATEMESSAGEBODY,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.TemplateMessageBody)
})
_sym_db.RegisterMessage(TemplateMessageBody)
_sym_db.RegisterMessage(TemplateMessageBody.ParamsEntry)
SayCallAction = _reflection.GeneratedProtocolMessageType('SayCallAction', (_message.Message,), {
'DESCRIPTOR' : _SAYCALLACTION,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.SayCallAction)
})
_sym_db.RegisterMessage(SayCallAction)
PlayCallAction = _reflection.GeneratedProtocolMessageType('PlayCallAction', (_message.Message,), {
'DESCRIPTOR' : _PLAYCALLACTION,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.PlayCallAction)
})
_sym_db.RegisterMessage(PlayCallAction)
GetDigitsCallAction = _reflection.GeneratedProtocolMessageType('GetDigitsCallAction', (_message.Message,), {
'DESCRIPTOR' : _GETDIGITSCALLACTION,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.GetDigitsCallAction)
})
_sym_db.RegisterMessage(GetDigitsCallAction)
GetRecordingCallAction = _reflection.GeneratedProtocolMessageType('GetRecordingCallAction', (_message.Message,), {
'DESCRIPTOR' : _GETRECORDINGCALLACTION,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.GetRecordingCallAction)
})
_sym_db.RegisterMessage(GetRecordingCallAction)
RecordSessionCallAction = _reflection.GeneratedProtocolMessageType('RecordSessionCallAction', (_message.Message,), {
'DESCRIPTOR' : _RECORDSESSIONCALLACTION,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.RecordSessionCallAction)
})
_sym_db.RegisterMessage(RecordSessionCallAction)
DialCallAction = _reflection.GeneratedProtocolMessageType('DialCallAction', (_message.Message,), {
'DESCRIPTOR' : _DIALCALLACTION,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.DialCallAction)
})
_sym_db.RegisterMessage(DialCallAction)
EnqueueCallAction = _reflection.GeneratedProtocolMessageType('EnqueueCallAction', (_message.Message,), {
'DESCRIPTOR' : _ENQUEUECALLACTION,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.EnqueueCallAction)
})
_sym_db.RegisterMessage(EnqueueCallAction)
DequeueCallAction = _reflection.GeneratedProtocolMessageType('DequeueCallAction', (_message.Message,), {
'DESCRIPTOR' : _DEQUEUECALLACTION,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.DequeueCallAction)
})
_sym_db.RegisterMessage(DequeueCallAction)
RejectCallAction = _reflection.GeneratedProtocolMessageType('RejectCallAction', (_message.Message,), {
'DESCRIPTOR' : _REJECTCALLACTION,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.RejectCallAction)
})
_sym_db.RegisterMessage(RejectCallAction)
RedirectCallAction = _reflection.GeneratedProtocolMessageType('RedirectCallAction', (_message.Message,), {
'DESCRIPTOR' : _REDIRECTCALLACTION,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.RedirectCallAction)
})
_sym_db.RegisterMessage(RedirectCallAction)
VoiceCallAction = _reflection.GeneratedProtocolMessageType('VoiceCallAction', (_message.Message,), {
'DESCRIPTOR' : _VOICECALLACTION,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.VoiceCallAction)
})
_sym_db.RegisterMessage(VoiceCallAction)
VoiceCallDialplanMessageBody = _reflection.GeneratedProtocolMessageType('VoiceCallDialplanMessageBody', (_message.Message,), {
'DESCRIPTOR' : _VOICECALLDIALPLANMESSAGEBODY,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.VoiceCallDialplanMessageBody)
})
_sym_db.RegisterMessage(VoiceCallDialplanMessageBody)
VoiceCallDialInput = _reflection.GeneratedProtocolMessageType('VoiceCallDialInput', (_message.Message,), {
'DESCRIPTOR' : _VOICECALLDIALINPUT,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.VoiceCallDialInput)
})
_sym_db.RegisterMessage(VoiceCallDialInput)
VoiceCallQueueInput = _reflection.GeneratedProtocolMessageType('VoiceCallQueueInput', (_message.Message,), {
'DESCRIPTOR' : _VOICECALLQUEUEINPUT,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.VoiceCallQueueInput)
})
_sym_db.RegisterMessage(VoiceCallQueueInput)
VoiceCallInputMessageBody = _reflection.GeneratedProtocolMessageType('VoiceCallInputMessageBody', (_message.Message,), {
'DESCRIPTOR' : _VOICECALLINPUTMESSAGEBODY,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.VoiceCallInputMessageBody)
})
_sym_db.RegisterMessage(VoiceCallInputMessageBody)
UssdInputMessageBody = _reflection.GeneratedProtocolMessageType('UssdInputMessageBody', (_message.Message,), {
'DESCRIPTOR' : _USSDINPUTMESSAGEBODY,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.UssdInputMessageBody)
})
_sym_db.RegisterMessage(UssdInputMessageBody)
UssdMenuMessageBody = _reflection.GeneratedProtocolMessageType('UssdMenuMessageBody', (_message.Message,), {
'DESCRIPTOR' : _USSDMENUMESSAGEBODY,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.UssdMenuMessageBody)
})
_sym_db.RegisterMessage(UssdMenuMessageBody)
OutboundMessageBody = _reflection.GeneratedProtocolMessageType('OutboundMessageBody', (_message.Message,), {
'DESCRIPTOR' : _OUTBOUNDMESSAGEBODY,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.OutboundMessageBody)
})
_sym_db.RegisterMessage(OutboundMessageBody)
PromptMessageMenuItemBody = _reflection.GeneratedProtocolMessageType('PromptMessageMenuItemBody', (_message.Message,), {
'DESCRIPTOR' : _PROMPTMESSAGEMENUITEMBODY,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.PromptMessageMenuItemBody)
})
_sym_db.RegisterMessage(PromptMessageMenuItemBody)
InboundMessageBody = _reflection.GeneratedProtocolMessageType('InboundMessageBody', (_message.Message,), {
'DESCRIPTOR' : _INBOUNDMESSAGEBODY,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.InboundMessageBody)
})
_sym_db.RegisterMessage(InboundMessageBody)
OutboundMessageReplyPrompt = _reflection.GeneratedProtocolMessageType('OutboundMessageReplyPrompt', (_message.Message,), {
'DESCRIPTOR' : _OUTBOUNDMESSAGEREPLYPROMPT,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.OutboundMessageReplyPrompt)
})
_sym_db.RegisterMessage(OutboundMessageReplyPrompt)
OutboundMessage = _reflection.GeneratedProtocolMessageType('OutboundMessage', (_message.Message,), {
'DESCRIPTOR' : _OUTBOUNDMESSAGE,
'__module__' : 'messaging_model_pb2'
# @@protoc_insertion_point(class_scope:com.elarian.hera.proto.OutboundMessage)
})
_sym_db.RegisterMessage(OutboundMessage)
_TEMPLATEMESSAGEBODY_PARAMSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 48.935936
| 14,252
| 0.785155
|
e39a8b77156b7c6f2b27f4cba59ff4709813ee47
| 198
|
py
|
Python
|
allennlp_models/lm/modules/__init__.py
|
matt-peters/allennlp-models
|
cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e
|
[
"Apache-2.0"
] | 402
|
2020-03-11T22:58:35.000Z
|
2022-03-29T09:05:27.000Z
|
allennlp_models/lm/modules/__init__.py
|
matt-peters/allennlp-models
|
cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e
|
[
"Apache-2.0"
] | 116
|
2020-03-11T01:26:57.000Z
|
2022-03-25T13:03:56.000Z
|
allennlp_models/lm/modules/__init__.py
|
matt-peters/allennlp-models
|
cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e
|
[
"Apache-2.0"
] | 140
|
2020-03-11T00:51:35.000Z
|
2022-03-29T09:05:36.000Z
|
# flake8: noqa: F403
from allennlp_models.lm.modules.seq2seq_encoders import *
from allennlp_models.lm.modules.language_model_heads import *
from allennlp_models.lm.modules.token_embedders import *
| 39.6
| 61
| 0.843434
|
1661d637d8c10153347eed6286d4a33eb1bba3a3
| 1,075
|
py
|
Python
|
h/renderers.py
|
noscripter/h
|
a7a4095a46683ea08dae62335bbcd53f7ab313e2
|
[
"MIT"
] | null | null | null |
h/renderers.py
|
noscripter/h
|
a7a4095a46683ea08dae62335bbcd53f7ab313e2
|
[
"MIT"
] | null | null | null |
h/renderers.py
|
noscripter/h
|
a7a4095a46683ea08dae62335bbcd53f7ab313e2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Taken from:
# https://pyramid-cookbook.readthedocs.org/en/latest/templates/customrenderers.html
# with minor modifications
import unicodecsv as csv
from h._compat import StringIO
class CSV(object):
def __init__(self, info):
pass
def __call__(self, value, system):
""" Returns a plain CSV-encoded string with content-type
``text/csv``. The content-type may be overridden by
setting ``request.response.content_type``."""
request = system.get('request')
if request is not None:
response = request.response
ct = response.content_type
if ct == response.default_content_type:
response.content_type = 'text/csv'
fout = StringIO()
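        # Write the header row, then the data rows; QUOTE_MINIMAL only quotes fields that need it.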
writer = csv.writer(fout,
delimiter=',',
                            quotechar='"',
quoting=csv.QUOTE_MINIMAL)
writer.writerow(value.get('header', []))
writer.writerows(value.get('rows', []))
return fout.getvalue()
| 30.714286
| 83
| 0.583256
|
b177878966dad6cb1804dc5f45f561c73f523a64
| 41
|
py
|
Python
|
doctor-octopus/tasks/aws/__init__.py
|
tiagorkrebs/doctor-octopus
|
876ed43468a68ac0fa96fd4b964517a886f67feb
|
[
"MIT"
] | null | null | null |
doctor-octopus/tasks/aws/__init__.py
|
tiagorkrebs/doctor-octopus
|
876ed43468a68ac0fa96fd4b964517a886f67feb
|
[
"MIT"
] | null | null | null |
doctor-octopus/tasks/aws/__init__.py
|
tiagorkrebs/doctor-octopus
|
876ed43468a68ac0fa96fd4b964517a886f67feb
|
[
"MIT"
] | 1
|
2020-08-21T01:35:15.000Z
|
2020-08-21T01:35:15.000Z
|
from .route53 import *
from .cli import *
| 20.5
| 22
| 0.731707
|
37d03868dee4630d15004b19ed29f8f432a66f8d
| 3,481
|
py
|
Python
|
rootfs/api/admin.py
|
jianxiaoguo/manager
|
148e9d14dcf472750d3eb637794cdabb2a2edeea
|
[
"Apache-2.0"
] | null | null | null |
rootfs/api/admin.py
|
jianxiaoguo/manager
|
148e9d14dcf472750d3eb637794cdabb2a2edeea
|
[
"Apache-2.0"
] | null | null | null |
rootfs/api/admin.py
|
jianxiaoguo/manager
|
148e9d14dcf472750d3eb637794cdabb2a2edeea
|
[
"Apache-2.0"
] | null | null | null |
from decimal import Decimal
from django import forms
from django.contrib import admin
from api.models import Cluster, Funding, Bill
from api.models.charge_rules import ChargeRule
is_superuser = True
empty_value_display = '-'
admin.site.site_header = 'Workflow Manager'
admin.site.site_title = 'Workflow Manager'
class ClusterAdmin(admin.ModelAdmin):
list_display = ('name', 'ingress',)
search_fields = ('name',)
class ChargeRuleForm(forms.ModelForm):
def clean_price_unit(self):
price_unit_data = self.cleaned_data['price_unit']
resource_type_data = self.cleaned_data['resource_type']
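        # price_unit is expected to look like "<credit>/<unit>/<period>", e.g. "credit/MB/day".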
credit_unit, measurement_unit, time_unit = price_unit_data.split(r'/')
print(price_unit_data)
if time_unit not in ['hour', 'day'] or \
(resource_type_data == 1 and measurement_unit not in [
'mcores']) or \
(resource_type_data in [2, 3] and measurement_unit not in [
'MB']) or \
            (resource_type_data == 4 and measurement_unit not in [
                'bytes']):  # noqa
raise forms.ValidationError('price_unit is invalid')
return price_unit_data
def clean_price(self):
price = self.cleaned_data['price']
if self.cleaned_data['price'] < 0:
raise forms.ValidationError('price must be a positive number')
return price
class Meta:
model = ChargeRule
fields = ('name', 'resource_type', 'price_unit', 'price')
help_texts = {
'price_unit': 'example: credit/MB/day,credit/mcores/day,credit/bytes/hour', # noqa
}
class ChargeRuleAdmin(admin.ModelAdmin):
form = ChargeRuleForm
list_display = ('name', 'resource_type', 'price_unit', 'price', 'created')
search_fields = ('name', 'resource_type')
class FundingAdmin(admin.ModelAdmin):
list_display = (
'uuid', 'owner', 'operator', 'trade_type', 'credit', 'trade_credit',
'bill', 'remark', 'created')
fieldsets = (
('base', {
'fields': (
'owner', 'trade_type', 'trade_credit', 'remark')
}),
)
list_filter = ('uuid', 'owner', 'trade_type')
def save_model(self, request, funding, form, change):
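        # Keep a running balance: new credit = latest recorded credit + this trade's credit.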
try:
credit = Funding.objects.filter(
owner_id=request.POST['owner']).latest('created').credit
except Funding.DoesNotExist:
credit = 0
trade_credit = request.POST.get('trade_credit')
funding.credit = credit + Decimal(trade_credit)
funding.operator = request.user.username
funding.save()
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class BillAdmin(admin.ModelAdmin):
list_display = (
'uuid', 'owner', 'cluster', 'app_id', 'charge_rule_info',
'resource_info', 'total_price', 'start_time', 'end_time', 'created')
list_filter = ('uuid', 'cluster', 'owner')
search_fields = ('uuid', )
def has_add_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
admin.site.register(Cluster, ClusterAdmin)
admin.site.register(ChargeRule, ChargeRuleAdmin)
admin.site.register(Funding, FundingAdmin)
admin.site.register(Bill, BillAdmin)
| 32.53271
| 94
| 0.638035
|
e7a34a5804ab06bd33b1c4de20d3fe624a8ee827
| 28,374
|
py
|
Python
|
python/gr_digital_rf/digital_rf_source.py
|
w2naf/digital_rf
|
482608dcc5608b9d9a0aacf77e75f83edbec1f0e
|
[
"BSD-3-Clause"
] | null | null | null |
python/gr_digital_rf/digital_rf_source.py
|
w2naf/digital_rf
|
482608dcc5608b9d9a0aacf77e75f83edbec1f0e
|
[
"BSD-3-Clause"
] | null | null | null |
python/gr_digital_rf/digital_rf_source.py
|
w2naf/digital_rf
|
482608dcc5608b9d9a0aacf77e75f83edbec1f0e
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2017 Massachusetts Institute of Technology (MIT)
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
# ----------------------------------------------------------------------------
"""Module defining a Digital RF Source block."""
from __future__ import absolute_import, division, print_function
import os
import traceback
import gnuradio.blocks
import h5py
import numpy as np
import pmt
from digital_rf import DigitalRFReader, util
from gnuradio import gr
import six
H5T_LOOKUP = {
# (class, itemsize, is_complex): {name, dtype, fillvalue}
(h5py.h5t.INTEGER, 1, False): dict(
name='s8', dtype=np.int8, fillvalue=np.iinfo(np.int8).min,
),
(h5py.h5t.INTEGER, 2, False): dict(
name='s16', dtype=np.int16, fillvalue=np.iinfo(np.int16).min,
),
(h5py.h5t.INTEGER, 4, False): dict(
name='s32', dtype=np.int32, fillvalue=np.iinfo(np.int32).min,
),
(h5py.h5t.INTEGER, 8, False): dict(
name='s64', dtype=np.int64, fillvalue=np.iinfo(np.int64).min,
),
(h5py.h5t.FLOAT, 4, False): dict(
name='f32', dtype=np.float32, fillvalue=np.nan,
),
(h5py.h5t.FLOAT, 8, False): dict(
name='f64', dtype=np.float64, fillvalue=np.nan,
),
(h5py.h5t.INTEGER, 1, True): dict(
name='sc8',
dtype=np.dtype([('r', np.int8), ('i', np.int8)]),
fillvalue=(np.iinfo(np.int8).min,)*2,
),
(h5py.h5t.INTEGER, 2, True): dict(
name='sc16',
dtype=np.dtype([('r', np.int16), ('i', np.int16)]),
fillvalue=(np.iinfo(np.int16).min,)*2,
),
(h5py.h5t.INTEGER, 4, True): dict(
name='sc32',
dtype=np.dtype([('r', np.int32), ('i', np.int32)]),
fillvalue=(np.iinfo(np.int32).min,)*2,
),
(h5py.h5t.INTEGER, 8, True): dict(
name='sc64',
dtype=np.dtype([('r', np.int64), ('i', np.int64)]),
fillvalue=(np.iinfo(np.int64).min,)*2,
),
(h5py.h5t.FLOAT, 4, True): dict(
name='fc32', dtype=np.complex64, fillvalue=(np.nan+np.nan*1j),
),
(h5py.h5t.FLOAT, 8, True): dict(
name='fc64', dtype=np.complex128, fillvalue=(np.nan+np.nan*1j),
),
}
def get_h5type(cls, size, is_complex):
try:
typedict = H5T_LOOKUP[(cls, size, is_complex)]
except KeyError:
raise ValueError('HDF5 data type not supported for reading.')
return typedict
class digital_rf_channel_source(gr.sync_block):
"""Source block for reading a channel of Digital RF data."""
def __init__(
self, channel_dir, start=None, end=None, repeat=False, gapless=False,
min_chunksize=None,
):
"""Read a channel of data from a Digital RF directory.
In addition to outputting samples from Digital RF format data, this
block also emits a 'properties' message containing inherent channel
properties and adds stream tags using the channel's accompanying
Digital Metadata. See the Notes section for details on what the
messages and stream tags contain.
Parameters
----------
channel_dir : string | list of strings
Either a single channel directory containing 'drf_properties.h5'
and timestamped subdirectories with Digital RF files, or a list of
such. A directory can be a file system path or a url, where the url
points to a channel directory. Each must be a local path, or start
            with 'http://', 'file://', or 'ftp://'.
Other Parameters
----------------
start : None | int | float | string, optional
A value giving the start of the channel's playback.
If None or '', the start of the channel's available data is used.
If an integer, it is interpreted as a sample index given in the
number of samples since the epoch (time_since_epoch*sample_rate).
If a float, it is interpreted as a UTC timestamp (seconds since
epoch).
If a string, four forms are permitted:
1) a string which can be evaluated to an integer/float and
interpreted as above,
2) a string beginning with '+' and followed by an integer
(float) expression, interpreted as samples (seconds) from
the start of the data, and
3) a time in ISO8601 format, e.g. '2016-01-01T16:24:00Z'
4) 'now' ('nowish'), indicating the current time (rounded up)
end : None | int | float | string, optional
A value giving the end of the channel's playback.
If None or '', the end of the channel's available data is used.
See `start` for a description of how this value is interpreted.
repeat : bool, optional
If True, loop the data continuously from the start after the end
is reached. If False, stop after the data is read once.
gapless : bool, optional
If True, output default-filled samples for any missing data between
start and end. If False, skip missing samples and add an `rx_time`
stream tag to indicate the gap.
min_chunksize : None | int, optional
Minimum number of samples to output at once. This value can be used
to adjust the source's performance to reduce underruns and
processing time. If None, a sensible default will be used.
Notes
-----
A channel directory must contain subdirectories/files in the format:
[YYYY-MM-DDTHH-MM-SS]/rf@[seconds].[%03i milliseconds].h5
Each directory provided is considered the same channel. An error is
raised if their sample rates differ, or if their time periods overlap.
Upon start, this block sends a 'properties' message on its output
message port that contains a dictionary with one key, the channel's
name, and a value which is a dictionary of properties found in the
channel's 'drf_properties.h5' file.
This block emits the following stream tags at the appropriate sample
for each of the channel's accompanying Digital Metadata samples:
rx_time : (int secs, float frac) tuple
Time since epoch of the sample.
rx_rate : float
Sample rate in Hz.
rx_freq : float | 1-D array of floats
Center frequency or frequencies of the subchannels based on
the 'center_frequencies' metadata field.
metadata : dict
Any additional Digital Metadata fields are added to this
dictionary tag of metadata.
"""
if isinstance(channel_dir, six.string_types):
channel_dir = [channel_dir]
# eventually, we should re-factor DigitalRFReader and associated so
# that reading from a list of channel directories is possible
# with a DigitalRFChannelReader class or similar
# until then, split the path and use existing DigitalRFReader
top_level_dirs = []
chs = set()
for ch_dir in channel_dir:
top_level_dir, ch = os.path.split(ch_dir)
top_level_dirs.append(top_level_dir)
chs.add(ch)
if len(chs) == 1:
ch = chs.pop()
else:
raise ValueError('Channel directories must have the same name.')
self._ch = ch
self._Reader = DigitalRFReader(top_level_dirs)
self._properties = self._Reader.get_properties(self._ch)
typeclass = self._properties['H5Tget_class']
itemsize = self._properties['H5Tget_size']
is_complex = self._properties['is_complex']
vlen = self._properties['num_subchannels']
sr = self._properties['samples_per_second']
self._itemsize = itemsize
self._sample_rate = sr
self._sample_rate_pmt = pmt.from_double(float(sr))
# determine output signature from HDF5 type metadata
typedict = get_h5type(typeclass, itemsize, is_complex)
self._outtype = typedict['name']
self._itemtype = typedict['dtype']
self._fillvalue = typedict['fillvalue']
if vlen == 1:
out_sig = [self._itemtype]
else:
out_sig = [(self._itemtype, vlen)]
gr.sync_block.__init__(
self,
name="digital_rf_channel_source",
in_sig=None,
out_sig=out_sig,
)
self.message_port_register_out(pmt.intern('properties'))
self._id = pmt.intern(self._ch)
self._tag_queue = {}
self._start = start
self._end = end
self._repeat = repeat
self._gapless = gapless
if min_chunksize is None:
# FIXME: it shouldn't have to be quite this high
self._min_chunksize = int(sr)
else:
self._min_chunksize = min_chunksize
# reduce CPU usage and underruns by setting a minimum number of samples
# to handle at once
# (really want to set_min_noutput_items, but no way to do that from
# Python)
self.set_output_multiple(self._min_chunksize)
try:
self._DMDReader = self._Reader.get_digital_metadata(self._ch)
except IOError:
self._DMDReader = None
def _queue_tags(self, sample, tags):
"""Queue stream tags to be attached to data in the work function.
In addition to the tags specified in the `tags` dictionary, this will
add `rx_time` and `rx_rate` tags giving the sample time and rate.
Parameters
----------
sample : int
Sample index for the sample to tag, given in the number of samples
since the epoch (time_since_epoch*sample_rate).
tags : dict
Dictionary containing the tags to add with keys specifying the tag
name. The value is cast as an appropriate pmt type, while the name
will be turned into a pmt string in the work function.
"""
# add to current queued tags for sample if applicable
tag_dict = self._tag_queue.get(sample, {})
if not tag_dict:
# add time and rate tags
time = sample/self._sample_rate
tag_dict['rx_time'] = pmt.make_tuple(
pmt.from_uint64(int(np.uint64(time))),
pmt.from_double(float(time % 1)),
)
tag_dict['rx_rate'] = self._sample_rate_pmt
for k, v in tags.items():
try:
pmt_val = pmt.to_pmt(v)
except ValueError:
traceback.print_exc()
errstr = (
"Can't add tag for '{0}' because its value of {1} failed"
" to convert to a pmt value."
)
print(errstr.format(k, v))
else:
tag_dict[k] = pmt_val
self._tag_queue[sample] = tag_dict
def start(self):
self._bounds = self._Reader.get_bounds(self._ch)
self._start_sample = util.parse_identifier_to_sample(
self._start, self._sample_rate, self._bounds[0],
)
self._end_sample = util.parse_identifier_to_sample(
self._end, self._sample_rate, self._bounds[0],
)
if self._start_sample is None:
self._read_start_sample = self._bounds[0]
else:
self._read_start_sample = self._start_sample
# add default tags to first sample
self._queue_tags(self._read_start_sample, {})
# replace longdouble samples_per_second with float for pmt conversion
properties_message = self._properties.copy()
properties_message['samples_per_second'] = \
float(properties_message['samples_per_second'])
self.message_port_pub(
pmt.intern('properties'),
pmt.to_pmt({self._ch: properties_message}),
)
return super(digital_rf_channel_source, self).start()
def work(self, input_items, output_items):
out = output_items[0]
nsamples = len(out)
next_index = 0
# repeat reading until we succeed or return
while next_index < nsamples:
read_start = self._read_start_sample
# read_end is inclusive, hence the -1
read_end = self._read_start_sample + (nsamples - next_index) - 1
# creating a read function that has an output argument so data can
# be copied directly would be nice
# also should move EOFError checking into reader once watchdog
# bounds functionality is implemented
try:
if self._end_sample is None:
if read_end > self._bounds[1]:
self._bounds = self._Reader.get_bounds(self._ch)
read_end = min(read_end, self._bounds[1])
else:
if read_end > self._end_sample:
read_end = self._end_sample
if read_start > read_end:
raise EOFError
# read data
data_dict = self._Reader.read(
read_start, read_end, self._ch,
)
# handled all samples through read_end regardless of whether
# they were written to the output vector
self._read_start_sample = read_end + 1
# early escape for no data
if not data_dict:
if self._gapless:
# output empty samples if no data and gapless output
stop_index = next_index + read_end + 1 - read_start
out[next_index:stop_index] = self._fillvalue
next_index = stop_index
else:
# clear any existing tags
self._tag_queue.clear()
# add tag at next potential sample to indicate skip
self._queue_tags(self._read_start_sample, {})
continue
# read corresponding metadata
if self._DMDReader is not None:
meta_dict = self._DMDReader.read(
read_start, read_end,
)
for sample, meta in meta_dict.items():
# add tags from Digital Metadata
# (in addition to default time and rate tags)
# eliminate sample_rate_* tags with duplicate info
meta.pop('sample_rate_denominator', None)
meta.pop('sample_rate_numerator', None)
# get center frequencies for rx_freq tag, squeeze()[()]
# to get single value if possible else pass as an array
cf = meta.pop('center_frequencies', None)
if cf is not None:
cf = cf.ravel().squeeze()[()]
tags = dict(
rx_freq=cf,
# all other metadata goes in metadata tag
metadata=meta,
)
self._queue_tags(sample, tags)
# add data and tags to output
next_continuous_sample = read_start
for sample, data in data_dict.items():
# detect data skip
if sample > next_continuous_sample:
if self._gapless:
# advance output by skipped number of samples
nskipped = sample - next_continuous_sample
sample_index = next_index + nskipped
out[next_index:sample_index] = self._fillvalue
next_index = sample_index
else:
# emit new time tag at sample to indicate skip
self._queue_tags(sample, {})
# output data
n = data.shape[0]
stop_index = next_index + n
end_sample = sample + n
out[next_index:stop_index] = data.squeeze()
# output tags
for tag_sample in sorted(self._tag_queue.keys()):
if tag_sample < sample:
# drop tags from before current data block
del self._tag_queue[tag_sample]
continue
elif tag_sample >= end_sample:
# wait to output tags from after current data block
break
offset = (
self.nitems_written(0) # offset @ start of work
+ next_index # additional offset of data block
+ (tag_sample - sample)
)
tag_dict = self._tag_queue.pop(tag_sample)
for name, val in tag_dict.items():
self.add_item_tag(
0, offset, pmt.intern(name), val, self._id,
)
# advance next output index and continuous sample
next_index = stop_index # <=== next_index += n
next_continuous_sample = end_sample
except EOFError:
if self._repeat:
if self._start_sample is None:
self._read_start_sample = self._bounds[0]
else:
self._read_start_sample = self._start_sample
self._queue_tags(self._read_start_sample, {})
continue
else:
break
if next_index == 0:
# return WORK_DONE
return -1
return next_index
def get_gapless(self):
return self._gapless
def set_gapless(self, gapless):
self._gapless = gapless
def get_repeat(self):
return self._repeat
def set_repeat(self, repeat):
self._repeat = repeat
class digital_rf_source(gr.hier_block2):
"""Source block for reading Digital RF data."""
def __init__(
self, top_level_dir, channels=None, start=None, end=None,
repeat=False, throttle=False, gapless=False, min_chunksize=None,
):
"""Read data from a directory containing Digital RF channels.
In addition to outputting samples from Digital RF format data, this
block also emits a 'properties' message containing inherent channel
properties and adds stream tags using the channel's accompanying
Digital Metadata. See the Notes section for details on what the
messages and stream tags contain.
Parameters
----------
top_level_dir : string
Either a single top-level directory containing Digital RF channel
directories, or a list of such. A directory can be a file system
path or a url, where the url points to a top level directory. Each
            must be a local path, or start with 'http://', 'file://', or
            'ftp://'.
Other Parameters
----------------
channels : None | string | int | iterable of previous, optional
If None, use all available channels in alphabetical order.
Otherwise, use the channels in the order specified in the given
iterable (a string or int is taken as a single-element iterable).
A string is used to specify the channel name, while an int is used
to specify the channel index in the sorted list of available
channel names.
start : None | string | int | iterable of previous, optional
Can be a single value or an iterable of values corresponding to
`channels` giving the start of the channel's playback.
If None or '', the start of the channel's available data is used.
If an integer, it is interpreted as a sample index given in the
number of samples since the epoch (time_since_epoch*sample_rate).
If a float, it is interpreted as a UTC timestamp (seconds since
epoch).
If a string, four forms are permitted:
1) a string which can be evaluated to an integer/float and
interpreted as above,
2) a string beginning with '+' and followed by an integer
(float) expression, interpreted as samples (seconds) from
the start of the data, and
3) a time in ISO8601 format, e.g. '2016-01-01T16:24:00Z'
4) 'now' ('nowish'), indicating the current time (rounded up)
end : None | string | int | iterable of previous, optional
Can be a single value or an iterable of values corresponding to
`channels` giving the end of the channel's playback.
If None or '', the end of the channel's available data is used.
See `start` for a description of how this value is interpreted.
repeat : bool, optional
If True, loop the data continuously from the start after the end
is reached. If False, stop after the data is read once.
throttle : bool, optional
If True, playback the samples at their recorded sample rate. If
False, read samples as quickly as possible.
gapless : bool, optional
If True, output zeroed samples for any missing data between start
and end. If False, skip missing samples and add an `rx_time` stream
tag to indicate the gap.
min_chunksize : None | int, optional
Minimum number of samples to output at once. This value can be used
to adjust the source's performance to reduce underruns and
processing time. If None, a sensible default will be used.
Notes
-----
A top-level directory must contain files in the format:
[channel]/[YYYY-MM-DDTHH-MM-SS]/rf@[seconds].[%03i milliseconds].h5
If more than one top level directory contains the same channel_name
subdirectory, this is considered the same channel. An error is raised
if their sample rates differ, or if their time periods overlap.
Upon start, this block sends 'properties' messages on its output
        message port, each containing a dictionary with one key, the channel's
name, and a value which is a dictionary of properties found in the
channel's 'drf_properties.h5' file.
This block emits the following stream tags at the appropriate sample
for each of the channel's accompanying Digital Metadata samples:
rx_time : (int secs, float frac) tuple
Time since epoch of the sample.
rx_rate : float
Sample rate in Hz.
rx_freq : float | 1-D array of floats
Center frequency or frequencies of the subchannels based on
the 'center_frequencies' metadata field.
metadata : dict
Any additional Digital Metadata fields are added to this
dictionary tag of metadata.
"""
options = locals()
del options['self']
del options['top_level_dir']
del options['channels']
del options['start']
del options['end']
del options['throttle']
Reader = DigitalRFReader(top_level_dir)
available_channel_names = Reader.get_channels()
self._channel_names = self._get_channel_names(
channels, available_channel_names,
)
if start is None or isinstance(start, six.string_types):
start = [start]*len(self._channel_names)
try:
s_iter = iter(start)
except TypeError:
s_iter = iter([start])
if end is None or isinstance(end, six.string_types):
end = [end]*len(self._channel_names)
try:
e_iter = iter(end)
except TypeError:
e_iter = iter([end])
# make sources for each channel
self._channels = []
for ch, s, e in zip(self._channel_names, s_iter, e_iter):
chsrc = digital_rf_channel_source(
os.path.join(top_level_dir, ch), start=s, end=e, **options
)
self._channels.append(chsrc)
out_sig_dtypes = [list(src.out_sig())[0] for src in self._channels]
out_sig = gr.io_signaturev(
len(out_sig_dtypes), len(out_sig_dtypes),
[s.itemsize for s in out_sig_dtypes],
)
in_sig = gr.io_signature(0, 0, 0)
gr.hier_block2.__init__(
self,
name="digital_rf_source",
input_signature=in_sig,
output_signature=out_sig,
)
msg_port_name = pmt.intern('properties')
self.message_port_register_hier_out('properties')
for k, src in enumerate(self._channels):
if throttle:
throt = gnuradio.blocks.throttle(
list(src.out_sig())[0].itemsize, float(src._sample_rate),
ignore_tags=True,
)
self.connect(src, throt, (self, k))
else:
self.connect(src, (self, k))
self.msg_connect(src, msg_port_name, self, msg_port_name)
@staticmethod
def _get_channel_names(channels, available_channel_names):
# channels can be None, in which case we use all available
if channels is None:
return available_channel_names
# or channels can be a string for a single channel
if isinstance(channels, six.string_types):
channels = [channels]
unselected_channels = available_channel_names[:]
channel_names = []
# now channels should be an iterable of strings or indexes
try:
ch_iter = iter(channels)
except TypeError:
# unless channels is potentially a single index
ch_iter = iter([channels])
for ch in ch_iter:
# make None and index ch into string channel name
if ch is None or ch == '':
# use first available channel (alphabetical)
try:
ch_name = unselected_channels[0]
except IndexError:
raise ValueError(
'"None" invalid for channel, all available '
'channels have been selected.'
)
else:
# try ch as a list index into available channels
try:
ch_name = available_channel_names[int(ch)]
except (TypeError, ValueError):
# not an index, that's fine
ch_name = ch
except IndexError:
raise IndexError(
'Channel index {0} does not exist.'.format(ch)
)
# now assume ch is a string, get from unselected channel list
try:
unselected_channels.remove(ch_name)
except ValueError:
errstr = (
'Channel {0} does not exist or has already been '
'selected.'
)
raise ValueError(errstr.format(ch_name))
channel_names.append(ch_name)
return channel_names
def get_gapless(self):
return self._channels[0]._gapless
def set_gapless(self, gapless):
for ch in self._channels:
ch.set_gapless(gapless)
def get_repeat(self):
return self._channels[0]._repeat
def set_repeat(self, repeat):
for ch in self._channels:
ch.set_repeat(repeat)
| 40.884726
| 79
| 0.568337
|
03224cef1a888c87f7b5feac8fb7d7e9f5bebf50
| 10,437
|
py
|
Python
|
hack/generators/release-controllers/content/art_namespaces_rbac.py
|
obnoxxx/release
|
4ad5c88b6b211111e8afd8bb8a7465256fd6976a
|
[
"Apache-2.0"
] | null | null | null |
hack/generators/release-controllers/content/art_namespaces_rbac.py
|
obnoxxx/release
|
4ad5c88b6b211111e8afd8bb8a7465256fd6976a
|
[
"Apache-2.0"
] | null | null | null |
hack/generators/release-controllers/content/art_namespaces_rbac.py
|
obnoxxx/release
|
4ad5c88b6b211111e8afd8bb8a7465256fd6976a
|
[
"Apache-2.0"
] | null | null | null |
def add_imagestream_namespace_rbac(gendoc):
resources = gendoc
context = gendoc.context
puller_subjects = []
if not context.private:
puller_subjects.append({
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Group',
'name': 'system:authenticated'
})
else:
puller_subjects.append({
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Group',
'name': 'openshift-priv-admins'
})
puller_subjects.append({
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Group',
'name': 'qe'
})
puller_subjects.append({
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'Group',
'name': 'release-team'
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1beta1',
'kind': 'RoleBinding',
'metadata': {
'name': 'image-puller',
'namespace': context.is_namespace
},
'roleRef': {
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'ClusterRole',
'name': 'system:image-puller'
},
'subjects': puller_subjects,
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1beta1',
'kind': 'RoleBinding',
'metadata': {
'name': 'user-viewer',
'namespace': context.is_namespace
},
'roleRef': {
'apiGroup': 'rbac.authorization.k8s.io',
'kind': 'ClusterRole',
'name': 'view'
},
'subjects': puller_subjects,
})
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': f'release-controller-modify',
'namespace': context.is_namespace
},
'rules': [
{
'apiGroups': [''],
'resourceNames': ['release-upgrade-graph'],
'resources': ['secrets'],
'verbs': ['get', 'update', 'patch']
},
{
'apiGroups': ['image.openshift.io'],
'resources': ['imagestreams', 'imagestreamtags'],
'verbs': ['get',
'list',
'watch',
'create',
'delete',
'update',
'patch']
},
{
'apiGroups': [''],
'resources': ['events'],
'verbs': ['create', 'patch', 'update']
}]
})
if not context.suffix:
# Special permissions for x86_64 public rc
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': 'release-controller-modify-ocp',
'namespace': 'openshift'
},
'rules': [{
'apiGroups': ['image.openshift.io'],
'resourceNames': ['origin-v4.0'],
'resources': ['imagestreams'],
'verbs': ['get', 'list', 'watch', 'update', 'patch']
}]
})
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': 'release-controller-modify-ocp',
'namespace': 'origin'
},
'rules': [{
'apiGroups': ['image.openshift.io'],
'resourceNames': ['release',
*context.config.releases,
],
'resources': ['imagestreams'],
'verbs': ['get', 'list', 'watch', 'update', 'patch']
}]
})
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': f'release-controller-import-ocp',
'namespace': context.is_namespace
},
'rules': [{
'apiGroups': ['image.openshift.io'],
'resources': ['imagestreamimports'],
'verbs': ['create']
}]
})
resources.append({
'apiVersion': 'authorization.openshift.io/v1',
'kind': 'Role',
'metadata': {
'name': f'release-controller{context.suffix}-prowjob',
'namespace': context.config.rc_deployment_namespace,
},
'rules': [{
'apiGroups': ['prow.k8s.io'],
'resources': ['prowjobs'],
'verbs': ['get',
'list',
'watch',
'create',
'delete',
'update',
'patch']
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-ocp',
'namespace': context.is_namespace,
},
'roleRef': {
'kind': 'Role',
'name': f'release-controller-modify'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace
}]
})
if not context.suffix:
# Special permissions just for x86_64 public release controller
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-ocp',
'namespace': 'openshift'
},
'roleRef': {
'kind': 'Role',
'name': 'release-controller-modify-ocp'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': 'release-controller-ocp',
'namespace': context.config.rc_deployment_namespace
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-ocp',
'namespace': 'origin'
},
'roleRef': {
'kind': 'Role',
'name': 'release-controller-modify-ocp'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': 'release-controller-ocp',
'namespace': context.config.rc_deployment_namespace,
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-view',
'namespace': context.is_namespace
},
'roleRef': {
'kind': 'ClusterRole',
'name': 'view'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': f'release-controller-binding-prowjob-{context.is_namespace}',
'namespace': context.config.rc_deployment_namespace
},
'roleRef': {
'kind': 'Role',
'name': f'release-controller{context.suffix}-prowjob'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace
}]
})
resources.append({
'apiVersion': 'v1',
'kind': 'Namespace',
'metadata': {
'name': context.jobs_namespace,
}
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': f'release-controller-binding-ocp',
'namespace': context.jobs_namespace,
},
'roleRef': {
'kind': 'ClusterRole',
'name': 'edit'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-promote',
'namespace': context.is_namespace,
},
'roleRef': {
'kind': 'ClusterRole',
'name': 'system:image-builder'
},
'subjects': [{
'kind': 'ServiceAccount',
'name': 'builder',
'namespace': context.jobs_namespace,
}]
})
resources.append({
'apiVersion': 'rbac.authorization.k8s.io/v1',
'kind': 'RoleBinding',
'metadata': {
'name': 'release-controller-binding-import',
'namespace': context.is_namespace,
},
'roleRef': {
'kind': 'Role',
'name': f'release-controller-import-ocp',
'namespace': context.is_namespace,
},
'subjects': [{
'kind': 'ServiceAccount',
'name': 'builder',
'namespace': context.jobs_namespace,
}]
})
resources.append({
'apiVersion': 'v1',
'kind': 'Secret',
'metadata': {
'name': 'release-upgrade-graph',
'namespace': context.is_namespace
}
})
resources.append({
'apiVersion': 'v1',
'kind': 'ServiceAccount',
'metadata': {
'annotations': {} if not context.private else {
f'serviceaccounts.openshift.io/oauth-redirectreference.{context.rc_serviceaccount_name}': '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"%s"}}' % context.rc_route_name
},
'name': context.rc_serviceaccount_name,
'namespace': context.config.rc_deployment_namespace,
}
})
| 31.0625
| 224
| 0.466322
|
a596b1313393f329c2af19dc341411326e3c0d8c
| 4,340
|
py
|
Python
|
tests/test_validation.py
|
abetkin/webtypes
|
af62501b91368f18acb569d6c3a587f0b7a125f7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_validation.py
|
abetkin/webtypes
|
af62501b91368f18acb569d6c3a587f0b7a125f7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_validation.py
|
abetkin/webtypes
|
af62501b91368f18acb569d6c3a587f0b7a125f7
|
[
"BSD-3-Clause"
] | null | null | null |
from webtypes import Route, test, types, validators
from webtypes.server.app import App
def str_path_param(param: str):
return {'param': param}
def int_path_param(param: int):
return {'param': param}
def str_query_param(param: str):
return {'param': param}
def int_query_param(param: int):
return {'param': param}
def bool_query_param(param: bool):
return {'param': param}
def str_query_param_with_default(param: str=''):
return {'param': param}
def int_query_param_with_default(param: int=None):
return {'param': param}
def bool_query_param_with_default(param: bool=False):
return {'param': param}
class User(types.Type):
name = validators.String(max_length=10)
age = validators.Integer(minimum=0, allow_null=True, default=None)
def type_body_param(user: User):
return {"user": user}
routes = [
# Path parameters
Route(url='/str_path_param/{param}/', method='GET', handler=str_path_param),
Route(url='/int_path_param/{param}/', method='GET', handler=int_path_param),
# Query parameters
Route(url='/str_query_param/', method='GET', handler=str_query_param),
Route(url='/int_query_param/', method='GET', handler=int_query_param),
Route(url='/bool_query_param/', method='GET', handler=bool_query_param),
Route(url='/str_query_param_with_default/', method='GET', handler=str_query_param_with_default),
Route(url='/int_query_param_with_default/', method='GET', handler=int_query_param_with_default),
Route(url='/bool_query_param_with_default/', method='GET', handler=bool_query_param_with_default),
# Body parameters
Route(url='/type_body_param/', method='POST', handler=type_body_param),
]
app = App(routes=routes)
client = test.TestClient(app)
def test_str_path_param():
response = client.get('/str_path_param/123/')
assert response.json() == {'param': '123'}
def test_int_path_param():
response = client.get('/int_path_param/123/')
assert response.json() == {'param': 123}
def test_str_query_param():
response = client.get('/str_query_param/?param=123')
assert response.json() == {'param': '123'}
response = client.get('/str_query_param/')
assert response.json() == {'param': 'The "param" field is required.'}
def test_str_query_param_with_default():
response = client.get('/str_query_param_with_default/?param=123')
assert response.json() == {'param': '123'}
response = client.get('/str_query_param_with_default/')
assert response.json() == {'param': ''}
def test_int_query_param():
response = client.get('/int_query_param/?param=123')
assert response.json() == {'param': 123}
response = client.get('/int_query_param/')
assert response.json() == {'param': 'The "param" field is required.'}
def test_int_query_param_with_default():
response = client.get('/int_query_param_with_default/?param=123')
assert response.json() == {'param': 123}
response = client.get('/int_query_param_with_default/')
assert response.json() == {'param': None}
def test_bool_query_param():
response = client.get('/bool_query_param/?param=true')
assert response.json() == {'param': True}
response = client.get('/bool_query_param/?param=false')
assert response.json() == {'param': False}
response = client.get('/bool_query_param/')
assert response.json() == {'param': 'The "param" field is required.'}
def test_bool_query_param_with_default():
response = client.get('/bool_query_param_with_default/?param=true')
assert response.json() == {'param': True}
response = client.get('/bool_query_param_with_default/?param=false')
assert response.json() == {'param': False}
response = client.get('/bool_query_param_with_default/')
assert response.json() == {'param': False}
def test_type_body_param():
response = client.post('/type_body_param/', json={'name': 'tom'})
assert response.json() == {'user': {'name': 'tom', 'age': None}}
response = client.post('/type_body_param/', json={'name': 'x' * 100})
assert response.status_code == 400
assert response.json() == {'name': 'Must have no more than 10 characters.'}
response = client.post('/type_body_param/', json={})
assert response.status_code == 400
assert response.json() == {'name': 'The "name" field is required.'}
| 30.56338
| 102
| 0.689171
|
24ec8520f06c3af6372fe8d05cdc3053beb08f80
| 292
|
py
|
Python
|
e005-smallest-multiple.py
|
bayramcicek/mini-programs
|
3f876e3274b7beeb5e7413ac9c5275813d9f0d2d
|
[
"Unlicense"
] | null | null | null |
e005-smallest-multiple.py
|
bayramcicek/mini-programs
|
3f876e3274b7beeb5e7413ac9c5275813d9f0d2d
|
[
"Unlicense"
] | null | null | null |
e005-smallest-multiple.py
|
bayramcicek/mini-programs
|
3f876e3274b7beeb5e7413ac9c5275813d9f0d2d
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3.6
# created by cicek on 06.10.2018 18:13
number = 2000
while True:
counter = 0
for i in range(1,21):
if (number % i == 0):
counter += 1
if (counter == 20):
print(number)
exit(0)
else:
number += 20
# 232792560
| 15.368421
| 38
| 0.510274
|
46c5790047a08881a7d543a3c27cc17db64a59a9
| 468
|
py
|
Python
|
Exercicios/iptu/iptu.py
|
Arthurnevs/E3
|
500df893c9762c738ce7307c025888cef5bea4b3
|
[
"Apache-2.0"
] | null | null | null |
Exercicios/iptu/iptu.py
|
Arthurnevs/E3
|
500df893c9762c738ce7307c025888cef5bea4b3
|
[
"Apache-2.0"
] | null | null | null |
Exercicios/iptu/iptu.py
|
Arthurnevs/E3
|
500df893c9762c738ce7307c025888cef5bea4b3
|
[
"Apache-2.0"
] | null | null | null |
'''
UFCG
PROGRAMAÇÃO 1
JOSE ARTHUR NEVES DE BRITO - 119210204
IPTU
'''
area = float(input())
valor = float(input())
opcao = input()
iptu = area * valor
vista = iptu - (iptu * 0.2)
duas = iptu - (iptu * 0.1)
tres = iptu - (iptu * 0.05)
if(opcao == 'vista'):
print('Total: R$ {:.2f}'.format(vista))
elif(opcao == '2x'):
print('Total: R$ {:.2f}. Parcelas: R$ {:.2f}'.format(duas,duas/2))
else:
print('Total: R$ {:.2f}. Parcelas: R$ {:.2f}'.format(tres,tres/3))
| 18.72
| 67
| 0.589744
|
50aba43c248d4172ffb660dbea7710a1dee06319
| 6,256
|
py
|
Python
|
tensorflow/contrib/keras/python/keras/models_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 28
|
2017-04-08T09:47:57.000Z
|
2020-07-12T03:10:46.000Z
|
tensorflow/contrib/keras/python/keras/models_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 7
|
2017-07-13T09:40:59.000Z
|
2019-04-08T22:46:51.000Z
|
tensorflow/contrib/keras/python/keras/models_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 38
|
2017-04-28T04:15:48.000Z
|
2019-09-28T05:11:46.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TestModelSaving(test.TestCase):
def test_sequential_model_saving(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
new_model = keras.models.load_model(fname)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# test that new updates are the same with both models
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
new_model.train_on_batch(x, y)
out = model.predict(x)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_2(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
# test with custom optimizer, loss
class CustomOp(keras.optimizers.RMSprop):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(
fname,
custom_objects={'CustomOp': CustomOp,
'custom_loss': custom_loss})
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
  def test_functional_model_saving(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
inputs = keras.layers.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_without_compilation(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
def test_saving_right_after_compilation(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
model.model._make_train_function()
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
class TestSequential(test.TestCase):
"""Most Sequential model API tests are covered in `training_test.py`.
"""
def test_sequential_pop(self):
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.compile(loss='mse', optimizer='sgd')
x = np.random.random((batch_size, input_dim))
y = np.random.random((batch_size, num_classes))
model.fit(x, y, epochs=1)
model.pop()
self.assertEqual(len(model.layers), 1)
self.assertEqual(model.output_shape, (None, num_hidden))
model.compile(loss='mse', optimizer='sgd')
y = np.random.random((batch_size, num_hidden))
model.fit(x, y, epochs=1)
if __name__ == '__main__':
test.main()
| 32.247423
| 80
| 0.652014
|
7c2735a75d99fe10359a41e4b7da8d047210d478
| 66
|
py
|
Python
|
pgen/__init__.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 739
|
2015-01-01T02:05:11.000Z
|
2022-03-30T15:26:16.000Z
|
pgen/__init__.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 33
|
2015-03-25T23:17:04.000Z
|
2021-08-19T08:25:22.000Z
|
pgen/__init__.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 167
|
2015-01-01T22:27:47.000Z
|
2022-03-17T13:29:19.000Z
|
import os
import sys
sys.path.append(os.path.dirname(__file__))
| 11
| 42
| 0.772727
|
3ce65d71546a32d250b055681d0cd61fdb62f389
| 1,920
|
py
|
Python
|
os_ken/tests/unit/packet/test_geneve.py
|
rolaya/os-ken
|
10009e41539c737c7c423f13e4f5bc5f46d219ff
|
[
"Apache-2.0"
] | 1
|
2019-04-24T04:01:07.000Z
|
2019-04-24T04:01:07.000Z
|
os_ken/tests/unit/packet/test_geneve.py
|
anlaneg/os-ken
|
379a7694c3129cc0156343af71f4fca8830d9de5
|
[
"Apache-2.0"
] | null | null | null |
os_ken/tests/unit/packet/test_geneve.py
|
anlaneg/os-ken
|
379a7694c3129cc0156343af71f4fca8830d9de5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os
import sys
import unittest
from nose.tools import eq_
from nose.tools import ok_
from os_ken.lib import pcaplib
from os_ken.lib.packet import geneve
from os_ken.lib.packet import packet
from os_ken.utils import binary_str
LOG = logging.getLogger(__name__)
GENEVE_DATA_DIR = os.path.join(
os.path.dirname(sys.modules[__name__].__file__),
'../../packet_data/pcap/')
class Test_geneve(unittest.TestCase):
"""
Test case for os_ken.lib.packet.geneve.
"""
def test_parser(self):
files = [
'geneve_unknown',
]
for f in files:
# print('*** testing %s ...' % f)
for _, buf in pcaplib.Reader(
open(GENEVE_DATA_DIR + f + '.pcap', 'rb')):
# Checks if message can be parsed as expected.
pkt = packet.Packet(buf)
geneve_pkt = pkt.get_protocol(geneve.geneve)
ok_(isinstance(geneve_pkt, geneve.geneve),
'Failed to parse Geneve message: %s' % pkt)
# Checks if message can be serialized as expected.
pkt.serialize()
eq_(buf, pkt.data,
"b'%s' != b'%s'" % (binary_str(buf), binary_str(pkt.data)))
| 30.47619
| 79
| 0.647917
|
e9fb9ada293f82b4ea263e374bc309874556d206
| 794
|
py
|
Python
|
flask_babelplus/constants.py
|
intact/flask-babelplus
|
29960f1b489e7fd73a028eefd1ff097fbbfd1f1d
|
[
"BSD-3-Clause"
] | null | null | null |
flask_babelplus/constants.py
|
intact/flask-babelplus
|
29960f1b489e7fd73a028eefd1ff097fbbfd1f1d
|
[
"BSD-3-Clause"
] | null | null | null |
flask_babelplus/constants.py
|
intact/flask-babelplus
|
29960f1b489e7fd73a028eefd1ff097fbbfd1f1d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
flask_babelplus.constants
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains the constants that are used in this
extension.
:copyright: (c) 2013 by Armin Ronacher, Daniel Neuhäuser and contributors.
:license: BSD, see LICENSE for more details.
"""
from werkzeug import ImmutableDict
DEFAULT_LOCALE = "en"
DEFAULT_TIMEZONE = "UTC"
DEFAULT_DATE_FORMATS = ImmutableDict({
'time': 'medium',
'date': 'medium',
'datetime': 'medium',
'time.short': None,
'time.medium': None,
'time.full': None,
'time.long': None,
'date.short': None,
'date.medium': None,
'date.full': None,
'date.long': None,
'datetime.short': None,
'datetime.medium': None,
'datetime.full': None,
'datetime.long': None,
})
| 24.060606
| 78
| 0.610831
|
de8de610a6badbdce7560609b3e1e6b12d2821be
| 43
|
py
|
Python
|
music_bot/__init__.py
|
Lin2D2/Discord_Music_Bot_2
|
be5aa9637fd1a3de4412981f8d40edd3abeb3c71
|
[
"BSD-3-Clause"
] | null | null | null |
music_bot/__init__.py
|
Lin2D2/Discord_Music_Bot_2
|
be5aa9637fd1a3de4412981f8d40edd3abeb3c71
|
[
"BSD-3-Clause"
] | null | null | null |
music_bot/__init__.py
|
Lin2D2/Discord_Music_Bot_2
|
be5aa9637fd1a3de4412981f8d40edd3abeb3c71
|
[
"BSD-3-Clause"
] | null | null | null |
from music_bot.bot import Bot
bot = Bot()
| 10.75
| 29
| 0.72093
|
6117f61b215fe1b21ce32c1865ff37777b8ee798
| 651
|
py
|
Python
|
Models/session.py
|
fossabot/Taurus
|
f042addc24a3b76713649e08a0f2b3756bdeac28
|
[
"MIT"
] | null | null | null |
Models/session.py
|
fossabot/Taurus
|
f042addc24a3b76713649e08a0f2b3756bdeac28
|
[
"MIT"
] | 1
|
2019-07-13T14:50:49.000Z
|
2019-07-13T14:50:49.000Z
|
Models/session.py
|
fossabot/Taurus
|
f042addc24a3b76713649e08a0f2b3756bdeac28
|
[
"MIT"
] | 1
|
2019-07-13T14:48:18.000Z
|
2019-07-13T14:48:18.000Z
|
#!/usr/bin/python
from sqlalchemy import Column, Integer, Sequence, DateTime
from Models import base
class Session (base):
__tablename__ = 'T_Sessions'
id = Column (Integer, Sequence ('session_id_seq'), primary_key = True)
Session = Column (Integer, unique = True)
Password = Column (Integer)
CreateTime = Column (DateTime)
User = Column (Integer)
def __init__ (self, session, password, createtime, user):
self.Session = session
self.Password = password
self.CreateTime = createtime
self.User = user
def __repr__ (self):
return "<User ('%d', '%d', '%s', '%d')>" % (self.Session, self.Password,
self.CreateTime, self.User)
| 25.038462
| 74
| 0.700461
|
a1e713ef4e35a6ea2e1ce88799697f0a4e1939a5
| 5,548
|
py
|
Python
|
doc/conf.py
|
MOFplus/molsys_rel
|
ff8b181fefc0ba03c5dd14fe2dde613298155203
|
[
"MIT"
] | null | null | null |
doc/conf.py
|
MOFplus/molsys_rel
|
ff8b181fefc0ba03c5dd14fe2dde613298155203
|
[
"MIT"
] | null | null | null |
doc/conf.py
|
MOFplus/molsys_rel
|
ff8b181fefc0ba03c5dd14fe2dde613298155203
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sphinx_rtd_theme
#
# molsys documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 21 14:29:21 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.autosectionlabel']
intersphinx_mapping = {"MOFplus":('file:///opt/sandbox/mofplus/doc/_build/html',None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'molsys'
copyright = u'2019-2021, Roberto Amabile, Johannes P. Duerholt, Julian Keupp, Rochus Schmid'
author = u'Roberto Amabile, Johannes P. Duerholt, Julian Keupp, Rochus Schmid'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Autosummary options
#autosummary_generate = True
# Napoleon settings
napoleon_google_docstring = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'molsysdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'molsys.tex', u'molsys Documentation',
u'Roberto Amabile, Johannes P. Duerholt, Julian Keupp, Rochus Schmid', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'molsys', u'molsys Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'molsys', u'molsys Documentation',
author, 'molsys', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping['https://docs.python.org/'] = None
| 30.822222
| 92
| 0.69106
|
be1195d779bd07aa5300fc210952e6347c367320
| 3,636
|
py
|
Python
|
tests/data/cifar_10/source/keras_cnn_cifar_10.py
|
pengk19/sagemaker-python-sdk
|
0866a304fea44522fd1e3b6c4509cd05dda064dd
|
[
"Apache-2.0"
] | 1
|
2021-07-16T20:14:59.000Z
|
2021-07-16T20:14:59.000Z
|
tests/data/cifar_10/source/keras_cnn_cifar_10.py
|
pengk19/sagemaker-python-sdk
|
0866a304fea44522fd1e3b6c4509cd05dda064dd
|
[
"Apache-2.0"
] | null | null | null |
tests/data/cifar_10/source/keras_cnn_cifar_10.py
|
pengk19/sagemaker-python-sdk
|
0866a304fea44522fd1e3b6c4509cd05dda064dd
|
[
"Apache-2.0"
] | 1
|
2019-10-06T10:53:30.000Z
|
2019-10-06T10:53:30.000Z
|
# Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow.python.keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from tensorflow.python.keras.models import Sequential
from tensorflow.python.training.rmsprop import RMSPropOptimizer
HEIGHT = 32
WIDTH = 32
DEPTH = 3
NUM_CLASSES = 10
NUM_DATA_BATCHES = 5
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 10000 * NUM_DATA_BATCHES
BATCH_SIZE = 128
INPUT_TENSOR_NAME = "inputs_input" # needs to match the name of the first layer + "_input"
def keras_model_fn(hyperparameters):
"""keras_model_fn receives hyperparameters from the training job and returns a compiled keras model.
The model will be transformed into a TensorFlow Estimator before training and it will be saved in a
TensorFlow Serving SavedModel at the end of training.
Args:
hyperparameters: The hyperparameters passed to the SageMaker TrainingJob that runs your TensorFlow
training script.
Returns: A compiled Keras model
"""
model = Sequential()
model.add(Conv2D(32, (3, 3), padding="same", name="inputs", input_shape=(HEIGHT, WIDTH, DEPTH)))
model.add(Activation("relu"))
model.add(Conv2D(32, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES))
model.add(Activation("softmax"))
opt = RMSPropOptimizer(
learning_rate=hyperparameters["learning_rate"], decay=hyperparameters["decay"]
)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
return model
def serving_input_fn(hyperparameters):
inputs = {INPUT_TENSOR_NAME: tf.placeholder(tf.float32, [None, 32, 32, 3])}
return tf.estimator.export.ServingInputReceiver(inputs, inputs)
def train_input_fn(training_dir, hyperparameters):
return _generate_synthetic_data(tf.estimator.ModeKeys.TRAIN, batch_size=BATCH_SIZE)
def eval_input_fn(training_dir, hyperparameters):
return _generate_synthetic_data(tf.estimator.ModeKeys.EVAL, batch_size=BATCH_SIZE)
def _generate_synthetic_data(mode, batch_size):
input_shape = [batch_size, HEIGHT, WIDTH, DEPTH]
images = tf.truncated_normal(
input_shape, dtype=tf.float32, stddev=1e-1, name="synthetic_images"
)
labels = tf.random_uniform(
[batch_size, NUM_CLASSES],
minval=0,
maxval=NUM_CLASSES - 1,
dtype=tf.float32,
name="synthetic_labels",
)
images = tf.contrib.framework.local_variable(images, name="images")
labels = tf.contrib.framework.local_variable(labels, name="labels")
return {INPUT_TENSOR_NAME: images}, labels
| 36
| 106
| 0.726898
|
6b0591f5e92f923c5792678c840617a3c1ebb760
| 4,025
|
py
|
Python
|
tests/utils_tests.py
|
gmr/tornado-dynamodb
|
902867ed33a033bac6f047b7c8053326d6587b8a
|
[
"BSD-3-Clause"
] | 2
|
2016-01-01T09:20:36.000Z
|
2016-11-04T08:53:43.000Z
|
tests/utils_tests.py
|
gmr/tornado-dynamodb
|
902867ed33a033bac6f047b7c8053326d6587b8a
|
[
"BSD-3-Clause"
] | 1
|
2016-03-13T15:35:25.000Z
|
2016-04-01T12:49:39.000Z
|
tests/utils_tests.py
|
gmr/tornado-dynamodb
|
902867ed33a033bac6f047b7c8053326d6587b8a
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import unittest
import uuid
import arrow
from tornado_dynamodb import utils
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return 'UTC'
def dst(self, dt):
return datetime.timedelta(0)
class MarshallTests(unittest.TestCase):
maxDiff = None
def test_complex_document(self):
uuid_value = uuid.uuid4()
arrow_value = arrow.utcnow()
dt_value = datetime.datetime.utcnow().replace(tzinfo=UTC())
value = {
'key1': 'str',
'key2': 10,
'key3': {
'sub-key1': 20,
'sub-key2': True,
'sub-key3': 'value'
},
'key4': None,
'key5': ['one', 'two', 'three', 4, None, True],
'key6': set(['a', 'b', 'c']),
'key7': {1, 2, 3, 4},
'key8': arrow_value,
'key9': uuid_value,
'key10': b'\0x01\0x02\0x03',
'key11': {b'\0x01\0x02\0x03', b'\0x04\0x05\0x06'},
'key12': dt_value
}
expectation = {
'key1': {'S': 'str'},
'key2': {'N': '10'},
'key3': {'M':
{
'sub-key1': {'N': '20'},
'sub-key2': {'BOOL': True},
'sub-key3': {'S': 'value'}
}
},
'key4': {'NULL': True},
'key5': {'L': [{'S': 'one'}, {'S': 'two'}, {'S': 'three'},
{'N': '4'}, {'NULL': True}, {'BOOL': True}]},
'key6': {'SS': ['a', 'b', 'c']},
'key7': {'NS': ['1', '2', '3', '4']},
'key8': {'S': arrow_value.isoformat()},
'key9': {'S': str(uuid_value)},
'key10': {'B': b'\0x01\0x02\0x03'},
'key11': {'BS': [b'\0x01\0x02\0x03', b'\0x04\0x05\0x06']},
'key12': {'S': dt_value.isoformat()}
}
self.assertDictEqual(expectation, utils.marshall(value))
def test_value_error_raised_on_unsupported_type(self):
self.assertRaises(ValueError, utils.marshall, {'key': self})
def test_value_error_raised_on_mixed_set(self):
self.assertRaises(ValueError, utils.marshall, {'key': {1, 'two', 3}})
class UnmarshallTests(unittest.TestCase):
maxDiff = None
def test_complex_document(self):
uuid_value = uuid.uuid4()
dt_value = arrow.utcnow()
value = {
'key1': {'S': 'str'},
'key2': {'N': '10'},
'key3': {'M':
{
'sub-key1': {'N': '20'},
'sub-key2': {'BOOL': True},
'sub-key3': {'S': 'value'}
}
},
'key4': {'NULL': True},
'key5': {'L': [{'S': 'one'}, {'S': 'two'}, {'S': 'three'},
{'N': '4'}, {'NULL': True}, {'BOOL': True}]},
'key6': {'SS': ['a', 'b', 'c']},
'key7': {'NS': ['1', '2', '3', '4']},
'key8': {'S': dt_value.isoformat()},
'key9': {'S': str(uuid_value)},
'key10': {'B': b'\0x01\0x02\0x03'},
'key11': {'BS': [b'\0x01\0x02\0x03', b'\0x04\0x05\0x06']}
}
expectation = {
'key1': 'str',
'key2': 10,
'key3': {
'sub-key1': 20,
'sub-key2': True,
'sub-key3': 'value'
},
'key4': None,
'key5': ['one', 'two', 'three', 4, None, True],
'key6': {'a', 'b', 'c'},
'key7': {1, 2, 3, 4},
'key8': dt_value.isoformat(),
'key9': uuid_value,
'key10': b'\0x01\0x02\0x03',
'key11': {b'\0x01\0x02\0x03', b'\0x04\0x05\0x06'}
}
self.assertDictEqual(expectation, utils.unmarshall(value))
def test_value_error_raised_on_unsupported_type(self):
self.assertRaises(ValueError, utils.unmarshall, {'key': {'T': 1}})
| 32.723577
| 77
| 0.427578
|
638661fe2dd1f5ca9d407fcd48912d55605bd576
| 8,241
|
py
|
Python
|
django_harmonization/HeartData/dependent_columns.py
|
chrisroederucdenver/Kao-Harmonization-Release
|
1a90db58cd378244a8aba138e27f049376045729
|
[
"Apache-2.0"
] | null | null | null |
django_harmonization/HeartData/dependent_columns.py
|
chrisroederucdenver/Kao-Harmonization-Release
|
1a90db58cd378244a8aba138e27f049376045729
|
[
"Apache-2.0"
] | null | null | null |
django_harmonization/HeartData/dependent_columns.py
|
chrisroederucdenver/Kao-Harmonization-Release
|
1a90db58cd378244a8aba138e27f049376045729
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
'''
Copyright 2017 The Regents of the University of Colorado
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
dependent_columns.py
Python Version: 3.6.3
Find a column that is very similar to others from the extracted analysis matrix. Concretely, this
script finds the stand-alone probability of a value in a column and compares it with the conditional
probability of that value given another column's (most popular) value.
usage: dependent_columns.py <matrix file>
output: the really badly dependent columns
"a, b, value, p(a=value), p(a=value|b=value)
CAD CABG 2.0 0.640333333333 0.981661272923
NB. a healthy value for one phenotype often comes with healthy values for other phenotypes, so
I discount signs of dependence on values associated with a healthy state.
This is research code for demonstration purposes only.
croeder 10/2017 chris.roeder@ucdenver.edu
independent ==> p(a) = p(a|b)
'''
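# A rough sketch of the idea on made-up numbers: if column A takes some value in 30% of all
# rows, but in 90% of the rows where column B takes its most popular value, then
# p(A=v | B=w) / p(A=v) = 0.9 / 0.3 = 3.0 > RATIO_CUTOFF_H, and the pair (A, B) is
# reported as dependent by show_uber_map() below.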
import sys
import csv
import re
from collections import defaultdict
import types
RATIO_CUTOFF_L = 0.5
RATIO_CUTOFF_H = 2.0
MAX_VALUES=5
CONTINUOUS_NAMES = {'Weig', 'KCCQ', 'CrCv', 'Tota', 'SBPv', 'HR v', 'LVFv', 'BNP'}
def _get_matrix(file_name):
""" Reads the analysis matrix to analyze. Trims header row and id column (1st of each) """
# return reader[1:][1:]
header = None
with open(file_name, "r") as file:
matrix = []
reader = csv.reader(file)
for string_row in reader:
number_row=[]
if header == None:
header = string_row
else:
for s in string_row:
s = s.strip()
if s == "NA":
number_row.append(None)
                elif isinstance(s, str) and s[0] == '\'' and s[-1] == '\'':
clean_string = re.search('\'(.+)\'', s).group(1)
number_row.append(clean_string) # use re to remove quotes
else:
number_row.append(float(s))
matrix.append(number_row)
# add in a test case that is identical ...
return (header, matrix)
def _get_value_counts(index_a, index_b, matrix):
""" Returns three dictionaries. The first two represent frequency distributions of columns indexed by a and b.
The third counts how often a value appears in both columns
(value -> frequency in a, value -> frequency in b, value -> frequency in both)
"""
col_a = defaultdict(int)
col_b = defaultdict(int)
both = defaultdict(int)
for row in matrix:
if index_a < len(row) and index_b < len(row):
val_a = row[index_a]
col_a[val_a] += 1
val_b = row[index_b]
col_b[val_b] += 1
if row[index_a] == row[index_b] and row[index_a] != None:
both[row[index_a]] += 1
return (col_a, col_b, both)
def _probability(value, col_a_counts_map):
""" p(a=value) = counts of a at value, dividied by total values of a """
sum=0
for key in col_a_counts_map.keys():
sum += col_a_counts_map[key]
if col_a_counts_map[value] == 0:
print("nothing:", value)
return col_a_counts_map[value] / float(sum)
def _conditional_probability(col_a_counts_map, col_b_counts_map, both_counts_map):
""" For values that a might take, maps to p(a=v|b=v).
    = the number of times both columns a and b have a value divided by the number of times column b has that value.
returns: value->prob for given maps.
NB: note that it insists on the value being the same on both sides.
"""
map={}
for key in col_a_counts_map.keys(): # a here because it's only values of a we care about
if (key in col_b_counts_map.keys()):
conditional_prob = both_counts_map[key] / float(col_b_counts_map[key]) # b here because it's the prior
map[key]=conditional_prob
return map
# ----------------------------------------------------------------------------------------------
def _find_all_pairs(list):
""" returns all pairs created from the given list. (a, b) is not the same as (b, a). """
pairs = set();
for value_a in list:
for value_b in list:
pairs.add((value_a, value_b))
return pairs
def _frequency_of_pair(a, b, v, w, matrix):
""" builds a map from (v, w) -> (p(a=v and b=w) for all possible pairs (v, w)
a, b indeces of columns to be compared
v, w respective values of columns a and b, we're looking at
(not cheap)
"""
count=0
worry_count=0
for row in matrix:
if a < len(row) and b < len(row):
if row[a] == v and row[b] == w:
count += 1
else :
##print "worry?", a, b, len(row), row
worry_count += 1
return count
def _relatedness_of_pairs(a, b, col_a_counts_map, col_b_counts_map, both_counts_map, matrix):
""" For all pairs of values (v, w) shared between columns (keys in both_counts_map), maps (v, w) to p(a=v|b=w).
returns: (v, w) -> p(a=v) / p(a=v | b=w) as a measure of relatedness.
A ratio of 1.0 means they are independent. The more the ratio drops below 1, the more the condition of b
affects a.
"""
map={}
if len(both_counts_map.keys()) < MAX_VALUES :
value_pairs_list = _find_all_pairs(both_counts_map.keys())
for value_pair in value_pairs_list:
# p(a=v | b=w) = p(a=v ^ b=w) / p(b=w)
# = (count(a and b) / rows-in- matrix) / (count(b=w) / rows-in-matrix)
# = counts(a and b) / count(b=w)
cond_prob = _frequency_of_pair(a, b, value_pair[0], value_pair[1], matrix) / float(col_b_counts_map[value_pair[1]])
if cond_prob != 0:
prob = _probability(value_pair[0], col_a_counts_map)
map[value_pair] = cond_prob / float(prob)
return map
def _all_pairs_all_values(header, matrix):
"""For all pairs of columns/phenotypes, and for all values shared in each pair, find the relatedness between columns for each """
uber_map={}
for i in range (1, len(header)):
if header[i].strip() not in CONTINUOUS_NAMES:
for j in range (1, len(header)):
if header[j].strip() not in CONTINUOUS_NAMES and i !=j:
(col_a_counts_map, col_b_counts_map, both_counts_map) = _get_value_counts(i,j , matrix)
map = _relatedness_of_pairs(i, j, col_a_counts_map, col_b_counts_map, both_counts_map, matrix)
uber_map[(i,j)]=map
return uber_map
def show_uber_map(header, matrix):
uber_map = _all_pairs_all_values(header, matrix)
for (pair, map) in uber_map.items():
show=False
sum=0
n=0
if len(map) > 0:
for (value, ratio) in map.items():
sum += ratio
n += 1
if (ratio < RATIO_CUTOFF_L or ratio > RATIO_CUTOFF_H) and pair[0] != pair[1]:
show=True
average = sum / float(n)
if show or average < RATIO_CUTOFF_L or average > RATIO_CUTOFF_H:
print(pair, header[pair[0]], header[pair[1]])
for (value, ratio) in map.items():
if (ratio < RATIO_CUTOFF_L or ratio > RATIO_CUTOFF_H) :
print(" [value:{} ratio:{}] ".format( value, ratio))
print("")
if __name__ == "__main__" :
file_name = sys.argv[1]
(header, matrix) = _get_matrix(file_name)
show_uber_map(header, matrix)
| 36.303965
| 135
| 0.597622
|
b06bd76de2e71f0902c5e7e4efae00c328adf3bc
| 5,990
|
py
|
Python
|
scraps/views.py
|
yejia/osl_notebook
|
f866fc753d7b0c1d8fec6f790c7298729e7f70ae
|
[
"MIT"
] | null | null | null |
scraps/views.py
|
yejia/osl_notebook
|
f866fc753d7b0c1d8fec6f790c7298729e7f70ae
|
[
"MIT"
] | null | null | null |
scraps/views.py
|
yejia/osl_notebook
|
f866fc753d7b0c1d8fec6f790c7298729e7f70ae
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import render_to_response
from django import forms
from django.forms import ModelForm
from django.db.models import F
from django.db import connection
from django.utils import simplejson
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mass_mail
from django.utils.translation import ugettext as _
from notebook.notes.models import create_model, create_model_form
from notebook.scraps.models import Scrap, Folder
from notebook.notes.views import getT, getW, getNote, get_public_notes, get_public_tags, remove_private_tag_notes, __get_ws_tags
from notebook.notes.views import folders_index, settings_tag_add, settings_tag_update, settings_tag, settings_tags
from notebook.notes.views import getSearchResults, getlogger, __getQStr, __get_notes_context
from notebook.notes.views import __get_folder_context, __get_pre_url
import notebook
import datetime
from datetime import date
log = getlogger('scraps.views')
#this method is used for processing the request users send via the browser button
@login_required
def add_scrap(request):
username = request.user.username
N = getNote(username, 'scrapbook')
T = getT(username)
#W = getW(username)
#w = W.objects.get(name='scrapbook')
if request.method == 'POST':
tags = T.objects.all()
#form require tags to be required. So don't use form now, and use the code from add_note in notebook.notes.views for adding a snippet
#AddNForm = create_model_form("AddNForm_add_scrap_post_"+str(username), N, fields={'tags':forms.ModelMultipleChoiceField(queryset=tags)})
n = N()
post = request.POST.copy()
tag_names = post.getlist('item[tags][]')
tags = []
for tag_name in tag_names:
t, created = T.objects.get_or_create(name=tag_name)
#==============Don't need below any more since add_tags will do this logic=================================================================
# if created or not w.tags.filter(name=t.name).exists():
# w.tags.add(t)
#===============================================================================
#tags.append(t.id)
tags.append(t.name)
#if not tag_names:
# tags = [T.objects.get(name='untagged').id]
if not tags or (len(tags) == 1 and tags[0] == u''):
tags = None
#f = AddNForm(post, instance=n)
#log.debug("f.errors:"+str(f.errors))
#TODO:handle errors such as url broken
#n = f.save(commit=False)
n.title = post.get('title')
n.desc = post.get('desc')
n.url = post.get('url')
private = post.get('private', False)
if private in ['true', 'on']:
n.private = True
else:
n.private = False
n.vote = post.get('vote')
n.save()
n.add_tags(tags, 'scrapbook')
n.save() #called this specifically to save the url to the social db as well
return render_to_response("include/notes/addNote_result.html",\
{'message':_('Scrap is successfully added! You can close this window, or it will be closed for you in 1 second.')})
else:
tags = __get_ws_tags(request, username, 'scrapbook')
from django.forms import TextInput
#by adding the tags field specifically here, we avoided it using tags of another user (a strange error which repeat even after changing class names and variable names)
AddNForm_scrap = create_model_form("AddNForm_add_scrap_get_"+str(username), N, fields={#'tags':forms.ModelMultipleChoiceField(queryset=tags)
}, options={'exclude':['deleted'],
'fields':['url','title','tags','desc','vote','private'],
'widgets':{'title': TextInput(attrs={'size': 80}),
}})
url = request.GET.get('url')
title = request.GET.get('title')
desc = request.GET.get('desc')
#default_tag_id = T.objects.get(name='untagged').id
addNoteForm = AddNForm_scrap(initial={'url': url, 'title':title, 'desc':desc#, 'tags': [default_tag_id]
})
#no need of the custimized form in the scrapbook template
return render_to_response('scrapbook/notes/addNote.html', {'addNoteForm': addNoteForm, 'desc':desc, 'url':url, 'tags':tags})
@login_required
def share(request, username):
print 'share in note called'
note_ids = request.POST.getlist('note_ids')
N = getNote(request.user.username)
msgs = []
for note_id in note_ids:
note = N.objects.get(id=note_id)
message = 'From osl scraps:'+' '+note.title+' '+note.url+' '
desc = note.desc
desc = desc.replace('\r','')
desc = desc.replace('\n','')#TODO:
if len(desc) > 100:
desc = desc[:300] + '...... view more from http://new.notebook.opensourcelearning.org/'+\
username+'/scrapbook/scraps/note/'+unicode(note.id)+'/'
message = message+desc
msg = (message.encode('utf8'), '', 'yuanliangliu@gmail.com', ['buzz@gmail.com'])
msgs.append(msg)
#share_note(note_id, username)
send_mass_mail(tuple(msgs), fail_silently=False)
return HttpResponse('success', mimetype="text/plain")
| 43.722628
| 175
| 0.593322
|
c105a82518997d420e323f87ffb2bf320c47a878
| 4,028
|
py
|
Python
|
assignments/08_synthetic_dna/test.py
|
cvk1988/biosystems-analytics-2020
|
68ef2146b3b1b38406f1452ba90c469068a663b2
|
[
"MIT"
] | 1
|
2021-05-19T19:07:56.000Z
|
2021-05-19T19:07:56.000Z
|
assignments/08_synthetic_dna/test.py
|
cvk1988/biosystems-analytics-2020
|
68ef2146b3b1b38406f1452ba90c469068a663b2
|
[
"MIT"
] | 1
|
2020-02-11T20:15:59.000Z
|
2020-02-11T20:15:59.000Z
|
assignments/08_synthetic_dna/test.py
|
cvk1988/biosystems-analytics-2020
|
68ef2146b3b1b38406f1452ba90c469068a663b2
|
[
"MIT"
] | 24
|
2020-01-15T17:34:40.000Z
|
2021-08-23T05:57:24.000Z
|
#!/usr/bin/env python3
"""tests for moog.py"""
import os
import random
import re
import string
from subprocess import getstatusoutput
from Bio import SeqIO
from Bio.SeqUtils import GC
from numpy import mean
from itertools import chain
prg = './moog.py'
# --------------------------------------------------
def random_string():
"""generate a random string"""
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))
# --------------------------------------------------
def test_exists():
"""usage"""
assert os.path.isfile(prg)
# --------------------------------------------------
def test_usage():
"""usage"""
for flag in ['-h', '--help']:
rv, out = getstatusoutput('{} {}'.format(prg, flag))
assert rv == 0
assert re.match("usage", out, re.IGNORECASE)
# --------------------------------------------------
def test_bad_seqtype():
"""die on bad seqtype"""
bad = random_string()
rv, out = getstatusoutput(f'{prg} -t {bad}')
assert rv != 0
assert re.match('usage:', out, re.I)
assert re.search(
f"-t/--seqtype: invalid choice: '{bad}' \(choose from 'dna', 'rna'\)",
out)
# --------------------------------------------------
def test_bad_pctgc():
"""die on bad pctgc"""
bad = random.randint(1, 10)
rv, out = getstatusoutput(f'{prg} -p {bad}')
assert rv != 0
assert re.match('usage:', out, re.I)
assert re.search(f'--pctgc "{float(bad)}" must be between 0 and 1', out)
# --------------------------------------------------
def test_defaults():
"""runs on good input"""
out_file = 'out.fa'
try:
if os.path.isfile(out_file):
os.remove(out_file)
rv, out = getstatusoutput(prg)
assert rv == 0
assert out == f'Done, wrote 10 DNA sequences to "{out_file}".'
assert os.path.isfile(out_file)
# correct number of seqs
seqs = list(SeqIO.parse(out_file, 'fasta'))
assert len(seqs) == 10
# the lengths are in the correct range
seq_lens = list(map(lambda seq: len(seq.seq), seqs))
assert max(seq_lens) <= 75
assert min(seq_lens) >= 50
# bases are correct
bases = ''.join(
sorted(
set(chain(map(lambda seq: ''.join(sorted(set(seq.seq))),
seqs)))))
assert bases == 'ACGT'
# the pct GC is about right
gc = list(map(lambda seq: GC(seq.seq) / 100, seqs))
assert .47 <= mean(gc) <= .53
finally:
if os.path.isfile(out_file):
os.remove(out_file)
# --------------------------------------------------
def test_options():
"""runs on good input"""
out_file = random_string() + '.fasta'
try:
if os.path.isfile(out_file):
os.remove(out_file)
min_len = random.randint(50, 99)
max_len = random.randint(100, 150)
num_seqs = random.randint(100, 150)
pct_gc = random.random()
cmd = (f'{prg} -m {min_len} -x {max_len} -o {out_file} '
f'-n {num_seqs} -t rna -p {pct_gc:.02f} -s 1')
rv, out = getstatusoutput(cmd)
assert rv == 0
assert out == f'Done, wrote {num_seqs} RNA sequences to "{out_file}".'
assert os.path.isfile(out_file)
# correct number of seqs
seqs = list(SeqIO.parse(out_file, 'fasta'))
assert len(seqs) == num_seqs
# the lengths are in the correct range
seq_lens = list(map(lambda seq: len(seq.seq), seqs))
assert max(seq_lens) <= max_len
assert min(seq_lens) >= min_len
# bases are correct
bases = set(''.join(
map(lambda seq: ''.join(sorted(set(seq.seq))), seqs)))
assert bases == set('ACGU')
# the pct GC is about right
gc = list(map(lambda seq: GC(seq.seq) / 100, seqs))
assert pct_gc - .3 <= mean(gc) <= pct_gc + .3
finally:
if os.path.isfile(out_file):
os.remove(out_file)
| 27.589041
| 79
| 0.514647
|
29d71b60d6f27be0100d798e8b7dc6cfe6346996
| 398
|
py
|
Python
|
qubo_nn/nn/__init__.py
|
instance01/qubo-nn
|
6f8058565f4b6ab4a8300501fc2f67cdaeed482f
|
[
"MIT"
] | 9
|
2021-09-17T09:40:59.000Z
|
2022-03-29T13:41:25.000Z
|
qubo_nn/nn/__init__.py
|
instance01/qubo-nn
|
6f8058565f4b6ab4a8300501fc2f67cdaeed482f
|
[
"MIT"
] | null | null | null |
qubo_nn/nn/__init__.py
|
instance01/qubo-nn
|
6f8058565f4b6ab4a8300501fc2f67cdaeed482f
|
[
"MIT"
] | 4
|
2022-03-06T19:26:19.000Z
|
2022-03-29T13:41:37.000Z
|
from qubo_nn.nn.models import Optimizer
from qubo_nn.nn.models import ReverseOptimizer
from qubo_nn.nn.models import AutoEncoderOptimizer
from qubo_nn.nn.models import RNNOptimizer
from qubo_nn.nn.models import A3Optimizer
from qubo_nn.nn.models import Resistance1
from qubo_nn.nn.models import Resistance2
from qubo_nn.nn.models import QbsolvOptimizer
from qubo_nn.nn.models import RedAEOptimizer
| 39.8
| 50
| 0.864322
|
2a700149c93a9755bcd39b36beb77013196c3ded
| 150
|
py
|
Python
|
test/test_prose_flip.py
|
erichaase/topcoder-python
|
de285d8092a94f2ec1b5c0c33eba55b5c27a5390
|
[
"MIT"
] | 1
|
2017-03-25T17:40:57.000Z
|
2017-03-25T17:40:57.000Z
|
test/test_prose_flip.py
|
erichaase/topcoder-python
|
de285d8092a94f2ec1b5c0c33eba55b5c27a5390
|
[
"MIT"
] | null | null | null |
test/test_prose_flip.py
|
erichaase/topcoder-python
|
de285d8092a94f2ec1b5c0c33eba55b5c27a5390
|
[
"MIT"
] | null | null | null |
from test.assert_json import assert_json
from topcoder.prose_flip import solution
def test_prose_flip ():
assert_json('prose_flip', solution)
| 25
| 43
| 0.793333
|
929bebcb5ac470f5e07ad587a2c8b8a57ef28b49
| 49,352
|
py
|
Python
|
tests/test_meshes.py
|
bruinxiong/pytorch3d
|
6b8a2223fe6bd981801750ec8becd81d71d148ec
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_meshes.py
|
bruinxiong/pytorch3d
|
6b8a2223fe6bd981801750ec8becd81d71d148ec
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_meshes.py
|
bruinxiong/pytorch3d
|
6b8a2223fe6bd981801750ec8becd81d71d148ec
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import random
import unittest
import numpy as np
import torch
from common_testing import TestCaseMixin
from pytorch3d.structures.meshes import Meshes
def init_mesh(
num_meshes: int = 10,
max_v: int = 100,
max_f: int = 300,
lists_to_tensors: bool = False,
device: str = "cpu",
requires_grad: bool = False,
):
"""
Function to generate a Meshes object of N meshes with
random numbers of vertices and faces.
Args:
num_meshes: Number of meshes to generate.
max_v: Max number of vertices per mesh.
max_f: Max number of faces per mesh.
lists_to_tensors: Determines whether the generated meshes should be
constructed from lists (=False) or
a tensor (=True) of faces/verts.
Returns:
Meshes object.
"""
device = torch.device(device)
verts_list = []
faces_list = []
# Randomly generate numbers of faces and vertices in each mesh.
if lists_to_tensors:
# If we define faces/verts with tensors, f/v has to be the
# same for each mesh in the batch.
f = torch.randint(1, max_f, size=(1,), dtype=torch.int32)
v = torch.randint(3, high=max_v, size=(1,), dtype=torch.int32)
f = f.repeat(num_meshes)
v = v.repeat(num_meshes)
else:
# For lists of faces and vertices, we can sample different v/f
# per mesh.
f = torch.randint(max_f, size=(num_meshes,), dtype=torch.int32)
v = torch.randint(3, high=max_v, size=(num_meshes,), dtype=torch.int32)
# Generate the actual vertices and faces.
for i in range(num_meshes):
verts = torch.rand(
(v[i], 3),
dtype=torch.float32,
device=device,
requires_grad=requires_grad,
)
faces = torch.randint(v[i], size=(f[i], 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
if lists_to_tensors:
verts_list = torch.stack(verts_list)
faces_list = torch.stack(faces_list)
return Meshes(verts=verts_list, faces=faces_list)
def init_simple_mesh(device: str = "cpu"):
"""
Returns a Meshes data structure of simple mesh examples.
Returns:
Meshes object.
"""
device = torch.device(device)
verts = [
torch.tensor(
[[0.1, 0.3, 0.5], [0.5, 0.2, 0.1], [0.6, 0.8, 0.7]],
dtype=torch.float32,
device=device,
),
torch.tensor(
[[0.1, 0.3, 0.3], [0.6, 0.7, 0.8], [0.2, 0.3, 0.4], [0.1, 0.5, 0.3]],
dtype=torch.float32,
device=device,
),
torch.tensor(
[
[0.7, 0.3, 0.6],
[0.2, 0.4, 0.8],
[0.9, 0.5, 0.2],
[0.2, 0.3, 0.4],
[0.9, 0.3, 0.8],
],
dtype=torch.float32,
device=device,
),
]
faces = [
torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device),
torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.int64, device=device),
torch.tensor(
[
[1, 2, 0],
[0, 1, 3],
[2, 3, 1],
[4, 3, 2],
[4, 0, 1],
[4, 3, 1],
[4, 2, 1],
],
dtype=torch.int64,
device=device,
),
]
return Meshes(verts=verts, faces=faces)
class TestMeshes(TestCaseMixin, unittest.TestCase):
def setUp(self) -> None:
np.random.seed(42)
torch.manual_seed(42)
def test_simple(self):
mesh = init_simple_mesh("cuda:0")
# Check that faces/verts per mesh are set in init:
self.assertClose(mesh._num_faces_per_mesh.cpu(), torch.tensor([1, 2, 7]))
self.assertClose(mesh._num_verts_per_mesh.cpu(), torch.tensor([3, 4, 5]))
# Check computed tensors
self.assertClose(
mesh.verts_packed_to_mesh_idx().cpu(),
torch.tensor([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]),
)
self.assertClose(
mesh.mesh_to_verts_packed_first_idx().cpu(), torch.tensor([0, 3, 7])
)
self.assertClose(
mesh.verts_padded_to_packed_idx().cpu(),
torch.tensor([0, 1, 2, 5, 6, 7, 8, 10, 11, 12, 13, 14]),
)
self.assertClose(
mesh.faces_packed_to_mesh_idx().cpu(),
torch.tensor([0, 1, 1, 2, 2, 2, 2, 2, 2, 2]),
)
self.assertClose(
mesh.mesh_to_faces_packed_first_idx().cpu(), torch.tensor([0, 1, 3])
)
self.assertClose(
mesh.num_edges_per_mesh().cpu(), torch.tensor([3, 5, 10], dtype=torch.int32)
)
self.assertClose(
mesh.mesh_to_edges_packed_first_idx().cpu(),
torch.tensor([0, 3, 8], dtype=torch.int64),
)
def test_init_error(self):
# Check if correct errors are raised when verts/faces are on
# different devices
mesh = init_mesh(10, 10, 100)
verts_list = mesh.verts_list() # all tensors on cpu
verts_list = [
v.to("cuda:0") if random.uniform(0, 1) > 0.5 else v for v in verts_list
]
faces_list = mesh.faces_list()
        with self.assertRaises(ValueError) as cm:
            Meshes(verts=verts_list, faces=faces_list)
        self.assertTrue("same device" in str(cm.exception))
verts_padded = mesh.verts_padded() # on cpu
verts_padded = verts_padded.to("cuda:0")
faces_padded = mesh.faces_padded()
        with self.assertRaises(ValueError) as cm:
            Meshes(verts=verts_padded, faces=faces_padded)
        self.assertTrue("same device" in str(cm.exception))
def test_simple_random_meshes(self):
# Define the test mesh object either as a list or tensor of faces/verts.
for lists_to_tensors in (False, True):
N = 10
mesh = init_mesh(N, 100, 300, lists_to_tensors=lists_to_tensors)
verts_list = mesh.verts_list()
faces_list = mesh.faces_list()
# Check batch calculations.
verts_padded = mesh.verts_padded()
faces_padded = mesh.faces_padded()
verts_per_mesh = mesh.num_verts_per_mesh()
faces_per_mesh = mesh.num_faces_per_mesh()
for n in range(N):
v = verts_list[n].shape[0]
f = faces_list[n].shape[0]
self.assertClose(verts_padded[n, :v, :], verts_list[n])
if verts_padded.shape[1] > v:
self.assertTrue(verts_padded[n, v:, :].eq(0).all())
self.assertClose(faces_padded[n, :f, :], faces_list[n])
if faces_padded.shape[1] > f:
self.assertTrue(faces_padded[n, f:, :].eq(-1).all())
self.assertEqual(verts_per_mesh[n], v)
self.assertEqual(faces_per_mesh[n], f)
# Check compute packed.
verts_packed = mesh.verts_packed()
vert_to_mesh = mesh.verts_packed_to_mesh_idx()
mesh_to_vert = mesh.mesh_to_verts_packed_first_idx()
faces_packed = mesh.faces_packed()
face_to_mesh = mesh.faces_packed_to_mesh_idx()
mesh_to_face = mesh.mesh_to_faces_packed_first_idx()
curv, curf = 0, 0
for n in range(N):
v = verts_list[n].shape[0]
f = faces_list[n].shape[0]
self.assertClose(verts_packed[curv : curv + v, :], verts_list[n])
self.assertClose(faces_packed[curf : curf + f, :] - curv, faces_list[n])
self.assertTrue(vert_to_mesh[curv : curv + v].eq(n).all())
self.assertTrue(face_to_mesh[curf : curf + f].eq(n).all())
self.assertTrue(mesh_to_vert[n] == curv)
self.assertTrue(mesh_to_face[n] == curf)
curv += v
curf += f
# Check compute edges and compare with numpy unique.
edges = mesh.edges_packed().cpu().numpy()
edge_to_mesh_idx = mesh.edges_packed_to_mesh_idx().cpu().numpy()
num_edges_per_mesh = mesh.num_edges_per_mesh().cpu().numpy()
npfaces_packed = mesh.faces_packed().cpu().numpy()
e01 = npfaces_packed[:, [0, 1]]
e12 = npfaces_packed[:, [1, 2]]
e20 = npfaces_packed[:, [2, 0]]
npedges = np.concatenate((e12, e20, e01), axis=0)
npedges = np.sort(npedges, axis=1)
unique_edges, unique_idx = np.unique(npedges, return_index=True, axis=0)
self.assertTrue(np.allclose(edges, unique_edges))
temp = face_to_mesh.cpu().numpy()
temp = np.concatenate((temp, temp, temp), axis=0)
edge_to_mesh = temp[unique_idx]
self.assertTrue(np.allclose(edge_to_mesh_idx, edge_to_mesh))
num_edges = np.bincount(edge_to_mesh, minlength=N)
self.assertTrue(np.allclose(num_edges_per_mesh, num_edges))
mesh_to_edges_packed_first_idx = (
mesh.mesh_to_edges_packed_first_idx().cpu().numpy()
)
self.assertTrue(
np.allclose(mesh_to_edges_packed_first_idx[1:], num_edges.cumsum()[:-1])
)
self.assertTrue(mesh_to_edges_packed_first_idx[0] == 0)
def test_allempty(self):
verts_list = []
faces_list = []
mesh = Meshes(verts=verts_list, faces=faces_list)
self.assertEqual(len(mesh), 0)
self.assertEqual(mesh.verts_padded().shape[0], 0)
self.assertEqual(mesh.faces_padded().shape[0], 0)
self.assertEqual(mesh.verts_packed().shape[0], 0)
self.assertEqual(mesh.faces_packed().shape[0], 0)
self.assertEqual(mesh.num_faces_per_mesh().shape[0], 0)
self.assertEqual(mesh.num_verts_per_mesh().shape[0], 0)
def test_empty(self):
N, V, F = 10, 100, 300
device = torch.device("cuda:0")
verts_list = []
faces_list = []
valid = torch.randint(2, size=(N,), dtype=torch.uint8, device=device)
for n in range(N):
if valid[n]:
v = torch.randint(
3, high=V, size=(1,), dtype=torch.int32, device=device
)[0]
f = torch.randint(F, size=(1,), dtype=torch.int32, device=device)[0]
verts = torch.rand((v, 3), dtype=torch.float32, device=device)
faces = torch.randint(v, size=(f, 3), dtype=torch.int64, device=device)
else:
verts = torch.tensor([], dtype=torch.float32, device=device)
faces = torch.tensor([], dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
verts_padded = mesh.verts_padded()
faces_padded = mesh.faces_padded()
verts_per_mesh = mesh.num_verts_per_mesh()
faces_per_mesh = mesh.num_faces_per_mesh()
for n in range(N):
v = len(verts_list[n])
f = len(faces_list[n])
if v > 0:
self.assertClose(verts_padded[n, :v, :], verts_list[n])
if verts_padded.shape[1] > v:
self.assertTrue(verts_padded[n, v:, :].eq(0).all())
if f > 0:
self.assertClose(faces_padded[n, :f, :], faces_list[n])
if faces_padded.shape[1] > f:
self.assertTrue(faces_padded[n, f:, :].eq(-1).all())
self.assertTrue(verts_per_mesh[n] == v)
self.assertTrue(faces_per_mesh[n] == f)
def test_padding(self):
N, V, F = 10, 100, 300
device = torch.device("cuda:0")
verts, faces = [], []
valid = torch.randint(2, size=(N,), dtype=torch.uint8, device=device)
num_verts, num_faces = (
torch.zeros(N, dtype=torch.int32),
torch.zeros(N, dtype=torch.int32),
)
for n in range(N):
verts.append(torch.rand((V, 3), dtype=torch.float32, device=device))
this_faces = torch.full((F, 3), -1, dtype=torch.int64, device=device)
if valid[n]:
v = torch.randint(
3, high=V, size=(1,), dtype=torch.int32, device=device
)[0]
f = torch.randint(F, size=(1,), dtype=torch.int32, device=device)[0]
this_faces[:f, :] = torch.randint(
v, size=(f, 3), dtype=torch.int64, device=device
)
num_verts[n] = v
num_faces[n] = f
faces.append(this_faces)
mesh = Meshes(verts=torch.stack(verts), faces=torch.stack(faces))
# Check verts/faces per mesh are set correctly in init.
self.assertListEqual(mesh._num_faces_per_mesh.tolist(), num_faces.tolist())
self.assertListEqual(mesh._num_verts_per_mesh.tolist(), [V] * N)
for n, (vv, ff) in enumerate(zip(mesh.verts_list(), mesh.faces_list())):
self.assertClose(ff, faces[n][: num_faces[n]])
self.assertClose(vv, verts[n])
new_faces = [ff.clone() for ff in faces]
v = torch.randint(3, high=V, size=(1,), dtype=torch.int32, device=device)[0]
f = torch.randint(F - 10, size=(1,), dtype=torch.int32, device=device)[0]
this_faces = torch.full((F, 3), -1, dtype=torch.int64, device=device)
this_faces[10 : f + 10, :] = torch.randint(
v, size=(f, 3), dtype=torch.int64, device=device
)
new_faces[3] = this_faces
with self.assertRaisesRegex(ValueError, "Padding of faces"):
Meshes(verts=torch.stack(verts), faces=torch.stack(new_faces))
def test_clone(self):
N = 5
mesh = init_mesh(N, 10, 100)
for force in [0, 1]:
if force:
# force mesh to have computed attributes
mesh.verts_packed()
mesh.edges_packed()
mesh.verts_padded()
new_mesh = mesh.clone()
# Modify tensors in both meshes.
new_mesh._verts_list[0] = new_mesh._verts_list[0] * 5
# Check cloned and original Meshes objects do not share tensors.
self.assertFalse(
torch.allclose(new_mesh._verts_list[0], mesh._verts_list[0])
)
self.assertSeparate(new_mesh.verts_packed(), mesh.verts_packed())
self.assertSeparate(new_mesh.verts_padded(), mesh.verts_padded())
self.assertSeparate(new_mesh.faces_packed(), mesh.faces_packed())
self.assertSeparate(new_mesh.faces_padded(), mesh.faces_padded())
self.assertSeparate(new_mesh.edges_packed(), mesh.edges_packed())
def test_detach(self):
N = 5
mesh = init_mesh(N, 10, 100, requires_grad=True)
for force in [0, 1]:
if force:
# force mesh to have computed attributes
mesh.verts_packed()
mesh.edges_packed()
mesh.verts_padded()
new_mesh = mesh.detach()
self.assertFalse(new_mesh.verts_packed().requires_grad)
self.assertClose(new_mesh.verts_packed(), mesh.verts_packed())
self.assertFalse(new_mesh.verts_padded().requires_grad)
self.assertClose(new_mesh.verts_padded(), mesh.verts_padded())
for v, newv in zip(mesh.verts_list(), new_mesh.verts_list()):
self.assertFalse(newv.requires_grad)
self.assertClose(newv, v)
def test_laplacian_packed(self):
def naive_laplacian_packed(meshes):
verts_packed = meshes.verts_packed()
edges_packed = meshes.edges_packed()
V = verts_packed.shape[0]
L = torch.zeros((V, V), dtype=torch.float32, device=meshes.device)
for e in edges_packed:
L[e[0], e[1]] = 1
                # symmetric
L[e[1], e[0]] = 1
deg = L.sum(1).view(-1, 1)
deg[deg > 0] = 1.0 / deg[deg > 0]
L = L * deg
diag = torch.eye(V, dtype=torch.float32, device=meshes.device)
L.masked_fill_(diag > 0, -1)
return L
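        # Illustration (not part of the test): for a single triangle with
        # vertices {0, 1, 2} and edges (0, 1), (1, 2), (0, 2), every vertex has
        # degree 2, so the construction above yields
        #     L = [[-1.0,  0.5,  0.5],
        #          [ 0.5, -1.0,  0.5],
        #          [ 0.5,  0.5, -1.0]]
        # i.e. off-diagonal entries are 1 / deg(i) for adjacent vertices and
        # the diagonal is -1.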
        # Note that we don't test with random meshes for this case, as the
        # Laplacian is only defined for simple graphs (i.e. valid meshes).
meshes = init_simple_mesh("cuda:0")
lapl_naive = naive_laplacian_packed(meshes)
lapl = meshes.laplacian_packed().to_dense()
# check with naive
self.assertClose(lapl, lapl_naive)
def test_offset_verts(self):
def naive_offset_verts(mesh, vert_offsets_packed):
# new Meshes class
new_verts_packed = mesh.verts_packed() + vert_offsets_packed
new_verts_list = list(
new_verts_packed.split(mesh.num_verts_per_mesh().tolist(), 0)
)
new_faces_list = [f.clone() for f in mesh.faces_list()]
return Meshes(verts=new_verts_list, faces=new_faces_list)
N = 5
mesh = init_mesh(N, 30, 100, lists_to_tensors=True)
all_v = mesh.verts_packed().size(0)
verts_per_mesh = mesh.num_verts_per_mesh()
for force, deform_shape in itertools.product([False, True], [(all_v, 3), 3]):
if force:
# force mesh to have computed attributes
mesh._compute_packed(refresh=True)
mesh._compute_padded()
mesh._compute_edges_packed()
mesh.verts_padded_to_packed_idx()
mesh._compute_face_areas_normals(refresh=True)
mesh._compute_vertex_normals(refresh=True)
deform = torch.rand(deform_shape, dtype=torch.float32, device=mesh.device)
# new meshes class to hold the deformed mesh
new_mesh_naive = naive_offset_verts(mesh, deform)
new_mesh = mesh.offset_verts(deform)
# check verts_list & faces_list
verts_cumsum = torch.cumsum(verts_per_mesh, 0).tolist()
verts_cumsum.insert(0, 0)
for i in range(N):
item_offset = (
deform
if deform.ndim == 1
else deform[verts_cumsum[i] : verts_cumsum[i + 1]]
)
self.assertClose(
new_mesh.verts_list()[i],
mesh.verts_list()[i] + item_offset,
)
self.assertClose(
new_mesh.verts_list()[i], new_mesh_naive.verts_list()[i]
)
self.assertClose(mesh.faces_list()[i], new_mesh_naive.faces_list()[i])
self.assertClose(
new_mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
)
# check faces and vertex normals
self.assertClose(
new_mesh.verts_normals_list()[i],
new_mesh_naive.verts_normals_list()[i],
atol=1e-6,
)
self.assertClose(
new_mesh.faces_normals_list()[i],
new_mesh_naive.faces_normals_list()[i],
atol=1e-6,
)
# check padded & packed
self.assertClose(new_mesh.faces_padded(), new_mesh_naive.faces_padded())
self.assertClose(new_mesh.verts_padded(), new_mesh_naive.verts_padded())
self.assertClose(new_mesh.faces_packed(), new_mesh_naive.faces_packed())
self.assertClose(new_mesh.verts_packed(), new_mesh_naive.verts_packed())
self.assertClose(new_mesh.edges_packed(), new_mesh_naive.edges_packed())
self.assertClose(
new_mesh.verts_packed_to_mesh_idx(),
new_mesh_naive.verts_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.mesh_to_verts_packed_first_idx(),
new_mesh_naive.mesh_to_verts_packed_first_idx(),
)
self.assertClose(
new_mesh.num_verts_per_mesh(), new_mesh_naive.num_verts_per_mesh()
)
self.assertClose(
new_mesh.faces_packed_to_mesh_idx(),
new_mesh_naive.faces_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.mesh_to_faces_packed_first_idx(),
new_mesh_naive.mesh_to_faces_packed_first_idx(),
)
self.assertClose(
new_mesh.num_faces_per_mesh(), new_mesh_naive.num_faces_per_mesh()
)
self.assertClose(
new_mesh.edges_packed_to_mesh_idx(),
new_mesh_naive.edges_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.verts_padded_to_packed_idx(),
new_mesh_naive.verts_padded_to_packed_idx(),
)
self.assertTrue(all(new_mesh.valid == new_mesh_naive.valid))
self.assertTrue(new_mesh.equisized == new_mesh_naive.equisized)
# check face areas, normals and vertex normals
self.assertClose(
new_mesh.verts_normals_packed(),
new_mesh_naive.verts_normals_packed(),
atol=1e-6,
)
self.assertClose(
new_mesh.verts_normals_padded(),
new_mesh_naive.verts_normals_padded(),
atol=1e-6,
)
self.assertClose(
new_mesh.faces_normals_packed(),
new_mesh_naive.faces_normals_packed(),
atol=1e-6,
)
self.assertClose(
new_mesh.faces_normals_padded(),
new_mesh_naive.faces_normals_padded(),
atol=1e-6,
)
self.assertClose(
new_mesh.faces_areas_packed(), new_mesh_naive.faces_areas_packed()
)
self.assertClose(
new_mesh.mesh_to_edges_packed_first_idx(),
new_mesh_naive.mesh_to_edges_packed_first_idx(),
)
def test_scale_verts(self):
def naive_scale_verts(mesh, scale):
if not torch.is_tensor(scale):
scale = torch.ones(len(mesh)).mul_(scale)
# new Meshes class
new_verts_list = [
scale[i] * v.clone() for (i, v) in enumerate(mesh.verts_list())
]
new_faces_list = [f.clone() for f in mesh.faces_list()]
return Meshes(verts=new_verts_list, faces=new_faces_list)
N = 5
for test in ["tensor", "scalar"]:
for force in (False, True):
mesh = init_mesh(N, 10, 100, lists_to_tensors=True)
if force:
# force mesh to have computed attributes
mesh.verts_packed()
mesh.edges_packed()
mesh.verts_padded()
mesh._compute_face_areas_normals(refresh=True)
mesh._compute_vertex_normals(refresh=True)
if test == "tensor":
scales = torch.rand(N)
elif test == "scalar":
scales = torch.rand(1)[0].item()
new_mesh_naive = naive_scale_verts(mesh, scales)
new_mesh = mesh.scale_verts(scales)
for i in range(N):
if test == "tensor":
self.assertClose(
scales[i] * mesh.verts_list()[i], new_mesh.verts_list()[i]
)
else:
self.assertClose(
scales * mesh.verts_list()[i], new_mesh.verts_list()[i]
)
self.assertClose(
new_mesh.verts_list()[i], new_mesh_naive.verts_list()[i]
)
self.assertClose(
mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
)
self.assertClose(
new_mesh.faces_list()[i], new_mesh_naive.faces_list()[i]
)
# check face and vertex normals
self.assertClose(
new_mesh.verts_normals_list()[i],
new_mesh_naive.verts_normals_list()[i],
)
self.assertClose(
new_mesh.faces_normals_list()[i],
new_mesh_naive.faces_normals_list()[i],
)
# check padded & packed
self.assertClose(new_mesh.faces_padded(), new_mesh_naive.faces_padded())
self.assertClose(new_mesh.verts_padded(), new_mesh_naive.verts_padded())
self.assertClose(new_mesh.faces_packed(), new_mesh_naive.faces_packed())
self.assertClose(new_mesh.verts_packed(), new_mesh_naive.verts_packed())
self.assertClose(new_mesh.edges_packed(), new_mesh_naive.edges_packed())
self.assertClose(
new_mesh.verts_packed_to_mesh_idx(),
new_mesh_naive.verts_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.mesh_to_verts_packed_first_idx(),
new_mesh_naive.mesh_to_verts_packed_first_idx(),
)
self.assertClose(
new_mesh.num_verts_per_mesh(), new_mesh_naive.num_verts_per_mesh()
)
self.assertClose(
new_mesh.faces_packed_to_mesh_idx(),
new_mesh_naive.faces_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.mesh_to_faces_packed_first_idx(),
new_mesh_naive.mesh_to_faces_packed_first_idx(),
)
self.assertClose(
new_mesh.num_faces_per_mesh(), new_mesh_naive.num_faces_per_mesh()
)
self.assertClose(
new_mesh.edges_packed_to_mesh_idx(),
new_mesh_naive.edges_packed_to_mesh_idx(),
)
self.assertClose(
new_mesh.verts_padded_to_packed_idx(),
new_mesh_naive.verts_padded_to_packed_idx(),
)
self.assertTrue(all(new_mesh.valid == new_mesh_naive.valid))
self.assertTrue(new_mesh.equisized == new_mesh_naive.equisized)
# check face areas, normals and vertex normals
self.assertClose(
new_mesh.verts_normals_packed(),
new_mesh_naive.verts_normals_packed(),
)
self.assertClose(
new_mesh.verts_normals_padded(),
new_mesh_naive.verts_normals_padded(),
)
self.assertClose(
new_mesh.faces_normals_packed(),
new_mesh_naive.faces_normals_packed(),
)
self.assertClose(
new_mesh.faces_normals_padded(),
new_mesh_naive.faces_normals_padded(),
)
self.assertClose(
new_mesh.faces_areas_packed(), new_mesh_naive.faces_areas_packed()
)
self.assertClose(
new_mesh.mesh_to_edges_packed_first_idx(),
new_mesh_naive.mesh_to_edges_packed_first_idx(),
)
def test_extend_list(self):
N = 10
mesh = init_mesh(5, 10, 100)
for force in [0, 1]:
if force:
# force some computes to happen
mesh._compute_packed(refresh=True)
mesh._compute_padded()
mesh._compute_edges_packed()
mesh.verts_padded_to_packed_idx()
new_mesh = mesh.extend(N)
self.assertEqual(len(mesh) * 10, len(new_mesh))
for i in range(len(mesh)):
for n in range(N):
self.assertClose(
mesh.verts_list()[i], new_mesh.verts_list()[i * N + n]
)
self.assertClose(
mesh.faces_list()[i], new_mesh.faces_list()[i * N + n]
)
self.assertTrue(mesh.valid[i] == new_mesh.valid[i * N + n])
self.assertAllSeparate(
mesh.verts_list()
+ new_mesh.verts_list()
+ mesh.faces_list()
+ new_mesh.faces_list()
)
self.assertTrue(new_mesh._verts_packed is None)
self.assertTrue(new_mesh._faces_packed is None)
self.assertTrue(new_mesh._verts_padded is None)
self.assertTrue(new_mesh._faces_padded is None)
self.assertTrue(new_mesh._edges_packed is None)
with self.assertRaises(ValueError):
mesh.extend(N=-1)
def test_to(self):
mesh = init_mesh(5, 10, 100)
cpu_device = torch.device("cpu")
converted_mesh = mesh.to("cpu")
self.assertEqual(cpu_device, converted_mesh.device)
self.assertEqual(cpu_device, mesh.device)
self.assertIs(mesh, converted_mesh)
converted_mesh = mesh.to(cpu_device)
self.assertEqual(cpu_device, converted_mesh.device)
self.assertEqual(cpu_device, mesh.device)
self.assertIs(mesh, converted_mesh)
cuda_device = torch.device("cuda")
converted_mesh = mesh.to("cuda")
self.assertEqual(cuda_device, converted_mesh.device)
self.assertEqual(cpu_device, mesh.device)
self.assertIsNot(mesh, converted_mesh)
converted_mesh = mesh.to(cuda_device)
self.assertEqual(cuda_device, converted_mesh.device)
self.assertEqual(cpu_device, mesh.device)
self.assertIsNot(mesh, converted_mesh)
def test_split_mesh(self):
mesh = init_mesh(5, 10, 100)
split_sizes = [2, 3]
split_meshes = mesh.split(split_sizes)
self.assertTrue(len(split_meshes[0]) == 2)
self.assertTrue(
split_meshes[0].verts_list()
== [mesh.get_mesh_verts_faces(0)[0], mesh.get_mesh_verts_faces(1)[0]]
)
self.assertTrue(len(split_meshes[1]) == 3)
self.assertTrue(
split_meshes[1].verts_list()
== [
mesh.get_mesh_verts_faces(2)[0],
mesh.get_mesh_verts_faces(3)[0],
mesh.get_mesh_verts_faces(4)[0],
]
)
split_sizes = [2, 0.3]
with self.assertRaises(ValueError):
mesh.split(split_sizes)
def test_update_padded(self):
# Define the test mesh object either as a list or tensor of faces/verts.
N = 10
for lists_to_tensors in (False, True):
for force in (True, False):
mesh = init_mesh(N, 100, 300, lists_to_tensors=lists_to_tensors)
num_verts_per_mesh = mesh.num_verts_per_mesh()
if force:
# force mesh to have computed attributes
mesh.verts_packed()
mesh.edges_packed()
mesh.laplacian_packed()
mesh.faces_areas_packed()
new_verts = torch.rand((mesh._N, mesh._V, 3), device=mesh.device)
new_verts_list = [
new_verts[i, : num_verts_per_mesh[i]] for i in range(N)
]
new_mesh = mesh.update_padded(new_verts)
# check the attributes assigned at construction time
self.assertEqual(new_mesh._N, mesh._N)
self.assertEqual(new_mesh._F, mesh._F)
self.assertEqual(new_mesh._V, mesh._V)
self.assertEqual(new_mesh.equisized, mesh.equisized)
self.assertTrue(all(new_mesh.valid == mesh.valid))
self.assertNotSeparate(
new_mesh.num_verts_per_mesh(), mesh.num_verts_per_mesh()
)
self.assertClose(
new_mesh.num_verts_per_mesh(), mesh.num_verts_per_mesh()
)
self.assertNotSeparate(
new_mesh.num_faces_per_mesh(), mesh.num_faces_per_mesh()
)
self.assertClose(
new_mesh.num_faces_per_mesh(), mesh.num_faces_per_mesh()
)
# check that the following attributes are not assigned
self.assertIsNone(new_mesh._verts_list)
self.assertIsNone(new_mesh._faces_areas_packed)
self.assertIsNone(new_mesh._faces_normals_packed)
self.assertIsNone(new_mesh._verts_normals_packed)
check_tensors = [
"_faces_packed",
"_verts_packed_to_mesh_idx",
"_faces_packed_to_mesh_idx",
"_mesh_to_verts_packed_first_idx",
"_mesh_to_faces_packed_first_idx",
"_edges_packed",
"_edges_packed_to_mesh_idx",
"_mesh_to_edges_packed_first_idx",
"_faces_packed_to_edges_packed",
"_num_edges_per_mesh",
]
for k in check_tensors:
v = getattr(new_mesh, k)
if not force:
self.assertIsNone(v)
else:
v_old = getattr(mesh, k)
self.assertNotSeparate(v, v_old)
self.assertClose(v, v_old)
# check verts/faces padded
self.assertClose(new_mesh.verts_padded(), new_verts)
self.assertNotSeparate(new_mesh.verts_padded(), new_verts)
self.assertClose(new_mesh.faces_padded(), mesh.faces_padded())
self.assertNotSeparate(new_mesh.faces_padded(), mesh.faces_padded())
# check verts/faces list
for i in range(N):
self.assertNotSeparate(
new_mesh.faces_list()[i], mesh.faces_list()[i]
)
self.assertClose(new_mesh.faces_list()[i], mesh.faces_list()[i])
self.assertSeparate(new_mesh.verts_list()[i], mesh.verts_list()[i])
self.assertClose(new_mesh.verts_list()[i], new_verts_list[i])
# check verts/faces packed
self.assertClose(new_mesh.verts_packed(), torch.cat(new_verts_list))
self.assertSeparate(new_mesh.verts_packed(), mesh.verts_packed())
self.assertClose(new_mesh.faces_packed(), mesh.faces_packed())
# check pad_to_packed
self.assertClose(
new_mesh.verts_padded_to_packed_idx(),
mesh.verts_padded_to_packed_idx(),
)
# check edges
self.assertClose(new_mesh.edges_packed(), mesh.edges_packed())
def test_get_mesh_verts_faces(self):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
verts_faces = [(10, 100), (20, 200)]
for (V, F) in verts_faces:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
for i, (V, F) in enumerate(verts_faces):
verts, faces = mesh.get_mesh_verts_faces(i)
self.assertTrue(len(verts) == V)
self.assertClose(verts, verts_list[i])
self.assertTrue(len(faces) == F)
self.assertClose(faces, faces_list[i])
with self.assertRaises(ValueError):
mesh.get_mesh_verts_faces(5)
with self.assertRaises(ValueError):
mesh.get_mesh_verts_faces(0.2)
def test_get_bounding_boxes(self):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
for (V, F) in [(10, 100)]:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mins = torch.min(verts, dim=0)[0]
maxs = torch.max(verts, dim=0)[0]
bboxes_gt = torch.stack([mins, maxs], dim=1).unsqueeze(0)
mesh = Meshes(verts=verts_list, faces=faces_list)
bboxes = mesh.get_bounding_boxes()
self.assertClose(bboxes_gt, bboxes)
def test_padded_to_packed_idx(self):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
verts_faces = [(10, 100), (20, 200), (30, 300)]
for (V, F) in verts_faces:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
verts_padded_to_packed_idx = mesh.verts_padded_to_packed_idx()
verts_packed = mesh.verts_packed()
verts_padded = mesh.verts_padded()
verts_padded_flat = verts_padded.view(-1, 3)
self.assertClose(verts_padded_flat[verts_padded_to_packed_idx], verts_packed)
idx = verts_padded_to_packed_idx.view(-1, 1).expand(-1, 3)
self.assertClose(verts_padded_flat.gather(0, idx), verts_packed)
def test_getitem(self):
device = torch.device("cuda:0")
verts_list = []
faces_list = []
verts_faces = [(10, 100), (20, 200), (30, 300)]
for (V, F) in verts_faces:
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
def check_equal(selected, indices):
for selectedIdx, index in enumerate(indices):
self.assertClose(
selected.verts_list()[selectedIdx], mesh.verts_list()[index]
)
self.assertClose(
selected.faces_list()[selectedIdx], mesh.faces_list()[index]
)
# int index
index = 1
mesh_selected = mesh[index]
self.assertTrue(len(mesh_selected) == 1)
check_equal(mesh_selected, [index])
# list index
index = [1, 2]
mesh_selected = mesh[index]
self.assertTrue(len(mesh_selected) == len(index))
check_equal(mesh_selected, index)
# slice index
index = slice(0, 2, 1)
mesh_selected = mesh[index]
check_equal(mesh_selected, [0, 1])
# bool tensor
index = torch.tensor([1, 0, 1], dtype=torch.bool, device=device)
mesh_selected = mesh[index]
self.assertTrue(len(mesh_selected) == index.sum())
check_equal(mesh_selected, [0, 2])
# int tensor
index = torch.tensor([1, 2], dtype=torch.int64, device=device)
mesh_selected = mesh[index]
self.assertTrue(len(mesh_selected) == index.numel())
check_equal(mesh_selected, index.tolist())
# invalid index
index = torch.tensor([1, 0, 1], dtype=torch.float32, device=device)
with self.assertRaises(IndexError):
mesh_selected = mesh[index]
index = 1.2
with self.assertRaises(IndexError):
mesh_selected = mesh[index]
def test_compute_faces_areas(self):
verts = torch.tensor(
[
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.25, 0.8, 0.0],
],
dtype=torch.float32,
)
faces = torch.tensor([[0, 1, 2], [0, 3, 4]], dtype=torch.int64)
mesh = Meshes(verts=[verts], faces=[faces])
face_areas = mesh.faces_areas_packed()
expected_areas = torch.tensor([0.125, 0.2])
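        # (Illustrative check of the expected values: face [0, 1, 2] is a right
        # triangle with legs of length 0.5, so its area is 0.5 * 0.5 * 0.5 = 0.125;
        # face [0, 3, 4] has a base of length 0.5 along x and height 0.8 along y,
        # so its area is 0.5 * 0.5 * 0.8 = 0.2.)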
self.assertClose(face_areas, expected_areas)
def test_compute_normals(self):
# Simple case with one mesh where normals point in either +/- ijk
verts = torch.tensor(
[
[0.1, 0.3, 0.0],
[0.5, 0.2, 0.0],
[0.6, 0.8, 0.0],
[0.0, 0.3, 0.2],
[0.0, 0.2, 0.5],
[0.0, 0.8, 0.7],
[0.5, 0.0, 0.2],
[0.6, 0.0, 0.5],
[0.8, 0.0, 0.7],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
],
dtype=torch.float32,
)
faces = torch.tensor(
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], dtype=torch.int64
)
mesh = Meshes(verts=[verts], faces=[faces])
self.assertFalse(mesh.has_verts_normals())
verts_normals_expected = torch.tensor(
[
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]
)
faces_normals_expected = verts_normals_expected[[0, 3, 6, 9], :]
self.assertTrue(
torch.allclose(mesh.verts_normals_list()[0], verts_normals_expected)
)
self.assertTrue(mesh.has_verts_normals())
self.assertTrue(
torch.allclose(mesh.faces_normals_list()[0], faces_normals_expected)
)
self.assertTrue(
torch.allclose(mesh.verts_normals_packed(), verts_normals_expected)
)
self.assertTrue(
torch.allclose(mesh.faces_normals_packed(), faces_normals_expected)
)
# Multiple meshes in the batch with equal sized meshes
meshes_extended = mesh.extend(3)
for m in meshes_extended.verts_normals_list():
self.assertClose(m, verts_normals_expected)
for f in meshes_extended.faces_normals_list():
self.assertClose(f, faces_normals_expected)
# Multiple meshes in the batch with different sized meshes
# Check padded and packed normals are the correct sizes.
verts2 = torch.tensor(
[
[0.1, 0.3, 0.0],
[0.5, 0.2, 0.0],
[0.6, 0.8, 0.0],
[0.0, 0.3, 0.2],
[0.0, 0.2, 0.5],
[0.0, 0.8, 0.7],
],
dtype=torch.float32,
)
faces2 = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.int64)
verts_list = [verts, verts2]
faces_list = [faces, faces2]
meshes = Meshes(verts=verts_list, faces=faces_list)
verts_normals_padded = meshes.verts_normals_padded()
faces_normals_padded = meshes.faces_normals_padded()
for n in range(len(meshes)):
v = verts_list[n].shape[0]
f = faces_list[n].shape[0]
if verts_normals_padded.shape[1] > v:
self.assertTrue(verts_normals_padded[n, v:, :].eq(0).all())
self.assertTrue(
torch.allclose(
verts_normals_padded[n, :v, :].view(-1, 3),
verts_normals_expected[:v, :],
)
)
if faces_normals_padded.shape[1] > f:
self.assertTrue(faces_normals_padded[n, f:, :].eq(0).all())
self.assertTrue(
torch.allclose(
faces_normals_padded[n, :f, :].view(-1, 3),
faces_normals_expected[:f, :],
)
)
verts_normals_packed = meshes.verts_normals_packed()
faces_normals_packed = meshes.faces_normals_packed()
self.assertTrue(
list(verts_normals_packed.shape) == [verts.shape[0] + verts2.shape[0], 3]
)
self.assertTrue(
list(faces_normals_packed.shape) == [faces.shape[0] + faces2.shape[0], 3]
)
# Single mesh where two faces share one vertex so the normal is
# the weighted sum of the two face normals.
verts = torch.tensor(
[
[0.1, 0.3, 0.0],
[0.5, 0.2, 0.0],
[0.0, 0.3, 0.2], # vertex is shared between two faces
[0.0, 0.2, 0.5],
[0.0, 0.8, 0.7],
],
dtype=torch.float32,
)
faces = torch.tensor([[0, 1, 2], [2, 3, 4]], dtype=torch.int64)
mesh = Meshes(verts=[verts], faces=[faces])
verts_normals_expected = torch.tensor(
[
[-0.2408, -0.9631, -0.1204],
[-0.2408, -0.9631, -0.1204],
[-0.9389, -0.3414, -0.0427],
[-1.0000, 0.0000, 0.0000],
[-1.0000, 0.0000, 0.0000],
]
)
faces_normals_expected = torch.tensor(
[[-0.2408, -0.9631, -0.1204], [-1.0000, 0.0000, 0.0000]]
)
self.assertTrue(
torch.allclose(
mesh.verts_normals_list()[0], verts_normals_expected, atol=4e-5
)
)
self.assertTrue(
torch.allclose(
mesh.faces_normals_list()[0], faces_normals_expected, atol=4e-5
)
)
# Check empty mesh has empty normals
meshes = Meshes(verts=[], faces=[])
self.assertEqual(meshes.verts_normals_packed().shape[0], 0)
self.assertEqual(meshes.verts_normals_padded().shape[0], 0)
self.assertEqual(meshes.verts_normals_list(), [])
self.assertEqual(meshes.faces_normals_packed().shape[0], 0)
self.assertEqual(meshes.faces_normals_padded().shape[0], 0)
self.assertEqual(meshes.faces_normals_list(), [])
def test_assigned_normals(self):
verts = torch.rand(2, 6, 3)
faces = torch.randint(6, size=(2, 4, 3))
no_normals = Meshes(verts=verts, faces=faces)
self.assertFalse(no_normals.has_verts_normals())
for verts_normals in [list(verts.unbind(0)), verts]:
yes_normals = Meshes(
verts=verts.clone(), faces=faces, verts_normals=verts_normals
)
self.assertTrue(yes_normals.has_verts_normals())
self.assertClose(yes_normals.verts_normals_padded(), verts)
yes_normals.offset_verts_(torch.FloatTensor([1, 2, 3]))
self.assertClose(yes_normals.verts_normals_padded(), verts)
yes_normals.offset_verts_(torch.FloatTensor([1, 2, 3]).expand(12, 3))
self.assertFalse(torch.allclose(yes_normals.verts_normals_padded(), verts))
def test_compute_faces_areas_cpu_cuda(self):
num_meshes = 10
max_v = 100
max_f = 300
mesh_cpu = init_mesh(num_meshes, max_v, max_f, device="cpu")
device = torch.device("cuda:0")
mesh_cuda = mesh_cpu.to(device)
face_areas_cpu = mesh_cpu.faces_areas_packed()
face_normals_cpu = mesh_cpu.faces_normals_packed()
face_areas_cuda = mesh_cuda.faces_areas_packed()
face_normals_cuda = mesh_cuda.faces_normals_packed()
self.assertClose(face_areas_cpu, face_areas_cuda.cpu(), atol=1e-6)
        # Because the normals are normalized by face areas that can be arbitrarily
        # small, they can become numerically unstable. Thus only compare normals
        # for faces with areas > eps = 1e-6.
nonzero = face_areas_cpu > 1e-6
self.assertClose(
face_normals_cpu[nonzero], face_normals_cuda.cpu()[nonzero], atol=1e-6
)
@staticmethod
def compute_packed_with_init(
num_meshes: int = 10, max_v: int = 100, max_f: int = 300, device: str = "cpu"
):
mesh = init_mesh(num_meshes, max_v, max_f, device=device)
torch.cuda.synchronize()
def compute_packed():
mesh._compute_packed(refresh=True)
torch.cuda.synchronize()
return compute_packed
@staticmethod
def compute_padded_with_init(
num_meshes: int = 10, max_v: int = 100, max_f: int = 300, device: str = "cpu"
):
mesh = init_mesh(num_meshes, max_v, max_f, device=device)
torch.cuda.synchronize()
def compute_padded():
mesh._compute_padded(refresh=True)
torch.cuda.synchronize()
return compute_padded
| 40.254486
| 88
| 0.546462
|
7f270aa922b170438b550bdfe986b31eb387df69
| 472
|
py
|
Python
|
sample_assignment/migrations/0002_alter_contract_id.py
|
ptrck/django-clone
|
5c868b65ac6a3e3367595f8aa54abc42ef0d0144
|
[
"MIT"
] | 55
|
2020-08-15T18:41:35.000Z
|
2022-03-29T09:53:02.000Z
|
sample_assignment/migrations/0002_alter_contract_id.py
|
ptrck/django-clone
|
5c868b65ac6a3e3367595f8aa54abc42ef0d0144
|
[
"MIT"
] | 236
|
2020-08-17T12:37:58.000Z
|
2022-03-31T11:04:19.000Z
|
sample_assignment/migrations/0002_alter_contract_id.py
|
ptrck/django-clone
|
5c868b65ac6a3e3367595f8aa54abc42ef0d0144
|
[
"MIT"
] | 6
|
2020-08-16T15:18:22.000Z
|
2022-02-02T09:59:34.000Z
|
# Generated by Django 3.2 on 2021-04-22 14:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("sample_assignment", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="contract",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
]
| 22.47619
| 87
| 0.586864
|
439eabc436ef2d0ca079b679e9268369edd32fbf
| 4,772
|
py
|
Python
|
keepassc/helper.py
|
thorkill/keepassc
|
b3779291fa651701c4a1a3306eb00a653493cbe8
|
[
"ISC"
] | 238
|
2015-01-05T17:57:41.000Z
|
2021-11-14T20:08:23.000Z
|
keepassc/helper.py
|
thorkill/keepassc
|
b3779291fa651701c4a1a3306eb00a653493cbe8
|
[
"ISC"
] | 10
|
2015-01-07T05:53:44.000Z
|
2021-07-25T10:11:44.000Z
|
keepassc/helper.py
|
thorkill/keepassc
|
b3779291fa651701c4a1a3306eb00a653493cbe8
|
[
"ISC"
] | 20
|
2015-01-05T14:27:28.000Z
|
2019-12-06T01:59:42.000Z
|
# -*- coding: utf-8 -*-
import struct
from os import makedirs, remove
from os.path import isdir, isfile
from Cryptodome.Hash import SHA256
from Cryptodome.Cipher import AES
def parse_config(control):
'''Parse the config file.
It's important that a line in the file is written without spaces,
that means
- 'foo=bar' is a valid line
- 'foo = bar' is not a valid one
'''
config = {'del_clip': True, # standard config
'clip_delay': 20,
'lock_db': True,
'lock_delay': 60,
'rem_db': True,
'rem_key': False,
'skip_menu': False,
'pin': True}
if isfile(control.config_home):
try:
handler = open(control.config_home, 'r')
except Exception as err: # don't know if this is good style
print(err.__str__())
else:
for line in handler:
key, val = line.split('=')
if val == 'True\n':
val = True
elif val == 'False\n':
val = False
else:
val = int(val)
if key in config:
config[key] = val
handler.close()
else: # write standard config
write_config(control, config)
return config
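# Example of a config file accepted by parse_config (illustrative only; keys
# must match the defaults above and lines must not contain spaces around '='):
#
#     del_clip=True
#     clip_delay=30
#     lock_db=False
#     lock_delay=60
#
# A line like 'del_clip = True' would not be parsed correctly: the key keeps
# its trailing space and the value no longer compares equal to 'True\n'.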
def write_config(control, config):
'''Function to write the config file'''
config_dir = control.config_home[:-7]
if not isdir(config_dir):
if isfile(config_dir):
remove(config_dir)
makedirs(config_dir)
try:
handler = open(control.config_home, 'w')
except Exception as err:
print(err.__str__())
return False
else:
for key, val in config.items():
handler.write(key + '=' + str(val) + '\n')
handler.close()
return True
def transform_key(masterkey, seed1, seed2, rounds):
"""This method creates the key to decrypt the database"""
if masterkey is None or seed1 is None or seed2 is None or rounds is None:
raise TypeError('None type not allowed')
aes = AES.new(seed1, AES.MODE_ECB)
# Encrypt the created hash
for i in range(rounds):
masterkey = aes.encrypt(masterkey)
# Finally, hash it again...
sha_obj = SHA256.new()
sha_obj.update(masterkey)
masterkey = sha_obj.digest()
# ...and hash the result together with the randomseed
sha_obj = SHA256.new()
sha_obj.update(seed2 + masterkey)
return sha_obj.digest()
def get_passwordkey(key):
"""This method hashes key"""
if key is None:
raise TypeError('None type not allowed')
sha = SHA256.new()
sha.update(key.encode('utf-8'))
return sha.digest()
def get_filekey(keyfile):
"""This method creates a key from a keyfile."""
try:
handler = open(keyfile, 'rb')
buf = handler.read()
except:
raise OSError('Could not open or read file.')
else:
handler.close()
sha = SHA256.new()
if len(buf) == 33:
sha.update(buf)
return sha.digest()
elif len(buf) == 65:
sha.update(struct.unpack('<65s', buf)[0].decode())
return sha.digest()
else:
while buf:
if len(buf) <= 2049:
sha.update(buf)
buf = []
else:
sha.update(buf[:2048])
buf = buf[2048:]
return sha.digest()
def get_remote_filekey(buf):
"""This method creates a key from a keyfile."""
sha = SHA256.new()
if len(buf) == 33:
sha.update(buf)
return sha.digest()
elif len(buf) == 65:
sha.update(struct.unpack('<65s', buf)[0].decode())
return sha.digest()
else:
while buf:
if len(buf) <= 2049:
sha.update(buf)
buf = []
else:
sha.update(buf[:2048])
buf = buf[2048:]
return sha.digest()
def get_key(password, keyfile, remote = False):
"""Get a key generated from KeePass-password and -keyfile"""
if password is None and keyfile is None:
raise TypeError('None type not allowed')
elif password is None:
if remote is True:
masterkey = get_remote_filekey(keyfile)
else:
masterkey = get_filekey(keyfile)
elif password is not None and keyfile is not None:
passwordkey = get_passwordkey(password)
if remote is True:
filekey = get_remote_filekey(keyfile)
else:
filekey = get_filekey(keyfile)
sha = SHA256.new()
sha.update(passwordkey+filekey)
masterkey = sha.digest()
else:
masterkey = get_passwordkey(password)
return masterkey
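# Illustrative sketch (not part of the library): composing get_key and
# transform_key to derive the final decryption key. The seed values and the
# round count are assumed to have been read from the database header elsewhere;
# the attribute names below are placeholders, not a real API.
#
#     masterkey = get_key('secret password', None)
#     final_key = transform_key(masterkey,
#                               header.transf_randomseed,
#                               header.final_randomseed,
#                               header.key_transf_rounds)
#
# final_key is the 32-byte SHA-256 digest used to decrypt the database payload.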
| 28.070588
| 77
| 0.557209
|
a80cb1eec9c63a6e52fdbb9bc786cab7869a03b2
| 14,452
|
py
|
Python
|
python/paddle/fluid/regularizer.py
|
wangwin/Paddle
|
b7d185d6caf78630d228dfcb90750a21d637583d
|
[
"Apache-2.0"
] | 1
|
2019-06-13T11:32:16.000Z
|
2019-06-13T11:32:16.000Z
|
python/paddle/fluid/regularizer.py
|
wangwin/Paddle
|
b7d185d6caf78630d228dfcb90750a21d637583d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/regularizer.py
|
wangwin/Paddle
|
b7d185d6caf78630d228dfcb90750a21d637583d
|
[
"Apache-2.0"
] | 2
|
2019-08-16T12:03:28.000Z
|
2019-09-03T13:02:57.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from . import framework
from .framework import in_dygraph_mode, _varbase_creator
from . import core
import logging
__all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer']
def _create_regularization_of_grad(param,
grad,
regularization=None,
_repeat_regularizer=None):
""" Create and add backward regularization Operators
Function helper of append_regularization_ops.
"""
# If no gradient or no regularization is specified, then we don't need to do anything
if grad is None or (param.regularizer is None and regularization is None):
return grad
regularization_term = None
if param.regularizer is not None:
if regularization is not None:
_repeat_regularizer.append(param.name)
# Add variable for regularization term in grad block
regularization_term = param.regularizer(param, grad, grad.block)
elif regularization is not None:
regularization_term = regularization(param, grad, grad.block)
assert regularization_term is not None
new_grad = grad
if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
# FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
# the grad's type and name will be changed. But the gradient's name
# is used in ParallelExecutor Reduce mode, so I add a flag for
# the new_grad here.
new_grad = grad.block.create_var(
name=grad.name + core.kNewGradSuffix(),
dtype=param.dtype,
shape=param.shape,
lod_level=param.lod_level,
type=core.VarDesc.VarType.LOD_TENSOR)
inputs = {"X": [grad, regularization_term]}
outputs = {"Out": [new_grad]}
if in_dygraph_mode():
new_grad = core.ops.sum([grad, regularization_term])
else:
grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)
return new_grad
def append_regularization_ops(parameters_and_grads, regularization=None):
"""Create and add backward regularization Operators
Creates and adds backward regularization operators in the BlockDesc.
This will add gradients of the regularizer function to the gradients
of the parameters and return these modified gradients. This is the
same as implementing weight decay in optimizers for regularization.
Args:
parameters_and_grads: A list of (parameters, gradients) pairs
that need to be regularized.
        regularization: A global regularizer. If a parameter has not set its
                        own regularizer, this one will be applied to it.
    Returns:
        list[(Variable, Variable)]: list of (parameters, gradients) \
        pairs with the regularized gradient
Raises:
Exception: Unknown regularization type
"""
params_and_grads = []
_repeat_regularizer = []
if in_dygraph_mode():
for param, grad in parameters_and_grads:
new_grad = _create_regularization_of_grad(
param, grad, regularization, _repeat_regularizer)
params_and_grads.append((param, new_grad))
else:
with framework.name_scope('regularization'):
for param, grad in parameters_and_grads:
with param.block.program._optimized_guard([param, grad]):
new_grad = _create_regularization_of_grad(
param, grad, regularization, _repeat_regularizer)
params_and_grads.append((param, new_grad))
if len(_repeat_regularizer) > 0:
param_name_strlist = ", ".join(_repeat_regularizer)
logging.info(
"Regularization of [%s] have been set by ParamAttr or WeightNormParamAttr already. "
"So, the Regularization of Optimizer will not take effect for these parameters!"
% param_name_strlist)
return params_and_grads
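# Illustrative use (normally this helper is invoked by the optimizer itself
# during minimize()/apply_gradients(), not called directly by users):
#
#     params_grads = optimizer.backward(avg_loss)
#     params_grads = append_regularization_ops(
#         params_grads, regularization=L2DecayRegularizer(regularization_coeff=1e-4))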
class WeightDecayRegularizer(object):
"""Base class for weight decay regularizers
Defines the common interface of weight-decay regularizers.
Weight-decay regularizers are added only during the backward
pass for faster regularization. They add operations to the network
that correspond to gradient of the regularization function.
Users should not use this class directly, but need to use one
of its implementations
"""
def __init__(self):
pass
def __call__(self, param, grad, block):
"""Add corresponding weight decay operations to the network
"""
raise NotImplementedError()
def __str__(self):
"""Debug string
"""
raise NotImplementedError()
class L2DecayRegularizer(WeightDecayRegularizer):
"""
    Implement the L2 Weight Decay Regularization, which helps to prevent the model from over-fitting.
It can be set in :ref:`api_fluid_ParamAttr` or ``optimizer`` (such as :ref:`api_fluid_optimizer_SGDOptimizer` ).
When set in ``ParamAttr`` , it only takes effect for trainable parameters in this layer. When set in
``optimizer`` , it takes effect for all trainable parameters. When set together, ``ParamAttr`` has
higher priority than ``optimizer`` .
In the implementation, the formula of L2 Weight Decay Regularization is as follows:
.. math::
L2WeightDecay = reg\_coeff * parameter
Args:
regularization_coeff(float, optional): regularization coeff. Default:0.0
Examples:
.. code-block:: python
# Example1: set Regularizer in optimizer
import paddle.fluid as fluid
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = fluid.layers.fc(input=data, size=128, act='relu')
prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
avg_loss = fluid.layers.mean(loss)
optimizer = fluid.optimizer.Adagrad(
learning_rate=1e-4,
regularization=fluid.regularizer.L2Decay(
regularization_coeff=0.1))
optimizer.minimize(avg_loss)
# Example2: set Regularizer both in ParamAttr and optimizer
import paddle.fluid as fluid
l1 = fluid.regularizer.L1Decay(regularization_coeff=0.1)
l2 = fluid.regularizer.L2Decay(regularization_coeff=0.1)
x = fluid.layers.uniform_random([3,4])
# set L1 regularization in fluid.ParamAttr
w_param = fluid.ParamAttr(regularizer=l1)
hidden1 = fluid.layers.fc(x, 8, param_attr=w_param) # fc_0.w_0(L1), fc_0.b_0
hidden2 = fluid.layers.fc(hidden1, 16, param_attr=w_param) # fc_1.w_0(L1), fc_1.b_0
predict = fluid.layers.fc(hidden2, 32) # fc_3.w_0, fc_3.b_0
avg_loss = fluid.layers.mean(predict)
# set L2 regularization in optimizer
optimizer = fluid.optimizer.SGD(learning_rate=1e-4, regularization=l2)
optimizer.minimize(avg_loss)
# it will Print Message:
# Regularization of [fc_0.w_0, fc_1.w_0] have been set by ParamAttr or WeightNormParamAttr already.
# So, the Regularization of Optimizer will not take effect for these parameters!
"""
def __init__(self, regularization_coeff=0.0):
assert regularization_coeff is not None
super(L2DecayRegularizer, self).__init__()
self._regularization_coeff = regularization_coeff
def __call__(self, param, grad, block):
"""Add L2 weight decay ops to network
Adds L2 weight decay ops.
L2WeightDecay = reg_coeff * parameter
        Args:
            param: parameter variable for which regularization is applied
            grad: gradient of the parameter
            block: block in which variable is to be created
Returns:
new variable for weight decay
"""
assert isinstance(param, framework.Parameter)
assert isinstance(block, framework.Block)
inputs = {"X": [param]}
attrs = {"scale": self._regularization_coeff}
if framework.in_dygraph_mode():
return core.ops.scale(param, "scale", self._regularization_coeff)
else:
decay = block.create_var(
dtype=param.dtype, shape=param.shape, lod_level=param.lod_level)
# Append Op to calculate decay
block.append_op(
type='scale',
inputs={"X": param},
outputs={"Out": decay},
attrs={"scale": self._regularization_coeff})
return decay
def __str__(self):
return "L2Decay, regularization_coeff=%f" % self._regularization_coeff
class L1DecayRegularizer(WeightDecayRegularizer):
"""
Implement the L1 Weight Decay Regularization, which encourages the weights to be sparse.
It can be set in :ref:`api_fluid_ParamAttr` or ``optimizer`` (such as :ref:`api_fluid_optimizer_SGDOptimizer` ).
When set in ``ParamAttr`` , it only takes effect for trainable parameters in this layer. When set in
``optimizer`` , it takes effect for all trainable parameters. When set together, ``ParamAttr`` has
higher priority than ``optimizer`` .
In the implementation, the formula of L1 Weight Decay Regularization is as follows:
.. math::
L1WeightDecay = reg\_coeff * sign(parameter)
Args:
regularization_coeff(float, optional): regularization coeff. Default:0.0.
Examples:
.. code-block:: python
# Example1: set Regularizer in optimizer
import paddle.fluid as fluid
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = fluid.layers.fc(input=data, size=128, act='relu')
prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
avg_loss = fluid.layers.mean(loss)
optimizer = fluid.optimizer.Adagrad(
learning_rate=1e-4,
regularization=fluid.regularizer.L1DecayRegularizer(
regularization_coeff=0.1))
optimizer.minimize(avg_loss)
# Example2: set Regularizer both in ParamAttr and optimizer
import paddle.fluid as fluid
l1 = fluid.regularizer.L1Decay(regularization_coeff=0.1)
l2 = fluid.regularizer.L2Decay(regularization_coeff=0.1)
x = fluid.layers.uniform_random([3,4])
# set L1 regularization in fluid.ParamAttr
w_param = fluid.ParamAttr(regularizer=l1)
hidden1 = fluid.layers.fc(x, 8, param_attr=w_param) # fc_0.w_0(L1), fc_0.b_0
hidden2 = fluid.layers.fc(hidden1, 16, param_attr=w_param) # fc_1.w_0(L1), fc_1.b_0
predict = fluid.layers.fc(hidden2, 32) # fc_3.w_0, fc_3.b_0
avg_loss = fluid.layers.mean(predict)
# set L2 regularization in optimizer
optimizer = fluid.optimizer.SGD(learning_rate=1e-4, regularization=l2)
optimizer.minimize(avg_loss)
# it will Print Message:
# Regularization of [fc_0.w_0, fc_1.w_0] have been set by ParamAttr or WeightNormParamAttr already.
# So, the Regularization of Optimizer will not take effect for these parameters!
"""
def __init__(self, regularization_coeff=0.0):
assert regularization_coeff is not None
super(L1DecayRegularizer, self).__init__()
self._regularization_coeff = regularization_coeff
def __call__(self, param, grad, block):
"""Add L1 weight decay ops to network
Adds L1 weight decay ops.
L1WeightDecay = reg_coeff * sign(parameter)
        Args:
            param: parameter variable for which regularization is applied
            grad: gradient of the parameter
            block: block in which variable is to be created
Returns:
new variable for weight decay
"""
assert isinstance(param, framework.Parameter)
assert isinstance(block, framework.Block)
if framework.in_dygraph_mode():
decay = block.create_var(dtype=param.dtype, shape=param.shape)
else:
decay = block.create_var(
dtype=param.dtype, shape=param.shape, lod_level=param.lod_level)
# Append sign op
block.append_op(
type='sign', inputs={"X": param}, outputs={"Out": decay})
# Append scale op to the output of sign op
block.append_op(
type='scale',
inputs={"X": decay},
outputs={"Out": decay},
attrs={"scale": self._regularization_coeff})
return decay
def __str__(self):
return "L1Decay, regularization_coeff=%f" % self._regularization_coeff
# We shorten the class name, since users will use the regularizer with the
# package name. The sample code:
#
# import paddle.fluid as fluid
#
# hidden = fluid.layers.fc(...,
#                          param_attr=fluid.ParamAttr(
#                              regularizer=fluid.regularizer.L2Decay(0.1)))
#
# There is no need to add a `Regularizer` suffix to the class name.
L1Decay = L1DecayRegularizer
L2Decay = L2DecayRegularizer
| 39.594521
| 117
| 0.645585
|
5487ab7f67e9c1d5ae9feeb0e06052f763567839
| 4,671
|
py
|
Python
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/resource_type.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-04-16T07:59:28.000Z
|
2021-04-16T07:59:28.000Z
|
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/resource_type.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/resource_type.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2022-01-17T02:24:18.000Z
|
2022-01-17T02:24:18.000Z
|
# coding: utf-8
import pprint
import re
import six
class ResourceType:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'resource_type_code': 'str',
'resource_type_name': 'str',
'resource_type_desc': 'str'
}
attribute_map = {
'resource_type_code': 'resource_type_code',
'resource_type_name': 'resource_type_name',
'resource_type_desc': 'resource_type_desc'
}
def __init__(self, resource_type_code=None, resource_type_name=None, resource_type_desc=None):
"""ResourceType - a model defined in huaweicloud sdk"""
self._resource_type_code = None
self._resource_type_name = None
self._resource_type_desc = None
self.discriminator = None
if resource_type_code is not None:
self.resource_type_code = resource_type_code
if resource_type_name is not None:
self.resource_type_name = resource_type_name
if resource_type_desc is not None:
self.resource_type_desc = resource_type_desc
@property
def resource_type_code(self):
"""Gets the resource_type_code of this ResourceType.
        The resource type code. For example, the code of an ECS VM is "hws.resource.type.vm".
:return: The resource_type_code of this ResourceType.
:rtype: str
"""
return self._resource_type_code
@resource_type_code.setter
def resource_type_code(self, resource_type_code):
"""Sets the resource_type_code of this ResourceType.
        The resource type code. For example, the code of an ECS VM is "hws.resource.type.vm".
:param resource_type_code: The resource_type_code of this ResourceType.
:type: str
"""
self._resource_type_code = resource_type_code
@property
def resource_type_name(self):
"""Gets the resource_type_name of this ResourceType.
        The resource type name.
:return: The resource_type_name of this ResourceType.
:rtype: str
"""
return self._resource_type_name
@resource_type_name.setter
def resource_type_name(self, resource_type_name):
"""Sets the resource_type_name of this ResourceType.
        The resource type name.
:param resource_type_name: The resource_type_name of this ResourceType.
:type: str
"""
self._resource_type_name = resource_type_name
@property
def resource_type_desc(self):
"""Gets the resource_type_desc of this ResourceType.
        The resource type description.
:return: The resource_type_desc of this ResourceType.
:rtype: str
"""
return self._resource_type_desc
@resource_type_desc.setter
def resource_type_desc(self, resource_type_desc):
"""Sets the resource_type_desc of this ResourceType.
        The resource type description.
:param resource_type_desc: The resource_type_desc of this ResourceType.
:type: str
"""
self._resource_type_desc = resource_type_desc
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
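# Illustrative usage (not part of the generated SDK):
#
#     rt = ResourceType(resource_type_code='hws.resource.type.vm',
#                       resource_type_name='Cloud Host',
#                       resource_type_desc='ECS virtual machine')
#     print(rt.to_dict())
#     # {'resource_type_code': 'hws.resource.type.vm',
#     #  'resource_type_name': 'Cloud Host',
#     #  'resource_type_desc': 'ECS virtual machine'}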
| 28.309091
| 98
| 0.60137
|
d0ccfcf0ccd41cc01d09349cff50b3442313ef51
| 5,610
|
py
|
Python
|
framework/boards/NRF52832DK.py
|
leeeastwood/Haiway
|
153b6861f864966c454f97febe03ae191a9a8657
|
[
"MIT"
] | 162
|
2019-01-04T14:23:52.000Z
|
2021-12-26T05:51:34.000Z
|
framework/boards/NRF52832DK.py
|
leeeastwood/Haiway
|
153b6861f864966c454f97febe03ae191a9a8657
|
[
"MIT"
] | 6
|
2019-01-04T14:32:15.000Z
|
2020-08-07T06:47:34.000Z
|
framework/boards/NRF52832DK.py
|
leeeastwood/Haiway
|
153b6861f864966c454f97febe03ae191a9a8657
|
[
"MIT"
] | 130
|
2019-01-04T14:24:33.000Z
|
2021-06-25T16:48:56.000Z
|
#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
info = {
'name' : "nRF52 Development Kit",
'link' : [ "https://www.nordicsemi.com/eng/Products/Bluetooth-low-energy/nRF52-DK" ],
'espruino_page_link' : 'nRF52832DK',
# This is the PCA10036
'default_console' : "EV_SERIAL1",
'default_console_tx' : "D6",
'default_console_rx' : "D8",
'default_console_baudrate' : "9600",
  'variables' : 2250, # How many variables are allocated for Espruino to use. RAM will overflow if this number is too high and the code won't compile.
# 'bootloader' : 1,
'binary_name' : 'espruino_%v_nrf52832.hex',
'build' : {
'optimizeflags' : '-Os',
'libraries' : [
'BLUETOOTH',
'NET',
'GRAPHICS',
'NFC',
'NEOPIXEL'
],
'makefile' : [
'DEFINES+=-DCONFIG_GPIO_AS_PINRESET', # Allow the reset pin to work
'DEFINES += -DBOARD_PCA10040 -DPCA10040'
]
}
};
chip = {
'part' : "NRF52832",
'family' : "NRF52",
'package' : "QFN48",
'ram' : 64,
'flash' : 512,
'speed' : 64,
'usart' : 1,
'spi' : 1,
'i2c' : 1,
'adc' : 1,
'dac' : 0,
'saved_code' : {
'address' : ((118 - 10) * 4096), # Bootloader takes pages 120-127, FS takes 118-119
'page_size' : 4096,
'pages' : 10,
'flash_available' : 512 - ((31 + 8 + 2 + 10)*4) # Softdevice uses 31 pages of flash, bootloader 8, FS 2, code 10. Each page is 4 kb.
},
};
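# Illustrative flash map implied by 'saved_code' above (4 kB pages, 128 pages total):
#   pages   0-30  : SoftDevice (31 pages, assumed to sit at the start of flash)
#   pages 108-117 : saved JS code (10 pages, start address (118 - 10) * 4096)
#   pages 118-119 : flash filesystem (2 pages)
#   pages 120-127 : bootloader (8 pages)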
devices = {
'BTN1' : { 'pin' : 'D13', 'pinstate' : 'IN_PULLDOWN' }, # Pin negated in software
'BTN2' : { 'pin' : 'D14', 'pinstate' : 'IN_PULLDOWN' }, # Pin negated in software
'BTN3' : { 'pin' : 'D15', 'pinstate' : 'IN_PULLDOWN' }, # Pin negated in software
'BTN4' : { 'pin' : 'D16', 'pinstate' : 'IN_PULLDOWN' }, # Pin negated in software
'LED1' : { 'pin' : 'D17' }, # Pin negated in software
'LED2' : { 'pin' : 'D18' }, # Pin negated in software
'LED3' : { 'pin' : 'D19' }, # Pin negated in software
'LED4' : { 'pin' : 'D20' }, # Pin negated in software
'RX_PIN_NUMBER' : { 'pin' : 'D8'},
'TX_PIN_NUMBER' : { 'pin' : 'D6'},
'CTS_PIN_NUMBER' : { 'pin' : 'D7'},
'RTS_PIN_NUMBER' : { 'pin' : 'D5'},
# Pin D22 is used for clock when driving neopixels - as not specifying a pin seems to break things
};
# left-right, or top-bottom order
board = {
'left' : [ 'VDD', 'VDD', 'RESET', 'VDD','5V','GND','GND','','','D3','D4','D28','D29','D30','D31'],
'right' : [
'D27', 'D26', 'D2', 'GND', 'D25','D24','D23', 'D22','D20','D19','',
'D18','D17','D16','D15','D14','D13','D12','D11','',
'D10','D9','D8','D7','D6','D5','D21','D1','D0'],
'_notes' : {
'D6' : "Serial console RX",
'D8' : "Serial console TX"
}
};
board["_css"] = """
#board {
width: 528px;
height: 800px;
top: 0px;
left : 200px;
background-image: url(img/NRF52832DK.jpg);
}
#boardcontainer {
height: 900px;
}
#left {
top: 219px;
right: 466px;
}
#right {
top: 150px;
left: 466px;
}
.leftpin { height: 17px; }
.rightpin { height: 17px; }
""";
def get_pins():
pins = pinutils.generate_pins(0,31) # 32 General Purpose I/O Pins.
pinutils.findpin(pins, "PD0", True)["functions"]["XL1"]=0;
pinutils.findpin(pins, "PD1", True)["functions"]["XL2"]=0;
pinutils.findpin(pins, "PD5", True)["functions"]["RTS"]=0;
pinutils.findpin(pins, "PD6", True)["functions"]["TXD"]=0;
pinutils.findpin(pins, "PD7", True)["functions"]["CTS"]=0;
pinutils.findpin(pins, "PD8", True)["functions"]["RXD"]=0;
pinutils.findpin(pins, "PD9", True)["functions"]["NFC1"]=0;
pinutils.findpin(pins, "PD10", True)["functions"]["NFC2"]=0;
pinutils.findpin(pins, "PD2", True)["functions"]["ADC1_IN0"]=0;
pinutils.findpin(pins, "PD3", True)["functions"]["ADC1_IN1"]=0;
pinutils.findpin(pins, "PD4", True)["functions"]["ADC1_IN2"]=0;
pinutils.findpin(pins, "PD5", True)["functions"]["ADC1_IN3"]=0;
pinutils.findpin(pins, "PD28", True)["functions"]["ADC1_IN4"]=0;
pinutils.findpin(pins, "PD29", True)["functions"]["ADC1_IN5"]=0;
pinutils.findpin(pins, "PD30", True)["functions"]["ADC1_IN6"]=0;
pinutils.findpin(pins, "PD31", True)["functions"]["ADC1_IN7"]=0;
# Make buttons and LEDs negated
pinutils.findpin(pins, "PD13", True)["functions"]["NEGATED"]=0;
pinutils.findpin(pins, "PD14", True)["functions"]["NEGATED"]=0;
pinutils.findpin(pins, "PD15", True)["functions"]["NEGATED"]=0;
pinutils.findpin(pins, "PD16", True)["functions"]["NEGATED"]=0;
pinutils.findpin(pins, "PD17", True)["functions"]["NEGATED"]=0;
pinutils.findpin(pins, "PD18", True)["functions"]["NEGATED"]=0;
pinutils.findpin(pins, "PD19", True)["functions"]["NEGATED"]=0;
pinutils.findpin(pins, "PD20", True)["functions"]["NEGATED"]=0;
# everything is non-5v tolerant
for pin in pins:
pin["functions"]["3.3"]=0;
  # The boot/reset button will function as a reset button in normal operation. Pin reset on PD21 needs to be enabled on the nRF52832 device for this to work.
return pins
| 36.666667
| 156
| 0.603743
|
5b5b6aef4eafb6ad7adc058a0e434e38bfdea703
| 2,739
|
py
|
Python
|
submission_form/views/group.py
|
NAKKA-K/dw2018_server
|
63d74b1206860d0d2213efbc8a7969be7976c4fd
|
[
"MIT"
] | null | null | null |
submission_form/views/group.py
|
NAKKA-K/dw2018_server
|
63d74b1206860d0d2213efbc8a7969be7976c4fd
|
[
"MIT"
] | 6
|
2018-02-08T12:26:04.000Z
|
2018-02-09T06:14:12.000Z
|
submission_form/views/group.py
|
NAKKA-K/dw2018_server
|
63d74b1206860d0d2213efbc8a7969be7976c4fd
|
[
"MIT"
] | null | null | null |
from django.views import generic
from django.urls import reverse_lazy
from django.http import Http404
from django.shortcuts import get_object_or_404
from submission_form.views.LoginRequiredMessageMixin import LoginRequiredMessageMixin
from submission_form.models import Group, Organization  # Organization (used below) is assumed to live in submission_form.models
from submission_form.forms import GroupForm
# NOTE: StudentOrTeacherGetter is referenced in the views below but is not
# imported anywhere in this file; its defining module is not shown here.
class GroupIndexView(LoginRequiredMessageMixin, generic.ListView):
"""科目の一覧."""
model = Group
template_name = 'submission_form/group_list.html'
context_object_name='group_list'
paginate_by = 20
def get_queryset(self):
"""所属Orgの科目のみ抽出"""
try:
if self.request.session['user_info']['org'] is None:
raise Group.DoesNotExist
return Group.objects\
.filter(organization_id = self.request.session['user_info']['org'])
except Group.DoesNotExist:
return None
class GroupCreateView(LoginRequiredMessageMixin, generic.CreateView):
"""科目の作成."""
model = Group
template_name = 'submission_form/group_form.html'
form_class = GroupForm
success_url = reverse_lazy('submission_form:group_index')
def get(self, request, **kwargs):
is_teacher = StudentOrTeacherGetter.is_teacher(request.user)
if not is_teacher:
            raise Http404  # If the requester is not a teacher, return PageNotFound
return super().get(request, **kwargs)
def form_valid(self, form):
group = form.save(commit = False)
group.organization_id = \
Organization.objects\
.get(id = self.request.session['user_info']['org'])
group.save()
return super().form_valid(form)
class GroupUpdateView(LoginRequiredMessageMixin, generic.UpdateView):
"""科目名の更新."""
model = Group
template_name = 'submission_form/group_form.html'
form_class = GroupForm
success_url = reverse_lazy('submission_form:group_index')
def get(self, request, **kwargs):
"""先生、所属Orgの科目以外なら404を返す"""
user_info = StudentOrTeacherGetter.getInfo(request.user)
if not request.session['is_teacher']\
or request.session['user_info']['org'] != str(\
get_object_or_404(Group, id = kwargs['pk'])\
.organization_id.id):
raise Http404
return super().get(request, **kwargs)
class GroupDeleteView(LoginRequiredMessageMixin, generic.DeleteView):
"""科目の削除."""
model = Group
context_object_name='group'
template_name = 'submission_form/group_confirm_delete.html'
success_url = reverse_lazy('submission_form:group_index')
def get(self, request, **kwargs):
"""先生、所属Orgの科目以外なら404を返す"""
if not request.session['is_teacher']\
or request.session['user_info']['org'] != str(\
get_object_or_404(Group, id = kwargs['pk'])\
.organization_id.id):
raise Http404
return super().get(request, **kwargs)
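# Illustrative only (not part of the original app): one way the views above could
# be wired into a urls.py. Only the 'group_index' URL name is taken from the
# reverse_lazy() calls in this file; the path strings, the '<int:pk>' converter
# and the other URL names are assumptions.
def _example_urlpatterns():
    from django.urls import path
    return [
        path('groups/', GroupIndexView.as_view(), name='group_index'),
        path('groups/create/', GroupCreateView.as_view(), name='group_create'),
        path('groups/<int:pk>/update/', GroupUpdateView.as_view(), name='group_update'),
        path('groups/<int:pk>/delete/', GroupDeleteView.as_view(), name='group_delete'),
    ]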
| 29.771739
| 85
| 0.711208
|
40e6bd2dc93d6ef96a1c4ac8a6b637abd5bbfc91
| 36,868
|
py
|
Python
|
tests/test_models.py
|
stevearc/flywheel
|
ac6eea314f6d88b593cf809336d8723df0b78f6f
|
[
"MIT"
] | 72
|
2016-09-05T08:54:13.000Z
|
2021-11-14T05:53:46.000Z
|
tests/test_models.py
|
mathcamp/flywheel
|
ac6eea314f6d88b593cf809336d8723df0b78f6f
|
[
"MIT"
] | 29
|
2015-01-25T08:20:31.000Z
|
2016-08-29T19:01:06.000Z
|
tests/test_models.py
|
stevearc/flywheel
|
ac6eea314f6d88b593cf809336d8723df0b78f6f
|
[
"MIT"
] | 19
|
2016-09-09T07:21:05.000Z
|
2021-12-25T05:10:49.000Z
|
""" Tests for models """
import six
import sys
import json
from datetime import datetime
from decimal import Decimal
from mock import patch, ANY
from dynamo3 import ItemUpdate
from flywheel import (Field, Composite, Model, NUMBER, STRING, GlobalIndex,
ConditionalCheckFailedException)
from flywheel.fields.types import UTC, register_type, TypeDefinition
from flywheel.tests import DynamoSystemTest
try:
import unittest2 as unittest # pylint: disable=F0401
except ImportError:
import unittest
# pylint: disable=E1101
class Widget(Model):
""" Test model with composite fields """
__metadata__ = {
'global_indexes': [
GlobalIndex('ts-index', 'userid', 'ts').throughput(1, 1)
],
'throughput': {
'read': 1,
'write': 1,
},
}
userid = Field(hash_key=True)
c_range = Composite('userid', 'id', range_key=True)
c_index = Composite('userid', 'id', index='comp-index')
c_plain = Composite('userid', 'id')
_c_private = Composite('userid', 'id')
id = Field()
ts = Field(data_type=NUMBER)
def __init__(self, userid, id, ts):
super(Widget, self).__init__(userid, id=id, ts=ts)
class Post(Model):
""" Test model with composite fields """
__metadata__ = {
'global_indexes': [
GlobalIndex('score-index', 'c_all', 'score'),
]
}
hkey = Composite('userid', 'id', hash_key=True)
userid = Field()
id = Field()
c_all = Composite('userid', 'id', 'about', 'text')
score = Composite('likes', 'ts', 'deleted', data_type=NUMBER,
merge=lambda x, y, z: None if z else x + y)
likes = Field(data_type=int, default=0)
ts = Field(type=float, default=0)
deleted = Field(type=bool, default=False)
points = Field(type=Decimal, default=Decimal('0'))
about = Field()
text = Field()
tags = Field(data_type=set)
keywords = Composite('text', 'about', data_type=set,
merge=lambda t, a: t.split() + a.split(), coerce=True)
def __init__(self, userid, id, ts, text='foo', about='bar'):
super(Post, self).__init__(userid=userid, id=id, ts=ts, text=text,
about=about)
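# Illustrative only (not an actual test case): what the composite fields above
# evaluate to on an unsaved model, assuming the default ':' merge that
# test_composite_field below also relies on.
def _composite_field_example():
    p = Post('u1', 'p1', ts=2)  # text='foo', about='bar' come from the defaults above
    # p.hkey  -> 'u1:p1'           (userid and id joined with ':')
    # p.c_all -> 'u1:p1:bar:foo'   (userid, id, about, text)
    # p.score -> 2                 (likes + ts, because deleted is False)
    return p.hkey, p.c_all, p.score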
class CustomDataType(TypeDefinition):
""" Custom data type that prepends 'custom:' before the string """
data_type = 'customtype'
ddb_data_type = STRING
def coerce(self, value, force):
return value
def ddb_dump(self, value):
return 'custom:' + value
def ddb_load(self, value):
return value[len('custom:'):]
register_type(CustomDataType)
class CustomPkey(Model):
""" Test model with datetime primary key """
hkey = Field(data_type=CustomDataType, hash_key=True)
text = Field()
class TestComposite(DynamoSystemTest):
""" Tests for composite fields """
models = [Widget, Post]
def test_composite_field(self):
""" Composite fields should be a combination of their components """
w = Widget('a', 'b', 1)
self.assertEquals(w.c_index, 'a:b')
self.assertEquals(w.c_plain, 'a:b')
def test_composite_store(self):
""" Composite fields stored properly in dynamodb """
w = Widget('a', 'b', 1)
self.engine.save(w)
tablename = Widget.meta_.ddb_tablename(self.engine.namespace)
item = six.next(self.dynamo.scan(tablename))
self.assertEquals(item['c_range'], w.c_range)
self.assertEquals(item['c_index'], w.c_index)
self.assertEquals(item['c_plain'], w.c_plain)
def test_no_change_composite_hash(self):
""" Changing the hash key raises an exception """
w = Post('a', 'b', 1)
self.engine.save(w)
with self.assertRaises(AttributeError):
w.userid = 'other'
with self.assertRaises(AttributeError):
w.id = 'other'
def test_update_composite_fields(self):
""" When updating a field all relevant composite fields are updated """
w = Post('a', 'b', 1)
self.engine.save(w)
w.text = 'foobar'
w.sync()
tablename = w.meta_.ddb_tablename(self.engine.namespace)
results = self.dynamo.batch_get(tablename,
[{w.meta_.hash_key.name: w.hk_}])
results = list(results)
self.assertEquals(results[0]['text'], w.text)
self.assertEquals(results[0]['c_all'], w.c_all)
def test_composite_score(self):
""" Composite score should be a combination of subfields """
w = Post('a', 'a', 5)
w.likes = 7
self.assertEquals(w.score, 12)
def test_update_composite_score(self):
""" When updating a field, update score if necessary """
w = Post('a', 'b', 4)
self.engine.save(w)
w.likes += 2
w.sync()
tablename = w.meta_.ddb_tablename(self.engine.namespace)
results = self.dynamo.batch_get(tablename,
[{w.meta_.hash_key.name: w.hk_}])
results = list(results)
self.assertEquals(results[0]['score'], 6)
def test_set_composite_null(self):
""" Composite fields can be set to None """
p = Post('a', 'b', 2)
self.engine.sync(p)
self.assertEquals(p.score, 2)
p.deleted = True
p.sync()
self.assertIsNone(p.score)
result = self.engine(Post).filter(c_all=p.c_all)\
.index('score-index').first()
self.assertIsNone(result)
def test_private_composite(self):
""" Composite fields can be private """
w = Widget('a', 'b', 1)
self.engine.save(w)
self.assertEqual(w.c_plain, w._c_private)
class Article(Model):
""" Super simple test model """
title = Field(hash_key=True)
text = Field()
views = Field(data_type=int)
def __init__(self, title='Drugs win Drug War', **kwargs):
super(Article, self).__init__(title, **kwargs)
class TestModelMutation(DynamoSystemTest):
""" Tests for model mutation methods """
models = [Post, Article, CustomPkey]
def test_save(self):
""" Saving item puts it in the database """
a = Article()
self.engine.save(a)
tablename = a.meta_.ddb_tablename(self.engine.namespace)
result = six.next(self.dynamo.scan(tablename))
self.assertEquals(result['title'], a.title)
self.assertIsNone(result.get('text'))
def test_save_conflict(self):
""" Saving a duplicate item will raise an exception """
a = Article(text='unfortunately')
self.engine.save(a)
a2 = Article(text='obviously')
with self.assertRaises(ConditionalCheckFailedException):
self.engine.save(a2, overwrite=False)
def test_save_overwrite(self):
""" Saving a duplicate item with overwrite=True overwrites existing """
a = Article()
self.engine.save(a)
a2 = Article(text='obviously')
self.engine.save(a2, overwrite=True)
tablename = a.meta_.ddb_tablename(self.engine.namespace)
result = six.next(self.dynamo.scan(tablename))
self.assertEquals(result['title'], a2.title)
self.assertEquals(result['text'], a2.text)
def test_sync_new(self):
""" Sync on a new item will create the item """
p = Post('a', 'b', 4)
self.engine.sync(p, raise_on_conflict=False)
p2 = self.engine.scan(Post).first()
self.assertEquals(p, p2)
def test_conflict_sync_new(self):
""" sync on a new item with raise_on_conflict=True creates item """
p = Post('a', 'b', 4)
self.engine.sync(p, raise_on_conflict=True)
p2 = self.engine.scan(Post).first()
self.assertEquals(p, p2)
def test_sync_merges_fields(self):
""" Syncing two new items with same pkey merges other fields """
a = Article('a')
a.text = 'foobar'
self.engine.sync(a, raise_on_conflict=True)
a2 = Article('a')
a2.views = 3
self.engine.sync(a2, raise_on_conflict=True)
self.assertEquals(a2.text, 'foobar')
self.assertEquals(a2.views, 3)
def test_sync_only_updates_changed(self):
""" Sync only updates fields that have been changed """
with patch.object(self.engine, 'dynamo') as dynamo:
captured_updates = []
def update_item(_, __, updates, *___, **____):
""" Mock update_item and capture the passed updates """
captured_updates.extend(updates)
return {}
dynamo.update_item.side_effect = update_item
p = Post('a', 'b', 4)
self.engine.save(p)
p.ts = 4
p.tags = set('a')
p.points = Decimal('2')
p.sync(raise_on_conflict=False)
self.assertEqual(len(captured_updates), 2)
self.assertTrue(ItemUpdate.put('tags', ANY) in captured_updates)
self.assertTrue(ItemUpdate.put('points', ANY) in captured_updates)
def test_sync_constraints(self):
""" Sync can accept more complex constraints """
p = Post('a', 'b', 4)
self.engine.save(p)
p.ts = 7
p.sync(constraints=[Post.ts < 5])
p2 = self.engine.scan(Post).first()
self.assertEquals(p2.ts, 7)
def test_sync_constraints_fail(self):
""" Sync fails if complex constraints fail """
p = Post('a', 'b', 4)
self.engine.save(p)
p.ts = 7
with self.assertRaises(ConditionalCheckFailedException):
p.sync(constraints=[Post.ts > 5])
def test_sync_constraints_must_raise(self):
""" Sync with constraints fails if raise_on_conflict is False """
p = Post('a', 'b', 4)
with self.assertRaises(ValueError):
self.engine.sync(p, raise_on_conflict=False,
constraints=[Post.ts < 5])
def test_delete(self):
""" Model can delete itself """
p = Post('a', 'b', 4)
self.engine.save(p)
p.delete()
results = self.engine.scan(Post).all()
self.assertEquals(results, [])
def test_delete_no_conflict(self):
""" Delete should delete item if no conflicts """
p = Post('a', 'b', 4)
self.engine.save(p)
p.delete(raise_on_conflict=True)
results = self.engine.scan(Post).all()
self.assertEquals(results, [])
def test_delete_conflict(self):
""" Delete raise_on_conflict=True should raise exception on conflict """
p = Post('a', 'b', 4)
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.ts = 10
p.sync()
with self.assertRaises(ConditionalCheckFailedException):
p2.delete(raise_on_conflict=True)
def test_refresh(self):
""" Refreshing model should refresh data """
p = Post('a', 'b', 4)
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.ts = 10
p.sync()
p2.refresh()
self.assertEquals(p2.ts, p.ts)
def test_refresh_multiple_models(self):
""" Can refresh multiple model types """
p = Post('a', 'b', 4)
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p2.ts = 10
p2.sync()
a = Article(text='unfortunately')
self.engine.save(a)
a2 = self.engine.scan(Article).first()
a2.text = 'obviously'
a2.sync()
self.engine.refresh([a, p])
self.assertEquals(p.ts, p2.ts)
self.assertEquals(a.text, a2.text)
def test_refresh_missing(self):
""" Refreshing a set of models should work even if one is missing """
p1 = Post('a', 'b', 4)
p2 = Post('a', 'c', 5)
p3 = Post('a', 'd', 6)
self.engine.save([p1, p2])
self.engine.refresh([p1, p2, p3])
self.assertEqual(p1.id, 'b')
self.assertEqual(p2.id, 'c')
self.assertEqual(p3.id, 'd')
self.assertEqual(p1.ts, 4)
self.assertEqual(p2.ts, 5)
self.assertEqual(p3.ts, 6)
def test_refresh_custom_pkey(self):
""" Refresh works when model declares custom Primary Key """
p = CustomPkey('key', text='foo')
self.engine.save(p)
p2 = self.engine.scan(CustomPkey).first()
p.text = 'bar'
p.sync()
p2.refresh()
self.assertEquals(p2.text, p.text)
def test_sync_blank(self):
""" Sync creates item even if only primary key is set """
a = Article()
self.engine.sync(a)
tablename = a.meta_.ddb_tablename(self.engine.namespace)
results = list(self.dynamo.scan(tablename))
self.assertEquals(len(results), 1)
result = dict(results[0])
self.assertEquals(result, {
'title': a.title,
})
def test_sync_no_conflict(self):
""" Sync raise_on_conflict=True used just syncs object """
p = Post('a', 'b', 4)
self.engine.save(p)
p.text = "hey"
p.sync(raise_on_conflict=True)
p2 = self.engine.scan(Post).first()
self.assertEquals(p2.text, p.text)
def test_sync_conflict(self):
""" With sync raise_on_conflict=True, parallel writes raise error """
p = Post('a', 'b', 4)
p.text = "foo"
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.text = "hey"
p.sync()
p2.text = "hi"
with self.assertRaises(ConditionalCheckFailedException):
p2.sync(raise_on_conflict=True)
def test_sync_exist_conflict(self):
""" When syncing, double-create raises error """
p = Post('a', 'b', 4)
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.text = "hey"
p.sync()
p2.text = "hi"
with self.assertRaises(ConditionalCheckFailedException):
p2.sync(raise_on_conflict=True)
def test_sync_composite_conflict(self):
""" Sync where composite key conflicts raises error """
p = Post('a', 'b', 0, 'me', 'hi')
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.about = "hey"
p.sync()
p2.text = "hey"
with self.assertRaises(ConditionalCheckFailedException):
p2.sync(raise_on_conflict=True)
def test_sync_update(self):
""" Sync should pull down most recent model """
p = Post('a', 'b', 4)
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.text = "hey"
p.sync()
p2.foobar = 'baz'
p2.sync()
self.assertEquals(p2.text, p.text)
def test_sync_only_update(self):
""" Sync should pull down most recent model even if no changes """
p = Post('a', 'b', 4)
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.text = "hey"
p.sync()
p2.sync()
self.assertEquals(p2.text, p.text)
def test_sync_update_delete(self):
""" Sync should remove any attributes that have been deleted """
p = Post('a', 'b', 4)
p.foobar = 'baz'
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.foobar = None
p.sync()
p2.sync()
with self.assertRaises(AttributeError):
_ = p2.foobar
def test_incr(self):
""" Parallel increments add """
p = Post('a', 'b', 4)
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.incr_(points=5)
p.sync()
p2.incr_(points=3)
p2.sync(raise_on_conflict=False)
self.assertEquals(p2.points, 8)
def test_incr_float(self):
""" Increment works on floats """
p = Post('a', 'b', 4.5)
self.engine.save(p)
p.incr_(ts=5)
self.assertEquals(p.ts, 9.5)
p.sync()
self.assertEquals(p.ts, 9.5)
def test_incr_decimal(self):
""" Increment works on Decimals """
p = Post('a', 'b', 0)
p.points = Decimal('1.5')
self.engine.save(p)
p.incr_(points=2)
self.assertEquals(p.points, Decimal('3.5'))
p.sync()
self.assertEquals(p.points, Decimal('3.5'))
self.assertTrue(isinstance(p.points, Decimal))
def test_incr_no_conflict(self):
""" Parallel increments with raise_on_conflict=True works """
p = Post('a', 'b', 4)
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.incr_(points=5)
p.sync()
p2.incr_(points=3)
p2.sync(raise_on_conflict=True)
self.assertEqual(p2.points, 8)
def test_double_incr(self):
""" Incrementing a field twice should work fine """
p = Post('a', 'b', 4)
p.likes = 2
self.engine.save(p)
p.incr_(likes=5)
p.incr_(likes=3)
self.assertEquals(p.likes, 10)
p.sync()
self.assertEquals(p.likes, 10)
def test_incr_set(self):
""" Increment then set value raises exception """
p = Post('a', 'b', 4)
self.engine.save(p)
p.incr_(likes=7)
with self.assertRaises(ValueError):
p.likes = 2
def test_set_incr(self):
""" Set value then increment raises exception """
p = Post('a', 'b', 4)
self.engine.save(p)
p.likes = 2
with self.assertRaises(ValueError):
p.incr_(likes=5)
def test_incr_read(self):
""" Value changes immediately on incr """
p = Post('a', 'b', ts=4)
self.engine.save(p)
p.incr_(ts=6, likes=3)
self.assertEquals(p.ts, 10)
self.assertEquals(p.likes, 3)
def test_incr_unpersisted(self):
""" Calling incr_ on unpersisted item merges with existing data """
a = Article(views=2)
self.engine.save(a)
a = Article()
a.incr_(views=4)
self.engine.sync(a)
a = self.engine.scan(Article).first()
self.assertEqual(a.views, 6)
def test_incr_composite_piece(self):
""" Incrementing a field will change any dependent composite fields """
p = Post('a', 'b', 0)
self.engine.save(p)
p.incr_(likes=4)
p.sync()
tablename = p.meta_.ddb_tablename(self.engine.namespace)
result = six.next(self.dynamo.scan(tablename))
self.assertEquals(result['ts'], 0)
self.assertEquals(result['likes'], 4)
self.assertEquals(result['score'], 4)
def test_incr_composite(self):
""" Incr a field and sync changes any dependent fields """
p = Post('a', 'b', 0)
self.engine.save(p)
p.incr_(likes=4)
p.sync(raise_on_conflict=True)
tablename = p.meta_.ddb_tablename(self.engine.namespace)
result = six.next(self.dynamo.scan(tablename))
self.assertEquals(result['ts'], 0)
self.assertEquals(result['likes'], 4)
self.assertEquals(result['score'], 4)
def test_no_incr_primary_key(self):
""" Cannot increment a primary key """
p = Post('a', 'b', 0)
self.engine.save(p)
with self.assertRaises(AttributeError):
p.incr_(userid=4)
def test_no_incr_string(self):
""" Cannot increment a string """
p = Post('a', 'b', 0)
self.engine.save(p)
with self.assertRaises(TypeError):
p.incr_(text='hi')
def test_no_incr_composite(self):
""" Cannot increment a composite field """
p = Post('a', 'b', 0)
self.engine.save(p)
with self.assertRaises(TypeError):
p.incr_(score=4)
def test_add_to_set(self):
""" Adding a value to a set should be atomic """
p = Post('a', 'b', 0)
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.add_(tags='a')
p2.add_(tags=set(['b', 'c']))
p.sync()
p2.sync()
self.assertEqual(p2.tags, set(['a', 'b', 'c']))
def test_add_to_set_conflict(self):
""" Concurrent add to set with raise_on_conflict=True works """
p = Post('a', 'b', 0)
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.add_(tags='a')
p2.add_(tags=set(['b', 'c']))
p.sync()
p2.sync(raise_on_conflict=True)
self.assertEqual(p2.tags, set(['a', 'b', 'c']))
def test_add_to_set_presync(self):
""" Adding to a set should update local model value """
p = Post('a', 'b', 0)
p.add_(tags='a')
self.assertEqual(p.tags, set(['a']))
def test_dirty_requires_change(self):
""" Don't mark fields dirty if the value hasn't changed """
p = Post('a', 'b', 0)
p.about = 'foobar'
p.tags = set(['foo'])
self.engine.save(p)
p.about = 'foobar'
p.tags = set(['foo'])
self.assertEqual(p.__dirty__, set())
def test_set_add_conflict(self):
""" This is less a test and more documenting bad behavior """
p = Post('a', 'b', 0)
self.engine.save(p)
# TODO: Right now if you add() and add_() to a set, the add()'s will be
# ignored. It would be nice to at least provide a warning, but
# preferably an error, when this happens.
p.tags.add('foo')
p.add_(tags='bar')
p.sync()
ret = self.engine.scan(Post).one()
self.assertEqual(ret.tags, set(['bar']))
def test_no_add_string(self):
""" Cannot add_ to string fields """
p = Post('a', 'b', 0)
with self.assertRaises(TypeError):
p.add_(about='something')
def test_no_add_number(self):
""" Cannot add_ to number fields """
p = Post('a', 'b', 0)
with self.assertRaises(TypeError):
p.add_(likes=4)
def test_no_add_composite(self):
""" Cannot add_ to composite fields """
p = Post('a', 'b', 0)
with self.assertRaises(TypeError):
p.add_(keywords=4)
def test_remove_from_set_presync(self):
""" Removing from a set should update local model value """
p = Post('a', 'b', 0)
p.tags = set(['a', 'b', 'c'])
self.engine.save(p)
p.remove_(tags=set(['a', 'b']))
self.assertEqual(p.tags, set(['c']))
def test_remove_from_set(self):
""" Removing values from a set should be atomic """
p = Post('a', 'b', 0)
p.tags = set(['a', 'b', 'c', 'd'])
self.engine.save(p)
p2 = self.engine.scan(Post).first()
p.remove_(tags='a')
p2.remove_(tags=set(['b', 'c']))
p.sync()
p2.sync()
self.assertEqual(p2.tags, set(['d']))
def test_remove_set_keyerror(self):
""" Cannot remove missing elements from set """
p = Post('a', 'b', 0)
with self.assertRaises(KeyError):
p.remove_(tags='a')
def test_mutate_set_one_op(self):
""" Can only atomically add or remove in a single update """
p = Post('a', 'b', 0)
p.add_(tags='a')
with self.assertRaises(ValueError):
p.remove_(tags='b')
def test_mutate_set_smart_one_op(self):
""" If adds/removes cancel out, throw no error """
p = Post('a', 'b', 0)
p.add_(tags='a')
p.remove_(tags='a')
self.assertEqual(p.tags, set())
def test_delattr_field(self):
""" Deleting a field sets it to None and deletes it from Dynamo """
a = Article(publication='The Onion')
self.engine.save(a)
del a.text
a.sync()
stored_a = self.engine.scan(Article).first()
self.assertIsNone(stored_a.text)
def test_delattr_private_field(self):
""" Deleting a private field works like normal """
a = Article()
a._foobar = 'foobar'
del a._foobar
self.assertFalse(hasattr(a, '_foobar'))
def test_sync_refresh(self):
""" Syncing a model with no changes will refresh the data """
a = Article(text='foo')
self.engine.save(a)
a2 = self.engine.scan(Article).first()
a2.text = 'bar'
self.engine.sync(a2)
a.sync()
self.assertEqual(a.text, 'bar')
def test_sync_no_read(self):
""" Sync(no_read=True) performs a write and no reads """
a = Article(text='foo')
self.engine.save(a)
a2 = self.engine.scan(Article).first()
a2.text = 'bar'
self.engine.sync(a2)
a.sync(no_read=True)
self.assertEqual(a.text, 'foo')
class TestUpdateField(DynamoSystemTest):
""" Tests for engine.update_field """
models = [Article]
def test_update_field_default_value(self):
""" update_field: Omitting value will use current model value """
a = Article()
self.engine.save(a)
a.text = 'foobar'
self.engine.update_field(a, 'text')
self.assertEqual(a.text, 'foobar')
result = self.engine.scan(Article).first()
self.assertEquals(result.text, a.text)
def test_update_field_value(self):
""" update_field: Can pass in value to set """
a = Article()
self.engine.save(a)
self.engine.update_field(a, 'text', 'foo')
self.assertEqual(a.text, 'foo')
result = self.engine.scan(Article).first()
self.assertEquals(result.text, a.text)
def test_update_field_delete(self):
""" update_field: Passing in None will delete the field """
a = Article(text='foobar')
self.engine.save(a)
self.engine.update_field(a, 'text', None)
self.assertEqual(a.text, None)
result = self.engine.scan(Article).first()
self.assertEquals(result.text, a.text)
def test_update_field_clean(self):
""" update_field: Remove from dirty afterwards """
a = Article()
self.engine.save(a)
a.text = 'foobar'
self.assertIn('text', a.__dirty__)
self.engine.update_field(a, 'text')
self.assertNotIn('text', a.__dirty__)
def test_update_field_limit_clean(self):
""" update_field: Prior unrelated dirty fields stay dirty """
a = Article()
self.engine.save(a)
a.text = 'foobar'
a.views = 5
self.assertIn('views', a.__dirty__)
self.engine.update_field(a, 'text')
self.assertIn('views', a.__dirty__)
def test_update_field_constraints(self):
""" update_field: Can pass in constraints """
a = Article()
self.engine.save(a)
with self.assertRaises(ConditionalCheckFailedException):
self.engine.update_field(a, 'text', 'foobar', constraints=[Article.views > 2])
def test_update_field_add(self):
""" update_field: Can perform an atomic add instead of a put """
a = Article()
self.engine.save(a)
a2 = self.engine.scan(Article).first()
self.engine.update_field(a, 'views', 1, action='ADD')
self.engine.update_field(a2, 'views', 2, action='ADD')
ret = self.engine.scan(Article).first()
self.assertEqual(ret.views, 3)
def test_update_field_delete_action(self):
""" update_field: Can perform an atomic delete instead of a put """
a = Article(text='foobar')
self.engine.save(a)
self.engine.update_field(a, 'text', action='DELETE')
self.assertIsNone(a.text)
result = self.engine.scan(Article).first()
self.assertEqual(result.text, a.text)
def test_update_field_no_eq(self):
""" update_field: No default equality constraint """
a = Article(text='foo')
self.engine.save(a)
a2 = self.engine.scan(Article).first()
a2.text = 'bar'
a2.sync()
self.engine.update_field(a, 'text', 'baz')
ret = self.engine.scan(Article).first()
self.assertEqual(ret.text, 'baz')
class SetModel(Model):
""" Test model with set """
id = Field(hash_key=True)
items = Field(data_type=set)
class TestDefaults(DynamoSystemTest):
""" Test field defaults """
models = [SetModel]
def test_copy_mutable_field_default(self):
""" Model fields should not share any mutable field defaults """
m1 = SetModel('a')
m1.items.add('foo')
self.engine.save(m1)
m2 = SetModel('b')
self.assertTrue(m2.items is not m1.items)
self.assertEqual(m2.items, set())
class Store(Model):
""" Test model for indexes """
__metadata__ = {
'global_indexes': [
GlobalIndex.all('name-index', 'name', 'city'),
GlobalIndex.keys('name-emp-index', 'name', 'num_employees'),
GlobalIndex.include('name-profit-index', 'name', 'monthly_profit',
includes=['name', 'num_employees']),
],
}
city = Field(hash_key=True)
name = Field(range_key=True)
sq_feet = Field(data_type=int).all_index('size-index')
num_employees = Field(data_type=int).keys_index('emp-index')
monthly_profit = Field(data_type=float)\
.include_index('profit-index', ['name', 'num_employees'])
class TestCreate(DynamoSystemTest):
""" Test model throughput settings """
models = [Store]
def tearDown(self):
super(TestCreate, self).tearDown()
Widget.meta_.delete_dynamo_schema(self.dynamo, wait=True)
def _get_index(self, name):
""" Get a specific index from the Store table """
tablename = Store.meta_.ddb_tablename(self.engine.namespace)
desc = self.dynamo.describe_table(tablename)
for index in desc.indexes + desc.global_indexes:
if index.name == name:
return index
def test_create_local_all_index(self):
""" Create a local secondary ALL index """
index = self._get_index('size-index')
self.assertEquals(index.projection_type, 'ALL')
def test_create_local_keys_index(self):
""" Create a local secondary KEYS index """
index = self._get_index('emp-index')
self.assertEquals(index.projection_type, 'KEYS_ONLY')
def test_create_local_include_index(self):
""" Create a local secondary INCLUDE index """
index = self._get_index('profit-index')
self.assertEquals(index.projection_type, 'INCLUDE')
self.assertEquals(index.include_fields, ['name', 'num_employees'])
def test_create_global_all_index(self):
""" Create a global secondary ALL index """
index = self._get_index('name-index')
self.assertEquals(index.projection_type, 'ALL')
def test_create_global_keys_index(self):
""" Create a global secondary KEYS index """
index = self._get_index('name-emp-index')
self.assertEquals(index.projection_type, 'KEYS_ONLY')
def test_create_global_include_index(self):
""" Create a global secondary INCLUDE index """
index = self._get_index('name-profit-index')
self.assertEquals(index.include_fields, ['name', 'num_employees'])
def test_model_throughput(self):
""" Model defines the throughput """
Widget.meta_.create_dynamo_schema(self.dynamo, wait=True)
tablename = Widget.meta_.ddb_tablename()
desc = self.dynamo.describe_table(tablename)
throughput = desc.throughput
self.assertEquals(throughput.read, 1)
self.assertEquals(throughput.write, 1)
for index in desc.global_indexes:
throughput = index.throughput
self.assertEquals(throughput.read, 1)
self.assertEquals(throughput.write, 1)
def test_override_throughput(self):
""" Throughput can be overridden in the create call """
Widget.meta_.create_dynamo_schema(self.dynamo, wait=True, throughput={
'read': 3,
'write': 3,
'ts-index': {
'read': 3,
'write': 3,
},
})
tablename = Widget.meta_.ddb_tablename()
desc = self.dynamo.describe_table(tablename)
throughput = desc.throughput
self.assertEquals(throughput.read, 3)
self.assertEquals(throughput.write, 3)
for index in desc.global_indexes:
throughput = index.throughput
self.assertEquals(throughput.read, 3)
self.assertEquals(throughput.write, 3)
class TestModelMethods(unittest.TestCase):
""" Unit tests for simple model operations """
def test_comparison_with_none(self):
""" Comparing a model to None should not throw exception """
model = Article()
self.assertNotEqual(model, None)
class Bare(Model):
""" Bare-bones test model """
id = Field(hash_key=True)
score = Field(range_key=True, data_type=int)
class TestModelDefaults(unittest.TestCase):
""" Test default model methods. """
def test_default_constructor(self):
""" Model should have a default constructor """
m = Bare()
self.assertIsNone(m.id)
self.assertIsNone(m.score)
def test_default_hash_key(self):
""" Constructor can set hash key """
m = Bare('a')
self.assertEqual(m.id, 'a')
self.assertIsNone(m.score)
def test_default_range_key(self):
""" Constructor can set range key """
m = Bare('a', 5)
self.assertEqual(m.id, 'a')
self.assertEqual(m.score, 5)
def test_constructor_kwargs(self):
""" Can set any parameter with constructor kwargs """
m = Bare(foo='bar')
self.assertEqual(m.foo, 'bar')
def test_too_many_args(self):
""" Too many positional arguments to constructor raises error """
with self.assertRaises(TypeError):
Bare('a', 4, 5)
def test_refresh_no_engine(self):
""" Calling refresh() before model touches engine raises error """
m = Bare('a', 1)
with self.assertRaises(ValueError):
m.refresh()
def test_sync_no_engine(self):
""" Calling sync() before model touches engine raises error """
m = Bare('a', 1)
with self.assertRaises(ValueError):
m.sync()
def test_delete_no_engine(self):
""" Calling delete() before model touches engine raises error """
m = Bare('a', 1)
with self.assertRaises(ValueError):
m.delete()
def test_json(self):
""" Model has default JSON serialization method """
m = Bare('a', 1)
js = m.__json__()
self.assertEqual(js, {
'id': 'a',
'score': 1,
})
def test_equality(self):
""" Models have default equality method using primary key """
m1 = Bare('a', 1)
m2 = Bare('a', 1, foo='bar')
self.assertEqual(m1, m2)
self.assertEqual(hash(m1), hash(m2))
def test_inequality(self):
""" Models have default equality method using primary key """
m1 = Bare('a', 1)
m2 = Bare('a', 2)
self.assertNotEqual(m1, m2)
self.assertNotEqual(hash(m1), hash(m2))
class FloatModel(Model):
""" Test model with floats in the primary key """
hkey = Field(data_type=int, hash_key=True)
rkey = Field(data_type=float, range_key=True)
class TestRefresh(DynamoSystemTest):
""" Test model refresh """
models = [FloatModel]
def test_refresh_floating_point(self):
""" Refresh with floats should not cause problems """
p = FloatModel(4, 4.2932982983292)
self.engine.save(p)
p.refresh()
# If there is a floating point mismatch, an error will be raised by now
class DatetimeModel(Model):
""" Just something with a field that can raise when comparing """
hkey = Field(data_type=int, hash_key=True)
field = Field(data_type=datetime)
class ExplodingComparisons(DynamoSystemTest):
"""Make sure all comparisons are dealt with gracefully.
This came up when comparing datetime objects with different TZ awareness,
but applies to all error raises."""
models = [DatetimeModel]
def setUp(self):
super(ExplodingComparisons, self).setUp()
self.o = DatetimeModel(1, field=datetime.utcnow())
self.engine.save(self.o)
def test_ok(self):
""" Happy case """
self.o.field = datetime.utcnow() # Same TZ awareness, should not raise.
    # Comparing datetimes with == on 3.3 onwards doesn't raise.
if sys.version_info[:2] < (3, 3):
def test_kaboom(self):
""" Sad case """
now = datetime.utcnow().replace(tzinfo=UTC)
# Prove to ourselves this explodes.
with self.assertRaises(TypeError):
# Because pylint was confused about not doing anything with the
# =='s result
bool(self.o.field == now)
self.o.field = now
| 33.364706
| 90
| 0.587366
|
195f984fc06ba5eab16ca795da64734460f70194
| 26,203
|
py
|
Python
|
desdeo_tools/scalarization/GLIDE_II.py
|
phoopies/desdeo-tools
|
d3cb48c16b35114762386ee8368214b4b432eee0
|
[
"MIT"
] | null | null | null |
desdeo_tools/scalarization/GLIDE_II.py
|
phoopies/desdeo-tools
|
d3cb48c16b35114762386ee8368214b4b432eee0
|
[
"MIT"
] | 2
|
2022-01-13T04:05:05.000Z
|
2022-03-12T01:07:03.000Z
|
desdeo_tools/scalarization/GLIDE_II.py
|
phoopies/desdeo-tools
|
d3cb48c16b35114762386ee8368214b4b432eee0
|
[
"MIT"
] | null | null | null |
from abc import abstractmethod
from typing import Union
import numpy as np
class GLIDEError(Exception):
"""Raised when an error related to the ASF classes is encountered.
"""
class GLIDEBase:
"""
Implements the non-differentiable variant of GLIDE-II as proposed in
Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Note:
        Additional constraints produced by the GLIDE-II formulation are implemented
such that if the returned values are negative, the corresponding constraint is
violated. The returned value may be positive. In such cases, the returned value
is a measure of how close or far the corresponding feasible solution is from
violating the constraint.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
self.has_additional_constraints = False
self.utopian = utopian
self.nadir = nadir
self.rho = rho
self.required_keys: dict = {}
self.extras = kwargs
def __call__(self, objective_vector: np.ndarray, preference: dict) -> np.ndarray:
"""Evaluate the scalarization function value based on objective vectors and
DM preference.
Args:
objective_vector (np.ndarray): 2-dimensional array of objective values of solutions.
preference (dict): The preference given by the decision maker. The required
dictionary keys and their meanings can be found in self.required_keys variable.
Returns:
np.ndarray: The scalarized value obtained by using GLIDE-II over
objective_vector.
"""
self.preference = preference
self.objective_vector = np.atleast_2d(objective_vector)
f_minus_q = self.objective_vector - self.q
mu = np.atleast_2d(self.mu)
I_alpha = self.I_alpha
max_term = np.max(mu[:, I_alpha] * f_minus_q[:, I_alpha], axis=1)
sum_term = self.rho * np.sum(self.w * f_minus_q, axis=1)
return max_term + sum_term
def evaluate_constraints(
self, objective_vector: np.ndarray, preference: dict
) -> Union[None, np.ndarray]:
"""Evaluate the additional contraints generated by the GLIDE-II formulation.
Note:
            Additional constraints produced by the GLIDE-II formulation are implemented
such that if the returned values are negative, the corresponding constraint is
violated. The returned value may be positive. In such cases, the returned value
is a measure of how close or far the corresponding feasible solution is from
violating the constraint.
Args:
            objective_vector (np.ndarray): 2-dimensional array of objective values of solutions.
            preference (dict): The preference given by the decision maker. The required
                dictionary keys and their meanings can be found in the self.required_keys variable.
        Returns:
            Union[None, np.ndarray]: Constraint values for each solution (negative values
                mean the corresponding constraint is violated), or None if the chosen
                GLIDE-II variant does not add extra constraints.
"""
if not self.has_additional_constraints:
return None
self.preference = preference
self.objective_vector = np.atleast_2d(objective_vector)
constraints = (
self.epsilon[self.I_epsilon]
+ self.s_epsilon * self.delta_epsilon[self.I_epsilon]
- self.objective_vector[:, self.I_epsilon]
)
return constraints
@property
@abstractmethod
def I_alpha(self):
pass
@property
@abstractmethod
def I_epsilon(self):
pass
@property
@abstractmethod
def mu(self):
pass
@property
@abstractmethod
def q(self):
pass
@property
@abstractmethod
def w(self):
pass
@property
@abstractmethod
def epsilon(self):
pass
@property
@abstractmethod
def s_epsilon(self):
pass
@property
@abstractmethod
def delta_epsilon(self):
pass
class reference_point_method_GLIDE(GLIDEBase):
"""
Implements the reference point method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
self.has_additional_constraints = False
self.__I_alpha = np.full_like(
utopian, dtype=np.bool_, fill_value=True
).flatten()
self.__I_epsilon = np.full_like(
utopian, dtype=np.bool_, fill_value=False
).flatten()
self.__w = 1
self.__mu = 1 / (nadir - utopian)
self.required_keys = {
"reference point": (
"Used to calculate the direction of improvement: "
"a line parallel to the nadir-utopian vector "
"and passing through the reference point. "
"(type: numpy.ndarray)"
)
}
@property
def I_epsilon(self):
return self.__I_epsilon
@property
def I_alpha(self):
return self.__I_alpha
@property
def mu(self):
return self.__mu
@property
def w(self):
return self.__w
@property
def q(self):
return self.preference["reference point"]
@property
def epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def s_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def delta_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
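# Illustrative only (not part of the original module): a minimal usage sketch of
# the reference point variant above. The numbers are made up; the
# 'reference point' key comes from required_keys.
def _example_reference_point_glide():
    utopian = np.array([0.0, 0.0])
    nadir = np.array([10.0, 10.0])
    scalarizer = reference_point_method_GLIDE(utopian=utopian, nadir=nadir)
    preference = {"reference point": np.array([4.0, 4.0])}
    # One scalarized value per row of objective vectors; the optimizer would minimize these.
    return scalarizer(np.array([[2.0, 3.0], [5.0, 1.0]]), preference)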
class GUESS_GLIDE(GLIDEBase):
"""
Implements the GUESS method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
self.has_additional_constraints = False
self.__I_alpha = np.full_like(
utopian, dtype=np.bool_, fill_value=True
).flatten()
self.__I_epsilon = np.full_like(
utopian, dtype=np.bool_, fill_value=False
).flatten()
self.__w = 0
self.required_keys = {
"reference point": (
"Used to calculate the direction of improvement: "
"a line going from the nadir point to the reference point. "
"(type: numpy.ndarray)"
)
}
@property
def I_epsilon(self):
return self.__I_epsilon
@property
def I_alpha(self):
return self.__I_alpha
@property
def mu(self):
return 1 / (self.nadir - self.preference["reference point"])
@property
def w(self):
return self.__w
@property
def q(self):
return self.preference["reference point"]
@property
def epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def s_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def delta_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
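# Illustrative only (not part of the original module): GUESS differs from the
# reference point variant in that mu is recomputed from the nadir point and the
# reference point on every evaluation. Numbers are made up.
def _example_guess_glide():
    utopian = np.array([0.0, 0.0])
    nadir = np.array([10.0, 10.0])
    scalarizer = GUESS_GLIDE(utopian=utopian, nadir=nadir)
    preference = {"reference point": np.array([4.0, 6.0])}
    return scalarizer(np.array([[3.0, 5.0]]), preference)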
class AUG_GUESS_GLIDE(GUESS_GLIDE):
"""
Implements the Augmented GUESS method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
        # NOTE: because of name mangling this assigns _AUG_GUESS_GLIDE__w, while the
        # inherited w property still reads _GUESS_GLIDE__w (== 0), so the augmentation
        # weight set here never takes effect.
        self.__w = 1
class NIMBUS_GLIDE(GLIDEBase):
"""
Implements the NIMBUS method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
self.__mu = self.__w = 1 / (self.nadir - self.utopian)
self.has_additional_constraints = True
self.required_keys = {
"current solution": (
"A solution preferred by the DM currently. " "(type: numpy.ndarray)"
),
"classifications": (
"A list of same length as the number of objectives. Elements can only "
"include some or all of ['<', '<=', '=', '>=', '0']. These classify "
"the different objectives as defined in the NIMBUS or GLIDE-II paper. "
"(type: list)"
),
"levels": (
"A vector containing desirable levels of objectives or constraining bounds "
"depending on the classification. Same length as the number of objectives. "
"(type: numpy.ndarray)"
),
}
@property
def improve_unconstrained(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == "<")[0]
indices[relevant] = True
return indices
@property
def improve_constrained(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == "<=")[0]
indices[relevant] = True
return indices
@property
def satisfactory(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == "=")[0]
indices[relevant] = True
return indices
@property
def relax_constrained(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == ">=")[0]
indices[relevant] = True
return indices
@property
def relax_unconstrained(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == "0")[0]
indices[relevant] = True
return indices
@property
def I_alpha(self):
return self.improve_unconstrained + self.improve_constrained
@property
def I_epsilon(self):
return (
self.improve_unconstrained
+ self.improve_constrained
+ self.satisfactory
+ self.relax_constrained
)
@property
def w(self):
# This was in the paper
return self.__w
# This is what I think it should be. There may be division by zero errors here.
"""return (self.objective_vector / (self.objective_vector - self.q)) / (
self.nadir - self.utopian
)"""
@property
def mu(self):
return self.__mu
@property
def q(self):
q = np.full_like(self.utopian, fill_value=0, dtype=float)
q[self.improve_unconstrained] = self.utopian[self.improve_unconstrained]
q[self.improve_constrained] = self.preference["levels"][
self.improve_constrained
]
return q
@property
def epsilon(self):
e = np.full_like(self.utopian, fill_value=np.nan, dtype=float)
case1 = (
self.improve_constrained + self.improve_unconstrained + self.satisfactory
)
case2 = self.relax_constrained
e[case1] = self.preference["current solution"][case1]
e[case2] = self.preference["levels"][case2]
return e
@property
def s_epsilon(self):
return 0
@property
def delta_epsilon(self):
return np.zeros_like(self.utopian)
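# Illustrative only (not part of the original module): how the NIMBUS variant is
# fed classifications, and how the extra GLIDE-II constraints are checked.
# All numbers are made up; the dictionary keys come from required_keys above.
def _example_nimbus_glide():
    utopian = np.array([0.0, 0.0, 0.0])
    nadir = np.array([10.0, 10.0, 10.0])
    scalarizer = NIMBUS_GLIDE(utopian=utopian, nadir=nadir)
    preference = {
        "current solution": np.array([4.0, 5.0, 6.0]),
        "classifications": ["<", "=", ">="],  # improve f1, keep f2, allow f3 to worsen
        "levels": np.array([0.0, 0.0, 8.0]),  # only the '>=' bound (8.0) is read here
    }
    objective_vectors = np.array([[3.0, 5.0, 6.5]])
    values = scalarizer(objective_vectors, preference)
    # Non-negative entries mean the corresponding constraint is satisfied.
    constraints = scalarizer.evaluate_constraints(objective_vectors, preference)
    return values, constraints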
class STEP_GLIDE(GLIDEBase):
"""
Implements the STEP method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
self.__mu = (self.nadir - self.utopian) / np.max(
np.abs(np.vstack((utopian, nadir))), axis=0
)
self.__w = 0
        # NOTE: I_epsilon is a read-only property on GLIDEBase and STEP_GLIDE does not
        # override it, so this assignment raises AttributeError when STEP_GLIDE is
        # instantiated.
        self.I_epsilon = np.full_like(self.utopian, dtype=np.bool_, fill_value=True)
self.has_additional_constraints = True
self.required_keys = {
"current solution": (
"A solution preferred by the DM currently. " "(type: numpy.ndarray)"
),
"classifications": (
"A list of same length as the number of objectives. Elements can only "
"include some or all of [<=', '=', '>=']. These classify "
"the different objectives as defined in the GLIDE-II paper. "
"(type: list)"
),
"levels": (
"A vector containing desirable levels of objectives or constraining bounds "
"depending on the classification. Same length as the number of objectives. "
"(type: numpy.ndarray)"
),
}
@property
def improve_constrained(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == "<=")[0]
indices[relevant] = True
return indices
@property
def satisfactory(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == "=")[0]
indices[relevant] = True
return indices
@property
def relax_constrained(self):
indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
relevant = np.where(np.array(self.preference["classifications"]) == ">=")[0]
indices[relevant] = True
return indices
@property
def I_alpha(self):
return self.improve_constrained
@property
def w(self):
# This was in the paper
return self.__w
@property
def mu(self):
return self.__mu
@property
def q(self):
q = np.full_like(self.utopian, fill_value=0, dtype=float)
q[self.improve_constrained] = self.utopian[self.improve_constrained]
return q
@property
def epsilon(self):
e = np.full_like(self.utopian, fill_value=np.nan, dtype=float)
case1 = self.improve_constrained + self.satisfactory
case2 = self.relax_constrained
e[case1] = self.preference["current solution"][case1]
e[case2] = self.preference["levels"][case2]
return e
@property
def s_epsilon(self):
return 0
@property
def delta_epsilon(self):
return np.zeros_like(self.utopian)
class STOM_GLIDE(GLIDEBase):
"""
Implements the STOM method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Has no effect on STOM calculation. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
super().__init__(utopian=utopian, nadir=None, rho=rho, **kwargs)
self.has_additional_constraints = False
self.__I_alpha = np.full_like(
utopian, dtype=np.bool_, fill_value=True
).flatten()
self.__I_epsilon = np.full_like(
utopian, dtype=np.bool_, fill_value=False
).flatten()
self.__w = 0
self.required_keys = {
"reference point": (
"Used to calculate the direction of improvement: "
"a line going from the reference point to the utopian point. "
"(type: numpy.ndarray)"
)
}
@property
def I_epsilon(self):
return self.__I_epsilon
@property
def I_alpha(self):
return self.__I_alpha
@property
def mu(self):
return 1 / (self.preference["reference point"] - self.utopian)
@property
def w(self):
return self.__w
@property
def q(self):
return self.utopian
@property
def epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def s_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def delta_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
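# Illustrative only (not part of the original module): in STOM the direction of
# improvement runs from the reference point towards the utopian point, so q is
# the utopian point and mu depends on the reference point. Numbers are made up.
def _example_stom_glide():
    utopian = np.array([0.0, 0.0])
    scalarizer = STOM_GLIDE(utopian=utopian)
    preference = {"reference point": np.array([4.0, 6.0])}
    return scalarizer(np.array([[3.0, 5.0]]), preference)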
class AUG_STOM_GLIDE(STOM_GLIDE):
"""
Implements the Augmented STOM method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Has no effect on STOM calculation. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
super().__init__(utopian=utopian, nadir=None, rho=rho, **kwargs)
self.has_additional_constraints = False
        # NOTE: because of name mangling this assigns _AUG_STOM_GLIDE__w, while the
        # inherited w property still reads _STOM_GLIDE__w (== 0), so the augmentation
        # weight set here never takes effect.
        self.__w = 1
class Tchebycheff_GLIDE(GLIDEBase):
"""
Implements the Tchebycheff method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Not used by this variant. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
super().__init__(utopian=utopian, nadir=None, rho=rho, **kwargs)
self.has_additional_constraints = False
self.__I_alpha = np.full_like(
utopian, dtype=np.bool_, fill_value=True
).flatten()
self.__I_epsilon = np.full_like(
utopian, dtype=np.bool_, fill_value=False
).flatten()
self.__w = 1
self.required_keys = {
"mu": (
"Vector defining the direction of improvement of the scalarizer. "
"(type: numpy.ndarray)"
)
}
@property
def I_epsilon(self):
return self.__I_epsilon
@property
def I_alpha(self):
return self.__I_alpha
@property
def mu(self):
return self.preference["mu"]
@property
def w(self):
return self.__w
@property
def q(self):
return self.utopian
@property
def epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def s_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def delta_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
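# Illustrative only (not part of the original module): the Tchebycheff variant
# takes the weight vector mu directly in the preference dict. Numbers are made up.
def _example_tchebycheff_glide():
    utopian = np.array([0.0, 0.0])
    nadir = np.array([5.0, 10.0])
    scalarizer = Tchebycheff_GLIDE(utopian=utopian)
    preference = {"mu": 1.0 / (nadir - utopian)}
    return scalarizer(np.array([[2.0, 4.0]]), preference)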
class PROJECT_GLIDE(GLIDEBase):
"""
Implements the PROJECT method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
        current_objective_vector (np.ndarray): The objective vector of the solution the DM
            currently has; used together with the 'reference point' preference to build
            the direction of improvement.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self, current_objective_vector: np.ndarray, rho: float = 1e-6, **kwargs
):
super().__init__(utopian=None, nadir=None, rho=rho, **kwargs)
self.current_objective_vector = current_objective_vector
self.has_additional_constraints = False
self.__I_alpha = np.full_like(
current_objective_vector, dtype=np.bool_, fill_value=True
).flatten()
self.__I_epsilon = np.full_like(
current_objective_vector, dtype=np.bool_, fill_value=False
).flatten()
self.__w = 0
@property
def I_epsilon(self):
return self.__I_epsilon
@property
def I_alpha(self):
return self.__I_alpha
@property
def mu(self):
return 1 / np.abs(
self.preference["reference point"] - self.current_objective_vector
)
@property
def w(self):
return self.__w
@property
def q(self):
        # NOTE: self.utopian is always None here because the constructor passes
        # utopian=None to GLIDEBase, so evaluating this scalarizer as written fails
        # when __call__ subtracts q from the objective vectors.
        return self.utopian
@property
def epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def s_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
@property
def delta_epsilon(self):
msg = "This part of the code should not be reached. Contact maintaner."
raise GLIDEError(msg)
| 31.954878
| 107
| 0.626569
|
b2204a3d4efe86db540fb6e0d6a9c08d2dc42190
| 22
|
py
|
Python
|
python/script.py
|
graitz/Hello-world
|
d8a3103673f6c62774bd1b522f9348567ba3a7f9
|
[
"MIT"
] | 2
|
2020-08-04T02:16:29.000Z
|
2021-11-15T11:23:13.000Z
|
python/script.py
|
graitz/Hello-world
|
d8a3103673f6c62774bd1b522f9348567ba3a7f9
|
[
"MIT"
] | null | null | null |
python/script.py
|
graitz/Hello-world
|
d8a3103673f6c62774bd1b522f9348567ba3a7f9
|
[
"MIT"
] | 2
|
2018-10-12T16:40:11.000Z
|
2021-04-05T12:05:36.000Z
|
print('hello, world')
| 11
| 21
| 0.681818
|
dee4f610bc40847805a06d5e0bb669aaa7377501
| 2,159
|
py
|
Python
|
src/custom_auth/management/__init__.py
|
Igor-Kholupko/SAT
|
d29379887b61862d28f3079f7c49c9ff96ddd578
|
[
"Apache-2.0"
] | 4
|
2019-02-08T22:02:12.000Z
|
2019-03-26T21:02:12.000Z
|
src/custom_auth/management/__init__.py
|
Igor-Kholupko/SAT
|
d29379887b61862d28f3079f7c49c9ff96ddd578
|
[
"Apache-2.0"
] | 29
|
2019-03-11T19:04:50.000Z
|
2019-05-14T17:24:40.000Z
|
src/custom_auth/management/__init__.py
|
Igor-Kholupko/SAT
|
d29379887b61862d28f3079f7c49c9ff96ddd578
|
[
"Apache-2.0"
] | 4
|
2019-02-08T13:50:37.000Z
|
2019-03-17T10:10:32.000Z
|
from django.db import transaction
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
class SetUserTypeBaseCommand(BaseCommand):
requires_migrations_checks = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.UserModel = get_user_model()
def add_arguments(self, parser):
parser.add_argument(
'%s' % self.UserModel.USERNAME_FIELD,
help='Specifies the user to add to group.',
)
def handle(self, *args, **options):
if not settings.DEBUG:
raise CommandError("Command available only in DEBUG.")
user = self.UserModel._default_manager.filter(
**{self.UserModel.USERNAME_FIELD: options[self.UserModel.USERNAME_FIELD]}
)
if not user.exists():
raise CommandError("User with specified %s not found." % self.UserModel.USERNAME_FIELD)
self.handle_user(user.first())
def handle_user(self, user):
raise NotImplementedError('subclasses of SetUserTypeBaseCommand must provide a handle_user() method')
def update_groups_permissions(*, apps=None, **kwargs):
if apps is None:
return
try:
Group = apps.get_model('custom_auth', 'Group')
except LookupError:
return
OldGroup = apps.get_model('auth', 'Group')
Permission = apps.get_model('auth', 'Permission')
from django.db.models import Q
from functools import reduce
from custom_auth.consts import GROUP_PERMISSIONS
with transaction.atomic():
for group, permissions in GROUP_PERMISSIONS.items():
group = Group.objects.get_or_create(group_ptr=OldGroup.objects.get_or_create(name=group)[0], name=group)[0]
group.permissions.clear()
group.permissions.add(*Permission.objects.filter(
reduce(Q.__or__, [Q(codename=permission['codename'], content_type__app_label=permission['app'].lower(),
content_type__model=permission['model'].lower()) for permission in permissions])
))
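# Illustrative sketch (assumed example, not from the upstream app): a concrete
# management command only needs to override handle_user(); the base class above
# already handles argument parsing and the username lookup. The "teachers" group
# name and the command placement are hypothetical.
class SetTeacherCommand(SetUserTypeBaseCommand):
    help = 'Adds the specified user to the hypothetical "teachers" group.'
    def handle_user(self, user):
        from django.contrib.auth.models import Group
        group, _ = Group.objects.get_or_create(name='teachers')
        user.groups.add(group)
        self.stdout.write('Added %s to teachers.' % getattr(user, user.USERNAME_FIELD))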
| 38.553571
| 119
| 0.670218
|
7f81b9546015b967ed472415a7104d6ea02beda9
| 3,609
|
py
|
Python
|
lambda/us-east-1_Numbers_Trivia/ask_sdk_model/interfaces/amazonpay/model/response/price.py
|
Techievena/Numbers_Trivia
|
e86daaf7e7bc2c80c703c8496daea6317e986204
|
[
"MIT"
] | 1
|
2019-02-04T21:07:06.000Z
|
2019-02-04T21:07:06.000Z
|
lambda/us-east-1_Numbers_Trivia/ask_sdk_model/interfaces/amazonpay/model/response/price.py
|
Techievena/Numbers_Trivia
|
e86daaf7e7bc2c80c703c8496daea6317e986204
|
[
"MIT"
] | 9
|
2020-03-24T16:32:57.000Z
|
2022-03-11T23:37:22.000Z
|
lambda/us-east-1_Numbers_Trivia/ask_sdk_model/interfaces/amazonpay/model/response/price.py
|
Techievena/Numbers_Trivia
|
e86daaf7e7bc2c80c703c8496daea6317e986204
|
[
"MIT"
] | null | null | null |
# coding: utf-8
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.interfaces.amazonpay.model.v1.price import Price
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
class Price(Price):
"""
This response object specifies amount and currency authorized/captured.
:param amount: Amount authorized/captured.
:type amount: (optional) str
:param currency_code: Currency code for the amount.
:type currency_code: (optional) str
"""
deserialized_types = {
'amount': 'str',
'currency_code': 'str'
}
attribute_map = {
'amount': 'amount',
'currency_code': 'currencyCode'
}
def __init__(self, amount=None, currency_code=None):
# type: (Optional[str], Optional[str]) -> None
"""This response object specifies amount and currency authorized/captured.
:param amount: Amount authorized/captured.
:type amount: (optional) str
:param currency_code: Currency code for the amount.
:type currency_code: (optional) str
"""
self.__discriminator_value = None
super(Price, self).__init__(amount=amount, currency_code=currency_code)
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, Price):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
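# Illustrative usage sketch (assumed example, not from the upstream SDK): the
# values are placeholders. Note that to_dict() keys follow deserialized_types
# (the Python attribute names), while attribute_map holds the wire-format names.
def _price_example():
    price = Price(amount='9.99', currency_code='USD')
    assert price.to_dict() == {'amount': '9.99', 'currency_code': 'USD'}
    assert price == Price(amount='9.99', currency_code='USD')
    return price.to_str()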
| 31.382609
| 96
| 0.59601
|
f42bd7b1a9697958f20f4fbcba65203a2d7ad329
| 247
|
py
|
Python
|
Operators/EqualityOperator.py
|
dsabhrawal/python-examples
|
55b3dd6c9fd0b992bcfe3422765dc80fb143a54b
|
[
"MIT"
] | 1
|
2020-03-01T17:24:20.000Z
|
2020-03-01T17:24:20.000Z
|
Operators/EqualityOperator.py
|
dsabhrawal/python-examples
|
55b3dd6c9fd0b992bcfe3422765dc80fb143a54b
|
[
"MIT"
] | null | null | null |
Operators/EqualityOperator.py
|
dsabhrawal/python-examples
|
55b3dd6c9fd0b992bcfe3422765dc80fb143a54b
|
[
"MIT"
] | null | null | null |
# == , != (Int, float, str, bool)
x,y=6,7
p=6
print(x == y) #False
print(p == x) #True
print(p == 6.0) #True
print('Hello' == 'Hello') #True
#Chaining
print(10 == 20 == 6 == 3) #False
print(1 == 1 == 1 < 2) #True
print(1 != 2 < 3 > 1) #True
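# Chained comparisons evaluate pairwise and join the results with an implicit
# `and`, so the expressions above behave like the explicit forms below
# (illustrative restatement, not part of the original snippet):
print((10 == 20) and (20 == 6) and (6 == 3)) #False, same as 10 == 20 == 6 == 3
print((1 == 1) and (1 == 1) and (1 < 2)) #True, same as 1 == 1 == 1 < 2
print((1 != 2) and (2 < 3) and (3 > 1)) #True, same as 1 != 2 < 3 > 1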
| 15.4375
| 33
| 0.506073
|
25c9d4c2d65b89b4f2417b5b7de65bd0461dc4da
| 13,406
|
py
|
Python
|
buffer.py
|
ManUtdMoon/Safe_Reachability_RL
|
e44a5666b8b2ec45e09a70686becb72933c33ef5
|
[
"MIT"
] | null | null | null |
buffer.py
|
ManUtdMoon/Safe_Reachability_RL
|
e44a5666b8b2ec45e09a70686becb72933c33ef5
|
[
"MIT"
] | null | null | null |
buffer.py
|
ManUtdMoon/Safe_Reachability_RL
|
e44a5666b8b2ec45e09a70686becb72933c33ef5
|
[
"MIT"
] | 1
|
2022-03-06T09:46:40.000Z
|
2022-03-06T09:46:40.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =====================================
# @Time : 2020/6/10
# @Author : Yang Guan (Tsinghua Univ.)
# @FileName: buffer.py
# =====================================
import logging
import random
import numpy as np
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
from utils.segment_tree import SumSegmentTree, MinSegmentTree
class ReplayBuffer(object):
def __init__(self, args, buffer_id):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self.args = args
self.buffer_id = buffer_id
self._storage = []
self._maxsize = self.args.max_buffer_size
self._next_idx = 0
self.replay_starts = self.args.replay_starts
self.replay_batch_size = self.args.replay_batch_size
self.stats = {}
self.replay_times = 0
logger.info('Buffer initialized')
def get_stats(self):
self.stats.update(dict(storage=len(self._storage)))
return self.stats
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1, done, weight):
data = (obs_t, action, reward, obs_tp1, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(obs_t, copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(obs_tp1, copy=False))
dones.append(done)
return np.array(obses_t), np.array(actions), np.array(rewards), \
np.array(obses_tp1), np.array(dones)
def sample_idxes(self, batch_size):
return np.array([random.randint(0, len(self._storage) - 1) for _ in range(batch_size)], dtype=np.int32)
def sample_with_idxes(self, idxes):
return list(self._encode_sample(idxes)) + [idxes,]
def sample(self, batch_size):
idxes = self.sample_idxes(batch_size)
return self.sample_with_idxes(idxes)
def add_batch(self, batch):
for trans in batch:
self.add(*trans, 0)
def replay(self):
if len(self._storage) < self.replay_starts:
return None
if self.buffer_id == 1 and self.replay_times % self.args.buffer_log_interval == 0:
logger.info('Buffer info: {}'.format(self.get_stats()))
self.replay_times += 1
return self.sample(self.replay_batch_size)
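# Illustrative usage sketch (assumed example, not from the upstream project):
# the SimpleNamespace below only fakes the attributes ReplayBuffer actually
# reads, and the field values are placeholders.
def _replay_buffer_example():
    from types import SimpleNamespace
    fake_args = SimpleNamespace(max_buffer_size=1000, replay_starts=4,
                                replay_batch_size=2, buffer_log_interval=10)
    buf = ReplayBuffer(fake_args, buffer_id=1)
    for _ in range(8):
        # transition layout: (obs_t, action, reward, obs_tp1, done, weight)
        buf.add(np.zeros(3), 0, 1.0, np.zeros(3), False, None)
    return buf.replay()  # obses_t, actions, rewards, obses_tp1, dones, idxes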
class ReplayBufferWithCost(object):
def __init__(self, args, buffer_id):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self.args = args
if isinstance(self.args.random_seed, int):
self.set_seed(self.args.random_seed)
self.buffer_id = buffer_id
self._storage = []
self._maxsize = self.args.max_buffer_size
self._next_idx = 0
self.replay_starts = self.args.replay_starts
self.replay_batch_size = self.args.replay_batch_size
self.stats = {}
self.replay_times = 0
logger.info('Buffer initialized')
def set_seed(self, seed):
# self.tf.random.set_seed(seed)
np.random.seed(seed)
random.seed(seed)
# self.env.seed(seed)
def get_stats(self):
self.stats.update(dict(storage=len(self._storage)))
return self.stats
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1, done, cost, sis_info, weight):
data = (obs_t, action, reward, obs_tp1, done, cost, sis_info)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones, costs, sis_infos = [], [], [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done, cost, sis_info = data
obses_t.append(np.array(obs_t, copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(obs_tp1, copy=False))
dones.append(done)
costs.append(cost)
sis_infos.append(sis_info)
return np.array(obses_t), np.array(actions), np.array(rewards), \
np.array(obses_tp1), np.array(dones), np.array(costs), np.array(sis_infos)
def sample_idxes(self, batch_size):
return np.array([random.randint(0, len(self._storage) - 1) for _ in range(batch_size)], dtype=np.int32)
def sample_with_idxes(self, idxes):
return list(self._encode_sample(idxes)) + [idxes,]
def sample(self, batch_size):
idxes = self.sample_idxes(batch_size)
return self.sample_with_idxes(idxes)
def add_batch(self, batch):
for trans in batch:
self.add(*trans, None)
def replay(self):
if len(self._storage) < self.replay_starts:
return None
if self.buffer_id == 1 and self.replay_times % self.args.buffer_log_interval == 0:
logger.info('Buffer info: {}'.format(self.get_stats()))
self.replay_times += 1
return self.sample(self.replay_batch_size)
class PrioritizedReplayBuffer(ReplayBuffer):
def __init__(self, args, buffer_id):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(args, buffer_id)
        assert self.args.replay_alpha > 0
self._alpha = args.replay_alpha
self._beta = args.replay_beta
it_capacity = 1
        while it_capacity < self.args.max_buffer_size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def add(self, obs_t, action, reward, obs_tp1, done, weight):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super(PrioritizedReplayBuffer, self).add(obs_t, action, reward,
obs_tp1, done, weight)
if weight is None:
weight = self._max_priority
self._it_sum[idx] = weight ** self._alpha
self._it_min[idx] = weight ** self._alpha
def _sample_proportional(self, batch_size):
res = []
for _ in range(batch_size):
mass = random.random() * self._it_sum.sum(0, len(self._storage))
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return np.array(res, dtype=np.int32)
def sample_idxes(self, batch_size):
return self._sample_proportional(batch_size)
def sample_with_weights_and_idxes(self, idxes):
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-self._beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-self._beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return list(encoded_sample) + [weights, idxes]
def sample(self, batch_size):
idxes = self.sample_idxes(batch_size)
return self.sample_with_weights_and_idxes(idxes)
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
delta = priority ** self._alpha - self._it_sum[idx]
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
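# Illustrative sketch (assumed example, not from the upstream project) of the
# importance-sampling weights computed in sample_with_weights_and_idxes above:
# with sampling probability p_i = priority_i**alpha / sum_j priority_j**alpha,
# each transition gets the weight (p_i * N)**(-beta), normalised by the weight
# of the minimum-probability transition so that all weights are <= 1.
def _importance_weight_example(priorities=(1.0, 2.0, 4.0), alpha=0.6, beta=0.4):
    priorities = np.asarray(priorities, dtype=np.float64)
    p = priorities ** alpha / np.sum(priorities ** alpha)
    max_weight = (p.min() * len(p)) ** (-beta)
    return (p * len(p)) ** (-beta) / max_weight  # highest priority -> smallest weight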
class PrioritizedReplayBufferWithCost(ReplayBufferWithCost):
def __init__(self, args, buffer_id):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBufferWithCost, self).__init__(args, buffer_id)
assert self.args.replay_alpha > 0
self._alpha = args.replay_alpha
self._beta = args.replay_beta
it_capacity = 1
while it_capacity < self.args.max_buffer_size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def add(self, obs_t, action, reward, obs_tp1, done, cost, weight):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
        # ReplayBufferWithCost.add also expects a sis_info argument; this
        # prioritized variant does not track it, so a placeholder is passed.
        super(PrioritizedReplayBufferWithCost, self).add(obs_t, action, reward,
                                                         obs_tp1, done, cost, None, weight)
if weight is None:
weight = self._max_priority
self._it_sum[idx] = weight ** self._alpha
self._it_min[idx] = weight ** self._alpha
def _sample_proportional(self, batch_size):
res = []
for _ in range(batch_size):
mass = random.random() * self._it_sum.sum(0, len(self._storage))
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return np.array(res, dtype=np.int32)
def sample_idxes(self, batch_size):
return self._sample_proportional(batch_size)
def sample_with_weights_and_idxes(self, idxes):
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-self._beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-self._beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return list(encoded_sample) + [weights, idxes]
def sample(self, batch_size):
idxes = self.sample_idxes(batch_size)
return self.sample_with_weights_and_idxes(idxes)
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
delta = priority ** self._alpha - self._it_sum[idx]
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
def tes_perc():
from train_scripts.train_script import built_FSAC_parser
args = built_FSAC_parser()
buffer = PrioritizedReplayBufferWithCost(args, 0)
for i in range(100):
buffer.add(0, 0, 0, 0, 0, 0, None)
a = buffer.sample(16)
print(a)
if __name__ == '__main__':
tes_perc()
| 35.278947
| 111
| 0.607788
|
c0ab9929a81eae5e157713aead4fc49fabdeb903
| 5,145
|
py
|
Python
|
python/oneflow/compatible/single_client/test/ops/test_dynamic_loss_scale_schedule.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 3,285
|
2020-07-31T05:51:22.000Z
|
2022-03-31T15:20:16.000Z
|
python/oneflow/compatible/single_client/test/ops/test_dynamic_loss_scale_schedule.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 2,417
|
2020-07-31T06:28:58.000Z
|
2022-03-31T23:04:14.000Z
|
python/oneflow/compatible/single_client/test/ops/test_dynamic_loss_scale_schedule.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 520
|
2020-07-31T05:52:42.000Z
|
2022-03-29T02:38:11.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
def dynamic_loss_scale_schedule(
count_not_finite, loss_scale, good_step_counter, increment_period, multiplier, name
):
flow.user_op_builder(name).Op("dynamic_loss_scale_schedule").Input(
"count_not_finite", [count_not_finite]
).Input("loss_scale", [loss_scale]).Input(
"good_step_counter", [good_step_counter]
).Attr(
"increment_period", increment_period
).Attr(
"multiplier", multiplier
).Build().InferAndTryRun()
def _run_test(test_case, device_type, op_param):
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
@flow.global_function(function_config=func_config)
def schedule_job(count_not_finite: oft.Numpy.Placeholder((1,), dtype=flow.int64)):
with flow.scope.placement(device_type, "0:0"):
good_step_counter = flow.get_variable(
name="good_step_counter",
shape=(1,),
dtype=flow.int64,
initializer=flow.constant_initializer(
op_param["good_step_counter_value"], dtype=flow.int64
),
)
loss_scale = flow.get_variable(
name="loss_scale",
shape=(1,),
dtype=flow.float,
initializer=flow.constant_initializer(
op_param["loss_scale_value"], dtype=flow.float
),
)
dynamic_loss_scale_schedule(
count_not_finite,
loss_scale,
good_step_counter,
op_param["increment_period"],
op_param["multiplier"],
"dynamic_schedule",
)
return (good_step_counter, loss_scale)
@flow.global_function(function_config=func_config)
def fetch_job():
with flow.scope.placement(device_type, "0:0"):
good_step_counter = flow.get_variable(
name="good_step_counter",
shape=(1,),
dtype=flow.int64,
initializer=flow.constant_initializer(
op_param["good_step_counter_value"], dtype=flow.int64
),
)
loss_scale = flow.get_variable(
name="loss_scale",
shape=(1,),
dtype=flow.float,
initializer=flow.constant_initializer(
op_param["loss_scale_value"], dtype=flow.float
),
)
return (good_step_counter, loss_scale)
count_not_finite = np.array([op_param["count_not_finite"]]).astype(np.int64)
schedule_job(count_not_finite).get()
(good_step_counter, loss_scale) = fetch_job().get()
assert good_step_counter.numpy() == op_param["result_step"]
assert loss_scale.numpy() == op_param["result_loss_scale"]
@flow.unittest.skip_unless_1n1d()
class TestDynamicLossScaleSchedule(flow.unittest.TestCase):
def test_dynamic_loss_scale_schedule(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["op_param"] = [
{
"count_not_finite": 1,
"good_step_counter_value": 1,
"loss_scale_value": 100.0,
"increment_period": 1,
"multiplier": 2.0,
"result_step": 0,
"result_loss_scale": 50.0,
},
{
"count_not_finite": 0,
"good_step_counter_value": 1,
"loss_scale_value": 100.0,
"increment_period": 1,
"multiplier": 2.0,
"result_step": 0,
"result_loss_scale": 200.0,
},
{
"count_not_finite": 0,
"good_step_counter_value": 1,
"loss_scale_value": 100.0,
"increment_period": 10,
"multiplier": 2.0,
"result_step": 2,
"result_loss_scale": 100.0,
},
]
for arg in GenArgList(arg_dict):
_run_test(*arg)
if __name__ == "__main__":
unittest.main()
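# Illustrative sketch (a hand-written reading of the behaviour implied by the
# three cases above, not the operator's actual implementation):
def _dynamic_loss_scale_schedule_reference(count_not_finite, loss_scale,
                                           good_step_counter, increment_period,
                                           multiplier):
    if count_not_finite > 0:
        # overflow observed: shrink the loss scale, restart the good-step count
        return 0, loss_scale / multiplier
    if good_step_counter + 1 >= increment_period:
        # enough consecutive good steps: grow the loss scale
        return 0, loss_scale * multiplier
    return good_step_counter + 1, loss_scale
assert _dynamic_loss_scale_schedule_reference(1, 100.0, 1, 1, 2.0) == (0, 50.0)
assert _dynamic_loss_scale_schedule_reference(0, 100.0, 1, 1, 2.0) == (0, 200.0)
assert _dynamic_loss_scale_schedule_reference(0, 100.0, 1, 10, 2.0) == (2, 100.0)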
| 35.482759
| 87
| 0.598251
|
c8218e103cfe30abc316175b7480fdb8d5658ac2
| 6,679
|
py
|
Python
|
RBM/AIS_testing.py
|
mattsmart/biomodels
|
237f87489553fa1ebf5c676fab563166dd0c39e9
|
[
"MIT"
] | null | null | null |
RBM/AIS_testing.py
|
mattsmart/biomodels
|
237f87489553fa1ebf5c676fab563166dd0c39e9
|
[
"MIT"
] | null | null | null |
RBM/AIS_testing.py
|
mattsmart/biomodels
|
237f87489553fa1ebf5c676fab563166dd0c39e9
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
def run_tf_example_A():
# EXAMPLE CODE
# https://www.tensorflow.org/probability/api_docs/python/tfp/mcmc/sample_annealed_importance_chain
tfd = tfp.distributions
# Run 100 AIS chains in parallel
num_chains = 100
dims = 20
dtype = np.float32
proposal = tfd.MultivariateNormalDiag(
loc=tf.zeros([dims], dtype=dtype))
target = tfd.TransformedDistribution(
distribution=tfd.Gamma(concentration=dtype(2),
rate=dtype(3)),
bijector=tfp.bijectors.Invert(tfp.bijectors.Exp()),
event_shape=[dims])
chains_state, ais_weights, kernels_results = (
tfp.mcmc.sample_annealed_importance_chain(
num_steps=1000,
proposal_log_prob_fn=proposal.log_prob,
target_log_prob_fn=target.log_prob,
current_state=proposal.sample(num_chains),
make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tlp_fn,
step_size=0.2,
num_leapfrog_steps=2)))
log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
log_true_normalizer = tf.math.lgamma(2.) - 2. * tf.math.log(3.)
print("True", log_true_normalizer)
print("Estimated", log_estimated_normalizer)
def run_test_ising_3spin(beta=2.0, nsteps=10, nchains=100):
# EXAMPLE CODE
# N = 3 spins
# Jij = [[0, 1, 1], [1, 0, 1], [1, 1, 0]] # should behave like ferromagnet
# Z for this case is computable exactly
# Run 100 AIS chains in parallel
num_chains = nchains
dims = 1 # use p-form continuous rep. for integral
dims_N = 3
dtype = tf.float32
# fix target model
#Jij = np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0]]) # TODO add diagonals compare
Jij = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) # TODO add diagonals compare, will change ln Z by adding beta/2.0 * Tr(J)
WEIGHTS = np.array([[1.0, 1.0, 1.0]]).T
Jij_tf = tf.convert_to_tensor(Jij, dtype=dtype)
WEIGHTS_tf = tf.convert_to_tensor(WEIGHTS, dtype=dtype)
"""
def proposal_log_prob_fn(*states):
# given vector size N ints, return scalar for each chain
#fvals = [1.0] * len(states)
fvals = tf.ones(len(states), dtype=tf.float32)
return fvals
def target_log_prob_fn(*states):
# TODO 1: the state must be binary but appears as floats (when printed during the sim)
# maybe use Metropolis Hastings instead of HMC (requires continuous p(x))
# TODO 2: if the state must be continuous, maybe we switch to the p-dim hidden variable form and treat the integrand as e^S(h) and use S(h) as our log-prob
# given vector size N ints, return scalar for each chain
fvals = [0.0] * len(states)
for idx, state in enumerate(states):
print(Jij_tf)
print('BINARY?', state)
#print(tf.tensordot(Jij_tf, state, 1))
negative_energy = 0.5 * tf.tensordot(state, tf.tensordot(Jij_tf, state, 1), 1)
print(negative_energy)
fvals[idx] = beta * negative_energy
fvals = tf.convert_to_tensor(fvals, dtype=tf.float32)
return fvals
init_state = [0] * num_chains
for idx in range(num_chains):
sample_01_convention = np.random.binomial(1, 0.5, 3) # this should sample the uniform distribution on 3 spins
sample = sample_01_convention * 2 - 1
init_state[idx] = tf.convert_to_tensor(sample, dtype=dtype)
"""
tfd = tfp.distributions
proposal = tfd.MultivariateNormalDiag(loc=tf.zeros([dims], dtype=dtype))
proposal_log_prob_fn = proposal.log_prob
target_log_prob_const = dims_N * tf.math.log( 2.0 ) - (dims / 2.0) * tf.math.log( 2.0 * np.pi / beta)
print("target_log_prob_const", target_log_prob_const)
def target_log_prob_fn(hidden_states):
# TODO 1: the state must be binary but appears as floats (when printed during the sim)
# maybe use Metropolis Hastings instead of HMC (requires continuous p(x))
# TODO 2: if the state must be continuous, maybe we switch to the p-dim hidden variable form and treat the integrand as e^S(h) and use S(h) as our log-prob
# given vector size N ints, return scalar for each chain
fvals = [0.0] * len(hidden_states)
# TODO tensor speedup test with p > 1
for idx, hidden in enumerate(hidden_states):
term1 = tf.tensordot(hidden, hidden, 1)
cosh_arg = beta * tf.tensordot(WEIGHTS_tf, hidden, 1)
log_cosh_vec = tf.math.log( tf.math.cosh(cosh_arg) )
term2 = tf.math.reduce_sum(log_cosh_vec)
fvals[idx] = - (beta / 2.0) * term1 + term2
fvals = tf.convert_to_tensor(fvals, dtype=dtype) + target_log_prob_const
return fvals
# draw 100 samples from the proposal distribution
init_state = proposal.sample(num_chains)
#print(type(init_state))
#print(init_state)
#print('.........')
chains_state, ais_weights, kernels_results = (
tfp.mcmc.sample_annealed_importance_chain(
num_steps=nsteps,
proposal_log_prob_fn=proposal_log_prob_fn,
target_log_prob_fn=target_log_prob_fn,
current_state=init_state,
make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=tlp_fn,
step_size=0.2,
num_leapfrog_steps=2)))
log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
# compute true analytically
states = [np.array([-1, -1, -1]),
np.array([-1, -1, 1]),
np.array([-1, 1, -1]),
np.array([-1, 1, 1]),
np.array([ 1, -1, -1]),
np.array([ 1, -1, 1]),
np.array([ 1, 1, -1]),
np.array([ 1, 1, 1])]
beta = beta # TODO care
boltz_factors = [np.exp(0.5 * beta * np.dot(s.T, np.dot(Jij, s))) for s in states]
Z = np.sum(boltz_factors)
log_true_normalizer = np.log(Z)
print("True", log_true_normalizer)
print("Estimated", log_estimated_normalizer)
return log_estimated_normalizer
if __name__ == '__main__':
#print("Running example A...")
#run_tf_example_A()
print("Running example B...")
#log_estimated_normalizer = run_test_ising_3spin()
nn = 10
runs = [0] * nn
for idx in range(nn):
runs[idx] = run_test_ising_3spin(beta=2.0)
print(runs)
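# Illustrative check (assumed example, not from the upstream script): for the
# all-ones coupling matrix used above, s^T J s = (sum_i s_i)**2, which equals 9
# for the two aligned states and 1 for the six mixed states, so the exact
# normalizer is Z = 2*exp(4.5*beta) + 6*exp(0.5*beta).
def _exact_log_z_all_ones(beta=2.0):
    Z = 2 * np.exp(0.5 * beta * 9) + 6 * np.exp(0.5 * beta * 1)
    return np.log(Z)  # ~9.69 for beta=2.0; should match the brute-force sum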
| 39.755952
| 163
| 0.624944
|
3d5ac47d55f792793613a1b3e92cb1571ba93fff
| 519
|
py
|
Python
|
tuple_utils.py
|
seanstappas/qbert-reinforcement-learning
|
3d9c8b0821ba6df07d1711c0199a6e876ebc4ad7
|
[
"MIT"
] | 1
|
2020-08-21T03:05:03.000Z
|
2020-08-21T03:05:03.000Z
|
tuple_utils.py
|
seanstappas/qbert-reinforcement-learning
|
3d9c8b0821ba6df07d1711c0199a6e876ebc4ad7
|
[
"MIT"
] | null | null | null |
tuple_utils.py
|
seanstappas/qbert-reinforcement-learning
|
3d9c8b0821ba6df07d1711c0199a6e876ebc4ad7
|
[
"MIT"
] | null | null | null |
def list_to_tuple(lst):
return tuple(tuple(x for x in row) for row in lst)
def list_to_tuple_with_value(lst, row_num, col_num, val):
return tuple(tuple(x if i != row_num or j != col_num else val for j, x in enumerate(row))
for i, row in enumerate(lst))
def hamming_distance(s1, s2):
f1 = flatten_tuples(s1)
f2 = flatten_tuples(s2)
dist = 0
for x1, x2 in zip(f1, f2):
if x1 != x2:
dist += 1
return dist
def flatten_tuples(t):
return sum(t, ())
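# Illustrative usage sketch (assumed example, not from the upstream module),
# using made-up 2x2 grids:
def _tuple_utils_example():
    grid = [[0, 1], [2, 3]]
    t = list_to_tuple(grid)                       # ((0, 1), (2, 3))
    t2 = list_to_tuple_with_value(grid, 1, 0, 9)  # ((0, 1), (9, 3))
    return hamming_distance(t, t2)                # 1: only position (1, 0) differs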
| 22.565217
| 93
| 0.608863
|
b020c8bd71057db94eeee4effbc41d3f89e97fbc
| 4,381
|
py
|
Python
|
client/commands/tests/initialize_test.py
|
terrorizer1980/pyre-check
|
16659c7f6f19f3c364ba3a56e6c582371a8ff348
|
[
"MIT"
] | 1
|
2020-08-08T16:01:55.000Z
|
2020-08-08T16:01:55.000Z
|
client/commands/tests/initialize_test.py
|
terrorizer1980/pyre-check
|
16659c7f6f19f3c364ba3a56e6c582371a8ff348
|
[
"MIT"
] | 4
|
2022-02-15T02:42:33.000Z
|
2022-02-28T01:30:07.000Z
|
client/commands/tests/initialize_test.py
|
terrorizer1980/pyre-check
|
16659c7f6f19f3c364ba3a56e6c582371a8ff348
|
[
"MIT"
] | 1
|
2020-11-22T12:08:51.000Z
|
2020-11-22T12:08:51.000Z
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import os
import sys
import unittest
from unittest.mock import call, mock_open, patch
from ... import commands
from ...commands import initialize
from ...commands.initialize import log
from ...exceptions import EnvironmentException
from .command_test import mock_arguments
class InitializeTest(unittest.TestCase):
@patch.object(log, "get_yes_no_input", return_value=True)
@patch.object(log, "get_optional_input", return_value="")
# pyre-fixme[56]: Argument `tools.pyre.client.commands.initialize.log` to
# decorator factory `unittest.mock.patch.object` could not be resolved in a global
# scope.
@patch.object(log, "get_input", return_value="")
@patch("shutil.which")
@patch("os.path.isfile")
@patch("subprocess.call")
@patch("builtins.open")
def test_initialize(
self,
open,
subprocess_call,
isfile,
which,
_get_input,
_get_optional_input,
get_yes_no_input,
) -> None:
get_yes_no_input.return_value = True
original_directory = "/original/directory"
arguments = mock_arguments()
def exists(path):
if path.endswith(".watchmanconfig"):
return False
elif path.endswith(".pyre_configuration"):
return False
elif path.endswith(".pyre_configuration.local"):
return False
else:
return True
isfile.side_effect = exists
# One for shutil.which("watchman"), another for shutil.which(BINARY_NAME).
which.side_effect = [True, True]
with patch.object(commands.Command, "_call_client"):
initialize.Initialize(arguments, original_directory).run()
subprocess_call.assert_has_calls([call(["watchman", "watch-project", "."])])
open.assert_any_call(os.path.abspath(".watchmanconfig"), "w+")
def exists(path):
return False
isfile.side_effect = exists
file = mock_open()
with patch("builtins.open", file), patch.object(
commands.Command, "_call_client"
), patch.object(
initialize.Initialize, "_get_local_configuration", return_value={}
), patch.object(
initialize.Initialize, "_is_local", return_value=True
):
initialize.Initialize(arguments, original_directory).run()
file().write.assert_has_calls([call("{}"), call("\n")])
def exists(path):
if path.endswith(".pyre_configuration"):
return True
return False
isfile.side_effect = exists
with patch.object(commands.Command, "_call_client"):
with self.assertRaises(EnvironmentException):
initialize.Initialize(arguments, original_directory).run()
with patch.object(commands.Command, "_call_client"), patch.object(
sys, "argv", ["/tmp/pyre/bin/pyre"]
):
which.reset_mock()
which.side_effect = [True, None, "/tmp/pyre/bin/pyre.bin"]
initialize.Initialize(arguments, original_directory)._get_configuration()
which.assert_has_calls(
[call("watchman"), call("pyre.bin"), call("/tmp/pyre/bin/pyre.bin")]
)
def test_get_local_configuration(self) -> None:
original_directory = "/original/directory"
arguments = mock_arguments()
command = initialize.Initialize(arguments, original_directory)
with patch.object(log, "get_yes_no_input") as yes_no_input, patch.object(
log, "get_input", return_value="//target/..."
):
yes_no_input.side_effect = [True]
self.assertEqual(
command._get_local_configuration(), {"targets": ["//target/..."]}
)
with patch.object(log, "get_yes_no_input") as yes_no_input, patch.object(
log, "get_input", return_value="project/a, project/b"
):
yes_no_input.side_effect = [False]
self.assertEqual(
command._get_local_configuration(),
{"source_directories": ["project/a", "project/b"]},
)
| 36.815126
| 88
| 0.619493
|
602647902c185d75b35ebeac333ef457065161ff
| 1,410
|
py
|
Python
|
python/katana/local/__init__.py
|
micah-white/katana
|
99d879884b412ebd643be43b281e9becaf5ae539
|
[
"BSD-3-Clause"
] | 1
|
2021-07-06T15:51:14.000Z
|
2021-07-06T15:51:14.000Z
|
python/katana/local/__init__.py
|
micah-white/katana
|
99d879884b412ebd643be43b281e9becaf5ae539
|
[
"BSD-3-Clause"
] | 2
|
2020-08-15T23:41:58.000Z
|
2020-08-29T04:46:35.000Z
|
python/katana/local/__init__.py
|
micah-white/katana
|
99d879884b412ebd643be43b281e9becaf5ae539
|
[
"BSD-3-Clause"
] | null | null | null |
"""
:py:mod:`katana.local` provides single-machine (local) graph data access, graph loading, and analytics. This API
supports writing new graph algorithms using high-performance parallel loops. This API does not require or utilize a
remote server and cannot load or process graphs that do not fit in memory.
"""
# Register numba overloads
import katana.native_interfacing.pyarrow
from katana.local._shared_mem_sys import initialize
from katana.local.atomic import (
ReduceLogicalAnd,
ReduceLogicalOr,
ReduceMax,
ReduceMin,
ReduceSum,
atomic_add,
atomic_max,
atomic_min,
atomic_sub,
)
from katana.local.barrier import Barrier, SimpleBarrier, get_fast_barrier
from katana.local.datastructures import AllocationPolicy, InsertBag, NUMAArray
from katana.local.dynamic_bitset import DynamicBitset
from katana.local.entity_type_manager import AtomicEntityType, EntityType, EntityTypeManager
from katana.local.graph import Graph, TxnContext
__all__ = [
"Barrier",
"DynamicBitset",
"ReduceSum",
"ReduceLogicalAnd",
"ReduceLogicalOr",
"ReduceMax",
"ReduceMin",
"InsertBag",
"NUMAArray",
"Graph",
"TxnContext",
"SimpleBarrier",
"atomic_add",
"atomic_max",
"atomic_min",
"atomic_sub",
"get_fast_barrier",
"initialize",
"AllocationPolicy",
"EntityType",
"AtomicEntityType",
"EntityTypeManager",
]
| 27.647059
| 115
| 0.734752
|
bbb90e07eb40fe4ac34a904f507eba7d735f8073
| 3,742
|
py
|
Python
|
platformio/downloader.py
|
seryoni/platformio
|
35a602cfefde288ffe72f6d21436ac6785ffcab4
|
[
"Apache-2.0"
] | null | null | null |
platformio/downloader.py
|
seryoni/platformio
|
35a602cfefde288ffe72f6d21436ac6785ffcab4
|
[
"Apache-2.0"
] | null | null | null |
platformio/downloader.py
|
seryoni/platformio
|
35a602cfefde288ffe72f6d21436ac6785ffcab4
|
[
"Apache-2.0"
] | 1
|
2019-07-17T07:16:24.000Z
|
2019-07-17T07:16:24.000Z
|
# Copyright 2014-2016 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from email.utils import parsedate_tz
from math import ceil
from os.path import getsize, join
from time import mktime
import click
import requests
from platformio import app, util
from platformio.exception import (FDSHASumMismatch, FDSizeMismatch,
FDUnrecognizedStatusCode)
class FileDownloader(object):
CHUNK_SIZE = 1024
def __init__(self, url, dest_dir=None):
self._url = url
self._fname = url.split("/")[-1]
self._destination = self._fname
if dest_dir:
self.set_destination(join(dest_dir, self._fname))
self._progressbar = None
self._request = None
# make connection
self._request = requests.get(url, stream=True,
headers=util.get_request_defheaders())
if self._request.status_code != 200:
raise FDUnrecognizedStatusCode(self._request.status_code, url)
def set_destination(self, destination):
self._destination = destination
def get_filepath(self):
return self._destination
def get_lmtime(self):
return self._request.headers['last-modified']
def get_size(self):
if "content-length" not in self._request.headers:
return -1
return int(self._request.headers['content-length'])
def start(self):
itercontent = self._request.iter_content(chunk_size=self.CHUNK_SIZE)
f = open(self._destination, "wb")
if app.is_disabled_progressbar() or self.get_size() == -1:
click.echo("Downloading...")
for chunk in itercontent:
if chunk:
f.write(chunk)
else:
chunks = int(ceil(self.get_size() / float(self.CHUNK_SIZE)))
with click.progressbar(length=chunks, label="Downloading") as pb:
for _ in pb:
f.write(next(itercontent))
f.close()
self._request.close()
self._preserve_filemtime(self.get_lmtime())
def verify(self, sha1=None):
_dlsize = getsize(self._destination)
if self.get_size() != -1 and _dlsize != self.get_size():
raise FDSizeMismatch(_dlsize, self._fname, self.get_size())
if not sha1:
return
dlsha1 = None
try:
result = util.exec_command(["sha1sum", self._destination])
dlsha1 = result['out']
except (OSError, ValueError):
try:
result = util.exec_command(
["shasum", "-a", "1", self._destination])
dlsha1 = result['out']
except (OSError, ValueError):
pass
if dlsha1:
dlsha1 = dlsha1[1:41] if dlsha1.startswith("\\") else dlsha1[:40]
if sha1 != dlsha1:
raise FDSHASumMismatch(dlsha1, self._fname, sha1)
def _preserve_filemtime(self, lmdate):
timedata = parsedate_tz(lmdate)
lmtime = mktime(timedata[:9])
util.change_filemtime(self._destination, lmtime)
def __del__(self):
if self._request:
self._request.close()
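# Illustrative usage sketch (assumed example, not from the upstream project):
# the URL, directory and checksum below are placeholders.
def _file_downloader_example():
    fd = FileDownloader("https://example.com/archive.tar.gz", dest_dir="/tmp")
    print(fd.get_size())   # -1 when the server omits Content-Length
    fd.start()             # streams to /tmp/archive.tar.gz and preserves mtime
    fd.verify(sha1=None)   # pass a hex digest to also check the SHA-1 sum
    return fd.get_filepath()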
| 32.53913
| 77
| 0.619722
|
b221564efd1c433389d184367303ebf61b21f0a3
| 3,725
|
py
|
Python
|
tools/deployment/mmedit2torchserve.py
|
Jian137/mmediting-1
|
e1ac6c93441ec96696d0b530f040b91b809015b6
|
[
"Apache-2.0"
] | 1,884
|
2020-07-09T18:53:43.000Z
|
2022-03-31T12:06:18.000Z
|
tools/deployment/mmedit2torchserve.py
|
Jian137/mmediting-1
|
e1ac6c93441ec96696d0b530f040b91b809015b6
|
[
"Apache-2.0"
] | 622
|
2020-07-09T18:52:27.000Z
|
2022-03-31T14:41:09.000Z
|
tools/deployment/mmedit2torchserve.py
|
Jian137/mmediting-1
|
e1ac6c93441ec96696d0b530f040b91b809015b6
|
[
"Apache-2.0"
] | 361
|
2020-07-09T19:21:47.000Z
|
2022-03-31T09:58:27.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmedit2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts MMEditing model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file:
In MMEditing config format.
The contents vary for each task repository.
checkpoint_file:
In MMEditing checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mmcv.mkdir_or_exist(output_folder)
config = mmcv.Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args_ = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmedit_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
print(args_.model_name)
manifest = ModelExportUtils.generate_manifest_json(args_)
package_model(args_, manifest)
def parse_args():
parser = ArgumentParser(
description='Convert MMEditing models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
        help='If not None, used for naming the `{model_name}.mar` '
        'file that will be created under `output_folder`. '
        'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args_ = parser.parse_args()
return args_
if __name__ == '__main__':
args = parse_args()
if package_model is None:
raise ImportError('`torch-model-archiver` is required.'
'Try: pip install torch-model-archiver')
mmedit2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
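# Illustrative invocation matching the argument parser above (the paths and the
# model name are placeholders):
#
#   python tools/deployment/mmedit2torchserve.py \
#       configs/example_config.py work_dirs/example_ckpt.pth \
#       --output-folder ./mar_models --model-name example_model -f
#
# This would write ./mar_models/example_model.mar, overwriting any existing
# archive because of `-f/--force`.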
| 33.558559
| 76
| 0.616644
|
f6330988454b5b28443d38f260103af524593110
| 2,051
|
py
|
Python
|
warehouse/warehouse/views.py
|
howardyoo/sample-polyglot-app
|
23ccc2ab930d225f3da3886f915b4c8ea2a9e93f
|
[
"Apache-2.0"
] | null | null | null |
warehouse/warehouse/views.py
|
howardyoo/sample-polyglot-app
|
23ccc2ab930d225f3da3886f915b4c8ea2a9e93f
|
[
"Apache-2.0"
] | null | null | null |
warehouse/warehouse/views.py
|
howardyoo/sample-polyglot-app
|
23ccc2ab930d225f3da3886f915b4c8ea2a9e93f
|
[
"Apache-2.0"
] | null | null | null |
import logging
import random
import time
import requests
from concurrent.futures import ThreadPoolExecutor
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.conf import settings
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
tracing = settings.OPENTRACING_TRACING
tracer = tracing.tracer
executor = ThreadPoolExecutor(max_workers=2)
@api_view(http_method_names=["GET"])
def fetch(request, order_num):
time.sleep(1)
if random.randint(1, 1000) == 1000:
msg = "Random Service Unavailable!"
logging.warning(msg)
return Response(msg, status=503)
if not order_num:
msg = "Invalid Order Num!"
logging.warning(msg)
return Response(msg, status=400)
executor.submit(async_fetch, tracer.active_span)
if random.randint(1, 3) == 3:
requests.get(
"http://localhost:" + request.META["SERVER_PORT"] + "/check_stock")
return Response(
data={"status": "Order:" + order_num + " fetched from warehouse"},
status=202)
def async_fetch(parent_span):
with tracer.scope_manager.activate(parent_span, finish_on_close=True):
with tracer.start_active_span('async_fetch') as scope:
time.sleep(2)
if random.randint(1, 1000) == 1000:
scope.span.set_tag("error", "true")
return
@api_view(http_method_names=["GET"])
def check_stock(request):
time.sleep(1)
schedule_checking(tracer.active_span)
return Response(status=202)
def schedule_checking(parent_span):
with tracer.scope_manager.activate(parent_span, finish_on_close=True):
with tracer.start_active_span('schedule_checking') as scope:
time.sleep(1)
executor.submit(async_check, scope.span)
return
def async_check(parent_span):
with tracer.scope_manager.activate(parent_span, finish_on_close=True):
with tracer.start_active_span('async_check'):
time.sleep(1)
return
| 30.61194
| 79
| 0.691858
|
775b2d91124ac01784cac7d01801f2acb2d3b192
| 20,341
|
py
|
Python
|
fastreid/engine/defaults.py
|
polaris-c/fast-reid
|
4486e0466416f60377c35dabe66e1fb95310a563
|
[
"Apache-2.0"
] | 1
|
2020-12-24T09:32:21.000Z
|
2020-12-24T09:32:21.000Z
|
fastreid/engine/defaults.py
|
yangyueren/fast-reid-video
|
539b30d6a0ff4f6d2f5841bcbc49344795c36abe
|
[
"Apache-2.0"
] | null | null | null |
fastreid/engine/defaults.py
|
yangyueren/fast-reid-video
|
539b30d6a0ff4f6d2f5841bcbc49344795c36abe
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains components with some default boilerplate logic users may need
in training / testing. They will not work for everyone, but many users may find them useful.
The behavior of functions/classes in this file is subject to change,
since they are meant to represent the "common default behavior" people need in their projects.
"""
import argparse
import logging
import os
import sys
from collections import OrderedDict
import torch
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
from fastreid.data import build_reid_test_loader, build_reid_train_loader
from fastreid.evaluation import (DatasetEvaluator, ReidEvaluator,
inference_on_dataset, print_csv_format)
from fastreid.modeling.meta_arch import build_model
from fastreid.solver import build_lr_scheduler, build_optimizer
from fastreid.utils import comm
from fastreid.utils.checkpoint import Checkpointer
from fastreid.utils.collect_env import collect_env_info
from fastreid.utils.env import seed_all_rng
from fastreid.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from fastreid.utils.file_io import PathManager
from fastreid.utils.logger import setup_logger
from . import hooks
from .train_loop import SimpleTrainer
__all__ = ["default_argument_parser", "default_setup", "DefaultPredictor", "DefaultTrainer"]
def default_argument_parser():
"""
Create a parser with some common arguments used by fastreid users.
Returns:
argparse.ArgumentParser:
"""
parser = argparse.ArgumentParser(description="fastreid Training")
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument(
"--finetune",
action="store_true",
help="whether to attempt to finetune from the trained model",
)
parser.add_argument(
"--resume",
action="store_true",
help="whether to attempt to resume from the checkpoint directory",
)
parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
parser.add_argument(
"--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
)
# PyTorch still may leave orphan processes in multi-gpu training.
# Therefore we use a deterministic way to obtain port,
# so that users are aware of orphan processes by seeing the port occupied.
port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
parser.add_argument("--dist-url", default="tcp://127.0.0.1:{}".format(port))
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
def default_setup(cfg, args):
"""
Perform some basic common setups at the beginning of a job, including:
1. Set up the detectron2 logger
2. Log basic information about environment, cmdline arguments, and config
3. Backup the config to the output directory
Args:
cfg (CfgNode): the full config to be used
args (argparse.NameSpace): the command line arguments to be logged
"""
output_dir = cfg.OUTPUT_DIR
if comm.is_main_process() and output_dir:
PathManager.mkdirs(output_dir)
rank = comm.get_rank()
setup_logger(output_dir, distributed_rank=rank, name="fvcore")
logger = setup_logger(output_dir, distributed_rank=rank)
logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
logger.info("Environment info:\n" + collect_env_info())
logger.info("Command line arguments: " + str(args))
if hasattr(args, "config_file") and args.config_file != "":
logger.info(
"Contents of args.config_file={}:\n{}".format(
args.config_file, PathManager.open(args.config_file, "r").read()
)
)
logger.info("Running with full config:\n{}".format(cfg))
if comm.is_main_process() and output_dir:
# Note: some of our scripts may expect the existence of
# config.yaml in output directory
path = os.path.join(output_dir, "config.yaml")
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
logger.info("Full config saved to {}".format(os.path.abspath(path)))
# make sure each worker has a different, yet deterministic seed if specified
seed_all_rng()
# cudnn benchmark has large overhead. It shouldn't be used considering the small size of
# typical validation set.
if not (hasattr(args, "eval_only") and args.eval_only):
torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
class DefaultPredictor:
"""
Create a simple end-to-end predictor with the given config.
    The predictor takes a BGR image, resizes it to the specified resolution,
    runs the model and produces the normalized output features.
This predictor takes care of model loading and input preprocessing for you.
If you'd like to do anything more fancy, please refer to its source code
as examples to build and use the model manually.
Attributes:
Examples:
.. code-block:: python
pred = DefaultPredictor(cfg)
inputs = cv2.imread("input.jpg")
outputs = pred(inputs)
"""
def __init__(self, cfg):
self.cfg = cfg.clone() # cfg can be modified by model
self.cfg.defrost()
self.cfg.MODEL.BACKBONE.PRETRAIN = False
self.model = build_model(self.cfg)
self.model.eval()
Checkpointer(self.model).load(cfg.MODEL.WEIGHTS)
def __call__(self, image):
"""
Args:
image (torch.tensor): an image tensor of shape (B, C, H, W).
Returns:
predictions (torch.tensor): the output features of the model
"""
inputs = {"images": image}
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
predictions = self.model(inputs)
# Normalize feature to compute cosine distance
features = F.normalize(predictions)
features = features.cpu().data
return features
class DefaultTrainer(SimpleTrainer):
"""
A trainer with default training logic. Compared to `SimpleTrainer`, it
contains the following logic in addition:
1. Create model, optimizer, scheduler, dataloader from the given config.
2. Load a checkpoint or `cfg.MODEL.WEIGHTS`, if exists.
3. Register a few common hooks.
It is created to simplify the **standard model training workflow** and reduce code boilerplate
for users who only need the standard training workflow, with standard features.
    It means this class makes *many assumptions* about your training logic that
    may easily become invalid in new research. In fact, any assumptions beyond those made in
    :class:`SimpleTrainer` are too much for research.
    The code of this class has been annotated with the restrictive assumptions it makes.
When they do not work for you, you're encouraged to:
1. Overwrite methods of this class, OR:
2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
nothing else. You can then add your own hooks if needed. OR:
3. Write your own training loop similar to `tools/plain_train_net.py`.
Also note that the behavior of this class, like other functions/classes in
this file, is not stable, since it is meant to represent the "common default behavior".
It is only guaranteed to work well with the standard models and training workflow in fastreid.
To obtain more stable behavior, write your own training logic with other public APIs.
Attributes:
scheduler:
checkpointer:
cfg (CfgNode):
Examples:
.. code-block:: python
trainer = DefaultTrainer(cfg)
trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
trainer.train()
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
"""
logger = logging.getLogger("fastreid")
if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for fastreid
setup_logger()
# Assume these objects must be constructed in this order.
data_loader = self.build_train_loader(cfg)
cfg = self.auto_scale_hyperparams(cfg, data_loader)
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
# For training, wrap with DDP. But don't need this for inference.
if comm.get_world_size() > 1:
# ref to https://github.com/pytorch/pytorch/issues/22049 to set `find_unused_parameters=True`
# for part of the parameters is not updated.
model = DistributedDataParallel(
model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
)
super().__init__(model, data_loader, optimizer, cfg.SOLVER.AMP_ENABLED)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
# Assume no other objects need to be checkpointed.
# We can later make it checkpoint the stateful hooks
self.checkpointer = Checkpointer(
# Assume you want to save checkpoints together with logs/statistics
model,
cfg.OUTPUT_DIR,
save_to_disk=comm.is_main_process(),
optimizer=optimizer,
scheduler=self.scheduler,
)
self.start_iter = 0
if cfg.SOLVER.SWA.ENABLED:
self.max_iter = cfg.SOLVER.MAX_ITER + cfg.SOLVER.SWA.ITER
else:
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True`, and last checkpoint exists, resume from it.
Otherwise, load a model specified by the config.
Args:
resume (bool): whether to do resume or not
"""
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration (or iter zero if there's no checkpoint).
checkpoint = self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
if resume and self.checkpointer.has_checkpoint():
self.start_iter = checkpoint.get("iteration", -1) + 1
def build_hooks(self):
"""
Build a list of default hooks, including timing, evaluation,
checkpointing, lr scheduling, precise BN, writing events.
Returns:
list[HookBase]:
"""
logger = logging.getLogger(__name__)
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
cfg.DATASETS.NAMES = tuple([cfg.TEST.PRECISE_BN.DATASET]) # set dataset name for PreciseBN
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(self.optimizer, self.scheduler),
]
if cfg.SOLVER.SWA.ENABLED:
ret.append(
hooks.SWA(
cfg.SOLVER.MAX_ITER,
cfg.SOLVER.SWA.PERIOD,
cfg.SOLVER.SWA.LR_FACTOR,
cfg.SOLVER.SWA.ETA_MIN_LR,
cfg.SOLVER.SWA.LR_SCHED,
)
)
if cfg.TEST.PRECISE_BN.ENABLED and hooks.get_bn_modules(self.model):
logger.info("Prepare precise BN dataset")
ret.append(hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
self.model,
# Build a new data loader to not affect training
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
))
if cfg.MODEL.FREEZE_LAYERS != [''] and cfg.SOLVER.FREEZE_ITERS > 0:
freeze_layers = ",".join(cfg.MODEL.FREEZE_LAYERS)
logger.info(f'Freeze layer group "{freeze_layers}" training for {cfg.SOLVER.FREEZE_ITERS:d} iterations')
ret.append(hooks.FreezeLayer(
self.model,
self.optimizer,
cfg.MODEL.FREEZE_LAYERS,
cfg.SOLVER.FREEZE_ITERS,
))
# Do PreciseBN before checkpointer, because it updates the model and need to
# be saved by checkpointer.
# This is not always the best: if checkpointing has a different frequency,
# some checkpoints may have more precise statistics than others.
if comm.is_main_process():
ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
def test_and_save_results():
self._last_eval_results = self.test(self.cfg, self.model)
return self._last_eval_results
# Do evaluation after checkpointer, because then if it fails,
# we can use the saved checkpoint to debug.
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
if comm.is_main_process():
# run writers in the end, so that evaluation metrics are written
ret.append(hooks.PeriodicWriter(self.build_writers(), 200))
return ret
def build_writers(self):
"""
Build a list of writers to be used. By default it contains
writers that write metrics to the screen,
a json file, and a tensorboard event file respectively.
If you'd like a different list of writers, you can overwrite it in
your trainer.
Returns:
list[EventWriter]: a list of :class:`EventWriter` objects.
It is now implemented by:
.. code-block:: python
return [
CommonMetricPrinter(self.max_iter),
JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, "metrics.json")),
TensorboardXWriter(self.cfg.OUTPUT_DIR),
]
"""
# Assume the default print/log frequency.
return [
# It may not always print what you want to see, since it prints "common" metrics only.
CommonMetricPrinter(self.max_iter),
JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, "metrics.json")),
TensorboardXWriter(self.cfg.OUTPUT_DIR),
]
def train(self):
"""
Run training.
Returns:
OrderedDict of results, if evaluation is enabled. Otherwise None.
"""
super().train(self.start_iter, self.max_iter)
if comm.is_main_process():
assert hasattr(
self, "_last_eval_results"
), "No evaluation results obtained during training!"
# verify_results(self.cfg, self._last_eval_results)
return self._last_eval_results
@classmethod
def build_model(cls, cfg):
"""
Returns:
torch.nn.Module:
It now calls :func:`fastreid.modeling.build_model`.
Overwrite it if you'd like a different model.
"""
model = build_model(cfg)
logger = logging.getLogger(__name__)
logger.info("Model:\n{}".format(model))
return model
@classmethod
def build_optimizer(cls, cfg, model):
"""
Returns:
torch.optim.Optimizer:
It now calls :func:`fastreid.solver.build_optimizer`.
Overwrite it if you'd like a different optimizer.
"""
return build_optimizer(cfg, model)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
"""
It now calls :func:`fastreid.solver.build_lr_scheduler`.
Overwrite it if you'd like a different scheduler.
"""
return build_lr_scheduler(cfg, optimizer)
@classmethod
def build_train_loader(cls, cfg):
"""
Returns:
iterable
It now calls :func:`fastreid.data.build_detection_train_loader`.
Overwrite it if you'd like a different data loader.
"""
logger = logging.getLogger(__name__)
logger.info("Prepare training set")
return build_reid_train_loader(cfg)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Returns:
iterable
It now calls :func:`fastreid.data.build_detection_test_loader`.
Overwrite it if you'd like a different data loader.
"""
return build_reid_test_loader(cfg, dataset_name)
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_dir=None):
data_loader, num_query = cls.build_test_loader(cfg, dataset_name)
return data_loader, ReidEvaluator(cfg, num_query, output_dir)
@classmethod
def test(cls, cfg, model):
"""
Args:
cfg (CfgNode):
model (nn.Module):
Returns:
dict: a dict of result metrics
"""
logger = logging.getLogger(__name__)
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TESTS):
logger.info("Prepare testing set")
try:
data_loader, evaluator = cls.build_evaluator(cfg, dataset_name)
except NotImplementedError:
                logger.warning(
                    "No evaluator found. Implement its `build_evaluator` method."
                )
results[dataset_name] = {}
continue
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results
)
print_csv_format(results)
if len(results) == 1: results = list(results.values())[0]
return results
@staticmethod
def auto_scale_hyperparams(cfg, data_loader):
r"""
        This is used to automatically compute the actual number of training iterations,
        because some hyper-parameters, such as MAX_ITER, are given in training epochs rather than iterations,
        so we need to convert those hyper-parameters to training iterations.
"""
cfg = cfg.clone()
frozen = cfg.is_frozen()
cfg.defrost()
iters_per_epoch = len(data_loader.dataset) // cfg.SOLVER.IMS_PER_BATCH
cfg.MODEL.HEADS.NUM_CLASSES = data_loader.dataset.num_classes
cfg.SOLVER.MAX_ITER *= iters_per_epoch
cfg.SOLVER.WARMUP_ITERS *= iters_per_epoch
cfg.SOLVER.FREEZE_ITERS *= iters_per_epoch
cfg.SOLVER.DELAY_ITERS *= iters_per_epoch
for i in range(len(cfg.SOLVER.STEPS)):
cfg.SOLVER.STEPS[i] *= iters_per_epoch
cfg.SOLVER.SWA.ITER *= iters_per_epoch
cfg.SOLVER.SWA.PERIOD *= iters_per_epoch
ckpt_multiple = cfg.SOLVER.CHECKPOINT_PERIOD / cfg.TEST.EVAL_PERIOD
        # Evaluation period must be a multiple of 200 iterations so metrics line up with the tensorboard writer period.
eval_num_mod = (200 - cfg.TEST.EVAL_PERIOD * iters_per_epoch) % 200
cfg.TEST.EVAL_PERIOD = cfg.TEST.EVAL_PERIOD * iters_per_epoch + eval_num_mod
# Change checkpoint saving period consistent with evaluation period.
cfg.SOLVER.CHECKPOINT_PERIOD = int(cfg.TEST.EVAL_PERIOD * ckpt_multiple)
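        # Illustrative arithmetic (hypothetical numbers): with 12936 training images and
        # IMS_PER_BATCH=64, iters_per_epoch = 202, so EVAL_PERIOD=10 epochs -> 2020
        # iterations, padded by 180 to 2200 so it stays a multiple of 200; the checkpoint
        # period is then rescaled by the original checkpoint/eval ratio computed above.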
logger = logging.getLogger(__name__)
logger.info(
f"Auto-scaling the config to num_classes={cfg.MODEL.HEADS.NUM_CLASSES}, "
f"max_Iter={cfg.SOLVER.MAX_ITER}, wamrup_Iter={cfg.SOLVER.WARMUP_ITERS}, "
f"freeze_Iter={cfg.SOLVER.FREEZE_ITERS}, delay_Iter={cfg.SOLVER.DELAY_ITERS}, "
f"step_Iter={cfg.SOLVER.STEPS}, ckpt_Iter={cfg.SOLVER.CHECKPOINT_PERIOD}, "
f"eval_Iter={cfg.TEST.EVAL_PERIOD}."
)
if frozen: cfg.freeze()
return cfg
| 40.199605
| 116
| 0.646478
|
16ae01f0fd02c611bb11f05803a1ef282f9b8acf
| 9,232
|
py
|
Python
|
supervised_learning/decision_tree.py
|
kozo2/ML-From-Scratch-Ruby
|
43e58ae6a0545c58bda9e180272ac14e4a646a1e
|
[
"MIT"
] | 1
|
2018-05-28T15:01:52.000Z
|
2018-05-28T15:01:52.000Z
|
supervised_learning/decision_tree.py
|
kozo2/ML-From-Scratch-Ruby
|
43e58ae6a0545c58bda9e180272ac14e4a646a1e
|
[
"MIT"
] | null | null | null |
supervised_learning/decision_tree.py
|
kozo2/ML-From-Scratch-Ruby
|
43e58ae6a0545c58bda9e180272ac14e4a646a1e
|
[
"MIT"
] | null | null | null |
from __future__ import division, print_function
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
import sys
import os
# Import helper functions
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, dir_path + "/../utils")
from data_manipulation import divide_on_feature
from data_manipulation import train_test_split, standardize
from data_operation import calculate_entropy, accuracy_score
from data_operation import mean_squared_error, calculate_variance
sys.path.insert(0, dir_path + "/../unsupervised_learning/")
from principal_component_analysis import PCA
# Class that represents a decision node or leaf in the decision tree
class DecisionNode():
def __init__(self, feature_i=None, threshold=None,
value=None, true_branch=None, false_branch=None):
self.feature_i = feature_i # Index for the feature that is tested
self.threshold = threshold # Threshold value for feature
self.value = value # Value if the node is a leaf in the tree
self.true_branch = true_branch # 'Left' subtree
self.false_branch = false_branch # 'Right' subtree
# Super class of RegressionTree and ClassificationTree
class DecisionTree(object):
def __init__(self, min_samples_split=2, min_impurity=1e-7,
max_depth=float("inf")):
self.root = None # Root node in dec. tree
# Minimum n of samples to justify split
self.min_samples_split = min_samples_split
# The minimum impurity to justify split
self.min_impurity = min_impurity
# The maximum depth to grow the tree to
self.max_depth = max_depth
# Function to calculate impurity (classif.=>info gain, regr=>variance reduct.)
self._impurity_calculation = None
# Function to determine prediction of y at leaf
self._leaf_value_calculation = None
# If y is nominal
self.one_dim = None
def fit(self, X, y):
# Build tree
self.current_depth = 0
self.one_dim = len(np.shape(y)) == 1
self.root = self._build_tree(X, y)
def _build_tree(self, X, y):
largest_impurity = 0
best_criteria = None # Feature index and threshold
best_sets = None # Subsets of the data
expand_needed = len(np.shape(y)) == 1
if expand_needed:
y = np.expand_dims(y, axis=1)
# Add y as last column of X
X_y = np.concatenate((X, y), axis=1)
n_samples, n_features = np.shape(X)
if n_samples >= self.min_samples_split:
# Calculate the impurity for each feature
for feature_i in range(n_features):
# All values of feature_i
feature_values = np.expand_dims(X[:, feature_i], axis=1)
unique_values = np.unique(feature_values)
# Iterate through all unique values of feature column i and
# calculate the impurity
for threshold in unique_values:
Xy_1, Xy_2 = divide_on_feature(X_y, feature_i, threshold)
if len(Xy_1) > 0 and len(Xy_2) > 0:
y_1 = Xy_1[:, n_features:]
y_2 = Xy_2[:, n_features:]
# Calculate impurity
impurity = self._impurity_calculation(y, y_1, y_2)
                        # If this threshold resulted in a higher information gain than previously
                        # recorded, save the threshold value and the feature index
if impurity > largest_impurity:
largest_impurity = impurity
best_criteria = {
"feature_i": feature_i, "threshold": threshold}
best_sets = {
"left_branch": Xy_1, "right_branch": Xy_2}
# If we have any information gain to go by we build the tree deeper
if self.current_depth < self.max_depth and largest_impurity > self.min_impurity:
leftX = best_sets["left_branch"][:, :n_features]
leftY = best_sets["left_branch"][:, n_features:] # X - all cols. but last, y - last
rightX = best_sets["right_branch"][:, :n_features]
rightY = best_sets["right_branch"][:, n_features:] # X - all cols. but last, y - last
true_branch = self._build_tree(leftX, leftY)
false_branch = self._build_tree(rightX, rightY)
self.current_depth += 1
return DecisionNode(feature_i=best_criteria["feature_i"], threshold=best_criteria[
"threshold"], true_branch=true_branch, false_branch=false_branch)
# We're at leaf => determine value
leaf_value = self._leaf_value_calculation(y)
return DecisionNode(value=leaf_value)
    # Recursively search down the tree and predict the data sample from the
    # value of the leaf that we end up at
def classify_sample(self, x, tree=None):
if tree is None:
tree = self.root
# If we have a value => return prediction
if tree.value is not None:
return tree.value
# Choose the feature that we will test
feature_value = x[tree.feature_i]
# Determine if we will follow left or right branch
branch = tree.false_branch
if isinstance(feature_value, int) or isinstance(feature_value, float):
if feature_value >= tree.threshold:
branch = tree.true_branch
elif feature_value == tree.threshold:
branch = tree.true_branch
# Test subtree
return self.classify_sample(x, branch)
# Classify samples one by one and return the set of labels
def predict(self, X):
y_pred = []
for x in X:
y_pred.append(self.classify_sample(x))
return y_pred
def print_tree(self, tree=None, indent=" "):
if not tree:
tree = self.root
# If we're at leaf => print the label
if tree.value is not None:
print (tree.value)
# Go deeper down the tree
else:
# Print test
print ("%s:%s? " % (tree.feature_i, tree.threshold))
# Print the true scenario
print ("%sT->" % (indent), end="")
self.print_tree(tree.true_branch, indent + indent)
# Print the false scenario
print ("%sF->" % (indent), end="")
self.print_tree(tree.false_branch, indent + indent)
class RegressionTree(DecisionTree):
def _calculate_variance_reduction(self, y, y_1, y_2):
var_tot = calculate_variance(y)
var_1 = calculate_variance(y_1)
var_2 = calculate_variance(y_2)
frac_1 = len(y_1) / len(y)
frac_2 = len(y_2) / len(y)
# Calculate the variance reduction
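        # reduction = Var(y) - (|y_1|/|y|)*Var(y_1) - (|y_2|/|y|)*Var(y_2);
        # a larger reduction means the split yields lower-variance (purer) subsets.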
variance_reduction = var_tot - (frac_1 * var_1 + frac_2 * var_2)
return sum(variance_reduction)
def _mean_of_y(self, y):
return np.mean(y, axis=0)
def fit(self, X, y):
self._impurity_calculation = self._calculate_variance_reduction
self._leaf_value_calculation = self._mean_of_y
super(RegressionTree, self).fit(X, y)
class ClassificationTree(DecisionTree):
def _calculate_information_gain(self, y, y_1, y_2):
# Calculate information gain
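        # info_gain = H(y) - p*H(y_1) - (1 - p)*H(y_2), where p = |y_1|/|y| and H is
        # the entropy; a positive gain means the split reduces label entropy.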
p = len(y_1) / len(y)
entropy = calculate_entropy(y)
info_gain = entropy - p * \
calculate_entropy(y_1) - (1 - p) * \
calculate_entropy(y_2)
return info_gain
def _majority_vote(self, y):
most_common = None
max_count = 0
results = {}
for label in np.unique(y):
count = len(y[y == label])
if count > max_count:
most_common = label
max_count = count
return most_common
def fit(self, X, y):
self._impurity_calculation = self._calculate_information_gain
self._leaf_value_calculation = self._majority_vote
super(ClassificationTree, self).fit(X, y)
def main():
print ("-- Classification Tree --")
data = datasets.load_iris()
X = data.data
y = data.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
clf = ClassificationTree()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print ("Accuracy:", accuracy_score(y_test, y_pred))
pca = PCA()
pca.plot_in_2d(X_test, y_pred)
print ("-- Regression Tree --")
X, y = datasets.make_regression(n_features=1, n_samples=100, bias=0, noise=5)
X_train, X_test, y_train, y_test = train_test_split(standardize(X), y, test_size=0.3)
clf = RegressionTree()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print ("Mean Squared Error:", mean_squared_error(y_test, y_pred))
# Plot the results
plt.scatter(X_test[:, 0], y_test, color='black')
plt.scatter(X_test[:, 0], y_pred, color='green')
plt.show()
if __name__ == "__main__":
main()
| 36.0625
| 100
| 0.608644
|
08b9540d50706bf5ee6c2ca79b06f35bf497ed93
| 23,736
|
py
|
Python
|
tests/components/modbus/test_init.py
|
SergioBPereira/core
|
4501906da369e23b304857b8a3512798696f26a0
|
[
"Apache-2.0"
] | null | null | null |
tests/components/modbus/test_init.py
|
SergioBPereira/core
|
4501906da369e23b304857b8a3512798696f26a0
|
[
"Apache-2.0"
] | 71
|
2020-10-15T06:45:08.000Z
|
2022-03-31T06:02:54.000Z
|
tests/components/modbus/test_init.py
|
SergioBPereira/core
|
4501906da369e23b304857b8a3512798696f26a0
|
[
"Apache-2.0"
] | 2
|
2020-09-09T05:01:51.000Z
|
2020-09-09T05:46:12.000Z
|
"""The tests for the Modbus init.
This file is responsible for testing:
- pymodbus API
- Functionality of class ModbusHub
- Coverage 100%:
__init__.py
const.py
modbus.py
validators.py
baseplatform.py (only BasePlatform)
It uses binary_sensors/sensors to do black box testing of the read calls.
"""
from datetime import timedelta
import logging
from unittest import mock
from pymodbus.exceptions import ModbusException
from pymodbus.pdu import ExceptionResponse, IllegalFunctionRequest
import pytest
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.modbus.const import (
ATTR_ADDRESS,
ATTR_HUB,
ATTR_STATE,
ATTR_UNIT,
ATTR_VALUE,
CALL_TYPE_COIL,
CALL_TYPE_DISCRETE,
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CALL_TYPE_WRITE_COIL,
CALL_TYPE_WRITE_COILS,
CALL_TYPE_WRITE_REGISTER,
CALL_TYPE_WRITE_REGISTERS,
CONF_BAUDRATE,
CONF_BYTESIZE,
CONF_DATA_TYPE,
CONF_INPUT_TYPE,
CONF_MSG_WAIT,
CONF_PARITY,
CONF_STOPBITS,
CONF_SWAP,
CONF_SWAP_BYTE,
CONF_SWAP_WORD,
DEFAULT_SCAN_INTERVAL,
MODBUS_DOMAIN as DOMAIN,
RTUOVERTCP,
SERIAL,
SERVICE_RESTART,
SERVICE_STOP,
SERVICE_WRITE_COIL,
SERVICE_WRITE_REGISTER,
TCP,
UDP,
DataType,
)
from homeassistant.components.modbus.validators import (
duplicate_entity_validator,
duplicate_modbus_validator,
number_validator,
struct_validator,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
CONF_ADDRESS,
CONF_BINARY_SENSORS,
CONF_COUNT,
CONF_DELAY,
CONF_HOST,
CONF_METHOD,
CONF_NAME,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SENSORS,
CONF_STRUCTURE,
CONF_TIMEOUT,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from .conftest import (
TEST_ENTITY_NAME,
TEST_MODBUS_HOST,
TEST_MODBUS_NAME,
TEST_PORT_SERIAL,
TEST_PORT_TCP,
ReadResult,
)
from tests.common import async_fire_time_changed
@pytest.fixture
async def mock_modbus_with_pymodbus(hass, caplog, do_config, mock_pymodbus):
"""Load integration modbus using mocked pymodbus."""
caplog.clear()
caplog.set_level(logging.ERROR)
config = {DOMAIN: do_config}
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
assert DOMAIN in hass.config.components
assert caplog.text == ""
yield mock_pymodbus
async def test_number_validator():
"""Test number validator."""
for value, value_type in [
(15, int),
(15.1, float),
("15", int),
("15.1", float),
(-15, int),
(-15.1, float),
("-15", int),
("-15.1", float),
]:
assert isinstance(number_validator(value), value_type)
try:
number_validator("x15.1")
except (vol.Invalid):
return
pytest.fail("Number_validator not throwing exception")
@pytest.mark.parametrize(
"do_config",
[
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 2,
CONF_DATA_TYPE: DataType.STRING,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 2,
CONF_DATA_TYPE: DataType.INT,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 2,
CONF_DATA_TYPE: DataType.INT,
CONF_SWAP: CONF_SWAP_BYTE,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 2,
CONF_DATA_TYPE: DataType.CUSTOM,
CONF_STRUCTURE: ">i",
CONF_SWAP: CONF_SWAP_BYTE,
},
],
)
async def test_ok_struct_validator(do_config):
"""Test struct validator."""
try:
struct_validator(do_config)
except vol.Invalid:
pytest.fail("struct_validator unexpected exception")
@pytest.mark.parametrize(
"do_config",
[
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 8,
CONF_DATA_TYPE: DataType.INT,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 8,
CONF_DATA_TYPE: DataType.CUSTOM,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 8,
CONF_DATA_TYPE: DataType.CUSTOM,
CONF_STRUCTURE: "no good",
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 20,
CONF_DATA_TYPE: DataType.CUSTOM,
CONF_STRUCTURE: ">f",
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 1,
CONF_DATA_TYPE: DataType.CUSTOM,
CONF_STRUCTURE: ">f",
CONF_SWAP: CONF_SWAP_WORD,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 1,
CONF_DATA_TYPE: DataType.STRING,
CONF_STRUCTURE: ">f",
CONF_SWAP: CONF_SWAP_WORD,
},
],
)
async def test_exception_struct_validator(do_config):
"""Test struct validator."""
try:
struct_validator(do_config)
except vol.Invalid:
return
pytest.fail("struct_validator missing exception")
@pytest.mark.parametrize(
"do_config",
[
[
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
},
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST + "2",
CONF_PORT: TEST_PORT_TCP,
},
],
[
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
},
{
CONF_NAME: TEST_MODBUS_NAME + "2",
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
},
],
],
)
async def test_duplicate_modbus_validator(do_config):
"""Test duplicate modbus validator."""
duplicate_modbus_validator(do_config)
assert len(do_config) == 1
@pytest.mark.parametrize(
"do_config",
[
[
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 117,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 119,
},
],
}
],
[
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 117,
},
{
CONF_NAME: TEST_ENTITY_NAME + "2",
CONF_ADDRESS: 117,
},
],
}
],
],
)
async def test_duplicate_entity_validator(do_config):
"""Test duplicate entity validator."""
duplicate_entity_validator(do_config)
assert len(do_config[0][CONF_SENSORS]) == 1
@pytest.mark.parametrize(
"do_config",
[
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
},
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
CONF_TIMEOUT: 30,
CONF_DELAY: 10,
},
{
CONF_TYPE: UDP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
},
{
CONF_TYPE: UDP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
CONF_TIMEOUT: 30,
CONF_DELAY: 10,
},
{
CONF_TYPE: RTUOVERTCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
},
{
CONF_TYPE: RTUOVERTCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
CONF_TIMEOUT: 30,
CONF_DELAY: 10,
},
{
CONF_TYPE: SERIAL,
CONF_BAUDRATE: 9600,
CONF_BYTESIZE: 8,
CONF_METHOD: "rtu",
CONF_PORT: TEST_PORT_SERIAL,
CONF_PARITY: "E",
CONF_STOPBITS: 1,
CONF_MSG_WAIT: 100,
},
{
CONF_TYPE: SERIAL,
CONF_BAUDRATE: 9600,
CONF_BYTESIZE: 8,
CONF_METHOD: "rtu",
CONF_PORT: TEST_PORT_SERIAL,
CONF_PARITY: "E",
CONF_STOPBITS: 1,
CONF_NAME: TEST_MODBUS_NAME,
CONF_TIMEOUT: 30,
CONF_DELAY: 10,
},
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_DELAY: 5,
},
[
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
},
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: f"{TEST_MODBUS_NAME}2",
},
{
CONF_TYPE: SERIAL,
CONF_BAUDRATE: 9600,
CONF_BYTESIZE: 8,
CONF_METHOD: "rtu",
CONF_PORT: TEST_PORT_SERIAL,
CONF_PARITY: "E",
CONF_STOPBITS: 1,
CONF_NAME: f"{TEST_MODBUS_NAME}3",
},
],
{
# Special test for scan_interval validator with scan_interval: 0
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 117,
CONF_SCAN_INTERVAL: 0,
}
],
},
],
)
async def test_config_modbus(hass, caplog, mock_modbus_with_pymodbus):
"""Run configuration test for modbus."""
VALUE = "value"
FUNC = "func"
DATA = "data"
SERVICE = "service"
@pytest.mark.parametrize(
"do_config",
[
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: SERIAL,
CONF_BAUDRATE: 9600,
CONF_BYTESIZE: 8,
CONF_METHOD: "rtu",
CONF_PORT: TEST_PORT_SERIAL,
CONF_PARITY: "E",
CONF_STOPBITS: 1,
},
],
)
@pytest.mark.parametrize(
"do_write",
[
{
DATA: ATTR_VALUE,
VALUE: 15,
SERVICE: SERVICE_WRITE_REGISTER,
FUNC: CALL_TYPE_WRITE_REGISTER,
},
{
DATA: ATTR_VALUE,
VALUE: [1, 2, 3],
SERVICE: SERVICE_WRITE_REGISTER,
FUNC: CALL_TYPE_WRITE_REGISTERS,
},
{
DATA: ATTR_STATE,
VALUE: False,
SERVICE: SERVICE_WRITE_COIL,
FUNC: CALL_TYPE_WRITE_COIL,
},
{
DATA: ATTR_STATE,
VALUE: [True, False, True],
SERVICE: SERVICE_WRITE_COIL,
FUNC: CALL_TYPE_WRITE_COILS,
},
],
)
@pytest.mark.parametrize(
"do_return",
[
{VALUE: ReadResult([0x0001]), DATA: ""},
{VALUE: ExceptionResponse(0x06), DATA: "Pymodbus:"},
{VALUE: IllegalFunctionRequest(0x06), DATA: "Pymodbus:"},
{VALUE: ModbusException("fail write_"), DATA: "Pymodbus:"},
],
)
async def test_pb_service_write(
hass, do_write, do_return, caplog, mock_modbus_with_pymodbus
):
"""Run test for service write_register."""
func_name = {
CALL_TYPE_WRITE_COIL: mock_modbus_with_pymodbus.write_coil,
CALL_TYPE_WRITE_COILS: mock_modbus_with_pymodbus.write_coils,
CALL_TYPE_WRITE_REGISTER: mock_modbus_with_pymodbus.write_register,
CALL_TYPE_WRITE_REGISTERS: mock_modbus_with_pymodbus.write_registers,
}
data = {
ATTR_HUB: TEST_MODBUS_NAME,
ATTR_UNIT: 17,
ATTR_ADDRESS: 16,
do_write[DATA]: do_write[VALUE],
}
mock_modbus_with_pymodbus.reset_mock()
caplog.clear()
caplog.set_level(logging.DEBUG)
func_name[do_write[FUNC]].return_value = do_return[VALUE]
await hass.services.async_call(DOMAIN, do_write[SERVICE], data, blocking=True)
assert func_name[do_write[FUNC]].called
assert func_name[do_write[FUNC]].call_args[0] == (
data[ATTR_ADDRESS],
data[do_write[DATA]],
)
if do_return[DATA]:
assert caplog.messages[-1].startswith("Pymodbus:")
@pytest.fixture
async def mock_modbus_read_pymodbus(
hass,
do_group,
do_type,
do_scan_interval,
do_return,
do_exception,
caplog,
mock_pymodbus,
):
"""Load integration modbus using mocked pymodbus."""
caplog.clear()
caplog.set_level(logging.ERROR)
mock_pymodbus.read_coils.side_effect = do_exception
mock_pymodbus.read_discrete_inputs.side_effect = do_exception
mock_pymodbus.read_input_registers.side_effect = do_exception
mock_pymodbus.read_holding_registers.side_effect = do_exception
mock_pymodbus.read_coils.return_value = do_return
mock_pymodbus.read_discrete_inputs.return_value = do_return
mock_pymodbus.read_input_registers.return_value = do_return
mock_pymodbus.read_holding_registers.return_value = do_return
config = {
DOMAIN: [
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
do_group: [
{
CONF_INPUT_TYPE: do_type,
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 51,
CONF_SCAN_INTERVAL: do_scan_interval,
}
],
}
],
}
now = dt_util.utcnow()
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
assert DOMAIN in hass.config.components
assert caplog.text == ""
now = now + timedelta(seconds=DEFAULT_SCAN_INTERVAL + 60)
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
yield mock_pymodbus
@pytest.mark.parametrize(
"do_domain, do_group,do_type,do_scan_interval",
[
[SENSOR_DOMAIN, CONF_SENSORS, CALL_TYPE_REGISTER_HOLDING, 10],
[SENSOR_DOMAIN, CONF_SENSORS, CALL_TYPE_REGISTER_INPUT, 10],
[BINARY_SENSOR_DOMAIN, CONF_BINARY_SENSORS, CALL_TYPE_DISCRETE, 10],
[BINARY_SENSOR_DOMAIN, CONF_BINARY_SENSORS, CALL_TYPE_COIL, 1],
],
)
@pytest.mark.parametrize(
"do_return,do_exception,do_expect_state,do_expect_value",
[
[ReadResult([1]), None, STATE_ON, "1"],
[IllegalFunctionRequest(0x99), None, STATE_UNAVAILABLE, STATE_UNAVAILABLE],
[ExceptionResponse(0x99), None, STATE_UNAVAILABLE, STATE_UNAVAILABLE],
[
ReadResult([1]),
ModbusException("fail read_"),
STATE_UNAVAILABLE,
STATE_UNAVAILABLE,
],
],
)
async def test_pb_read(
hass, do_domain, do_expect_state, do_expect_value, caplog, mock_modbus_read_pymodbus
):
"""Run test for different read."""
# Check state
entity_id = f"{do_domain}.{TEST_ENTITY_NAME}"
state = hass.states.get(entity_id).state
assert hass.states.get(entity_id).state
    # this if is needed to avoid exploding the test matrix (sensors expect a value, binary sensors a state)
if do_domain == SENSOR_DOMAIN:
do_expect = do_expect_value
else:
do_expect = do_expect_state
assert state == do_expect
async def test_pymodbus_constructor_fail(hass, caplog):
"""Run test for failing pymodbus constructor."""
config = {
DOMAIN: [
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
}
]
}
with mock.patch(
"homeassistant.components.modbus.modbus.ModbusTcpClient", autospec=True
) as mock_pb:
caplog.set_level(logging.ERROR)
mock_pb.side_effect = ModbusException("test no class")
assert await async_setup_component(hass, DOMAIN, config) is False
await hass.async_block_till_done()
message = f"Pymodbus: {TEST_MODBUS_NAME}: Modbus Error: test"
assert caplog.messages[0].startswith(message)
assert caplog.records[0].levelname == "ERROR"
assert mock_pb.called
async def test_pymodbus_close_fail(hass, caplog, mock_pymodbus):
"""Run test for failing pymodbus close."""
config = {
DOMAIN: [
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
}
]
}
caplog.set_level(logging.ERROR)
mock_pymodbus.connect.return_value = True
mock_pymodbus.close.side_effect = ModbusException("close fail")
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
# Close() is called as part of teardown
async def test_pymodbus_connect_fail(hass, caplog):
"""Run test for failing pymodbus constructor."""
config = {
DOMAIN: [
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
}
]
}
with mock.patch(
"homeassistant.components.modbus.modbus.ModbusTcpClient", autospec=True
) as mock_pb:
caplog.set_level(logging.ERROR)
ExceptionMessage = "test connect exception"
mock_pb.connect.side_effect = ModbusException(ExceptionMessage)
assert await async_setup_component(hass, DOMAIN, config) is True
async def test_delay(hass, mock_pymodbus):
"""Run test for startup delay."""
# the purpose of this test is to test startup delay
# We "hijiack" a binary_sensor to make a proper blackbox test.
test_delay = 15
test_scan_interval = 5
entity_id = f"{BINARY_SENSOR_DOMAIN}.{TEST_ENTITY_NAME}"
config = {
DOMAIN: [
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
CONF_DELAY: test_delay,
CONF_BINARY_SENSORS: [
{
CONF_INPUT_TYPE: CALL_TYPE_COIL,
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 52,
CONF_SCAN_INTERVAL: test_scan_interval,
},
],
}
]
}
mock_pymodbus.read_coils.return_value = ReadResult([0x01])
now = dt_util.utcnow()
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
# pass first scan_interval
start_time = now
now = now + timedelta(seconds=(test_scan_interval + 1))
with mock.patch(
"homeassistant.helpers.event.dt_util.utcnow", return_value=now, autospec=True
):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
stop_time = start_time + timedelta(seconds=(test_delay + 1))
step_timedelta = timedelta(seconds=1)
while now < stop_time:
now = now + step_timedelta
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
now = now + step_timedelta + timedelta(seconds=2)
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ON
@pytest.mark.parametrize(
"do_config",
[
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 117,
CONF_SCAN_INTERVAL: 0,
}
],
},
],
)
async def test_shutdown(hass, caplog, mock_pymodbus, mock_modbus_with_pymodbus):
"""Run test for shutdown."""
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert mock_pymodbus.close.called
assert caplog.text == ""
@pytest.mark.parametrize(
"do_config",
[
{
CONF_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 51,
}
]
},
],
)
async def test_stop_restart(hass, caplog, mock_modbus):
"""Run test for service stop."""
entity_id = f"{SENSOR_DOMAIN}.{TEST_ENTITY_NAME}"
assert hass.states.get(entity_id).state == STATE_UNKNOWN
hass.states.async_set(entity_id, 17)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == "17"
mock_modbus.reset_mock()
caplog.clear()
data = {
ATTR_HUB: TEST_MODBUS_NAME,
}
await hass.services.async_call(DOMAIN, SERVICE_STOP, data, blocking=True)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
assert mock_modbus.close.called
assert f"modbus {TEST_MODBUS_NAME} communication closed" in caplog.text
mock_modbus.reset_mock()
caplog.clear()
await hass.services.async_call(DOMAIN, SERVICE_RESTART, data, blocking=True)
await hass.async_block_till_done()
assert not mock_modbus.close.called
assert mock_modbus.connect.called
assert f"modbus {TEST_MODBUS_NAME} communication open" in caplog.text
mock_modbus.reset_mock()
caplog.clear()
await hass.services.async_call(DOMAIN, SERVICE_RESTART, data, blocking=True)
await hass.async_block_till_done()
assert mock_modbus.close.called
assert mock_modbus.connect.called
assert f"modbus {TEST_MODBUS_NAME} communication closed" in caplog.text
assert f"modbus {TEST_MODBUS_NAME} communication open" in caplog.text
| 29.449132
| 88
| 0.589906
|
0e6207e397f30549269b9eb7e55f23963fd3329a
| 82
|
py
|
Python
|
merge/bin/__init__.py
|
JD-8678/MLA
|
51e854027be06c2badac94c0a36e4f3ef807d780
|
[
"MIT"
] | 1
|
2020-05-29T10:40:43.000Z
|
2020-05-29T10:40:43.000Z
|
merge/bin/__init__.py
|
JD-8678/MLA
|
51e854027be06c2badac94c0a36e4f3ef807d780
|
[
"MIT"
] | null | null | null |
merge/bin/__init__.py
|
JD-8678/MLA
|
51e854027be06c2badac94c0a36e4f3ef807d780
|
[
"MIT"
] | 1
|
2020-10-08T10:14:26.000Z
|
2020-10-08T10:14:26.000Z
|
# print("bin")
from . import run_file
from . import run_url
from . import run_text
| 20.5
| 22
| 0.743902
|
1a93fabcef864a3b042e303cb590f64ab45dae82
| 558
|
py
|
Python
|
sdk/network/azure-mgmt-dns/azure/mgmt/dns/v2018_05_01/aio/__init__.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/network/azure-mgmt-dns/azure/mgmt/dns/v2018_05_01/aio/__init__.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/network/azure-mgmt-dns/azure/mgmt/dns/v2018_05_01/aio/__init__.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._dns_management_client import DnsManagementClient
__all__ = ['DnsManagementClient']
| 50.727273
| 94
| 0.578853
|
3aec37c1f2aa6a9ae78df892ea77370f686d819c
| 15,759
|
py
|
Python
|
src/werkzeug/sansio/request.py
|
bjgill/werkzeug
|
8bbc58e44fa4c59357bcc8df54267a89a179d5c8
|
[
"BSD-3-Clause"
] | 1
|
2021-04-03T16:49:38.000Z
|
2021-04-03T16:49:38.000Z
|
src/werkzeug/sansio/request.py
|
bjgill/werkzeug
|
8bbc58e44fa4c59357bcc8df54267a89a179d5c8
|
[
"BSD-3-Clause"
] | null | null | null |
src/werkzeug/sansio/request.py
|
bjgill/werkzeug
|
8bbc58e44fa4c59357bcc8df54267a89a179d5c8
|
[
"BSD-3-Clause"
] | null | null | null |
import typing as t
from datetime import datetime
from .._internal import _to_str
from ..datastructures import Accept
from ..datastructures import Authorization
from ..datastructures import CharsetAccept
from ..datastructures import ETags
from ..datastructures import Headers
from ..datastructures import HeaderSet
from ..datastructures import IfRange
from ..datastructures import ImmutableList
from ..datastructures import ImmutableMultiDict
from ..datastructures import LanguageAccept
from ..datastructures import MIMEAccept
from ..datastructures import MultiDict
from ..datastructures import Range
from ..datastructures import RequestCacheControl
from ..http import parse_accept_header
from ..http import parse_authorization_header
from ..http import parse_cache_control_header
from ..http import parse_cookie
from ..http import parse_date
from ..http import parse_etags
from ..http import parse_if_range_header
from ..http import parse_list_header
from ..http import parse_options_header
from ..http import parse_range_header
from ..http import parse_set_header
from ..urls import url_decode
from ..useragents import UserAgent
from ..utils import cached_property
from ..utils import header_property
from ..wsgi import get_content_length
class Request:
"""Represents the non-IO parts of a HTTP request, including the
method, URL info, and headers.
This class is not meant for general use. It should only be used when
implementing WSGI, ASGI, or another HTTP application spec. Werkzeug
provides a WSGI implementation at :cls:`werkzeug.wrappers.Request`.
:param method: The method the request was made with, such as GET.
:param path: The path part of the URL, without the query string.
:param query_string: The optional portion of the URL after the "?".
:param headers: The headers received with the request.
:param scheme: The protocol the request used, such as HTTP or WS.
:param remote_addr: Address of the client sending the request.
:param root_path: Prefix that the application is mounted under. This
is prepended to generated URLs, but is not part of route
matching.
.. versionadded:: 2.0
"""
#: the charset for the request, defaults to utf-8
charset = "utf-8"
#: the error handling procedure for errors, defaults to 'replace'
encoding_errors = "replace"
#: the class to use for `args` and `form`. The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
#: multiple values per key. alternatively it makes sense to use an
#: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
#: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
#: which is the fastest but only remembers the last key. It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class: t.Type[MultiDict] = ImmutableMultiDict
#: The type to be used for dict values from the incoming WSGI
#: environment. (For example for :attr:`cookies`.) By default an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` is used.
#:
#: .. versionchanged:: 1.0.0
#: Changed to ``ImmutableMultiDict`` to support multiple values.
#:
#: .. versionadded:: 0.6
dict_storage_class: t.Type[MultiDict] = ImmutableMultiDict
#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class: t.Type[t.List] = ImmutableList
def __init__(
self,
method: str,
path: str,
query_string: bytes,
headers: Headers,
scheme: str,
remote_addr: t.Optional[str],
root_path: str,
) -> None:
self.method = method.upper()
self.path = "/" + path.lstrip("/")
self.query_string = query_string
self.headers = headers
self.scheme = scheme
self.remote_addr = remote_addr
self.root_path = root_path.rstrip("/")
def __repr__(self) -> str:
return f"<{type(self).__name__} {self.path} [{self.method}]>"
@property
def url_charset(self) -> str:
"""The charset that is assumed for URLs. Defaults to the value
of :attr:`charset`.
.. versionadded:: 0.6
"""
return self.charset
@cached_property
def args(self) -> "MultiDict[str, str]":
"""The parsed URL parameters (the part in the URL after the question
mark).
By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
"""
return url_decode(
self.query_string,
self.url_charset,
errors=self.encoding_errors,
cls=self.parameter_storage_class,
)
@cached_property
def access_route(self) -> t.List[str]:
"""If a forwarded header exists this is a list of all ip addresses
from the client ip to the last proxy server.
"""
if "X-Forwarded-For" in self.headers:
return self.list_storage_class(
parse_list_header(self.headers["X-Forwarded-For"])
)
elif self.remote_addr is not None:
return self.list_storage_class([self.remote_addr])
return self.list_storage_class()
@cached_property
def full_path(self) -> str:
"""Requested path, including the query string."""
return f"{self.path}?{_to_str(self.query_string, self.url_charset)}"
@property
def is_secure(self) -> bool:
"`True` if the request is secure."
return self.scheme in {"https", "wss"}
@cached_property
def cookies(self) -> "ImmutableMultiDict[str, str]":
"""A :class:`dict` with the contents of all cookies transmitted with
the request."""
return parse_cookie( # type: ignore
self.headers.get("Cookie"),
self.charset,
self.encoding_errors,
cls=self.dict_storage_class,
)
# Common Descriptors
content_type = header_property[str](
"Content-Type",
doc="""The Content-Type entity-header field indicates the media
type of the entity-body sent to the recipient or, in the case of
the HEAD method, the media type that would have been sent had
the request been a GET.""",
read_only=True,
)
@cached_property
def content_length(self) -> t.Optional[int]:
"""The Content-Length entity-header field indicates the size of the
entity-body in bytes or, in the case of the HEAD method, the size of
the entity-body that would have been sent had the request been a
GET.
"""
return get_content_length(self.headers)
content_encoding = header_property[str](
"Content-Encoding",
doc="""The Content-Encoding entity-header field is used as a
modifier to the media-type. When present, its value indicates
what additional content codings have been applied to the
entity-body, and thus what decoding mechanisms must be applied
in order to obtain the media-type referenced by the Content-Type
header field.
.. versionadded:: 0.9""",
read_only=True,
)
content_md5 = header_property[str](
"Content-MD5",
doc="""The Content-MD5 entity-header field, as defined in
RFC 1864, is an MD5 digest of the entity-body for the purpose of
providing an end-to-end message integrity check (MIC) of the
entity-body. (Note: a MIC is good for detecting accidental
modification of the entity-body in transit, but is not proof
against malicious attacks.)
.. versionadded:: 0.9""",
read_only=True,
)
referrer = header_property[str](
"Referer",
doc="""The Referer[sic] request-header field allows the client
to specify, for the server's benefit, the address (URI) of the
resource from which the Request-URI was obtained (the
"referrer", although the header field is misspelled).""",
read_only=True,
)
date = header_property(
"Date",
None,
parse_date,
doc="""The Date general-header field represents the date and
time at which the message was originated, having the same
semantics as orig-date in RFC 822.""",
read_only=True,
)
max_forwards = header_property(
"Max-Forwards",
None,
int,
doc="""The Max-Forwards request-header field provides a
mechanism with the TRACE and OPTIONS methods to limit the number
of proxies or gateways that can forward the request to the next
inbound server.""",
read_only=True,
)
def _parse_content_type(self) -> None:
if not hasattr(self, "_parsed_content_type"):
self._parsed_content_type = parse_options_header(
self.headers.get("Content-Type", "")
)
@property
def mimetype(self) -> str:
"""Like :attr:`content_type`, but without parameters (eg, without
charset, type etc.) and always lowercase. For example if the content
type is ``text/HTML; charset=utf-8`` the mimetype would be
``'text/html'``.
"""
self._parse_content_type()
return self._parsed_content_type[0].lower()
@property
def mimetype_params(self) -> t.Dict[str, str]:
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
"""
self._parse_content_type()
return self._parsed_content_type[1]
@cached_property
def pragma(self) -> HeaderSet:
"""The Pragma general-header field is used to include
implementation-specific directives that might apply to any recipient
along the request/response chain. All pragma directives specify
optional behavior from the viewpoint of the protocol; however, some
systems MAY require that behavior be consistent with the directives.
"""
return parse_set_header(self.headers.get("Pragma", ""))
# Accept
@cached_property
def accept_mimetypes(self) -> MIMEAccept:
"""List of mimetypes this client supports as
:class:`~werkzeug.datastructures.MIMEAccept` object.
"""
return parse_accept_header(self.headers.get("Accept"), MIMEAccept)
@cached_property
def accept_charsets(self) -> CharsetAccept:
"""List of charsets this client supports as
:class:`~werkzeug.datastructures.CharsetAccept` object.
"""
return parse_accept_header(self.headers.get("Accept-Charset"), CharsetAccept)
@cached_property
def accept_encodings(self) -> Accept:
"""List of encodings this client accepts. Encodings in a HTTP term
are compression encodings such as gzip. For charsets have a look at
:attr:`accept_charset`.
"""
return parse_accept_header(self.headers.get("Accept-Encoding"))
@cached_property
def accept_languages(self) -> LanguageAccept:
"""List of languages this client accepts as
:class:`~werkzeug.datastructures.LanguageAccept` object.
.. versionchanged 0.5
In previous versions this was a regular
:class:`~werkzeug.datastructures.Accept` object.
"""
return parse_accept_header(self.headers.get("Accept-Language"), LanguageAccept)
# ETag
@cached_property
def cache_control(self) -> RequestCacheControl:
"""A :class:`~werkzeug.datastructures.RequestCacheControl` object
for the incoming cache control headers.
"""
cache_control = self.headers.get("Cache-Control")
return parse_cache_control_header(cache_control, None, RequestCacheControl)
@cached_property
def if_match(self) -> ETags:
"""An object containing all the etags in the `If-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.headers.get("If-Match"))
@cached_property
def if_none_match(self) -> ETags:
"""An object containing all the etags in the `If-None-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.headers.get("If-None-Match"))
@cached_property
def if_modified_since(self) -> t.Optional[datetime]:
"""The parsed `If-Modified-Since` header as datetime object."""
return parse_date(self.headers.get("If-Modified-Since"))
@cached_property
def if_unmodified_since(self) -> t.Optional[datetime]:
"""The parsed `If-Unmodified-Since` header as datetime object."""
return parse_date(self.headers.get("If-Unmodified-Since"))
@cached_property
def if_range(self) -> IfRange:
"""The parsed `If-Range` header.
.. versionadded:: 0.7
:rtype: :class:`~werkzeug.datastructures.IfRange`
"""
return parse_if_range_header(self.headers.get("If-Range"))
@cached_property
def range(self) -> t.Optional[Range]:
"""The parsed `Range` header.
.. versionadded:: 0.7
:rtype: :class:`~werkzeug.datastructures.Range`
"""
return parse_range_header(self.headers.get("Range"))
# User Agent
@cached_property
def user_agent(self) -> UserAgent:
"""The current user agent."""
return UserAgent(self.headers.get("User-Agent", "")) # type: ignore
# Authorization
@cached_property
def authorization(self) -> t.Optional[Authorization]:
"""The `Authorization` object in parsed form."""
return parse_authorization_header(self.headers.get("Authorization"))
# CORS
origin = header_property[str](
"Origin",
doc=(
"The host that the request originated from. Set"
" :attr:`~CORSResponseMixin.access_control_allow_origin` on"
" the response to indicate which origins are allowed."
),
read_only=True,
)
access_control_request_headers = header_property(
"Access-Control-Request-Headers",
load_func=parse_set_header,
doc=(
"Sent with a preflight request to indicate which headers"
" will be sent with the cross origin request. Set"
" :attr:`~CORSResponseMixin.access_control_allow_headers`"
" on the response to indicate which headers are allowed."
),
read_only=True,
)
access_control_request_method = header_property[str](
"Access-Control-Request-Method",
doc=(
"Sent with a preflight request to indicate which method"
" will be used for the cross origin request. Set"
" :attr:`~CORSResponseMixin.access_control_allow_methods`"
" on the response to indicate which methods are allowed."
),
read_only=True,
)
@property
def is_json(self) -> bool:
"""Check if the mimetype indicates JSON data, either
:mimetype:`application/json` or :mimetype:`application/*+json`.
"""
mt = self.mimetype
return (
mt == "application/json"
or mt.startswith("application/")
and mt.endswith("+json")
)
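# Illustrative use of the sans-IO request (hypothetical values; normally a
# WSGI/ASGI wrapper builds this object from the environ rather than by hand):
#
#   req = Request(
#       method="get", path="/hello", query_string=b"name=World",
#       headers=Headers({"Content-Type": "application/json"}), scheme="https",
#       remote_addr="127.0.0.1", root_path="",
#   )
#   assert req.method == "GET" and req.is_secure and req.is_json
#   assert req.args["name"] == "World"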
| 36.479167
| 87
| 0.654927
|
27983012a990866b6ed6358312d4a4cf3317f698
| 1,567
|
py
|
Python
|
models/__init__.py
|
luyuzhe111/simsiam
|
ced608e8e9ff51f431f52d7da10d25e68b516b79
|
[
"MIT"
] | 3
|
2021-05-25T08:35:55.000Z
|
2021-08-02T02:26:06.000Z
|
models/__init__.py
|
luyuzhe111/simsiam
|
ced608e8e9ff51f431f52d7da10d25e68b516b79
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
luyuzhe111/simsiam
|
ced608e8e9ff51f431f52d7da10d25e68b516b79
|
[
"MIT"
] | null | null | null |
from .simsiam import SimSiam
from .byol import BYOL
from .simclr import SimCLR
import torch
import torch.nn as nn
# from .backbones import resnet18_cifar_bn, resnet18_cifar_bn_ws, resnet18_cifar_gn, resnet18_cifar_gn_ws
from .backbones import simsiam_resnet50_common_scratch, simsiam_resnet50_small_scratch, simsiam_resnet50_medium_scratch
from .backbones import simsiam_cifar_resnet50_gn, simsiam_cifar_resnet50_bn
from .backbones import bit_s_resnet50, bit_m_resnet50
def get_backbone(backbone_name, pretrain=False, adapt=False, variant=None, castrate=True):
backbone = eval(f"{backbone_name}(pretrain={pretrain}, adapt={adapt})")
if castrate:
if 'common' in backbone_name:
backbone.output_dim = backbone.fc.in_features
backbone.fc = torch.nn.Identity()
else:
backbone.output_dim = 2048
backbone.head = backbone.head[:3]
backbone.fc = torch.nn.Identity()
return backbone
def get_model(model_cfg):
if model_cfg.name == 'simsiam':
model = SimSiam(get_backbone(model_cfg.backbone, pretrain=model_cfg.pretrain, adapt=model_cfg.adapt))
if model_cfg.proj_layers is not None:
model.projector.set_layers(model_cfg.proj_layers)
elif model_cfg.name == 'byol':
model = BYOL(get_backbone(model_cfg.backbone))
elif model_cfg.name == 'simclr':
model = SimCLR(get_backbone(model_cfg.backbone))
elif model_cfg.name == 'swav':
raise NotImplementedError
else:
raise NotImplementedError
return model
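# Illustrative usage (hypothetical config object; only the fields read above are needed):
#
#   from types import SimpleNamespace
#   cfg = SimpleNamespace(name='simsiam', backbone='simsiam_resnet50_common_scratch',
#                         pretrain=False, adapt=False, proj_layers=None)
#   model = get_model(cfg)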
| 33.340426
| 119
| 0.723676
|
e9f91adee3cf1423431ad35a9778217284d7e13b
| 4,238
|
py
|
Python
|
server/apps/property/forms.py
|
iotile/iotile_cloud
|
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
|
[
"MIT"
] | null | null | null |
server/apps/property/forms.py
|
iotile/iotile_cloud
|
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
|
[
"MIT"
] | null | null | null |
server/apps/property/forms.py
|
iotile/iotile_cloud
|
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
|
[
"MIT"
] | null | null | null |
from django import forms
from django.forms import ModelForm
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, Button, Div, Field, Layout, Submit
from .models import *
class GenericPropertyForm(ModelForm):
class Meta:
model = GenericProperty
fields = ['name', 'str_value', 'type', 'is_system']
def clean(self):
cleaned_data = super(GenericPropertyForm, self).clean()
type = cleaned_data.get("type")
str_value = cleaned_data.get("str_value")
name = cleaned_data.get("name")
if GenericProperty.objects.filter(name=name, target=self.target_slug).exists():
raise forms.ValidationError({'name': ['Property with name "{0}" already exists'.format(name)]})
if type is not None and str_value is not None:
if type == 'bool' and not((str_value == 'True') or (str_value == 'False')):
raise forms.ValidationError({'str_value': ["Value must be either 'True' or 'False'"]})
elif type == 'int':
try:
int(str_value)
except ValueError:
raise forms.ValidationError({'str_value': ["Value must be an Integer"]})
def __init__(self, *args, **kwargs):
self.target_slug = kwargs.pop('target_slug', None)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.layout = Layout(
'name',
Div(
Div('type', css_class='col-sm-4 col-xs-8'),
Div('str_value', css_class='col-sm-8 col-xs-12'),
css_class='row'
),
'is_system'
)
self.helper.add_input(Submit('submit', 'Submit', css_class='btn btn-success submit'))
super(GenericPropertyForm, self).__init__(*args, **kwargs)
class GenericPropertyDeleteConfirmForm(ModelForm):
class Meta:
model = GenericProperty
fields = []
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.add_input(Submit('submit', 'Confirm', css_class='btn btn-danger submit'))
self.helper.layout = Layout(
HTML('<h2>Are you sure you want to delete property {{object.name}} ?</h2><br>')
)
super(GenericPropertyDeleteConfirmForm, self).__init__(*args, **kwargs)
class GenericPropertyOrgEnumForm(ModelForm):
class Meta:
model = GenericPropertyOrgEnum
fields = ['value', 'template']
def clean(self):
cleaned_data = super(GenericPropertyOrgEnumForm, self).clean()
value = cleaned_data.get('value')
template = cleaned_data.get('template')
if GenericPropertyOrgEnum.objects.filter(value=value, template=template).exists():
raise forms.ValidationError({'value': ['value "{0}" already exists'.format(value)]})
def __init__(self, *args, **kwargs):
org = kwargs.pop('org', None)
template = kwargs.pop('template', None)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.layout = Layout(
'value',
'template'
)
self.helper.add_input(Submit('submit', 'Submit', css_class='btn btn-success submit'))
super(GenericPropertyOrgEnumForm, self).__init__(*args, **kwargs)
if template:
self.fields['template'].queryset = GenericPropertyOrgTemplate.objects.filter(org=org, id=template.id)
self.fields['template'].initial = template
else:
self.fields['template'].queryset = GenericPropertyOrgTemplate.objects.filter(org=org)
class GenericPropertyOrgEnumDeleteConfirmForm(ModelForm):
class Meta:
model = GenericProperty
fields = []
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.add_input(Submit('submit', 'Confirm', css_class='btn btn-danger submit'))
self.helper.layout = Layout(
HTML('<h2>Are you sure you want to delete "{{object.value}}"?</h2><br>')
)
super(GenericPropertyOrgEnumDeleteConfirmForm, self).__init__(*args, **kwargs)
| 36.852174
| 113
| 0.618452
|
36f8f1e39bff5d1ee97445371d4b5597a1373601
| 21,472
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_virtual_network_peerings_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_virtual_network_peerings_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_virtual_network_peerings_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkPeeringsOperations(object):
"""VirtualNetworkPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified virtual network peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param virtual_network_peering_name: The name of the virtual network peering.
:type virtual_network_peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
virtual_network_peering_name=virtual_network_peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.VirtualNetworkPeering"
"""Gets the specified virtual network peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param virtual_network_peering_name: The name of the virtual network peering.
:type virtual_network_peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_10_01.models.VirtualNetworkPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
virtual_network_peering_parameters, # type: "models.VirtualNetworkPeering"
**kwargs # type: Any
):
# type: (...) -> "models.VirtualNetworkPeering"
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_network_peering_parameters, 'VirtualNetworkPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
virtual_network_peering_parameters, # type: "models.VirtualNetworkPeering"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.VirtualNetworkPeering"]
"""Creates or updates a peering in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param virtual_network_peering_name: The name of the peering.
:type virtual_network_peering_name: str
:param virtual_network_peering_parameters: Parameters supplied to the create or update virtual
network peering operation.
:type virtual_network_peering_parameters: ~azure.mgmt.network.v2018_10_01.models.VirtualNetworkPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkPeering or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_10_01.models.VirtualNetworkPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
virtual_network_peering_name=virtual_network_peering_name,
virtual_network_peering_parameters=virtual_network_peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.VirtualNetworkPeeringListResult"]
"""Gets all virtual network peerings in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkPeeringListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_10_01.models.VirtualNetworkPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings'} # type: ignore
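# --------------------------------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the generated file): callers normally reach the operation
# group above through NetworkManagementClient instead of instantiating it directly. The credential,
# subscription id, resource names, and peering body below are illustrative placeholders.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    poller = client.virtual_network_peerings.begin_create_or_update(
        resource_group_name="example-rg",
        virtual_network_name="vnet-a",
        virtual_network_peering_name="vnet-a-to-vnet-b",
        virtual_network_peering_parameters={
            "remote_virtual_network": {"id": "<full resource id of vnet-b>"},
            "allow_virtual_network_access": True,
        },
    )
    peering = poller.result()  # blocks until the long-running PUT finishes
    for p in client.virtual_network_peerings.list("example-rg", "vnet-a"):
        print(p.name, p.peering_state)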
| 50.403756
| 250
| 0.676556
|
2dc9864a20bf954815eb7aeb0defeb1c63f5d8a2
| 1,152
|
py
|
Python
|
user/forms.py
|
Akshat-Kumar-0610/SULearn
|
2c5ba8c4d911db7417f30fdad358f81628e3f950
|
[
"MIT"
] | 1
|
2021-08-15T15:38:37.000Z
|
2021-08-15T15:38:37.000Z
|
user/forms.py
|
Akshat-Kumar-0610/SULearn
|
2c5ba8c4d911db7417f30fdad358f81628e3f950
|
[
"MIT"
] | null | null | null |
user/forms.py
|
Akshat-Kumar-0610/SULearn
|
2c5ba8c4d911db7417f30fdad358f81628e3f950
|
[
"MIT"
] | null | null | null |
from .models import CreatorProfile,ViewerProfile
from django import forms
from django.contrib.auth.models import User
class CreatorForm(forms.ModelForm):
edu = forms.CharField(required=True,label='Education Qualification')
class Meta:
model = CreatorProfile
exclude = ['user', 'doj','rating']
class ViewerForm(forms.ModelForm):
class Meta:
model = ViewerProfile
exclude = ['user', 'doj']
'''def signup(self, request, user):
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
profile, created = UserProfile.objects.get_or_create(
user=user, defaults={
'locality': self.cleaned_data['locality'],
'voivodship': self.cleaned_data['voivodship'],
'postcode': self.cleaned_data['postcode'],
'street': self.cleaned_data['street'],
'building': self.cleaned_data['building'],
'premises': self.cleaned_data['premises'],
})
if created: # This prevents saving if profile already exist
profile.save()'''
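# Hypothetical usage sketch (not part of the original file): how a view might bind CreatorForm.
# 'request' is an assumed Django HttpRequest; because 'user' is excluded from the form fields,
# it has to be attached manually before the profile is saved.
def _creator_profile_example(request):
    form = CreatorForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        profile = form.save(commit=False)   # build the CreatorProfile without saving yet
        profile.user = request.user         # supply the excluded 'user' relation
        profile.save()
    return form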
| 38.4
| 72
| 0.624132
|
e977ce8e51a3e111c6d4ecd14d91580993492975
| 1,021
|
py
|
Python
|
digideep/environment/dmc2gym/__init__.py
|
sharif1093/digideep
|
e42f10a58cec6cab70ac2be5ce3af6102caefd81
|
[
"BSD-2-Clause"
] | 11
|
2019-03-09T23:54:02.000Z
|
2020-09-05T20:47:55.000Z
|
log_sessions/cartpole_8_5_05_1/modules/digideep/environment/dmc2gym/__init__.py
|
godnpeter/DMC_Clustering_PICA
|
1b3e14dd4034f3941af1caa06c1d4b6f9d606408
|
[
"BSD-2-Clause"
] | 1
|
2021-09-30T01:15:57.000Z
|
2021-09-30T01:15:57.000Z
|
digideep/environment/dmc2gym/__init__.py
|
sharif1093/digideep
|
e42f10a58cec6cab70ac2be5ce3af6102caefd81
|
[
"BSD-2-Clause"
] | null | null | null |
from dm_control import suite
from gym.envs.registration import register
from .registration import EnvCreatorSuite
# Register all benchmarks already in the suite.
prefix = "DMBench"
for domain_name, task_name in suite.BENCHMARKING:
gym_id = '{}{}-v0'.format(domain_name.capitalize(), task_name.capitalize())
gym_id = prefix + gym_id
register(
id=gym_id,
entry_point="digideep.environment.dmc2gym.wrapper:DmControlWrapper",
kwargs={'dmcenv_creator':EnvCreatorSuite(domain_name, task_name, task_kwargs=None, environment_kwargs=None, visualize_reward=True),
'flat_observation':True, # Should be True
'observation_key':"agent"}
)
## Arguments of OpenAI Gym "register" function:
## - id, entry_point=None,
## - trials=100,
## - reward_threshold=None,
## - local_only=False,
## - kwargs=None,
## - nondeterministic=False,
## - tags=None,
## - max_episode_steps=None,
## - max_episode_seconds=None,
## - timestep_limit=None
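# Hypothetical usage sketch (not part of the original file): after the registration loop above has
# run at import time, any suite.BENCHMARKING task is reachable through the normal Gym registry.
# "cartpole"/"swingup" is one such pair, so the id below should resolve to DmControlWrapper.
if __name__ == "__main__":
    import gym
    env = gym.make(prefix + "CartpoleSwingup-v0")
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    print(type(env), reward, done)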
| 32.935484
| 139
| 0.687561
|
df6c0326e1586b34b96461b445a262a243f8f838
| 10,315
|
py
|
Python
|
kubric/utils.py
|
ritmps/kubric
|
ef517ccdedeb304a7a9e77ba109552601a2ae98c
|
[
"Apache-2.0"
] | null | null | null |
kubric/utils.py
|
ritmps/kubric
|
ef517ccdedeb304a7a9e77ba109552601a2ae98c
|
[
"Apache-2.0"
] | null | null | null |
kubric/utils.py
|
ritmps/kubric
|
ef517ccdedeb304a7a9e77ba109552601a2ae98c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 The Kubric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import copy
import logging
import multiprocessing
import multiprocessing.pool
import pathlib
import pprint
import shutil
import sys
import tempfile
from etils import epath
import numpy as np
from kubric import core
from kubric import file_io
logger = logging.getLogger(__name__)
# --------------------------------------------------------------------------------------------------
# Kubric argparser
# --------------------------------------------------------------------------------------------------
class ArgumentParser(argparse.ArgumentParser):
"""An argumentparser with default options, and compatibility with the Blender REPL."""
def __init__(self, *args, **kwargs):
argparse.ArgumentParser.__init__(self, *args, **kwargs)
# --- default arguments for kubric
self.add_argument("--frame_rate", type=int, default=24,
help="number of rendered frames per second (default: 24)")
self.add_argument("--step_rate", type=int, default=240,
help="number of simulation steps per second. "
"Has to be an integer multiple of --frame_rate (default: 240)")
self.add_argument("--frame_start", type=int, default=1,
help="index of the first frame to render. "
"Note that simulation always starts at frame 0 (default: 1)")
self.add_argument("--frame_end", type=int, default=24,
help="index of the last frame to render (default: 24)")
self.add_argument("--logging_level", type=str, default="INFO")
self.add_argument("--seed", type=int, default=None,
help="(int) seed for random sampling in the worker (default: None)")
self.add_argument("--resolution", type=str, default="512x512",
help="height and width of rendered image/video in pixels"
"Can be given as single number for square images or "
"in the form {height}x{width}. (default: 512x512)")
self.add_argument("--scratch_dir", type=str, default=tempfile.mkdtemp(),
help="local directory for storing intermediate files such as "
"downloaded assets, raw output of renderer, ... (default: temp dir)")
self.add_argument("--job-dir", type=str, default="output",
help="target directory for storing the worker output (default: ./output)")
def parse_args(self, args=None, namespace=None):
# --- parse argument in a way compatible with blender REPL
if args is not None and "--" in sys.argv:
args = sys.argv[sys.argv.index("--")+1:]
flags = super().parse_args(args=args, namespace=namespace)
else:
flags = super().parse_args(args=args)
return flags
def set_defaults(self, **kwargs):
"""Same as argparse.ArgumentParser.set_defaults() but with safety checks."""
valid_names = [action.dest for action in self._actions]
for key in kwargs:
assert key in valid_names, f"Specifying default for an undefined argument '{key}'"
super().set_defaults(**kwargs)
# --------------------------------------------------------------------------------------------------
# Helpers for workers
# --------------------------------------------------------------------------------------------------
def setup(flags):
setup_logging(flags.logging_level)
log_my_flags(flags)
seed = flags.seed if flags.seed else np.random.randint(0, 2147483647)
rng = np.random.RandomState(seed=seed)
scene = core.scene.Scene.from_flags(flags)
scene.metadata["seed"] = seed
scratch_dir, output_dir = setup_directories(flags)
return scene, rng, output_dir, scratch_dir
def setup_logging(logging_level):
logging.basicConfig(level=logging_level)
def log_my_flags(flags):
flags_string = pprint.pformat(vars(flags), indent=2, width=100)
logger.info(flags_string)
def done():
logging.info("Done!")
from kubric import assets # pylint: disable=import-outside-toplevel
assets.ClosableResource.close_all()
# -- report generated_images to hyperparameter tuner
import hypertune # pylint: disable=import-outside-toplevel
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag="answer",
metric_value=42)
# --------------------------------------------------------------------------------------------------
# Collect metadata
# --------------------------------------------------------------------------------------------------
def get_scene_metadata(scene, **kwargs):
metadata = {
"resolution": scene.resolution,
"frame_rate": scene.frame_rate,
"step_rate": scene.step_rate,
"gravity": scene.gravity,
"num_frames": scene.frame_end - scene.frame_start + 1,
}
metadata.update(scene.metadata)
metadata.update(kwargs)
return metadata
def get_camera_info(camera, **kwargs):
camera_info = {
"focal_length": camera.focal_length,
"sensor_width": camera.sensor_width,
"field_of_view": camera.field_of_view,
"positions": camera.get_values_over_time("position"),
"quaternions": camera.get_values_over_time("quaternion"),
"K": camera.intrinsics,
"R": camera.matrix_world,
}
camera_info.update(kwargs)
return camera_info
def get_instance_info(scene, assets_subset=None):
instance_info = []
# extract the framewise position, quaternion, and velocity for each object
assets_subset = scene.foreground_assets if assets_subset is None else assets_subset
for instance in assets_subset:
info = copy.copy(instance.metadata)
if hasattr(instance, "asset_id"):
info["asset_id"] = instance.asset_id
info["positions"] = instance.get_values_over_time("position")
info["quaternions"] = instance.get_values_over_time("quaternion")
info["velocities"] = instance.get_values_over_time("velocity")
info["angular_velocities"] = instance.get_values_over_time("angular_velocity")
info["mass"] = instance.mass
info["friction"] = instance.friction
info["restitution"] = instance.restitution
frame_range = range(scene.frame_start, scene.frame_end+1)
info["image_positions"] = np.array([scene.camera.project_point(point3d=p, frame=f)[:2]
for f, p in zip(frame_range, info["positions"])],
dtype=np.float32)
bboxes3d = []
for frame in frame_range:
with instance.at_frame(frame):
bboxes3d.append(instance.bbox_3d)
info["bboxes_3d"] = np.stack(bboxes3d)
instance_info.append(info)
return instance_info
def process_collisions(collisions, scene, assets_subset=None):
assets_subset = scene.foreground_assets if assets_subset is None else assets_subset
def get_obj_index(obj):
try:
return assets_subset.index(obj)
except ValueError:
return -1
return [{
"instances": (get_obj_index(c["instances"][0]), get_obj_index(c["instances"][1])),
"contact_normal": c["contact_normal"],
"frame": c["frame"],
"force": c["force"],
"position": c["position"],
"image_position": scene.camera.project_point(c["position"])[:2],
} for c in collisions]
# --------------------------------------------------------------------------------------------------
# File IO helpers
# --------------------------------------------------------------------------------------------------
def setup_directories(flags):
assert flags.scratch_dir is not None
scratch_dir = file_io.as_path(flags.scratch_dir)
if scratch_dir.exists():
logging.info("Deleting content of old scratch-dir: %s", scratch_dir)
shutil.rmtree(scratch_dir)
scratch_dir.mkdir(parents=True)
logging.info("Using scratch directory: %s", scratch_dir)
output_dir = epath.Path(flags.job_dir)
output_dir.mkdir(parents=True, exist_ok=True)
logging.info("Using output directory: %s", output_dir)
return scratch_dir, output_dir
def is_local_path(path):
""" Determine if a given path is local or remote. """
first_part = pathlib.Path(path).parts[0]
if first_part.endswith(":") and len(first_part) > 2:
return False
else:
return True
# --------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------
def next_global_count(name, reset=False):
"""A global counter to create UIDs.
Return the total number of times (0-indexed) the function has been called with the given name.
Used to create the increasing UID counts for each class (e.g. "Sphere.007").
When passing reset=True, then all counts are reset.
"""
if reset or not hasattr(next_global_count, "counter"):
next_global_count.counter = collections.defaultdict(int)
next_global_count.lock = multiprocessing.Lock()
with next_global_count.lock:
counter = next_global_count.counter[name]
next_global_count.counter[name] += 1
return counter
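# Hypothetical usage sketch (not part of the original file): the per-name counter above is what
# produces numeric suffixes such as "Sphere.007"; counts are 0-indexed and independent per name.
if __name__ == "__main__":
    assert next_global_count("Sphere") == 0
    assert next_global_count("Sphere") == 1
    assert next_global_count("Cube") == 0                 # each name keeps its own counter
    assert next_global_count("Sphere", reset=True) == 0   # reset wipes all counters, then counts this call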
| 39.07197
| 100
| 0.623752
|
ea65ae6af02236b4364b33528df308b097246164
| 8,384
|
py
|
Python
|
manila/api/v1/security_service.py
|
vponomaryov/manila
|
ffe135a5b35a0964179f0dc148d569037f26a929
|
[
"Apache-2.0"
] | null | null | null |
manila/api/v1/security_service.py
|
vponomaryov/manila
|
ffe135a5b35a0964179f0dc148d569037f26a929
|
[
"Apache-2.0"
] | null | null | null |
manila/api/v1/security_service.py
|
vponomaryov/manila
|
ffe135a5b35a0964179f0dc148d569037f26a929
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security service api."""
from oslo_log import log
import webob
from webob import exc
from manila.api import common
from manila.api.openstack import wsgi
from manila.api.views import security_service as security_service_views
from manila.common import constants
from manila import db
from manila import exception
from manila.i18n import _, _LI
from manila import policy
RESOURCE_NAME = 'security_service'
LOG = log.getLogger(__name__)
class SecurityServiceController(wsgi.Controller):
"""The Shares API controller for the OpenStack API."""
_view_builder_class = security_service_views.ViewBuilder
def show(self, req, id):
"""Return data about the given security service."""
context = req.environ['manila.context']
try:
security_service = db.security_service_get(context, id)
policy.check_policy(context, RESOURCE_NAME, 'show',
security_service)
except exception.NotFound:
raise exc.HTTPNotFound()
return self._view_builder.detail(req, security_service)
def delete(self, req, id):
"""Delete a security service."""
context = req.environ['manila.context']
LOG.info(_LI("Delete security service with id: %s"),
id, context=context)
try:
security_service = db.security_service_get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
share_nets = db.share_network_get_all_by_security_service(
context, id)
if share_nets:
# Cannot delete security service
# if it is assigned to share networks
raise exc.HTTPForbidden()
policy.check_policy(context, RESOURCE_NAME,
'delete', security_service)
db.security_service_delete(context, id)
return webob.Response(status_int=202)
def index(self, req):
"""Returns a summary list of security services."""
policy.check_policy(req.environ['manila.context'], RESOURCE_NAME,
'index')
return self._get_security_services(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of security services."""
policy.check_policy(req.environ['manila.context'], RESOURCE_NAME,
'detail')
return self._get_security_services(req, is_detail=True)
def _get_security_services(self, req, is_detail):
"""Returns a transformed list of security services.
The list gets transformed through view builder.
"""
context = req.environ['manila.context']
search_opts = {}
search_opts.update(req.GET)
# NOTE(vponomaryov): remove 'status' from search opts
# since it was removed from security service model.
search_opts.pop('status', None)
if 'share_network_id' in search_opts:
share_nw = db.share_network_get(context,
search_opts['share_network_id'])
security_services = share_nw['security_services']
del search_opts['share_network_id']
else:
if 'all_tenants' in search_opts:
policy.check_policy(context, RESOURCE_NAME,
'get_all_security_services')
security_services = db.security_service_get_all(context)
else:
security_services = db.security_service_get_all_by_project(
context, context.project_id)
search_opts.pop('all_tenants', None)
common.remove_invalid_options(
context,
search_opts,
self._get_security_services_search_options())
if search_opts:
results = []
not_found = object()
for ss in security_services:
if all(ss.get(opt, not_found) == value for opt, value in
search_opts.items()):
results.append(ss)
security_services = results
limited_list = common.limited(security_services, req)
if is_detail:
security_services = self._view_builder.detail_list(
req, limited_list)
for ss in security_services['security_services']:
share_networks = db.share_network_get_all_by_security_service(
context,
ss['id'])
ss['share_networks'] = [sn['id'] for sn in share_networks]
else:
security_services = self._view_builder.summary_list(
req, limited_list)
return security_services
def _get_security_services_search_options(self):
return ('name', 'id', 'type', 'user',
'server', 'dns_ip', 'domain', )
def _share_servers_dependent_on_sn_exist(self, context,
security_service_id):
share_networks = db.share_network_get_all_by_security_service(
context, security_service_id)
for sn in share_networks:
if sn['share_servers']:
return True
return False
def update(self, req, id, body):
"""Update a security service."""
context = req.environ['manila.context']
if not body or 'security_service' not in body:
raise exc.HTTPUnprocessableEntity()
security_service_data = body['security_service']
valid_update_keys = (
'description',
'name'
)
try:
security_service = db.security_service_get(context, id)
policy.check_policy(context, RESOURCE_NAME, 'update',
security_service)
except exception.NotFound:
raise exc.HTTPNotFound()
if self._share_servers_dependent_on_sn_exist(context, id):
for item in security_service_data:
if item not in valid_update_keys:
msg = _("Cannot update security service %s. It is "
"attached to share network with share server "
"associated. Only 'name' and 'description' "
"fields are available for update.") % id
raise exc.HTTPForbidden(explanation=msg)
policy.check_policy(context, RESOURCE_NAME, 'update', security_service)
security_service = db.security_service_update(
context, id, security_service_data)
return self._view_builder.detail(req, security_service)
def create(self, req, body):
"""Creates a new security service."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'create')
if not self.is_valid_body(body, 'security_service'):
raise exc.HTTPUnprocessableEntity()
security_service_args = body['security_service']
security_srv_type = security_service_args.get('type')
allowed_types = constants.SECURITY_SERVICES_ALLOWED_TYPES
if security_srv_type not in allowed_types:
raise exception.InvalidInput(
reason=(_("Invalid type %(type)s specified for security "
"service. Valid types are %(types)s") %
{'type': security_srv_type,
'types': ','.join(allowed_types)}))
security_service_args['project_id'] = context.project_id
security_service = db.security_service_create(
context, security_service_args)
return self._view_builder.detail(req, security_service)
def create_resource():
return wsgi.Resource(SecurityServiceController())
| 38.635945
| 79
| 0.621541
|
46b5a0d3678baa1e4f95ae512e2ba5f504f3c183
| 2,299
|
py
|
Python
|
bites/bite122.py
|
ChidinmaKO/Chobe-bitesofpy
|
2f933e6c8877a37d1ce7ef54ea22169fc67417d3
|
[
"MIT"
] | null | null | null |
bites/bite122.py
|
ChidinmaKO/Chobe-bitesofpy
|
2f933e6c8877a37d1ce7ef54ea22169fc67417d3
|
[
"MIT"
] | null | null | null |
bites/bite122.py
|
ChidinmaKO/Chobe-bitesofpy
|
2f933e6c8877a37d1ce7ef54ea22169fc67417d3
|
[
"MIT"
] | 1
|
2019-07-16T19:12:52.000Z
|
2019-07-16T19:12:52.000Z
|
def is_anagram(word1, word2):
"""Receives two words and returns True/False (boolean) if word2 is
an anagram of word1, ignore case and spacing.
About anagrams: https://en.wikipedia.org/wiki/Anagram"""
# short way
# word1 = word1.strip().replace(' ', '').lower()
# word2 = word2.strip().replace(' ', '').lower()
# return sorted(word1) == sorted(word2)
# longer way
word1 = word1.strip().replace(' ', '').lower()
word2 = word2.strip().replace(' ', '').lower()
if len(word1) != len(word2):
return False
count = {}
for letter in word1:
if letter in count:
count[letter] += 1
else:
count[letter] = 1
    for letter in word2:
        if letter in count:
            count[letter] -= 1
        else:
            # letter never seen in word1; leaving a non-zero entry makes the final check fail
            count[letter] = 1
for c in count:
if count[c] != 0:
return False
return True
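# Hypothetical quick check (not part of the original file): a couple of direct calls that mirror
# the parametrized pytest cases below.
assert is_anagram("rail safety", "fairy tales")
assert not is_anagram("Madam Curie", "Radium come")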
# tests
# https://en.wikipedia.org/wiki/Anagram
# Anagrams may be created as a commentary on the subject.
# They may be a synonym or antonym of their subject,
# a parody, a criticism or satire.
import pytest
# from anagram import is_anagram
@pytest.mark.parametrize("word1, word2", [
("rail safety", "fairy tales"),
("roast beef", "eat for BSE"),
# An anagram which means the opposite of its subject is
# called an "antigram". For example:
("restful", "fluster"),
("funeral", "real fun"),
("adultery", "true lady"),
("customers", "store scum"),
("forty five", "over fifty"),
# They can sometimes change from a proper noun or personal
# name into an appropriate sentence:
("William Shakespeare", "I am a weakish speller"),
("Madam Curie", "Radium came"),
])
def test_is_anagram(word1, word2):
assert is_anagram(word1, word2)
@pytest.mark.parametrize("word1, word2", [
("rail safety", "fairy fun"),
("roast beef", "eat for ME"),
("restful", "fluester"),
("funeral", "real funny"),
("adultery", "true ladie"),
("customers", "store scam"),
("forty five", "over fifty1"),
("William Shakespeare", "I am a strong speller"),
("Madam Curie", "Radium come"),
])
def test_is_not_anagram(word1, word2):
assert not is_anagram(word1, word2)
| 29.101266
| 70
| 0.589387
|